/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <bus_driver.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev.h"
#include "rte_ethdev_trace_fp.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "ethdev_trace.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);

	rte_eth_trace_iterator_init(devargs_str);

	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not a pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in the middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL) {
			uint16_t id = eth_dev_to_id(iter->class_device);

			rte_eth_trace_iterator_next(iter, id);

			return id; /* match */
		}
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}
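
/*
 * Illustrative sketch (not part of the ethdev implementation): how an
 * application is expected to drive the iterator trio above. The function
 * name, the devargs string, and the ETHDEV_DOC_EXAMPLES guard macro are
 * hypothetical (the guard keeps the example out of any real build); the
 * loop shape matches RTE_ETH_FOREACH_MATCHING_DEV in rte_ethdev.h.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static void
example_list_matching_ports(void)
{
	struct rte_dev_iterator iterator;
	uint16_t port_id;

	if (rte_eth_iterator_init(&iterator,
			"class=eth,mac=00:11:22:33:44:55") != 0)
		return;
	for (port_id = rte_eth_iterator_next(&iterator);
	     port_id != RTE_MAX_ETHPORTS;
	     port_id = rte_eth_iterator_next(&iterator))
		printf("matched port %u\n", port_id);
	/*
	 * No explicit cleanup needed here: rte_eth_iterator_next() calls
	 * rte_eth_iterator_cleanup() itself once iteration is exhausted.
	 * Cleanup is only required when the loop exits early.
	 */
}
#endif /* ETHDEV_DOC_EXAMPLES */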

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */

	rte_eth_trace_iterator_cleanup(iter);

	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	rte_eth_trace_find_next(port_id);

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_of(port_id, parent);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	uint16_t ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	ret = rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);

	rte_eth_trace_find_next_sibling(port_id, ref_port_id, ret);

	return ret;
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	int is_valid;

	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		is_valid = 0;
	else
		is_valid = 1;

	rte_ethdev_trace_is_valid_port(port_id, is_valid);

	return is_valid;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_owned_by(port_id, owner_id);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_new(*owner_id);

	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* cannot truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_set(port_id, owner, ret);

	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_unset(port_id, owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
					sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_delete(owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_get(port_id, owner);

	return 0;
}

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	int socket_id = SOCKET_ID_ANY;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_errno = EINVAL;
	} else {
		socket_id = rte_eth_devices[port_id].data->numa_node;
		if (socket_id == SOCKET_ID_ANY)
			rte_errno = 0;
	}

	rte_ethdev_trace_socket_id(port_id, socket_id);

	return socket_id;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	void *ctx;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	ctx = rte_eth_devices[port_id].security_ctx;

	rte_ethdev_trace_get_sec_ctx(port_id, ctx);

	return ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	rte_ethdev_trace_count_avail(count);

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	rte_ethdev_trace_count_total(count);

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);

	rte_ethdev_trace_get_name_by_port(port_id, name);

	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;

			rte_ethdev_trace_get_port_by_name(name, *port_id);

			return 0;
		}

	return -ENODEV;
}

int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Rx queue_id=%u of device with port_id=%u\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been set up\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Tx queue_id=%u of device with port_id=%u\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been set up\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));

	rte_ethdev_trace_rx_queue_start(port_id, rx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

	rte_ethdev_trace_rx_queue_stop(port_id, rx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));

	rte_ethdev_trace_tx_queue_start(port_id, tx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

	rte_ethdev_trace_tx_queue_stop(port_id, tx_queue_id, ret);

	return ret;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	uint32_t ret;

	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		ret = duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
		break;
	case RTE_ETH_SPEED_NUM_100M:
		ret = duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
		break;
	case RTE_ETH_SPEED_NUM_1G:
		ret = RTE_ETH_LINK_SPEED_1G;
		break;
	case RTE_ETH_SPEED_NUM_2_5G:
		ret = RTE_ETH_LINK_SPEED_2_5G;
		break;
	case RTE_ETH_SPEED_NUM_5G:
		ret = RTE_ETH_LINK_SPEED_5G;
		break;
	case RTE_ETH_SPEED_NUM_10G:
		ret = RTE_ETH_LINK_SPEED_10G;
		break;
	case RTE_ETH_SPEED_NUM_20G:
		ret = RTE_ETH_LINK_SPEED_20G;
		break;
	case RTE_ETH_SPEED_NUM_25G:
		ret = RTE_ETH_LINK_SPEED_25G;
		break;
	case RTE_ETH_SPEED_NUM_40G:
		ret = RTE_ETH_LINK_SPEED_40G;
		break;
	case RTE_ETH_SPEED_NUM_50G:
		ret = RTE_ETH_LINK_SPEED_50G;
		break;
	case RTE_ETH_SPEED_NUM_56G:
		ret = RTE_ETH_LINK_SPEED_56G;
		break;
	case RTE_ETH_SPEED_NUM_100G:
		ret = RTE_ETH_LINK_SPEED_100G;
		break;
	case RTE_ETH_SPEED_NUM_200G:
		ret = RTE_ETH_LINK_SPEED_200G;
		break;
	default:
		ret = 0;
	}

	rte_eth_trace_speed_bitflag(speed, duplex, ret);

	return ret;
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_rx_offload_name(offload, name);

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_tx_offload_name(offload, name);

	return name;
}
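
/*
 * Illustrative sketch (not part of the ethdev implementation): decoding a
 * whole offload bitmask with the name lookups above, one bit at a time,
 * using the same RTE_BIT64/__builtin_ctzll walk as eth_dev_validate_offloads()
 * below. The function name and the ETHDEV_DOC_EXAMPLES guard macro are
 * hypothetical; the guard keeps the example out of any real build.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static void
example_print_rx_offloads(uint64_t offloads)
{
	while (offloads != 0) {
		/* isolate the lowest set bit, then clear it */
		uint64_t single = RTE_BIT64(__builtin_ctzll(offloads));

		printf("%s\n", rte_eth_dev_rx_offload_name(single));
		offloads &= ~single;
	}
}
#endif /* ETHDEV_DOC_EXAMPLES */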

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_capability_name(capability, name);

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				" %u != %u is not allowed\n",
				port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"> max allowed value %u\n", port_id, config_size,
			dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"< min allowed value %u\n", port_id, config_size,
			(unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure(), to avoid any unanticipated behaviour.
	 * It is set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, so copy it before the
	 * dev_info call.
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" don't match Rx offload "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" don't match Tx offload "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		    struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Restore the MAC now if the device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		if (*dev->dev_ops->link_update == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret == 0)
		dev->data->dev_started = 0;
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}
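
/*
 * Illustrative sketch (not part of the ethdev implementation): the ordering
 * that rte_eth_dev_configure() and rte_eth_dev_start() above enforce. A port
 * is configured first, queues are set up while it is stopped, and only then
 * is it started. Error handling is condensed; the function name and the
 * ETHDEV_DOC_EXAMPLES guard macro are hypothetical, so the example is never
 * compiled into the library.
 */
#ifdef ETHDEV_DOC_EXAMPLES
static int
example_port_bringup(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;
	/* nb_desc == 0 selects the driver default, as coded below */
	ret = rte_eth_rx_queue_setup(port_id, 0, 0, SOCKET_ID_ANY,
				     NULL, mb_pool);
	if (ret != 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 0, SOCKET_ID_ANY, NULL);
	if (ret != 0)
		return ret;
	return rte_eth_dev_start(port_id);
}
#endif /* ETHDEV_DOC_EXAMPLES */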

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_up == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));

	rte_ethdev_trace_set_link_up(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_down == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));

	rte_ethdev_trace_set_link_down(port_id, ret);

	return ret;
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*
	 * A secondary process needs to close the device to release its
	 * process-private resources, but it should not be obliged to wait
	 * for the device to stop before closing the ethdev.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
			dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_reset == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = eth_err(port_id, dev->dev_ops->dev_reset(dev));

	rte_ethdev_trace_reset(port_id, ret);

	return ret;
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	if (*dev->dev_ops->is_removed == NULL)
		return 0;

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	rte_ethdev_trace_is_removed(port_id, ret);

	return ret;
}

static int
rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
			 uint16_t min_length)
{
	uint16_t data_room_size;

	/*
	 * Check the size of the mbuf data buffer, this value
	 * must be provided in the private data of the memory pool.
	 * First check that the memory pool(s) has valid private data.
	 */
	if (mp->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
			mp->name, mp->private_data_size,
			(unsigned int)
			sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	data_room_size = rte_pktmbuf_data_room_size(mp);
	if (data_room_size < offset + min_length) {
		RTE_ETHDEV_LOG(ERR,
			"%s mbuf_data_room_size %u < %u (%u + %u)\n",
			mp->name, data_room_size,
			offset + min_length, offset, min_length);
		return -EINVAL;
	}
	return 0;
}

static int
eth_dev_buffer_split_get_supported_hdrs_helper(uint16_t port_id, uint32_t **ptypes)
{
	int cnt;

	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0);
	if (cnt <= 0)
		return cnt;

	*ptypes = malloc(sizeof(uint32_t) * cnt);
	if (*ptypes == NULL)
		return -ENOMEM;

	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, *ptypes, cnt);
	if (cnt <= 0) {
		free(*ptypes);
		*ptypes = NULL;
	}
	return cnt;
}

static int
rte_eth_rx_queue_check_split(uint16_t port_id,
			const struct rte_eth_rxseg_split *rx_seg,
			uint16_t n_seg, uint32_t *mbp_buf_size,
			const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;
	int ret = 0;
	int ptype_cnt;
	uint32_t *ptypes;
	uint32_t prev_proto_hdrs = RTE_PTYPE_UNKNOWN;
	int i;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			"Requested Rx segments %u exceed supported %u\n",
			n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;

	ptypes = NULL;
	ptype_cnt = eth_dev_buffer_split_get_supported_hdrs_helper(port_id, &ptypes);

	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;
		uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			ret = -EINVAL;
			goto out;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			ret = -ENOTSUP;
			goto out;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				ret = -ENOTSUP;
				goto out;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					offset,
					seg_capa->offset_align_log2);
				ret = -EINVAL;
				goto out;
			}
		}

		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		if (proto_hdr != 0) {
			/* Split based on protocol headers. */
			if (length != 0) {
				RTE_ETHDEV_LOG(ERR,
					"Do not set length split and protocol split within a segment\n");
				ret = -EINVAL;
				goto out;
			}
			if ((proto_hdr & prev_proto_hdrs) != 0) {
				RTE_ETHDEV_LOG(ERR,
					"Repeat with previous protocol headers or proto-split after length-based split\n");
				ret = -EINVAL;
				goto out;
			}
			if (ptype_cnt <= 0) {
				RTE_ETHDEV_LOG(ERR,
					"Port %u failed to get supported buffer split header protocols\n",
					port_id);
				ret = -ENOTSUP;
				goto out;
			}
			for (i = 0; i < ptype_cnt; i++) {
				if ((prev_proto_hdrs | proto_hdr) == ptypes[i])
					break;
			}
			if (i == ptype_cnt) {
				RTE_ETHDEV_LOG(ERR,
					"Requested Rx split header protocols 0x%x are not supported.\n",
					proto_hdr);
				ret = -EINVAL;
				goto out;
			}
			prev_proto_hdrs |= proto_hdr;
		} else {
			/* Split at fixed length. */
			length = length != 0 ? length : *mbp_buf_size;
			prev_proto_hdrs = RTE_PTYPE_ALL_MASK;
		}

		ret = rte_eth_check_rx_mempool(mpl, offset, length);
		if (ret != 0)
			goto out;
	}
out:
	free(ptypes);
	return ret;
}

static int
rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools,
			uint16_t n_mempools, uint32_t *min_buf_size,
			const struct rte_eth_dev_info *dev_info)
{
	uint16_t pool_idx;
	int ret;

	if (n_mempools > dev_info->max_rx_mempools) {
		RTE_ETHDEV_LOG(ERR,
			"Too many Rx mempools %u vs maximum %u\n",
			n_mempools, dev_info->max_rx_mempools);
		return -EINVAL;
	}

	for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) {
		struct rte_mempool *mp = rx_mempools[pool_idx];

		if (mp == NULL) {
			RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n");
			return -EINVAL;
		}

		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
					       dev_info->min_rx_bufsize);
		if (ret != 0)
			return ret;

		*min_buf_size = RTE_MIN(*min_buf_size,
					rte_pktmbuf_data_room_size(mp));
	}

	return 0;
}

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint64_t rx_offloads;
	uint32_t mbp_buf_size = UINT32_MAX;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->rx_queue_setup == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rx_offloads = dev->data->dev_conf.rxmode.offloads;
	if (rx_conf != NULL)
		rx_offloads |= rx_conf->offloads;

	/* Ensure that we have one and only one source of Rx buffers */
	if ((mp != NULL) +
	    (rx_conf != NULL && rx_conf->rx_nseg > 0) +
	    (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) {
		RTE_ETHDEV_LOG(ERR,
			"Ambiguous Rx mempools configuration\n");
		return -EINVAL;
	}

	if (mp != NULL) {
		/* Single pool configuration check. */
		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
					       dev_info.min_rx_bufsize);
		if (ret != 0)
			return ret;

		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
	} else if (rx_conf != NULL && rx_conf->rx_nseg > 0) {
		const struct rte_eth_rxseg_split *rx_seg;
		uint16_t n_seg;

		/* Extended multi-segment configuration check. */
		if (rx_conf->rx_seg == NULL) {
			RTE_ETHDEV_LOG(ERR,
				"Memory pool is null and no multi-segment configuration provided\n");
			return -EINVAL;
		}

		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
		n_seg = rx_conf->rx_nseg;

		if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
			ret = rte_eth_rx_queue_check_split(port_id, rx_seg, n_seg,
							   &mbp_buf_size,
							   &dev_info);
			if (ret != 0)
				return ret;
		} else {
			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
			return -EINVAL;
		}
	} else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) {
		/* Extended multi-pool configuration check. */
		if (rx_conf->rx_mempools == NULL) {
			RTE_ETHDEV_LOG(ERR, "Memory pools array is null\n");
			return -EINVAL;
		}

		ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools,
						      rx_conf->rx_nmempool,
						      &mbp_buf_size,
						      &dev_info);
		if (ret != 0)
			return ret;
	} else {
		RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n");
		return -EINVAL;
	}

	/* Use default specified by driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
	    nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
	    nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
		(dev->data->rx_queue_state[rx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_rxq_release(dev, rx_queue_id);

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;

	/*
	 * Offloads newly added for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be of a per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be newly enabled for any queue if it hasn't been
	 * enabled in rte_eth_dev_configure().
2123 */ 2124 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2125 local_conf.offloads) { 2126 RTE_ETHDEV_LOG(ERR, 2127 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2128 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2129 port_id, rx_queue_id, local_conf.offloads, 2130 dev_info.rx_queue_offload_capa, 2131 __func__); 2132 return -EINVAL; 2133 } 2134 2135 if (local_conf.share_group > 0 && 2136 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 2137 RTE_ETHDEV_LOG(ERR, 2138 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n", 2139 port_id, rx_queue_id, local_conf.share_group); 2140 return -EINVAL; 2141 } 2142 2143 /* 2144 * If LRO is enabled, check that the maximum aggregated packet 2145 * size is supported by the configured device. 2146 */ 2147 /* Get the real Ethernet overhead length */ 2148 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 2149 uint32_t overhead_len; 2150 uint32_t max_rx_pktlen; 2151 int ret; 2152 2153 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2154 dev_info.max_mtu); 2155 max_rx_pktlen = dev->data->mtu + overhead_len; 2156 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2157 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2158 ret = eth_dev_check_lro_pkt_size(port_id, 2159 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2160 max_rx_pktlen, 2161 dev_info.max_lro_pkt_size); 2162 if (ret != 0) 2163 return ret; 2164 } 2165 2166 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2167 socket_id, &local_conf, mp); 2168 if (!ret) { 2169 if (!dev->data->min_rx_buf_size || 2170 dev->data->min_rx_buf_size > mbp_buf_size) 2171 dev->data->min_rx_buf_size = mbp_buf_size; 2172 } 2173 2174 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2175 rx_conf, ret); 2176 return eth_err(port_id, ret); 2177 } 2178 2179 int 2180 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2181 uint16_t nb_rx_desc, 2182 const struct rte_eth_hairpin_conf *conf) 2183 { 2184 int ret; 2185 struct rte_eth_dev *dev; 2186 struct rte_eth_hairpin_cap cap; 2187 int i; 2188 int count; 2189 2190 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2191 dev = &rte_eth_devices[port_id]; 2192 2193 if (rx_queue_id >= dev->data->nb_rx_queues) { 2194 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2195 return -EINVAL; 2196 } 2197 2198 if (conf == NULL) { 2199 RTE_ETHDEV_LOG(ERR, 2200 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2201 port_id); 2202 return -EINVAL; 2203 } 2204 2205 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2206 if (ret != 0) 2207 return ret; 2208 if (*dev->dev_ops->rx_hairpin_queue_setup == NULL) 2209 return -ENOTSUP; 2210 /* if nb_rx_desc is zero use max number of desc from the driver. 
 */
2211 	if (nb_rx_desc == 0)
2212 		nb_rx_desc = cap.max_nb_desc;
2213 	if (nb_rx_desc > cap.max_nb_desc) {
2214 		RTE_ETHDEV_LOG(ERR,
2215 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
2216 			nb_rx_desc, cap.max_nb_desc);
2217 		return -EINVAL;
2218 	}
2219 	if (conf->peer_count > cap.max_rx_2_tx) {
2220 		RTE_ETHDEV_LOG(ERR,
2221 			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
2222 			conf->peer_count, cap.max_rx_2_tx);
2223 		return -EINVAL;
2224 	}
2225 	if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
2226 		RTE_ETHDEV_LOG(ERR,
2227 			"Attempt to use locked device memory for Rx queue, which is not supported\n");
2228 		return -EINVAL;
2229 	}
2230 	if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
2231 		RTE_ETHDEV_LOG(ERR,
2232 			"Attempt to use DPDK memory for Rx queue, which is not supported\n");
2233 		return -EINVAL;
2234 	}
2235 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
2236 		RTE_ETHDEV_LOG(ERR,
2237 			"Attempt to use mutually exclusive memory settings for Rx queue\n");
2238 		return -EINVAL;
2239 	}
2240 	if (conf->force_memory &&
2241 	    !conf->use_locked_device_memory &&
2242 	    !conf->use_rte_memory) {
2243 		RTE_ETHDEV_LOG(ERR,
2244 			"Attempt to force Rx queue memory settings, but none is set\n");
2245 		return -EINVAL;
2246 	}
2247 	if (conf->peer_count == 0) {
2248 		RTE_ETHDEV_LOG(ERR,
2249 			"Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
2250 			conf->peer_count);
2251 		return -EINVAL;
2252 	}
2253 	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2254 	     cap.max_nb_queues != UINT16_MAX; i++) {
2255 		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2256 			count++;
2257 	}
2258 	if (count > cap.max_nb_queues) {
2259 		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
2260 			cap.max_nb_queues);
2261 		return -EINVAL;
2262 	}
2263 	if (dev->data->dev_started)
2264 		return -EBUSY;
2265 	eth_dev_rxq_release(dev, rx_queue_id);
2266 	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2267 			nb_rx_desc, conf);
2268 	if (ret == 0)
2269 		dev->data->rx_queue_state[rx_queue_id] =
2270 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2271 	ret = eth_err(port_id, ret);
2272 
2273 	rte_eth_trace_rx_hairpin_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2274 			conf, ret);
2275 
2276 	return ret;
2277 }
2278 
2279 int
2280 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2281 		uint16_t nb_tx_desc, unsigned int socket_id,
2282 		const struct rte_eth_txconf *tx_conf)
2283 {
2284 	struct rte_eth_dev *dev;
2285 	struct rte_eth_dev_info dev_info;
2286 	struct rte_eth_txconf local_conf;
2287 	int ret;
2288 
2289 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2290 	dev = &rte_eth_devices[port_id];
2291 
2292 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2293 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2294 		return -EINVAL;
2295 	}
2296 
2297 	if (*dev->dev_ops->tx_queue_setup == NULL)
2298 		return -ENOTSUP;
2299 
2300 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2301 	if (ret != 0)
2302 		return ret;
2303 
2304 	/* Use default specified by driver, if nb_tx_desc is zero */
2305 	if (nb_tx_desc == 0) {
2306 		nb_tx_desc = dev_info.default_txportconf.ring_size;
2307 		/* If driver default is zero, fall back on EAL default */
2308 		if (nb_tx_desc == 0)
2309 			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2310 	}
2311 	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2312 	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2313 	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2314 		RTE_ETHDEV_LOG(ERR,
2315 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2316 			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2317 			dev_info.tx_desc_lim.nb_min,
2318 			dev_info.tx_desc_lim.nb_align);
2319 		return -EINVAL;
2320 	}
2321 
2322 	if (dev->data->dev_started &&
2323 	    !(dev_info.dev_capa &
2324 	      RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2325 		return -EBUSY;
2326 
2327 	if (dev->data->dev_started &&
2328 	    (dev->data->tx_queue_state[tx_queue_id] !=
2329 	     RTE_ETH_QUEUE_STATE_STOPPED))
2330 		return -EBUSY;
2331 
2332 	eth_dev_txq_release(dev, tx_queue_id);
2333 
2334 	if (tx_conf == NULL)
2335 		tx_conf = &dev_info.default_txconf;
2336 
2337 	local_conf = *tx_conf;
2338 
2339 	/*
2340 	 * If an offloading has already been enabled in
2341 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2342 	 * so there is no need to enable it in this queue again.
2343 	 * The local_conf.offloads input to underlying PMD only carries
2344 	 * those offloadings which are only enabled on this queue and
2345 	 * not enabled on all queues.
2346 	 */
2347 	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2348 
2349 	/*
2350 	 * New added offloadings for this queue are those not enabled in
2351 	 * rte_eth_dev_configure() and they must be per-queue type.
2352 	 * A pure per-port offloading can't be enabled on a queue while
2353 	 * disabled on another queue. A pure per-port offloading can't
2354 	 * be enabled for any queue as new added one if it hasn't been
2355 	 * enabled in rte_eth_dev_configure().
2356 	 */
2357 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2358 	    local_conf.offloads) {
2359 		RTE_ETHDEV_LOG(ERR,
2360 			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2361 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2362 			port_id, tx_queue_id, local_conf.offloads,
2363 			dev_info.tx_queue_offload_capa,
2364 			__func__);
2365 		return -EINVAL;
2366 	}
2367 
2368 	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2369 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2370 			tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2371 }
2372 
2373 int
2374 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2375 		uint16_t nb_tx_desc,
2376 		const struct rte_eth_hairpin_conf *conf)
2377 {
2378 	struct rte_eth_dev *dev;
2379 	struct rte_eth_hairpin_cap cap;
2380 	int i;
2381 	int count;
2382 	int ret;
2383 
2384 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2385 	dev = &rte_eth_devices[port_id];
2386 
2387 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2388 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2389 		return -EINVAL;
2390 	}
2391 
2392 	if (conf == NULL) {
2393 		RTE_ETHDEV_LOG(ERR,
2394 			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2395 			port_id);
2396 		return -EINVAL;
2397 	}
2398 
2399 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2400 	if (ret != 0)
2401 		return ret;
2402 	if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
2403 		return -ENOTSUP;
2404 	/* if nb_tx_desc is zero use max number of desc from the driver. */
2405 	if (nb_tx_desc == 0)
2406 		nb_tx_desc = cap.max_nb_desc;
2407 	if (nb_tx_desc > cap.max_nb_desc) {
2408 		RTE_ETHDEV_LOG(ERR,
2409 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2410 			nb_tx_desc, cap.max_nb_desc);
2411 		return -EINVAL;
2412 	}
2413 	if (conf->peer_count > cap.max_tx_2_rx) {
2414 		RTE_ETHDEV_LOG(ERR,
2415 			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2416 			conf->peer_count, cap.max_tx_2_rx);
2417 		return -EINVAL;
2418 	}
2419 	if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
2420 		RTE_ETHDEV_LOG(ERR,
2421 			"Attempt to use locked device memory for Tx queue, which is not supported\n");
2422 		return -EINVAL;
2423 	}
2424 	if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
2425 		RTE_ETHDEV_LOG(ERR,
2426 			"Attempt to use DPDK memory for Tx queue, which is not supported\n");
2427 		return -EINVAL;
2428 	}
2429 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
2430 		RTE_ETHDEV_LOG(ERR,
2431 			"Attempt to use mutually exclusive memory settings for Tx queue\n");
2432 		return -EINVAL;
2433 	}
2434 	if (conf->force_memory &&
2435 	    !conf->use_locked_device_memory &&
2436 	    !conf->use_rte_memory) {
2437 		RTE_ETHDEV_LOG(ERR,
2438 			"Attempt to force Tx queue memory settings, but none is set\n");
2439 		return -EINVAL;
2440 	}
2441 	if (conf->peer_count == 0) {
2442 		RTE_ETHDEV_LOG(ERR,
2443 			"Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2444 			conf->peer_count);
2445 		return -EINVAL;
2446 	}
2447 	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2448 	     cap.max_nb_queues != UINT16_MAX; i++) {
2449 		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2450 			count++;
2451 	}
2452 	if (count > cap.max_nb_queues) {
2453 		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2454 			cap.max_nb_queues);
2455 		return -EINVAL;
2456 	}
2457 	if (dev->data->dev_started)
2458 		return -EBUSY;
2459 	eth_dev_txq_release(dev, tx_queue_id);
2460 	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2461 		(dev, tx_queue_id, nb_tx_desc, conf);
2462 	if (ret == 0)
2463 		dev->data->tx_queue_state[tx_queue_id] =
2464 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2465 	ret = eth_err(port_id, ret);
2466 
2467 	rte_eth_trace_tx_hairpin_queue_setup(port_id, tx_queue_id, nb_tx_desc,
2468 			conf, ret);
2469 
2470 	return ret;
2471 }
2472 
2473 int
2474 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2475 {
2476 	struct rte_eth_dev *dev;
2477 	int ret;
2478 
2479 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2480 	dev = &rte_eth_devices[tx_port];
2481 
2482 	if (dev->data->dev_started == 0) {
2483 		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2484 		return -EBUSY;
2485 	}
2486 
2487 	if (*dev->dev_ops->hairpin_bind == NULL)
2488 		return -ENOTSUP;
2489 	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2490 	if (ret != 0)
2491 		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2492 			" to Rx %d (%d - all ports)\n",
2493 			tx_port, rx_port, RTE_MAX_ETHPORTS);
2494 
2495 	rte_eth_trace_hairpin_bind(tx_port, rx_port, ret);
2496 
2497 	return ret;
2498 }
2499 
2500 int
2501 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2502 {
2503 	struct rte_eth_dev *dev;
2504 	int ret;
2505 
2506 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2507 	dev = &rte_eth_devices[tx_port];
2508 
2509 	if (dev->data->dev_started == 0) {
2510 		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2511 		return -EBUSY;
2512 	}
2513 
2514 	if (*dev->dev_ops->hairpin_unbind == NULL)
2515 		return -ENOTSUP;
2516 	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2517 	if (ret != 0)
2518
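		/* rx_port may be RTE_MAX_ETHPORTS here, meaning "unbind from all peer Rx ports" */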
RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2519 " from Rx %d (%d - all ports)\n", 2520 tx_port, rx_port, RTE_MAX_ETHPORTS); 2521 2522 rte_eth_trace_hairpin_unbind(tx_port, rx_port, ret); 2523 2524 return ret; 2525 } 2526 2527 int 2528 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2529 size_t len, uint32_t direction) 2530 { 2531 struct rte_eth_dev *dev; 2532 int ret; 2533 2534 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2535 dev = &rte_eth_devices[port_id]; 2536 2537 if (peer_ports == NULL) { 2538 RTE_ETHDEV_LOG(ERR, 2539 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2540 port_id); 2541 return -EINVAL; 2542 } 2543 2544 if (len == 0) { 2545 RTE_ETHDEV_LOG(ERR, 2546 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2547 port_id); 2548 return -EINVAL; 2549 } 2550 2551 if (*dev->dev_ops->hairpin_get_peer_ports == NULL) 2552 return -ENOTSUP; 2553 2554 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2555 len, direction); 2556 if (ret < 0) 2557 RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2558 port_id, direction ? "Rx" : "Tx"); 2559 2560 rte_eth_trace_hairpin_get_peer_ports(port_id, peer_ports, len, 2561 direction, ret); 2562 2563 return ret; 2564 } 2565 2566 void 2567 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2568 void *userdata __rte_unused) 2569 { 2570 rte_pktmbuf_free_bulk(pkts, unsent); 2571 2572 rte_eth_trace_tx_buffer_drop_callback((void **)pkts, unsent); 2573 } 2574 2575 void 2576 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2577 void *userdata) 2578 { 2579 uint64_t *count = userdata; 2580 2581 rte_pktmbuf_free_bulk(pkts, unsent); 2582 *count += unsent; 2583 2584 rte_eth_trace_tx_buffer_count_callback((void **)pkts, unsent, *count); 2585 } 2586 2587 int 2588 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2589 buffer_tx_error_fn cbfn, void *userdata) 2590 { 2591 if (buffer == NULL) { 2592 RTE_ETHDEV_LOG(ERR, 2593 "Cannot set Tx buffer error callback to NULL buffer\n"); 2594 return -EINVAL; 2595 } 2596 2597 buffer->error_callback = cbfn; 2598 buffer->error_userdata = userdata; 2599 2600 rte_eth_trace_tx_buffer_set_err_callback(buffer); 2601 2602 return 0; 2603 } 2604 2605 int 2606 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2607 { 2608 int ret = 0; 2609 2610 if (buffer == NULL) { 2611 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2612 return -EINVAL; 2613 } 2614 2615 buffer->size = size; 2616 if (buffer->error_callback == NULL) { 2617 ret = rte_eth_tx_buffer_set_err_callback( 2618 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2619 } 2620 2621 rte_eth_trace_tx_buffer_init(buffer, size, ret); 2622 2623 return ret; 2624 } 2625 2626 int 2627 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2628 { 2629 struct rte_eth_dev *dev; 2630 int ret; 2631 2632 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2633 dev = &rte_eth_devices[port_id]; 2634 2635 if (*dev->dev_ops->tx_done_cleanup == NULL) 2636 return -ENOTSUP; 2637 2638 /* Call driver to free pending mbufs. 
*/ 2639 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2640 free_cnt); 2641 ret = eth_err(port_id, ret); 2642 2643 rte_eth_trace_tx_done_cleanup(port_id, queue_id, free_cnt, ret); 2644 2645 return ret; 2646 } 2647 2648 int 2649 rte_eth_promiscuous_enable(uint16_t port_id) 2650 { 2651 struct rte_eth_dev *dev; 2652 int diag = 0; 2653 2654 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2655 dev = &rte_eth_devices[port_id]; 2656 2657 if (dev->data->promiscuous == 1) 2658 return 0; 2659 2660 if (*dev->dev_ops->promiscuous_enable == NULL) 2661 return -ENOTSUP; 2662 2663 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2664 dev->data->promiscuous = (diag == 0) ? 1 : 0; 2665 2666 diag = eth_err(port_id, diag); 2667 2668 rte_eth_trace_promiscuous_enable(port_id, dev->data->promiscuous, 2669 diag); 2670 2671 return diag; 2672 } 2673 2674 int 2675 rte_eth_promiscuous_disable(uint16_t port_id) 2676 { 2677 struct rte_eth_dev *dev; 2678 int diag = 0; 2679 2680 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2681 dev = &rte_eth_devices[port_id]; 2682 2683 if (dev->data->promiscuous == 0) 2684 return 0; 2685 2686 if (*dev->dev_ops->promiscuous_disable == NULL) 2687 return -ENOTSUP; 2688 2689 dev->data->promiscuous = 0; 2690 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2691 if (diag != 0) 2692 dev->data->promiscuous = 1; 2693 2694 diag = eth_err(port_id, diag); 2695 2696 rte_eth_trace_promiscuous_disable(port_id, dev->data->promiscuous, 2697 diag); 2698 2699 return diag; 2700 } 2701 2702 int 2703 rte_eth_promiscuous_get(uint16_t port_id) 2704 { 2705 struct rte_eth_dev *dev; 2706 2707 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2708 dev = &rte_eth_devices[port_id]; 2709 2710 rte_eth_trace_promiscuous_get(port_id, dev->data->promiscuous); 2711 2712 return dev->data->promiscuous; 2713 } 2714 2715 int 2716 rte_eth_allmulticast_enable(uint16_t port_id) 2717 { 2718 struct rte_eth_dev *dev; 2719 int diag; 2720 2721 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2722 dev = &rte_eth_devices[port_id]; 2723 2724 if (dev->data->all_multicast == 1) 2725 return 0; 2726 2727 if (*dev->dev_ops->allmulticast_enable == NULL) 2728 return -ENOTSUP; 2729 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2730 dev->data->all_multicast = (diag == 0) ? 
1 : 0; 2731 2732 diag = eth_err(port_id, diag); 2733 2734 rte_eth_trace_allmulticast_enable(port_id, dev->data->all_multicast, 2735 diag); 2736 2737 return diag; 2738 } 2739 2740 int 2741 rte_eth_allmulticast_disable(uint16_t port_id) 2742 { 2743 struct rte_eth_dev *dev; 2744 int diag; 2745 2746 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2747 dev = &rte_eth_devices[port_id]; 2748 2749 if (dev->data->all_multicast == 0) 2750 return 0; 2751 2752 if (*dev->dev_ops->allmulticast_disable == NULL) 2753 return -ENOTSUP; 2754 dev->data->all_multicast = 0; 2755 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2756 if (diag != 0) 2757 dev->data->all_multicast = 1; 2758 2759 diag = eth_err(port_id, diag); 2760 2761 rte_eth_trace_allmulticast_disable(port_id, dev->data->all_multicast, 2762 diag); 2763 2764 return diag; 2765 } 2766 2767 int 2768 rte_eth_allmulticast_get(uint16_t port_id) 2769 { 2770 struct rte_eth_dev *dev; 2771 2772 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2773 dev = &rte_eth_devices[port_id]; 2774 2775 rte_eth_trace_allmulticast_get(port_id, dev->data->all_multicast); 2776 2777 return dev->data->all_multicast; 2778 } 2779 2780 int 2781 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2782 { 2783 struct rte_eth_dev *dev; 2784 2785 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2786 dev = &rte_eth_devices[port_id]; 2787 2788 if (eth_link == NULL) { 2789 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2790 port_id); 2791 return -EINVAL; 2792 } 2793 2794 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2795 rte_eth_linkstatus_get(dev, eth_link); 2796 else { 2797 if (*dev->dev_ops->link_update == NULL) 2798 return -ENOTSUP; 2799 (*dev->dev_ops->link_update)(dev, 1); 2800 *eth_link = dev->data->dev_link; 2801 } 2802 2803 rte_eth_trace_link_get(port_id, eth_link); 2804 2805 return 0; 2806 } 2807 2808 int 2809 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2810 { 2811 struct rte_eth_dev *dev; 2812 2813 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2814 dev = &rte_eth_devices[port_id]; 2815 2816 if (eth_link == NULL) { 2817 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2818 port_id); 2819 return -EINVAL; 2820 } 2821 2822 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2823 rte_eth_linkstatus_get(dev, eth_link); 2824 else { 2825 if (*dev->dev_ops->link_update == NULL) 2826 return -ENOTSUP; 2827 (*dev->dev_ops->link_update)(dev, 0); 2828 *eth_link = dev->data->dev_link; 2829 } 2830 2831 rte_eth_trace_link_get_nowait(port_id, eth_link); 2832 2833 return 0; 2834 } 2835 2836 const char * 2837 rte_eth_link_speed_to_str(uint32_t link_speed) 2838 { 2839 const char *ret; 2840 2841 switch (link_speed) { 2842 case RTE_ETH_SPEED_NUM_NONE: 2843 ret = "None"; 2844 break; 2845 case RTE_ETH_SPEED_NUM_10M: 2846 ret = "10 Mbps"; 2847 break; 2848 case RTE_ETH_SPEED_NUM_100M: 2849 ret = "100 Mbps"; 2850 break; 2851 case RTE_ETH_SPEED_NUM_1G: 2852 ret = "1 Gbps"; 2853 break; 2854 case RTE_ETH_SPEED_NUM_2_5G: 2855 ret = "2.5 Gbps"; 2856 break; 2857 case RTE_ETH_SPEED_NUM_5G: 2858 ret = "5 Gbps"; 2859 break; 2860 case RTE_ETH_SPEED_NUM_10G: 2861 ret = "10 Gbps"; 2862 break; 2863 case RTE_ETH_SPEED_NUM_20G: 2864 ret = "20 Gbps"; 2865 break; 2866 case RTE_ETH_SPEED_NUM_25G: 2867 ret = "25 Gbps"; 2868 break; 2869 case RTE_ETH_SPEED_NUM_40G: 2870 ret = "40 Gbps"; 2871 break; 2872 case RTE_ETH_SPEED_NUM_50G: 2873 ret = "50 Gbps"; 2874 break; 2875 case RTE_ETH_SPEED_NUM_56G: 2876 ret 
= "56 Gbps"; 2877 break; 2878 case RTE_ETH_SPEED_NUM_100G: 2879 ret = "100 Gbps"; 2880 break; 2881 case RTE_ETH_SPEED_NUM_200G: 2882 ret = "200 Gbps"; 2883 break; 2884 case RTE_ETH_SPEED_NUM_UNKNOWN: 2885 ret = "Unknown"; 2886 break; 2887 default: 2888 ret = "Invalid"; 2889 } 2890 2891 rte_eth_trace_link_speed_to_str(link_speed, ret); 2892 2893 return ret; 2894 } 2895 2896 int 2897 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2898 { 2899 int ret; 2900 2901 if (str == NULL) { 2902 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2903 return -EINVAL; 2904 } 2905 2906 if (len == 0) { 2907 RTE_ETHDEV_LOG(ERR, 2908 "Cannot convert link to string with zero size\n"); 2909 return -EINVAL; 2910 } 2911 2912 if (eth_link == NULL) { 2913 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2914 return -EINVAL; 2915 } 2916 2917 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 2918 ret = snprintf(str, len, "Link down"); 2919 else 2920 ret = snprintf(str, len, "Link up at %s %s %s", 2921 rte_eth_link_speed_to_str(eth_link->link_speed), 2922 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 2923 "FDX" : "HDX", 2924 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 2925 "Autoneg" : "Fixed"); 2926 2927 rte_eth_trace_link_to_str(len, eth_link, str, ret); 2928 2929 return ret; 2930 } 2931 2932 int 2933 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2934 { 2935 struct rte_eth_dev *dev; 2936 int ret; 2937 2938 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2939 dev = &rte_eth_devices[port_id]; 2940 2941 if (stats == NULL) { 2942 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2943 port_id); 2944 return -EINVAL; 2945 } 2946 2947 memset(stats, 0, sizeof(*stats)); 2948 2949 if (*dev->dev_ops->stats_get == NULL) 2950 return -ENOTSUP; 2951 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2952 ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2953 2954 rte_eth_trace_stats_get(port_id, stats, ret); 2955 2956 return ret; 2957 } 2958 2959 int 2960 rte_eth_stats_reset(uint16_t port_id) 2961 { 2962 struct rte_eth_dev *dev; 2963 int ret; 2964 2965 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2966 dev = &rte_eth_devices[port_id]; 2967 2968 if (*dev->dev_ops->stats_reset == NULL) 2969 return -ENOTSUP; 2970 ret = (*dev->dev_ops->stats_reset)(dev); 2971 if (ret != 0) 2972 return eth_err(port_id, ret); 2973 2974 dev->data->rx_mbuf_alloc_failed = 0; 2975 2976 rte_eth_trace_stats_reset(port_id); 2977 2978 return 0; 2979 } 2980 2981 static inline int 2982 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2983 { 2984 uint16_t nb_rxqs, nb_txqs; 2985 int count; 2986 2987 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2988 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2989 2990 count = RTE_NB_STATS; 2991 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2992 count += nb_rxqs * RTE_NB_RXQ_STATS; 2993 count += nb_txqs * RTE_NB_TXQ_STATS; 2994 } 2995 2996 return count; 2997 } 2998 2999 static int 3000 eth_dev_get_xstats_count(uint16_t port_id) 3001 { 3002 struct rte_eth_dev *dev; 3003 int count; 3004 3005 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3006 dev = &rte_eth_devices[port_id]; 3007 if (dev->dev_ops->xstats_get_names != NULL) { 3008 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 3009 if (count < 0) 3010 return eth_err(port_id, count); 3011 } else 3012 count = 0; 3013 3014 3015 count += 
eth_dev_get_xstats_basic_count(dev); 3016 3017 return count; 3018 } 3019 3020 int 3021 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 3022 uint64_t *id) 3023 { 3024 int cnt_xstats, idx_xstat; 3025 3026 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3027 3028 if (xstat_name == NULL) { 3029 RTE_ETHDEV_LOG(ERR, 3030 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 3031 port_id); 3032 return -ENOMEM; 3033 } 3034 3035 if (id == NULL) { 3036 RTE_ETHDEV_LOG(ERR, 3037 "Cannot get ethdev port %u xstats ID to NULL\n", 3038 port_id); 3039 return -ENOMEM; 3040 } 3041 3042 /* Get count */ 3043 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 3044 if (cnt_xstats < 0) { 3045 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 3046 return -ENODEV; 3047 } 3048 3049 /* Get id-name lookup table */ 3050 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 3051 3052 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 3053 port_id, xstats_names, cnt_xstats, NULL)) { 3054 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 3055 return -1; 3056 } 3057 3058 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 3059 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 3060 *id = idx_xstat; 3061 3062 rte_eth_trace_xstats_get_id_by_name(port_id, 3063 xstat_name, *id); 3064 3065 return 0; 3066 }; 3067 } 3068 3069 return -EINVAL; 3070 } 3071 3072 /* retrieve basic stats names */ 3073 static int 3074 eth_basic_stats_get_names(struct rte_eth_dev *dev, 3075 struct rte_eth_xstat_name *xstats_names) 3076 { 3077 int cnt_used_entries = 0; 3078 uint32_t idx, id_queue; 3079 uint16_t num_q; 3080 3081 for (idx = 0; idx < RTE_NB_STATS; idx++) { 3082 strlcpy(xstats_names[cnt_used_entries].name, 3083 eth_dev_stats_strings[idx].name, 3084 sizeof(xstats_names[0].name)); 3085 cnt_used_entries++; 3086 } 3087 3088 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3089 return cnt_used_entries; 3090 3091 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3092 for (id_queue = 0; id_queue < num_q; id_queue++) { 3093 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 3094 snprintf(xstats_names[cnt_used_entries].name, 3095 sizeof(xstats_names[0].name), 3096 "rx_q%u_%s", 3097 id_queue, eth_dev_rxq_stats_strings[idx].name); 3098 cnt_used_entries++; 3099 } 3100 3101 } 3102 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3103 for (id_queue = 0; id_queue < num_q; id_queue++) { 3104 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 3105 snprintf(xstats_names[cnt_used_entries].name, 3106 sizeof(xstats_names[0].name), 3107 "tx_q%u_%s", 3108 id_queue, eth_dev_txq_stats_strings[idx].name); 3109 cnt_used_entries++; 3110 } 3111 } 3112 return cnt_used_entries; 3113 } 3114 3115 /* retrieve ethdev extended statistics names */ 3116 int 3117 rte_eth_xstats_get_names_by_id(uint16_t port_id, 3118 struct rte_eth_xstat_name *xstats_names, unsigned int size, 3119 uint64_t *ids) 3120 { 3121 struct rte_eth_xstat_name *xstats_names_copy; 3122 unsigned int no_basic_stat_requested = 1; 3123 unsigned int no_ext_stat_requested = 1; 3124 unsigned int expected_entries; 3125 unsigned int basic_count; 3126 struct rte_eth_dev *dev; 3127 unsigned int i; 3128 int ret; 3129 3130 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3131 dev = &rte_eth_devices[port_id]; 3132 3133 basic_count = eth_dev_get_xstats_basic_count(dev); 3134 ret = eth_dev_get_xstats_count(port_id); 3135 if (ret < 0) 3136 return ret; 3137 expected_entries = (unsigned int)ret; 3138 3139 
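	/*
	 * Note: callers commonly use this function in two steps (an
	 * illustrative sketch, not code from this file): first query the
	 * number of entries with both arrays NULL, then fetch the names.
	 *
	 *	int n = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
	 *	struct rte_eth_xstat_name *names = NULL;
	 *
	 *	if (n > 0)
	 *		names = calloc(n, sizeof(*names));
	 *	if (names != NULL &&
	 *	    rte_eth_xstats_get_names_by_id(port_id, names, n, NULL) == n)
	 *		consume_names(names, n);
	 *	free(names);
	 *
	 * consume_names() is a hypothetical stand-in for whatever the
	 * application does with the result.
	 */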
/* Return max number of stats if no ids given */ 3140 if (!ids) { 3141 if (!xstats_names) 3142 return expected_entries; 3143 else if (xstats_names && size < expected_entries) 3144 return expected_entries; 3145 } 3146 3147 if (ids && !xstats_names) 3148 return -EINVAL; 3149 3150 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3151 uint64_t ids_copy[size]; 3152 3153 for (i = 0; i < size; i++) { 3154 if (ids[i] < basic_count) { 3155 no_basic_stat_requested = 0; 3156 break; 3157 } 3158 3159 /* 3160 * Convert ids to xstats ids that PMD knows. 3161 * ids known by user are basic + extended stats. 3162 */ 3163 ids_copy[i] = ids[i] - basic_count; 3164 } 3165 3166 if (no_basic_stat_requested) 3167 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3168 ids_copy, xstats_names, size); 3169 } 3170 3171 /* Retrieve all stats */ 3172 if (!ids) { 3173 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3174 expected_entries); 3175 if (num_stats < 0 || num_stats > (int)expected_entries) 3176 return num_stats; 3177 else 3178 return expected_entries; 3179 } 3180 3181 xstats_names_copy = calloc(expected_entries, 3182 sizeof(struct rte_eth_xstat_name)); 3183 3184 if (!xstats_names_copy) { 3185 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3186 return -ENOMEM; 3187 } 3188 3189 if (ids) { 3190 for (i = 0; i < size; i++) { 3191 if (ids[i] >= basic_count) { 3192 no_ext_stat_requested = 0; 3193 break; 3194 } 3195 } 3196 } 3197 3198 /* Fill xstats_names_copy structure */ 3199 if (ids && no_ext_stat_requested) { 3200 eth_basic_stats_get_names(dev, xstats_names_copy); 3201 } else { 3202 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3203 expected_entries); 3204 if (ret < 0) { 3205 free(xstats_names_copy); 3206 return ret; 3207 } 3208 } 3209 3210 /* Filter stats */ 3211 for (i = 0; i < size; i++) { 3212 if (ids[i] >= expected_entries) { 3213 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3214 free(xstats_names_copy); 3215 return -1; 3216 } 3217 xstats_names[i] = xstats_names_copy[ids[i]]; 3218 3219 rte_eth_trace_xstats_get_names_by_id(port_id, &xstats_names[i], 3220 ids[i]); 3221 } 3222 3223 free(xstats_names_copy); 3224 return size; 3225 } 3226 3227 int 3228 rte_eth_xstats_get_names(uint16_t port_id, 3229 struct rte_eth_xstat_name *xstats_names, 3230 unsigned int size) 3231 { 3232 struct rte_eth_dev *dev; 3233 int cnt_used_entries; 3234 int cnt_expected_entries; 3235 int cnt_driver_entries; 3236 int i; 3237 3238 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3239 if (xstats_names == NULL || cnt_expected_entries < 0 || 3240 (int)size < cnt_expected_entries) 3241 return cnt_expected_entries; 3242 3243 /* port_id checked in eth_dev_get_xstats_count() */ 3244 dev = &rte_eth_devices[port_id]; 3245 3246 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3247 3248 if (dev->dev_ops->xstats_get_names != NULL) { 3249 /* If there are any driver-specific xstats, append them 3250 * to end of list. 
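	 * Their positions in the returned array, and therefore their ids,
	 * follow the basic statistics.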
3251 */ 3252 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 3253 dev, 3254 xstats_names + cnt_used_entries, 3255 size - cnt_used_entries); 3256 if (cnt_driver_entries < 0) 3257 return eth_err(port_id, cnt_driver_entries); 3258 cnt_used_entries += cnt_driver_entries; 3259 } 3260 3261 for (i = 0; i < cnt_used_entries; i++) 3262 rte_eth_trace_xstats_get_names(port_id, i, xstats_names[i], 3263 size, cnt_used_entries); 3264 3265 return cnt_used_entries; 3266 } 3267 3268 3269 static int 3270 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 3271 { 3272 struct rte_eth_dev *dev; 3273 struct rte_eth_stats eth_stats; 3274 unsigned int count = 0, i, q; 3275 uint64_t val, *stats_ptr; 3276 uint16_t nb_rxqs, nb_txqs; 3277 int ret; 3278 3279 ret = rte_eth_stats_get(port_id, ð_stats); 3280 if (ret < 0) 3281 return ret; 3282 3283 dev = &rte_eth_devices[port_id]; 3284 3285 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3286 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3287 3288 /* global stats */ 3289 for (i = 0; i < RTE_NB_STATS; i++) { 3290 stats_ptr = RTE_PTR_ADD(ð_stats, 3291 eth_dev_stats_strings[i].offset); 3292 val = *stats_ptr; 3293 xstats[count++].value = val; 3294 } 3295 3296 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3297 return count; 3298 3299 /* per-rxq stats */ 3300 for (q = 0; q < nb_rxqs; q++) { 3301 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 3302 stats_ptr = RTE_PTR_ADD(ð_stats, 3303 eth_dev_rxq_stats_strings[i].offset + 3304 q * sizeof(uint64_t)); 3305 val = *stats_ptr; 3306 xstats[count++].value = val; 3307 } 3308 } 3309 3310 /* per-txq stats */ 3311 for (q = 0; q < nb_txqs; q++) { 3312 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 3313 stats_ptr = RTE_PTR_ADD(ð_stats, 3314 eth_dev_txq_stats_strings[i].offset + 3315 q * sizeof(uint64_t)); 3316 val = *stats_ptr; 3317 xstats[count++].value = val; 3318 } 3319 } 3320 return count; 3321 } 3322 3323 /* retrieve ethdev extended statistics */ 3324 int 3325 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 3326 uint64_t *values, unsigned int size) 3327 { 3328 unsigned int no_basic_stat_requested = 1; 3329 unsigned int no_ext_stat_requested = 1; 3330 unsigned int num_xstats_filled; 3331 unsigned int basic_count; 3332 uint16_t expected_entries; 3333 struct rte_eth_dev *dev; 3334 unsigned int i; 3335 int ret; 3336 3337 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3338 dev = &rte_eth_devices[port_id]; 3339 3340 ret = eth_dev_get_xstats_count(port_id); 3341 if (ret < 0) 3342 return ret; 3343 expected_entries = (uint16_t)ret; 3344 struct rte_eth_xstat xstats[expected_entries]; 3345 basic_count = eth_dev_get_xstats_basic_count(dev); 3346 3347 /* Return max number of stats if no ids given */ 3348 if (!ids) { 3349 if (!values) 3350 return expected_entries; 3351 else if (values && size < expected_entries) 3352 return expected_entries; 3353 } 3354 3355 if (ids && !values) 3356 return -EINVAL; 3357 3358 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 3359 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); 3360 uint64_t ids_copy[size]; 3361 3362 for (i = 0; i < size; i++) { 3363 if (ids[i] < basic_count) { 3364 no_basic_stat_requested = 0; 3365 break; 3366 } 3367 3368 /* 3369 * Convert ids to xstats ids that PMD knows. 3370 * ids known by user are basic + extended stats. 
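			 * The PMD numbers only its own statistics, hence
			 * the basic_count offset below.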
3371 */ 3372 ids_copy[i] = ids[i] - basic_count; 3373 } 3374 3375 if (no_basic_stat_requested) 3376 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3377 values, size); 3378 } 3379 3380 if (ids) { 3381 for (i = 0; i < size; i++) { 3382 if (ids[i] >= basic_count) { 3383 no_ext_stat_requested = 0; 3384 break; 3385 } 3386 } 3387 } 3388 3389 /* Fill the xstats structure */ 3390 if (ids && no_ext_stat_requested) 3391 ret = eth_basic_stats_get(port_id, xstats); 3392 else 3393 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3394 3395 if (ret < 0) 3396 return ret; 3397 num_xstats_filled = (unsigned int)ret; 3398 3399 /* Return all stats */ 3400 if (!ids) { 3401 for (i = 0; i < num_xstats_filled; i++) 3402 values[i] = xstats[i].value; 3403 return expected_entries; 3404 } 3405 3406 /* Filter stats */ 3407 for (i = 0; i < size; i++) { 3408 if (ids[i] >= expected_entries) { 3409 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3410 return -1; 3411 } 3412 values[i] = xstats[ids[i]].value; 3413 } 3414 3415 rte_eth_trace_xstats_get_by_id(port_id, ids, values, size); 3416 3417 return size; 3418 } 3419 3420 int 3421 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3422 unsigned int n) 3423 { 3424 struct rte_eth_dev *dev; 3425 unsigned int count, i; 3426 signed int xcount = 0; 3427 int ret; 3428 3429 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3430 if (xstats == NULL && n > 0) 3431 return -EINVAL; 3432 dev = &rte_eth_devices[port_id]; 3433 3434 count = eth_dev_get_xstats_basic_count(dev); 3435 3436 /* implemented by the driver */ 3437 if (dev->dev_ops->xstats_get != NULL) { 3438 /* Retrieve the xstats from the driver at the end of the 3439 * xstats struct. 3440 */ 3441 xcount = (*dev->dev_ops->xstats_get)(dev, 3442 (n > count) ? xstats + count : NULL, 3443 (n > count) ? 
n - count : 0); 3444 3445 if (xcount < 0) 3446 return eth_err(port_id, xcount); 3447 } 3448 3449 if (n < count + xcount || xstats == NULL) 3450 return count + xcount; 3451 3452 /* now fill the xstats structure */ 3453 ret = eth_basic_stats_get(port_id, xstats); 3454 if (ret < 0) 3455 return ret; 3456 count = ret; 3457 3458 for (i = 0; i < count; i++) 3459 xstats[i].id = i; 3460 /* add an offset to driver-specific stats */ 3461 for ( ; i < count + xcount; i++) 3462 xstats[i].id += count; 3463 3464 for (i = 0; i < n; i++) 3465 rte_eth_trace_xstats_get(port_id, xstats[i]); 3466 3467 return count + xcount; 3468 } 3469 3470 /* reset ethdev extended statistics */ 3471 int 3472 rte_eth_xstats_reset(uint16_t port_id) 3473 { 3474 struct rte_eth_dev *dev; 3475 3476 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3477 dev = &rte_eth_devices[port_id]; 3478 3479 /* implemented by the driver */ 3480 if (dev->dev_ops->xstats_reset != NULL) { 3481 int ret = eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3482 3483 rte_eth_trace_xstats_reset(port_id, ret); 3484 3485 return ret; 3486 } 3487 3488 /* fallback to default */ 3489 return rte_eth_stats_reset(port_id); 3490 } 3491 3492 static int 3493 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3494 uint8_t stat_idx, uint8_t is_rx) 3495 { 3496 struct rte_eth_dev *dev; 3497 3498 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3499 dev = &rte_eth_devices[port_id]; 3500 3501 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3502 return -EINVAL; 3503 3504 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3505 return -EINVAL; 3506 3507 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3508 return -EINVAL; 3509 3510 if (*dev->dev_ops->queue_stats_mapping_set == NULL) 3511 return -ENOTSUP; 3512 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3513 } 3514 3515 int 3516 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3517 uint8_t stat_idx) 3518 { 3519 int ret; 3520 3521 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3522 tx_queue_id, 3523 stat_idx, STAT_QMAP_TX)); 3524 3525 rte_ethdev_trace_set_tx_queue_stats_mapping(port_id, tx_queue_id, 3526 stat_idx, ret); 3527 3528 return ret; 3529 } 3530 3531 int 3532 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3533 uint8_t stat_idx) 3534 { 3535 int ret; 3536 3537 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3538 rx_queue_id, 3539 stat_idx, STAT_QMAP_RX)); 3540 3541 rte_ethdev_trace_set_rx_queue_stats_mapping(port_id, rx_queue_id, 3542 stat_idx, ret); 3543 3544 return ret; 3545 } 3546 3547 int 3548 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3549 { 3550 struct rte_eth_dev *dev; 3551 int ret; 3552 3553 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3554 dev = &rte_eth_devices[port_id]; 3555 3556 if (fw_version == NULL && fw_size > 0) { 3557 RTE_ETHDEV_LOG(ERR, 3558 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3559 port_id); 3560 return -EINVAL; 3561 } 3562 3563 if (*dev->dev_ops->fw_version_get == NULL) 3564 return -ENOTSUP; 3565 ret = eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3566 fw_version, fw_size)); 3567 3568 rte_ethdev_trace_fw_version_get(port_id, fw_version, fw_size, ret); 3569 3570 return ret; 3571 } 3572 3573 int 3574 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3575 { 3576 struct rte_eth_dev *dev; 3577 const struct rte_eth_desc_lim lim = { 3578 
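		/* Permissive defaults; the PMD's dev_infos_get() callback overrides the real limits below. */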
.nb_max = UINT16_MAX, 3579 .nb_min = 0, 3580 .nb_align = 1, 3581 .nb_seg_max = UINT16_MAX, 3582 .nb_mtu_seg_max = UINT16_MAX, 3583 }; 3584 int diag; 3585 3586 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3587 dev = &rte_eth_devices[port_id]; 3588 3589 if (dev_info == NULL) { 3590 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3591 port_id); 3592 return -EINVAL; 3593 } 3594 3595 /* 3596 * Init dev_info before port_id check since caller does not have 3597 * return status and does not know if get is successful or not. 3598 */ 3599 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3600 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3601 3602 dev_info->rx_desc_lim = lim; 3603 dev_info->tx_desc_lim = lim; 3604 dev_info->device = dev->device; 3605 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3606 RTE_ETHER_CRC_LEN; 3607 dev_info->max_mtu = UINT16_MAX; 3608 3609 if (*dev->dev_ops->dev_infos_get == NULL) 3610 return -ENOTSUP; 3611 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3612 if (diag != 0) { 3613 /* Cleanup already filled in device information */ 3614 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3615 return eth_err(port_id, diag); 3616 } 3617 3618 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3619 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3620 RTE_MAX_QUEUES_PER_PORT); 3621 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3622 RTE_MAX_QUEUES_PER_PORT); 3623 3624 dev_info->driver_name = dev->device->driver->name; 3625 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3626 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3627 3628 dev_info->dev_flags = &dev->data->dev_flags; 3629 3630 rte_ethdev_trace_info_get(port_id, dev_info); 3631 3632 return 0; 3633 } 3634 3635 int 3636 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3637 { 3638 struct rte_eth_dev *dev; 3639 3640 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3641 dev = &rte_eth_devices[port_id]; 3642 3643 if (dev_conf == NULL) { 3644 RTE_ETHDEV_LOG(ERR, 3645 "Cannot get ethdev port %u configuration to NULL\n", 3646 port_id); 3647 return -EINVAL; 3648 } 3649 3650 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3651 3652 rte_ethdev_trace_conf_get(port_id, dev_conf); 3653 3654 return 0; 3655 } 3656 3657 int 3658 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3659 uint32_t *ptypes, int num) 3660 { 3661 int i, j; 3662 struct rte_eth_dev *dev; 3663 const uint32_t *all_ptypes; 3664 3665 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3666 dev = &rte_eth_devices[port_id]; 3667 3668 if (ptypes == NULL && num > 0) { 3669 RTE_ETHDEV_LOG(ERR, 3670 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3671 port_id); 3672 return -EINVAL; 3673 } 3674 3675 if (*dev->dev_ops->dev_supported_ptypes_get == NULL) 3676 return 0; 3677 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3678 3679 if (!all_ptypes) 3680 return 0; 3681 3682 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3683 if (all_ptypes[i] & ptype_mask) { 3684 if (j < num) { 3685 ptypes[j] = all_ptypes[i]; 3686 3687 rte_ethdev_trace_get_supported_ptypes(port_id, 3688 j, num, ptypes[j]); 3689 } 3690 j++; 3691 } 3692 3693 return j; 3694 } 3695 3696 int 3697 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3698 uint32_t *set_ptypes, unsigned int num) 3699 { 3700 const uint32_t valid_ptype_masks[] = { 3701 RTE_PTYPE_L2_MASK, 
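		/* Each entry is a whole layer mask; ptype_mask must be built from such masks (checked below). */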
3702 RTE_PTYPE_L3_MASK, 3703 RTE_PTYPE_L4_MASK, 3704 RTE_PTYPE_TUNNEL_MASK, 3705 RTE_PTYPE_INNER_L2_MASK, 3706 RTE_PTYPE_INNER_L3_MASK, 3707 RTE_PTYPE_INNER_L4_MASK, 3708 }; 3709 const uint32_t *all_ptypes; 3710 struct rte_eth_dev *dev; 3711 uint32_t unused_mask; 3712 unsigned int i, j; 3713 int ret; 3714 3715 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3716 dev = &rte_eth_devices[port_id]; 3717 3718 if (num > 0 && set_ptypes == NULL) { 3719 RTE_ETHDEV_LOG(ERR, 3720 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3721 port_id); 3722 return -EINVAL; 3723 } 3724 3725 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3726 *dev->dev_ops->dev_ptypes_set == NULL) { 3727 ret = 0; 3728 goto ptype_unknown; 3729 } 3730 3731 if (ptype_mask == 0) { 3732 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3733 ptype_mask); 3734 goto ptype_unknown; 3735 } 3736 3737 unused_mask = ptype_mask; 3738 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3739 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3740 if (mask && mask != valid_ptype_masks[i]) { 3741 ret = -EINVAL; 3742 goto ptype_unknown; 3743 } 3744 unused_mask &= ~valid_ptype_masks[i]; 3745 } 3746 3747 if (unused_mask) { 3748 ret = -EINVAL; 3749 goto ptype_unknown; 3750 } 3751 3752 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3753 if (all_ptypes == NULL) { 3754 ret = 0; 3755 goto ptype_unknown; 3756 } 3757 3758 /* 3759 * Accommodate as many set_ptypes as possible. If the supplied 3760 * set_ptypes array is insufficient fill it partially. 3761 */ 3762 for (i = 0, j = 0; set_ptypes != NULL && 3763 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3764 if (ptype_mask & all_ptypes[i]) { 3765 if (j < num - 1) { 3766 set_ptypes[j] = all_ptypes[i]; 3767 3768 rte_ethdev_trace_set_ptypes(port_id, j, num, 3769 set_ptypes[j]); 3770 3771 j++; 3772 continue; 3773 } 3774 break; 3775 } 3776 } 3777 3778 if (set_ptypes != NULL && j < num) 3779 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3780 3781 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3782 3783 ptype_unknown: 3784 if (num > 0) 3785 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3786 3787 return ret; 3788 } 3789 3790 int 3791 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3792 unsigned int num) 3793 { 3794 int32_t ret; 3795 struct rte_eth_dev *dev; 3796 struct rte_eth_dev_info dev_info; 3797 3798 if (ma == NULL) { 3799 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3800 return -EINVAL; 3801 } 3802 3803 /* will check for us that port_id is a valid one */ 3804 ret = rte_eth_dev_info_get(port_id, &dev_info); 3805 if (ret != 0) 3806 return ret; 3807 3808 dev = &rte_eth_devices[port_id]; 3809 num = RTE_MIN(dev_info.max_mac_addrs, num); 3810 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3811 3812 rte_eth_trace_macaddrs_get(port_id, num); 3813 3814 return num; 3815 } 3816 3817 int 3818 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3819 { 3820 struct rte_eth_dev *dev; 3821 3822 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3823 dev = &rte_eth_devices[port_id]; 3824 3825 if (mac_addr == NULL) { 3826 RTE_ETHDEV_LOG(ERR, 3827 "Cannot get ethdev port %u MAC address to NULL\n", 3828 port_id); 3829 return -EINVAL; 3830 } 3831 3832 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3833 3834 rte_eth_trace_macaddr_get(port_id, mac_addr); 3835 3836 return 0; 3837 } 3838 3839 int 3840 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3841 { 3842 struct rte_eth_dev *dev; 3843 3844 
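	/* The MTU is returned from the cached ethdev data; no driver callback is invoked. */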
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3845 dev = &rte_eth_devices[port_id]; 3846 3847 if (mtu == NULL) { 3848 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3849 port_id); 3850 return -EINVAL; 3851 } 3852 3853 *mtu = dev->data->mtu; 3854 3855 rte_ethdev_trace_get_mtu(port_id, *mtu); 3856 3857 return 0; 3858 } 3859 3860 int 3861 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3862 { 3863 int ret; 3864 struct rte_eth_dev_info dev_info; 3865 struct rte_eth_dev *dev; 3866 3867 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3868 dev = &rte_eth_devices[port_id]; 3869 if (*dev->dev_ops->mtu_set == NULL) 3870 return -ENOTSUP; 3871 3872 /* 3873 * Check if the device supports dev_infos_get, if it does not 3874 * skip min_mtu/max_mtu validation here as this requires values 3875 * that are populated within the call to rte_eth_dev_info_get() 3876 * which relies on dev->dev_ops->dev_infos_get. 3877 */ 3878 if (*dev->dev_ops->dev_infos_get != NULL) { 3879 ret = rte_eth_dev_info_get(port_id, &dev_info); 3880 if (ret != 0) 3881 return ret; 3882 3883 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3884 if (ret != 0) 3885 return ret; 3886 } 3887 3888 if (dev->data->dev_configured == 0) { 3889 RTE_ETHDEV_LOG(ERR, 3890 "Port %u must be configured before MTU set\n", 3891 port_id); 3892 return -EINVAL; 3893 } 3894 3895 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3896 if (ret == 0) 3897 dev->data->mtu = mtu; 3898 3899 ret = eth_err(port_id, ret); 3900 3901 rte_ethdev_trace_set_mtu(port_id, mtu, ret); 3902 3903 return ret; 3904 } 3905 3906 int 3907 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3908 { 3909 struct rte_eth_dev *dev; 3910 int ret; 3911 3912 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3913 dev = &rte_eth_devices[port_id]; 3914 3915 if (!(dev->data->dev_conf.rxmode.offloads & 3916 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 3917 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 3918 port_id); 3919 return -ENOSYS; 3920 } 3921 3922 if (vlan_id > 4095) { 3923 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3924 port_id, vlan_id); 3925 return -EINVAL; 3926 } 3927 if (*dev->dev_ops->vlan_filter_set == NULL) 3928 return -ENOTSUP; 3929 3930 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3931 if (ret == 0) { 3932 struct rte_vlan_filter_conf *vfc; 3933 int vidx; 3934 int vbit; 3935 3936 vfc = &dev->data->vlan_filter_conf; 3937 vidx = vlan_id / 64; 3938 vbit = vlan_id % 64; 3939 3940 if (on) 3941 vfc->ids[vidx] |= RTE_BIT64(vbit); 3942 else 3943 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 3944 } 3945 3946 ret = eth_err(port_id, ret); 3947 3948 rte_ethdev_trace_vlan_filter(port_id, vlan_id, on, ret); 3949 3950 return ret; 3951 } 3952 3953 int 3954 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3955 int on) 3956 { 3957 struct rte_eth_dev *dev; 3958 3959 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3960 dev = &rte_eth_devices[port_id]; 3961 3962 if (rx_queue_id >= dev->data->nb_rx_queues) { 3963 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3964 return -EINVAL; 3965 } 3966 3967 if (*dev->dev_ops->vlan_strip_queue_set == NULL) 3968 return -ENOTSUP; 3969 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3970 3971 rte_ethdev_trace_set_vlan_strip_on_queue(port_id, rx_queue_id, on); 3972 3973 return 0; 3974 } 3975 3976 int 3977 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3978 enum rte_vlan_type vlan_type, 3979 uint16_t tpid) 3980 { 3981 struct rte_eth_dev *dev; 3982 int 
ret; 3983 3984 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3985 dev = &rte_eth_devices[port_id]; 3986 3987 if (*dev->dev_ops->vlan_tpid_set == NULL) 3988 return -ENOTSUP; 3989 ret = eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3990 tpid)); 3991 3992 rte_ethdev_trace_set_vlan_ether_type(port_id, vlan_type, tpid, ret); 3993 3994 return ret; 3995 } 3996 3997 int 3998 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3999 { 4000 struct rte_eth_dev_info dev_info; 4001 struct rte_eth_dev *dev; 4002 int ret = 0; 4003 int mask = 0; 4004 int cur, org = 0; 4005 uint64_t orig_offloads; 4006 uint64_t dev_offloads; 4007 uint64_t new_offloads; 4008 4009 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4010 dev = &rte_eth_devices[port_id]; 4011 4012 /* save original values in case of failure */ 4013 orig_offloads = dev->data->dev_conf.rxmode.offloads; 4014 dev_offloads = orig_offloads; 4015 4016 /* check which option changed by application */ 4017 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 4018 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 4019 if (cur != org) { 4020 if (cur) 4021 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4022 else 4023 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4024 mask |= RTE_ETH_VLAN_STRIP_MASK; 4025 } 4026 4027 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 4028 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 4029 if (cur != org) { 4030 if (cur) 4031 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4032 else 4033 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4034 mask |= RTE_ETH_VLAN_FILTER_MASK; 4035 } 4036 4037 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 4038 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 4039 if (cur != org) { 4040 if (cur) 4041 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4042 else 4043 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4044 mask |= RTE_ETH_VLAN_EXTEND_MASK; 4045 } 4046 4047 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 4048 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 4049 if (cur != org) { 4050 if (cur) 4051 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4052 else 4053 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4054 mask |= RTE_ETH_QINQ_STRIP_MASK; 4055 } 4056 4057 /*no change*/ 4058 if (mask == 0) 4059 return ret; 4060 4061 ret = rte_eth_dev_info_get(port_id, &dev_info); 4062 if (ret != 0) 4063 return ret; 4064 4065 /* Rx VLAN offloading must be within its device capabilities */ 4066 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 4067 new_offloads = dev_offloads & ~orig_offloads; 4068 RTE_ETHDEV_LOG(ERR, 4069 "Ethdev port_id=%u requested new added VLAN offloads " 4070 "0x%" PRIx64 " must be within Rx offloads capabilities " 4071 "0x%" PRIx64 " in %s()\n", 4072 port_id, new_offloads, dev_info.rx_offload_capa, 4073 __func__); 4074 return -EINVAL; 4075 } 4076 4077 if (*dev->dev_ops->vlan_offload_set == NULL) 4078 return -ENOTSUP; 4079 dev->data->dev_conf.rxmode.offloads = dev_offloads; 4080 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 4081 if (ret) { 4082 /* hit an error restore original values */ 4083 dev->data->dev_conf.rxmode.offloads = orig_offloads; 4084 } 4085 4086 ret = eth_err(port_id, ret); 4087 4088 rte_ethdev_trace_set_vlan_offload(port_id, offload_mask, ret); 4089 4090 return ret; 4091 } 4092 4093 int 4094 rte_eth_dev_get_vlan_offload(uint16_t port_id) 4095 { 4096 struct rte_eth_dev *dev; 4097 uint64_t *dev_offloads; 4098 int ret = 0; 4099 4100 
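	/*
	 * Reconstruct the RTE_ETH_VLAN_*_OFFLOAD flags from the Rx offload
	 * configuration cached in dev->data; the device is not queried.
	 */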
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4101 dev = &rte_eth_devices[port_id]; 4102 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 4103 4104 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 4105 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 4106 4107 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 4108 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 4109 4110 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 4111 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 4112 4113 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 4114 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 4115 4116 rte_ethdev_trace_get_vlan_offload(port_id, ret); 4117 4118 return ret; 4119 } 4120 4121 int 4122 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 4123 { 4124 struct rte_eth_dev *dev; 4125 int ret; 4126 4127 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4128 dev = &rte_eth_devices[port_id]; 4129 4130 if (*dev->dev_ops->vlan_pvid_set == NULL) 4131 return -ENOTSUP; 4132 ret = eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 4133 4134 rte_ethdev_trace_set_vlan_pvid(port_id, pvid, on, ret); 4135 4136 return ret; 4137 } 4138 4139 int 4140 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4141 { 4142 struct rte_eth_dev *dev; 4143 int ret; 4144 4145 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4146 dev = &rte_eth_devices[port_id]; 4147 4148 if (fc_conf == NULL) { 4149 RTE_ETHDEV_LOG(ERR, 4150 "Cannot get ethdev port %u flow control config to NULL\n", 4151 port_id); 4152 return -EINVAL; 4153 } 4154 4155 if (*dev->dev_ops->flow_ctrl_get == NULL) 4156 return -ENOTSUP; 4157 memset(fc_conf, 0, sizeof(*fc_conf)); 4158 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 4159 4160 rte_ethdev_trace_flow_ctrl_get(port_id, fc_conf, ret); 4161 4162 return ret; 4163 } 4164 4165 int 4166 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4167 { 4168 struct rte_eth_dev *dev; 4169 int ret; 4170 4171 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4172 dev = &rte_eth_devices[port_id]; 4173 4174 if (fc_conf == NULL) { 4175 RTE_ETHDEV_LOG(ERR, 4176 "Cannot set ethdev port %u flow control from NULL config\n", 4177 port_id); 4178 return -EINVAL; 4179 } 4180 4181 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 4182 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 4183 return -EINVAL; 4184 } 4185 4186 if (*dev->dev_ops->flow_ctrl_set == NULL) 4187 return -ENOTSUP; 4188 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 4189 4190 rte_ethdev_trace_flow_ctrl_set(port_id, fc_conf, ret); 4191 4192 return ret; 4193 } 4194 4195 int 4196 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 4197 struct rte_eth_pfc_conf *pfc_conf) 4198 { 4199 struct rte_eth_dev *dev; 4200 int ret; 4201 4202 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4203 dev = &rte_eth_devices[port_id]; 4204 4205 if (pfc_conf == NULL) { 4206 RTE_ETHDEV_LOG(ERR, 4207 "Cannot set ethdev port %u priority flow control from NULL config\n", 4208 port_id); 4209 return -EINVAL; 4210 } 4211 4212 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 4213 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 4214 return -EINVAL; 4215 } 4216 4217 /* High water, low water validation are device specific */ 4218 if (*dev->dev_ops->priority_flow_ctrl_set == NULL) 4219 return -ENOTSUP; 4220 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 4221 (dev, pfc_conf)); 4222 4223 rte_ethdev_trace_priority_flow_ctrl_set(port_id, 
int
rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
				   struct rte_eth_pfc_conf *pfc_conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (pfc_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u priority flow control from NULL config\n",
			port_id);
		return -EINVAL;
	}

	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
		return -EINVAL;
	}

	/* High water and low water validation is device specific */
	if (*dev->dev_ops->priority_flow_ctrl_set == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
		(dev, pfc_conf));

	rte_ethdev_trace_priority_flow_ctrl_set(port_id, pfc_conf, ret);

	return ret;
}

static int
validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
{
	if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
	    (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
		if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
			RTE_ETHDEV_LOG(ERR,
				"PFC Tx queue not in range for Rx pause requested:%d configured:%d\n",
				pfc_queue_conf->rx_pause.tx_qid,
				dev_info->nb_tx_queues);
			return -EINVAL;
		}

		if (pfc_queue_conf->rx_pause.tc >= tc_max) {
			RTE_ETHDEV_LOG(ERR,
				"PFC TC not in range for Rx pause requested:%d max:%d\n",
				pfc_queue_conf->rx_pause.tc, tc_max);
			return -EINVAL;
		}
	}

	return 0;
}

static int
validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
{
	if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
	    (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
		if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
			RTE_ETHDEV_LOG(ERR,
				"PFC Rx queue not in range for Tx pause requested:%d configured:%d\n",
				pfc_queue_conf->tx_pause.rx_qid,
				dev_info->nb_rx_queues);
			return -EINVAL;
		}

		if (pfc_queue_conf->tx_pause.tc >= tc_max) {
			RTE_ETHDEV_LOG(ERR,
				"PFC TC not in range for Tx pause requested:%d max:%d\n",
				pfc_queue_conf->tx_pause.tc, tc_max);
			return -EINVAL;
		}
	}

	return 0;
}

int
rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
		struct rte_eth_pfc_queue_info *pfc_queue_info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (pfc_queue_info == NULL) {
		RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
		(dev, pfc_queue_info));

	rte_ethdev_trace_priority_flow_ctrl_queue_info_get(port_id,
		pfc_queue_info, ret);

	return ret;
}

int
rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
{
	struct rte_eth_pfc_queue_info pfc_info;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (pfc_queue_conf == NULL) {
		RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n",
			port_id);
		return -EINVAL;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
	if (ret != 0)
		return ret;

	if (pfc_info.tc_max == 0) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n",
			port_id);
		return -ENOTSUP;
	}

	/* Check whether the requested mode is supported */
	if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
	    pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
		RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n",
			port_id);
		return -EINVAL;
	}

	if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
	    pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
		RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n",
			port_id);
		return -EINVAL;
	}

	/* Validate Rx pause parameters */
	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
	    pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
		ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
				pfc_queue_conf);
		if (ret != 0)
			return ret;
	}

	/* Validate Tx pause parameters */
	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
	    pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
		ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
				pfc_queue_conf);
		if (ret != 0)
			return ret;
	}

	if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_config)
		(dev, pfc_queue_conf));

	rte_ethdev_trace_priority_flow_ctrl_queue_configure(port_id,
		pfc_queue_conf, ret);

	return ret;
}

static int
eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	uint16_t i, num;

	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
	for (i = 0; i < num; i++) {
		if (reta_conf[i].mask)
			return 0;
	}

	return -EINVAL;
}

static int
eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size,
		uint16_t max_rxq)
{
	uint16_t i, idx, shift;

	if (max_rxq == 0) {
		RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
		    (reta_conf[idx].reta[shift] >= max_rxq)) {
			RTE_ETHDEV_LOG(ERR,
				"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
				idx, shift,
				reta_conf[idx].reta[shift], max_rxq);
			return -EINVAL;
		}
	}

	return 0;
}

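/*
 * The two helpers above address the RETA as an array of 64-entry groups:
 * entry i lives in group i / RTE_ETH_RETA_GROUP_SIZE at bit position
 * i % RTE_ETH_RETA_GROUP_SIZE, and is only considered when the matching
 * mask bit is set. A minimal sketch (illustrative) of building an update
 * that spreads reta_size entries round-robin over nb_queues Rx queues,
 * where reta_size and nb_queues are application-chosen values:
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[reta_size /
 *						  RTE_ETH_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < reta_size; i++) {
 *		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
 *		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
 *
 *		reta_conf[idx].mask |= RTE_BIT64(shift);
 *		reta_conf[idx].reta[shift] = i % nb_queues;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */
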
int
rte_eth_dev_rss_reta_update(uint16_t port_id,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	enum rte_eth_rx_mq_mode mq_mode;
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (reta_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot update ethdev port %u RSS RETA to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (reta_size == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot update ethdev port %u RSS RETA with zero size\n",
			port_id);
		return -EINVAL;
	}

	/* Check mask bits */
	ret = eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	/* Check entry value */
	ret = eth_check_reta_entry(reta_conf, reta_size,
				   dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
		return -ENOTSUP;
	}

	if (*dev->dev_ops->reta_update == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
							    reta_size));

	rte_ethdev_trace_rss_reta_update(port_id, reta_conf, reta_size, ret);

	return ret;
}

int
rte_eth_dev_rss_reta_query(uint16_t port_id,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (reta_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot query ethdev port %u RSS RETA from NULL config\n",
			port_id);
		return -EINVAL;
	}

	/* Check mask bits */
	ret = eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	if (*dev->dev_ops->reta_query == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
							   reta_size));

	rte_ethdev_trace_rss_reta_query(port_id, reta_conf, reta_size, ret);

	return ret;
}

int
rte_eth_dev_rss_hash_update(uint16_t port_id,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
	enum rte_eth_rx_mq_mode mq_mode;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rss_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot update ethdev port %u RSS hash from NULL config\n",
			port_id);
		return -EINVAL;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, rss_conf->rss_hf,
			dev_info.flow_type_rss_offloads);
		return -EINVAL;
	}

	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
		return -ENOTSUP;
	}

	if (*dev->dev_ops->rss_hash_update == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
								rss_conf));

	rte_ethdev_trace_rss_hash_update(port_id, rss_conf, ret);

	return ret;
}

int
rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
			      struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rss_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u RSS hash config to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->rss_hash_conf_get == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
								  rss_conf));

	rte_ethdev_trace_rss_hash_conf_get(port_id, rss_conf, ret);

	return ret;
}

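/*
 * Usage sketch (illustrative): narrowing the RSS hash to IP and TCP types.
 * rte_eth_dev_rss_hash_update() above rejects any rss_hf bit outside
 * dev_info.flow_type_rss_offloads, so a conservative caller masks first.
 * A NULL rss_key keeps the key currently programmed in the device.
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_rss_conf rss_conf = { .rss_key = NULL };
 *
 *	if (rte_eth_dev_info_get(port_id, &dev_info) == 0) {
 *		rss_conf.rss_hf = (RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP) &
 *				  dev_info.flow_type_rss_offloads;
 *		ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 *	}
 */
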
type\n"); 4605 return -EINVAL; 4606 } 4607 4608 if (*dev->dev_ops->udp_tunnel_port_add == NULL) 4609 return -ENOTSUP; 4610 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4611 udp_tunnel)); 4612 4613 rte_ethdev_trace_udp_tunnel_port_add(port_id, udp_tunnel, ret); 4614 4615 return ret; 4616 } 4617 4618 int 4619 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4620 struct rte_eth_udp_tunnel *udp_tunnel) 4621 { 4622 struct rte_eth_dev *dev; 4623 int ret; 4624 4625 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4626 dev = &rte_eth_devices[port_id]; 4627 4628 if (udp_tunnel == NULL) { 4629 RTE_ETHDEV_LOG(ERR, 4630 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4631 port_id); 4632 return -EINVAL; 4633 } 4634 4635 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4636 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4637 return -EINVAL; 4638 } 4639 4640 if (*dev->dev_ops->udp_tunnel_port_del == NULL) 4641 return -ENOTSUP; 4642 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4643 udp_tunnel)); 4644 4645 rte_ethdev_trace_udp_tunnel_port_delete(port_id, udp_tunnel, ret); 4646 4647 return ret; 4648 } 4649 4650 int 4651 rte_eth_led_on(uint16_t port_id) 4652 { 4653 struct rte_eth_dev *dev; 4654 int ret; 4655 4656 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4657 dev = &rte_eth_devices[port_id]; 4658 4659 if (*dev->dev_ops->dev_led_on == NULL) 4660 return -ENOTSUP; 4661 ret = eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4662 4663 rte_eth_trace_led_on(port_id, ret); 4664 4665 return ret; 4666 } 4667 4668 int 4669 rte_eth_led_off(uint16_t port_id) 4670 { 4671 struct rte_eth_dev *dev; 4672 int ret; 4673 4674 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4675 dev = &rte_eth_devices[port_id]; 4676 4677 if (*dev->dev_ops->dev_led_off == NULL) 4678 return -ENOTSUP; 4679 ret = eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4680 4681 rte_eth_trace_led_off(port_id, ret); 4682 4683 return ret; 4684 } 4685 4686 int 4687 rte_eth_fec_get_capability(uint16_t port_id, 4688 struct rte_eth_fec_capa *speed_fec_capa, 4689 unsigned int num) 4690 { 4691 struct rte_eth_dev *dev; 4692 int ret; 4693 4694 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4695 dev = &rte_eth_devices[port_id]; 4696 4697 if (speed_fec_capa == NULL && num > 0) { 4698 RTE_ETHDEV_LOG(ERR, 4699 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4700 port_id); 4701 return -EINVAL; 4702 } 4703 4704 if (*dev->dev_ops->fec_get_capability == NULL) 4705 return -ENOTSUP; 4706 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4707 4708 rte_eth_trace_fec_get_capability(port_id, speed_fec_capa, num, ret); 4709 4710 return ret; 4711 } 4712 4713 int 4714 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4715 { 4716 struct rte_eth_dev *dev; 4717 int ret; 4718 4719 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4720 dev = &rte_eth_devices[port_id]; 4721 4722 if (fec_capa == NULL) { 4723 RTE_ETHDEV_LOG(ERR, 4724 "Cannot get ethdev port %u current FEC mode to NULL\n", 4725 port_id); 4726 return -EINVAL; 4727 } 4728 4729 if (*dev->dev_ops->fec_get == NULL) 4730 return -ENOTSUP; 4731 ret = eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4732 4733 rte_eth_trace_fec_get(port_id, fec_capa, ret); 4734 4735 return ret; 4736 } 4737 4738 int 4739 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4740 { 4741 struct rte_eth_dev *dev; 4742 int ret; 4743 4744 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4745 dev 
int
rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->fec_set == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));

	rte_eth_trace_fec_set(port_id, fec_capa, ret);

	return ret;
}

/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return -1;

	for (i = 0; i < dev_info.max_mac_addrs; i++)
		if (memcmp(addr, &dev->data->mac_addrs[i],
				RTE_ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}

static const struct rte_ether_addr null_mac_addr;

int
rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (addr == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot add ethdev port %u MAC address from NULL address\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->mac_addr_add == NULL)
		return -ENOTSUP;

	if (rte_is_zero_ether_addr(addr)) {
		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}
	if (pool >= RTE_ETH_64_POOLS) {
		RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
		return -EINVAL;
	}

	index = eth_dev_get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
				port_id);
			return -ENOSPC;
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool are already there, and do nothing */
		if (pool_mask & RTE_BIT64(pool))
			return 0;
	}

	/* Update NIC */
	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	if (ret == 0) {
		/* Update address in NIC data structure */
		rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);

		/* Update pool bitmap in NIC data structure */
		dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
	}

	ret = eth_err(port_id, ret);

	rte_ethdev_trace_mac_addr_add(port_id, addr, pool, ret);

	return ret;
}

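/*
 * Usage sketch (illustrative): adding a secondary unicast address to
 * pool 0. The address bytes are examples (a locally administered address);
 * a real caller would treat -ENOSPC against dev_info.max_mac_addrs.
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	ret = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 */
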
int
rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int index;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (addr == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot remove ethdev port %u MAC address from NULL address\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->mac_addr_remove == NULL)
		return -ENOTSUP;

	index = eth_dev_get_mac_addr_index(port_id, addr);
	if (index == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u: Cannot remove default MAC address\n",
			port_id);
		return -EADDRINUSE;
	} else if (index < 0)
		return 0;  /* Do nothing if address wasn't found */

	/* Update NIC */
	(*dev->dev_ops->mac_addr_remove)(dev, index);

	/* Update address in NIC data structure */
	rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

	/* reset pool bitmap */
	dev->data->mac_pool_sel[index] = 0;

	rte_ethdev_trace_mac_addr_remove(port_id, addr);

	return 0;
}

int
rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (addr == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u default MAC address from NULL address\n",
			port_id);
		return -EINVAL;
	}

	if (!rte_is_valid_assigned_ether_addr(addr))
		return -EINVAL;

	if (*dev->dev_ops->mac_addr_set == NULL)
		return -ENOTSUP;

	ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
	if (ret < 0)
		return ret;

	/* Update default address in NIC data structure */
	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);

	rte_ethdev_trace_default_mac_addr_set(port_id, addr);

	return 0;
}

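/*
 * Usage sketch (illustrative): replacing the default (index 0) address.
 * rte_eth_dev_default_mac_addr_set() above rejects group and zero
 * addresses via rte_is_valid_assigned_ether_addr(). The address bytes
 * here are an example only.
 *
 *	struct rte_ether_addr new_mac = {
 *		.addr_bytes = { 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee },
 *	};
 *
 *	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
 */
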
/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
eth_dev_get_hash_mac_addr_index(uint16_t port_id,
		const struct rte_ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return -1;

	if (!dev->data->hash_mac_addrs)
		return -1;

	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
				RTE_ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}

int
rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
				uint8_t on)
{
	int index;
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (addr == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u unicast hash table from NULL address\n",
			port_id);
		return -EINVAL;
	}

	if (rte_is_zero_ether_addr(addr)) {
		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}

	index = eth_dev_get_hash_mac_addr_index(port_id, addr);
	/* Check if it's already there, and do nothing */
	if ((index >= 0) && on)
		return 0;

	if (index < 0) {
		if (!on) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u: the MAC address was not set in UTA\n",
				port_id);
			return -EINVAL;
		}

		index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
				port_id);
			return -ENOSPC;
		}
	}

	if (*dev->dev_ops->uc_hash_table_set == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
	if (ret == 0) {
		/* Update address in NIC data structure */
		if (on)
			rte_ether_addr_copy(addr,
					&dev->data->hash_mac_addrs[index]);
		else
			rte_ether_addr_copy(&null_mac_addr,
					&dev->data->hash_mac_addrs[index]);
	}

	ret = eth_err(port_id, ret);

	rte_ethdev_trace_uc_hash_table_set(port_id, on, ret);

	return ret;
}

int
rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->uc_all_hash_table_set == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, on));

	rte_ethdev_trace_uc_all_hash_table_set(port_id, on, ret);

	return ret;
}

int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
					uint32_t tx_rate)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	link = dev->data->dev_link;

	/* queue indices are zero based, so the maximum count is excluded */
	if (queue_idx >= dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR,
			"Set queue rate limit: port %u: invalid queue ID=%u\n",
			port_id, queue_idx);
		return -EINVAL;
	}

	if (tx_rate > link.link_speed) {
		RTE_ETHDEV_LOG(ERR,
			"Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n",
			tx_rate, link.link_speed);
		return -EINVAL;
	}

	if (*dev->dev_ops->set_queue_rate_limit == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
							queue_idx, tx_rate));

	rte_eth_trace_set_queue_rate_limit(port_id, queue_idx, tx_rate, ret);

	return ret;
}

int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
			       uint8_t avail_thresh)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR,
			"Set queue avail thresh: port %u: invalid queue ID=%u.\n",
			port_id, queue_id);
		return -EINVAL;
	}

	if (avail_thresh > 99) {
		RTE_ETHDEV_LOG(ERR,
			"Set queue avail thresh: port %u: threshold should be <= 99.\n",
			port_id);
		return -EINVAL;
	}
	if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev,
							queue_id, avail_thresh));

	rte_eth_trace_rx_avail_thresh_set(port_id, queue_id, avail_thresh, ret);

	return ret;
}

int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
				 uint8_t *avail_thresh)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id == NULL)
		return -EINVAL;
	if (*queue_id >= dev->data->nb_rx_queues)
		*queue_id = 0;

	if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev,
							queue_id, avail_thresh));

	rte_eth_trace_rx_avail_thresh_query(port_id, *queue_id, ret);

	return ret;
}

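/*
 * Usage sketch (illustrative): arming a 50% availability threshold on one
 * Rx queue, then locating a queue that crossed it from the application's
 * RTE_ETH_EVENT_RX_AVAIL_THRESH event handler. The reading that a driver
 * reports a matching queue with a positive return and updates *queue_id
 * is the editor's interpretation of the query contract, not a guarantee.
 *
 *	(void)rte_eth_rx_avail_thresh_set(port_id, 0, 50);
 *
 *	-- later, in the event handler:
 *	uint16_t qid = 0;
 *	uint8_t thresh;
 *
 *	if (rte_eth_rx_avail_thresh_query(port_id, &qid, &thresh) > 0)
 *		... qid now names a queue past its threshold ...
 */
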
RTE_INIT(eth_dev_init_fp_ops)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
		eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
}

RTE_INIT(eth_dev_init_cb_lists)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
}

int
rte_eth_dev_callback_register(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;
	uint16_t next_port;
	uint16_t last_port;

	if (cb_fn == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot register ethdev port %u callback from NULL\n",
			port_id);
		return -EINVAL;
	}

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];

		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
			if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event) {
				break;
			}
		}

		/* create a new callback. */
		if (user_cb == NULL) {
			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_eth_dev_callback), 0);
			if (user_cb != NULL) {
				user_cb->cb_fn = cb_fn;
				user_cb->cb_arg = cb_arg;
				user_cb->event = event;
				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
						  user_cb, next);
			} else {
				rte_spinlock_unlock(&eth_dev_cb_lock);
				rte_eth_dev_callback_unregister(port_id, event,
								cb_fn, cb_arg);
				return -ENOMEM;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&eth_dev_cb_lock);

	rte_ethdev_trace_callback_register(port_id, event, cb_fn, cb_arg);

	return 0;
}

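/*
 * Usage sketch (illustrative): registering one handler for link state
 * changes on all ports at once. The handler follows the rte_eth_dev_cb_fn
 * signature; its body here is an example only.
 *
 *	static int
 *	link_cb(uint16_t port, enum rte_eth_event_type ev, void *arg,
 *		void *ret_param)
 *	{
 *		RTE_SET_USED(arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: link event %d\n", port, (int)ev);
 *		return 0;
 *	}
 *
 *	ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
 *			RTE_ETH_EVENT_INTR_LSC, link_cb, NULL);
 */
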
int
rte_eth_dev_callback_unregister(uint16_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;
	uint16_t next_port;
	uint16_t last_port;

	if (cb_fn == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot unregister ethdev port %u callback from NULL\n",
			port_id);
		return -EINVAL;
	}

	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
		return -EINVAL;
	}

	if (port_id == RTE_ETH_ALL) {
		next_port = 0;
		last_port = RTE_MAX_ETHPORTS - 1;
	} else {
		next_port = last_port = port_id;
	}

	rte_spinlock_lock(&eth_dev_cb_lock);

	do {
		dev = &rte_eth_devices[next_port];
		ret = 0;
		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
		     cb = next) {

			next = TAILQ_NEXT(cb, next);

			if (cb->cb_fn != cb_fn || cb->event != event ||
			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
				continue;

			/*
			 * if this callback is not executing right now,
			 * then remove it.
			 */
			if (cb->active == 0) {
				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
				rte_free(cb);
			} else {
				ret = -EAGAIN;
			}
		}
	} while (++next_port <= last_port);

	rte_spinlock_unlock(&eth_dev_cb_lock);

	rte_ethdev_trace_callback_unregister(port_id, event, cb_fn, cb_arg,
					     ret);

	return ret;
}

int
rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	uint16_t qid;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->intr_handle) {
		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
		return -EPERM;
	}

	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		vec = rte_intr_vec_list_index_get(intr_handle, qid);
		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);

		rte_ethdev_trace_rx_intr_ctl(port_id, qid, epfd, op, data, rc);

		if (rc && rc != -EEXIST) {
			RTE_ETHDEV_LOG(ERR,
				"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
				port_id, qid, op, epfd, vec);
		}
	}

	return 0;
}

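/*
 * Usage sketch (illustrative): wiring every Rx queue interrupt of a port
 * into the calling thread's epoll instance (RTE_EPOLL_PER_THREAD). The
 * encoding of the user data is application defined; passing the port id
 * is just one possible convention.
 *
 *	ret = rte_eth_dev_rx_intr_ctl(port_id, RTE_EPOLL_PER_THREAD,
 *				      RTE_INTR_EVENT_ADD,
 *				      (void *)(uintptr_t)port_id);
 */
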
int
rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
{
	struct rte_intr_handle *intr_handle;
	struct rte_eth_dev *dev;
	unsigned int efd_idx;
	uint32_t vec;
	int fd;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
		return -1;
	}

	if (!dev->intr_handle) {
		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
		return -1;
	}

	intr_handle = dev->intr_handle;
	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
		return -1;
	}

	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
	fd = rte_intr_efds_index_get(intr_handle, efd_idx);

	rte_ethdev_trace_rx_intr_ctl_q_get_fd(port_id, queue_id, fd);

	return fd;
}

int
rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
			  int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (!dev->intr_handle) {
		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
		return -EPERM;
	}

	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);

	rte_ethdev_trace_rx_intr_ctl_q(port_id, queue_id, epfd, op, data, rc);

	if (rc && rc != -EEXIST) {
		RTE_ETHDEV_LOG(ERR,
			"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
			port_id, queue_id, op, epfd, vec);
		return rc;
	}

	return 0;
}

int
rte_eth_dev_rx_intr_enable(uint16_t port_id,
			   uint16_t queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_intr_enable == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));

	rte_ethdev_trace_rx_intr_enable(port_id, queue_id, ret);

	return ret;
}

int
rte_eth_dev_rx_intr_disable(uint16_t port_id,
			    uint16_t queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_intr_disable == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));

	rte_ethdev_trace_rx_intr_disable(port_id, queue_id, ret);

	return ret;
}

const struct rte_eth_rxtx_callback *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	struct rte_eth_dev *dev;

	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}
	dev = &rte_eth_devices[port_id];
	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
		rte_errno = EINVAL;
		return NULL;
	}
	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&eth_dev_rx_cb_lock);
	/* Add the callbacks in FIFO order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

	if (!tail) {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(
			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
			cb, __ATOMIC_RELEASE);

	} else {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	}
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	rte_eth_trace_add_rx_callback(port_id, queue_id, fn, user_param, cb);

	return cb;
}

const struct rte_eth_rxtx_callback *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&eth_dev_rx_cb_lock);
	/* Add the callbacks at first position */
	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
	/* Stores to cb->fn, cb->param and cb->next should complete before
	 * cb is visible to data plane threads.
	 */
	__atomic_store_n(
		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
		cb, __ATOMIC_RELEASE);
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param,
					    cb);

	return cb;
}

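/*
 * Usage sketch (illustrative): attaching a post-Rx callback that counts
 * received packets, and detaching it later. Note that removal only
 * unlinks the callback; freeing it safely is the caller's job once no
 * rte_eth_rx_burst() can still be executing it (see the remove functions
 * further below).
 *
 *	static uint16_t
 *	count_cb(uint16_t port, uint16_t queue, struct rte_mbuf **pkts,
 *		 uint16_t nb, uint16_t max, void *arg)
 *	{
 *		RTE_SET_USED(port); RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts); RTE_SET_USED(max);
 *		*(uint64_t *)arg += nb;
 *		return nb;
 *	}
 *
 *	static uint64_t total;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_cb, &total);
 *	...
 *	ret = rte_eth_remove_rx_callback(port_id, 0, cb);
 */
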
const struct rte_eth_rxtx_callback *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	struct rte_eth_dev *dev;

	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	dev = &rte_eth_devices[port_id];
	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.tx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&eth_dev_tx_cb_lock);
	/* Add the callbacks in FIFO order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

	if (!tail) {
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(
			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
			cb, __ATOMIC_RELEASE);

	} else {
		while (tail->next)
			tail = tail->next;
		/* Stores to cb->fn and cb->param should complete before
		 * cb is visible to data plane.
		 */
		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	}
	rte_spinlock_unlock(&eth_dev_tx_cb_lock);

	rte_eth_trace_add_tx_callback(port_id, queue_id, fn, user_param, cb);

	return cb;
}

int
rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;

	rte_spinlock_lock(&eth_dev_rx_cb_lock);
	prev_cb = &dev->post_rx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&eth_dev_rx_cb_lock);

	rte_eth_trace_remove_rx_callback(port_id, queue_id, user_cb, ret);

	return ret;
}

int
rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret = -EINVAL;
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;

	rte_spinlock_lock(&eth_dev_tx_cb_lock);
	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&eth_dev_tx_cb_lock);

	rte_eth_trace_remove_tx_callback(port_id, queue_id, user_cb, ret);

	return ret;
}

int
rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (qinfo == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
			port_id, queue_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues == NULL ||
			dev->data->rx_queues[queue_id] == NULL) {
		RTE_ETHDEV_LOG(ERR,
			       "Rx queue %"PRIu16" of device with port_id=%"
			       PRIu16" has not been setup\n",
			       queue_id, port_id);
		return -EINVAL;
	}

	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
			queue_id, port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->rxq_info_get == NULL)
		return -ENOTSUP;

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
	qinfo->queue_state = dev->data->rx_queue_state[queue_id];

	rte_eth_trace_rx_queue_info_get(port_id, queue_id, qinfo);

	return 0;
}

int
rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (qinfo == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
			port_id, queue_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues == NULL ||
			dev->data->tx_queues[queue_id] == NULL) {
		RTE_ETHDEV_LOG(ERR,
			       "Tx queue %"PRIu16" of device with port_id=%"
			       PRIu16" has not been setup\n",
			       queue_id, port_id);
		return -EINVAL;
	}

	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
			queue_id, port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->txq_info_get == NULL)
		return -ENOTSUP;

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
	qinfo->queue_state = dev->data->tx_queue_state[queue_id];

	rte_eth_trace_tx_queue_info_get(port_id, queue_id, qinfo);

	return 0;
}

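/*
 * Usage sketch (illustrative): reading back the effective descriptor
 * count of Rx queue 0 after setup, which is how applications discover
 * what the PMD actually allocated.
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *		printf("rxq0: %u descriptors, scattered=%u\n",
 *		       qinfo.nb_desc, qinfo.scattered_rx);
 */
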
int
rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
			  struct rte_eth_burst_mode *mode)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (mode == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
			port_id, queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->rx_burst_mode_get == NULL)
		return -ENOTSUP;
	memset(mode, 0, sizeof(*mode));
	ret = eth_err(port_id,
		      dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));

	rte_eth_trace_rx_burst_mode_get(port_id, queue_id, mode, ret);

	return ret;
}

int
rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
			  struct rte_eth_burst_mode *mode)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (mode == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
			port_id, queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->tx_burst_mode_get == NULL)
		return -ENOTSUP;
	memset(mode, 0, sizeof(*mode));
	ret = eth_err(port_id,
		      dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));

	rte_eth_trace_tx_burst_mode_get(port_id, queue_id, mode, ret);

	return ret;
}

int
rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
		struct rte_power_monitor_cond *pmc)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (pmc == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
			port_id, queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_monitor_addr == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));

	rte_eth_trace_get_monitor_addr(port_id, queue_id, pmc, ret);

	return ret;
}

int
rte_eth_dev_set_mc_addr_list(uint16_t port_id,
			     struct rte_ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->set_mc_addr_list == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
					mc_addr_set, nb_mc_addr));

	rte_ethdev_trace_set_mc_addr_list(port_id, mc_addr_set, nb_mc_addr,
					  ret);

	return ret;
}

int
rte_eth_timesync_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->timesync_enable == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));

	rte_eth_trace_timesync_enable(port_id, ret);

	return ret;
}

int
rte_eth_timesync_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->timesync_disable == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));

	rte_eth_trace_timesync_disable(port_id, ret);

	return ret;
}

int
rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (timestamp == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot read ethdev port %u Rx timestamp to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->timesync_read_rx_timestamp == NULL)
		return -ENOTSUP;

	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
		(dev, timestamp, flags));

	rte_eth_trace_timesync_read_rx_timestamp(port_id, timestamp, flags,
						 ret);

	return ret;
}

int
rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
				   struct timespec *timestamp)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (timestamp == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot read ethdev port %u Tx timestamp to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->timesync_read_tx_timestamp == NULL)
		return -ENOTSUP;

	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
		(dev, timestamp));

	rte_eth_trace_timesync_read_tx_timestamp(port_id, timestamp, ret);

	return ret;
}

int
rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->timesync_adjust_time == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));

	rte_eth_trace_timesync_adjust_time(port_id, delta, ret);

	return ret;
}

int
rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (timestamp == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot read ethdev port %u timesync time to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->timesync_read_time == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
								   timestamp));

	rte_eth_trace_timesync_read_time(port_id, timestamp, ret);

	return ret;
}

int
rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (timestamp == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot write ethdev port %u timesync from NULL time\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->timesync_write_time == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
								    timestamp));

	rte_eth_trace_timesync_write_time(port_id, timestamp, ret);

	return ret;
}

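/*
 * Usage sketch (illustrative): a PTP-style correction step built from the
 * primitives above -- read the device clock, derive an offset against a
 * reference clock by some external means, then slew it. delta_ns is a
 * hypothetical value computed by the application.
 *
 *	struct timespec ts;
 *
 *	if (rte_eth_timesync_read_time(port_id, &ts) == 0)
 *		ret = rte_eth_timesync_adjust_time(port_id, delta_ns);
 */
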
int
rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (clock == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->read_clock == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));

	rte_eth_trace_read_clock(port_id, clock, ret);

	return ret;
}

int
rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u register info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_reg == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));

	rte_ethdev_trace_get_reg_info(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->get_eeprom_length == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));

	rte_ethdev_trace_get_eeprom_length(port_id, ret);

	return ret;
}

int
rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u EEPROM info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_eeprom == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));

	rte_ethdev_trace_get_eeprom(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u EEPROM from NULL info\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->set_eeprom == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));

	rte_ethdev_trace_set_eeprom(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_get_module_info(uint16_t port_id,
			    struct rte_eth_dev_module_info *modinfo)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (modinfo == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u EEPROM module info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_module_info == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->get_module_info)(dev, modinfo);

	rte_ethdev_trace_get_module_info(port_id, modinfo, ret);

	return ret;
}

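/*
 * Usage sketch (illustrative): dumping a transceiver EEPROM. The module
 * info call reports the expected layout and length, which then sizes the
 * read done by rte_eth_dev_get_module_eeprom() below. Error handling and
 * freeing of the buffer are left to the caller.
 *
 *	struct rte_eth_dev_module_info modinfo;
 *	struct rte_dev_eeprom_info info = { .offset = 0 };
 *
 *	if (rte_eth_dev_get_module_info(port_id, &modinfo) == 0) {
 *		info.length = modinfo.eeprom_len;
 *		info.data = malloc(info.length);
 *		if (info.data != NULL)
 *			ret = rte_eth_dev_get_module_eeprom(port_id, &info);
 *	}
 */
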
int
rte_eth_dev_get_module_eeprom(uint16_t port_id,
			      struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (info->data == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM data to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (info->length == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM into a zero-length buffer\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_module_eeprom == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->get_module_eeprom)(dev, info);

	rte_ethdev_trace_get_module_eeprom(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dcb_info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u DCB info to NULL\n",
			port_id);
		return -EINVAL;
	}

	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	if (*dev->dev_ops->get_dcb_info == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));

	rte_ethdev_trace_get_dcb_info(port_id, dcb_info, ret);

	return ret;
}

static void
eth_dev_adjust_nb_desc(uint16_t *nb_desc,
		const struct rte_eth_desc_lim *desc_lim)
{
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}

int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (nb_rx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	rte_ethdev_trace_adjust_nb_rx_tx_desc(port_id);

	return 0;
}

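/*
 * Usage sketch (illustrative): clamping requested ring sizes before queue
 * setup. eth_dev_adjust_nb_desc() above aligns the request up to nb_align
 * and clamps it into [nb_min, nb_max], so the values handed to queue
 * setup are always legal for the device. 4096 is an example request.
 *
 *	uint16_t nb_rxd = 4096, nb_txd = 4096;
 *
 *	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	if (ret == 0)
 *		... use nb_rxd/nb_txd in the queue setup calls ...
 */
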
"Cannot test ethdev port %u mempool operation from NULL pool\n", 6352 port_id); 6353 return -EINVAL; 6354 } 6355 6356 if (*dev->dev_ops->pool_ops_supported == NULL) 6357 return 1; /* all pools are supported */ 6358 6359 ret = (*dev->dev_ops->pool_ops_supported)(dev, pool); 6360 6361 rte_ethdev_trace_pool_ops_supported(port_id, pool, ret); 6362 6363 return ret; 6364 } 6365 6366 static int 6367 eth_dev_handle_port_list(const char *cmd __rte_unused, 6368 const char *params __rte_unused, 6369 struct rte_tel_data *d) 6370 { 6371 int port_id; 6372 6373 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 6374 RTE_ETH_FOREACH_DEV(port_id) 6375 rte_tel_data_add_array_int(d, port_id); 6376 return 0; 6377 } 6378 6379 static void 6380 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 6381 const char *stat_name) 6382 { 6383 int q; 6384 struct rte_tel_data *q_data = rte_tel_data_alloc(); 6385 if (q_data == NULL) 6386 return; 6387 rte_tel_data_start_array(q_data, RTE_TEL_UINT_VAL); 6388 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 6389 rte_tel_data_add_array_uint(q_data, q_stats[q]); 6390 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 6391 } 6392 6393 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_uint(d, #s, stats.s) 6394 6395 static int 6396 eth_dev_handle_port_stats(const char *cmd __rte_unused, 6397 const char *params, 6398 struct rte_tel_data *d) 6399 { 6400 struct rte_eth_stats stats; 6401 int port_id, ret; 6402 6403 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6404 return -1; 6405 6406 port_id = atoi(params); 6407 if (!rte_eth_dev_is_valid_port(port_id)) 6408 return -1; 6409 6410 ret = rte_eth_stats_get(port_id, &stats); 6411 if (ret < 0) 6412 return -1; 6413 6414 rte_tel_data_start_dict(d); 6415 ADD_DICT_STAT(stats, ipackets); 6416 ADD_DICT_STAT(stats, opackets); 6417 ADD_DICT_STAT(stats, ibytes); 6418 ADD_DICT_STAT(stats, obytes); 6419 ADD_DICT_STAT(stats, imissed); 6420 ADD_DICT_STAT(stats, ierrors); 6421 ADD_DICT_STAT(stats, oerrors); 6422 ADD_DICT_STAT(stats, rx_nombuf); 6423 eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets"); 6424 eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets"); 6425 eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes"); 6426 eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes"); 6427 eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors"); 6428 6429 return 0; 6430 } 6431 6432 static int 6433 eth_dev_handle_port_xstats(const char *cmd __rte_unused, 6434 const char *params, 6435 struct rte_tel_data *d) 6436 { 6437 struct rte_eth_xstat *eth_xstats; 6438 struct rte_eth_xstat_name *xstat_names; 6439 int port_id, num_xstats; 6440 int i, ret; 6441 char *end_param; 6442 6443 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6444 return -1; 6445 6446 port_id = strtoul(params, &end_param, 0); 6447 if (*end_param != '\0') 6448 RTE_ETHDEV_LOG(NOTICE, 6449 "Extra parameters passed to ethdev telemetry command, ignoring"); 6450 if (!rte_eth_dev_is_valid_port(port_id)) 6451 return -1; 6452 6453 num_xstats = rte_eth_xstats_get(port_id, NULL, 0); 6454 if (num_xstats < 0) 6455 return -1; 6456 6457 /* use one malloc for both names and stats */ 6458 eth_xstats = malloc((sizeof(struct rte_eth_xstat) + 6459 sizeof(struct rte_eth_xstat_name)) * num_xstats); 6460 if (eth_xstats == NULL) 6461 return -1; 6462 xstat_names = (void *)ð_xstats[num_xstats]; 6463 6464 ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats); 6465 if (ret < 0 || ret > num_xstats) { 6466 
static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
			   const char *params,
			   struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_uint(d, xstat_names[i].name,
					   eth_xstats[i].value);
	free(eth_xstats);
	return 0;
}

#ifndef RTE_EXEC_ENV_WINDOWS
static int
eth_dev_handle_port_dump_priv(const char *cmd __rte_unused,
			      const char *params,
			      struct rte_tel_data *d)
{
	char *buf, *end_param;
	int port_id, ret;
	FILE *f;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -EINVAL;

	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
	if (buf == NULL)
		return -ENOMEM;

	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
	if (f == NULL) {
		free(buf);
		return -EINVAL;
	}

	ret = rte_eth_dev_priv_dump(port_id, f);
	fclose(f);
	if (ret == 0) {
		rte_tel_data_start_dict(d);
		rte_tel_data_string(d, buf);
	}

	free(buf);
	return 0;
}
#endif /* !RTE_EXEC_ENV_WINDOWS */

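/*
 * Note on eth_dev_handle_port_dump_priv() above: the driver dump callback
 * writes to a FILE *, while a telemetry reply must be a string, so
 * fmemopen(3) is used to capture the dump in a bounded in-memory buffer;
 * the stream is sized one byte short of the allocation so the result
 * always stays NUL-terminated.
 */
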
6559 "full-duplex" : "half-duplex"); 6560 return 0; 6561 } 6562 6563 static int 6564 eth_dev_handle_port_info(const char *cmd __rte_unused, 6565 const char *params, 6566 struct rte_tel_data *d) 6567 { 6568 struct rte_tel_data *rxq_state, *txq_state; 6569 char mac_addr[RTE_ETHER_ADDR_FMT_SIZE]; 6570 struct rte_eth_dev *eth_dev; 6571 char *end_param; 6572 int port_id, i; 6573 6574 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6575 return -1; 6576 6577 port_id = strtoul(params, &end_param, 0); 6578 if (*end_param != '\0') 6579 RTE_ETHDEV_LOG(NOTICE, 6580 "Extra parameters passed to ethdev telemetry command, ignoring"); 6581 6582 if (!rte_eth_dev_is_valid_port(port_id)) 6583 return -EINVAL; 6584 6585 eth_dev = &rte_eth_devices[port_id]; 6586 6587 rxq_state = rte_tel_data_alloc(); 6588 if (!rxq_state) 6589 return -ENOMEM; 6590 6591 txq_state = rte_tel_data_alloc(); 6592 if (!txq_state) { 6593 rte_tel_data_free(rxq_state); 6594 return -ENOMEM; 6595 } 6596 6597 rte_tel_data_start_dict(d); 6598 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 6599 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 6600 rte_tel_data_add_dict_int(d, "nb_rx_queues", 6601 eth_dev->data->nb_rx_queues); 6602 rte_tel_data_add_dict_int(d, "nb_tx_queues", 6603 eth_dev->data->nb_tx_queues); 6604 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 6605 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 6606 rte_tel_data_add_dict_uint(d, "rx_mbuf_size_min", 6607 eth_dev->data->min_rx_buf_size); 6608 rte_tel_data_add_dict_uint(d, "rx_mbuf_alloc_fail", 6609 eth_dev->data->rx_mbuf_alloc_failed); 6610 rte_ether_format_addr(mac_addr, sizeof(mac_addr), 6611 eth_dev->data->mac_addrs); 6612 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 6613 rte_tel_data_add_dict_int(d, "promiscuous", 6614 eth_dev->data->promiscuous); 6615 rte_tel_data_add_dict_int(d, "scattered_rx", 6616 eth_dev->data->scattered_rx); 6617 rte_tel_data_add_dict_int(d, "all_multicast", 6618 eth_dev->data->all_multicast); 6619 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 6620 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 6621 rte_tel_data_add_dict_int(d, "dev_configured", 6622 eth_dev->data->dev_configured); 6623 6624 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 6625 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 6626 rte_tel_data_add_array_int(rxq_state, 6627 eth_dev->data->rx_queue_state[i]); 6628 6629 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 6630 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 6631 rte_tel_data_add_array_int(txq_state, 6632 eth_dev->data->tx_queue_state[i]); 6633 6634 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 6635 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 6636 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 6637 rte_tel_data_add_dict_uint_hex(d, "dev_flags", 6638 eth_dev->data->dev_flags, 0); 6639 rte_tel_data_add_dict_uint_hex(d, "rx_offloads", 6640 eth_dev->data->dev_conf.rxmode.offloads, 0); 6641 rte_tel_data_add_dict_uint_hex(d, "tx_offloads", 6642 eth_dev->data->dev_conf.txmode.offloads, 0); 6643 rte_tel_data_add_dict_uint_hex(d, "ethdev_rss_hf", 6644 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf, 0); 6645 6646 return 0; 6647 } 6648 6649 int 6650 rte_eth_representor_info_get(uint16_t port_id, 6651 struct rte_eth_representor_info *info) 6652 { 6653 struct rte_eth_dev *dev; 6654 int ret; 6655 6656 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6657 
int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->representor_info_get == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));

	rte_eth_trace_representor_info_get(port_id, info, ret);

	return ret;
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (ID=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->rx_metadata_negotiate == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		      (*dev->dev_ops->rx_metadata_negotiate)(dev, features));

	rte_eth_trace_rx_metadata_negotiate(port_id, *features, ret);

	return ret;
}

int
rte_eth_ip_reassembly_capability_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *reassembly_capa)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot get IP reassembly capability\n",
			port_id);
		return -EINVAL;
	}

	if (reassembly_capa == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
		return -ENOTSUP;
	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));

	ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
		      (dev, reassembly_capa));

	rte_eth_trace_ip_reassembly_capability_get(port_id, reassembly_capa,
						   ret);

	return ret;
}

int
rte_eth_ip_reassembly_conf_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot get IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
		return -ENOTSUP;
	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
	ret = eth_err(port_id,
		      (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_get(port_id, conf, ret);

	return ret;
}

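/*
 * Illustrative configuration flow for the IP reassembly offload, which
 * must happen after rte_eth_dev_configure() but before
 * rte_eth_dev_start(); here the reported capability is simply reused as
 * the configuration passed to rte_eth_ip_reassembly_conf_set() below:
 *
 *	struct rte_eth_ip_reassembly_params capa;
 *
 *	if (rte_eth_ip_reassembly_capability_get(port_id, &capa) == 0)
 *		rte_eth_ip_reassembly_conf_set(port_id, &capa);
 */
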
int
rte_eth_ip_reassembly_conf_set(uint16_t port_id,
		const struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot set IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u started,\n"
			"cannot configure IP reassembly params.\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid IP reassembly configuration (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		      (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_set(port_id, conf, ret);

	return ret;
}

int
rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_dev_priv_dump == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
}

int
rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset, uint16_t num, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_rx_descriptor_dump == NULL)
		return -ENOTSUP;

	return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev,
			queue_id, offset, num, file));
}

int
rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset, uint16_t num, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_tx_descriptor_dump == NULL)
		return -ENOTSUP;

	return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev,
			queue_id, offset, num, file));
}

int
rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_types;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (ptypes == NULL && num > 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u supported header protocol types to NULL when array size is non zero\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL)
		return -ENOTSUP;
	all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev);

	if (all_types == NULL)
		return 0;

	for (i = 0, j = 0; all_types[i] != RTE_PTYPE_UNKNOWN; ++i) {
		if (j < num) {
			ptypes[j] = all_types[i];

			rte_eth_trace_buffer_split_get_supported_hdr_ptypes(
					port_id, j, ptypes[j]);
		}
		j++;
	}

	return j;
}

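/*
 * As with other ptype queries, a caller of
 * rte_eth_buffer_split_get_supported_hdr_ptypes() above that does not
 * know the required array size can make a first call with num == 0 and
 * use the return value to size the array. A minimal sketch, error
 * handling elided:
 *
 *	int n = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id,
 *			NULL, 0);
 *	uint32_t *ptypes = calloc(n, sizeof(*ptypes));
 *	n = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id,
 *			ptypes, n);
 */
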
RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
#ifndef RTE_EXEC_ENV_WINDOWS
	rte_telemetry_register_cmd("/ethdev/dump_priv", eth_dev_handle_port_dump_priv,
			"Returns a dump of the private info for a port. Parameters: int port_id");
#endif
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
			"Returns the device info for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/module_eeprom", eth_dev_handle_port_module_eeprom,
			"Returns module EEPROM info with SFF specs. Parameters: int port_id");
}
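
/*
 * Note: eth_dev_handle_port_module_eeprom() is not defined in this file;
 * it is provided by the SFF telemetry module (see sff_telemetry.h). The
 * commands registered above can be exercised with the
 * usertools/dpdk-telemetry.py client, e.g. "/ethdev/list".
 */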