/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <bus_driver.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_kvargs.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev.h"
#include "rte_ethdev_trace_fp.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "ethdev_trace.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
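
/*
 * Consumption sketch (illustrative only, not part of this file): a basic
 * stat in the tables above is resolved by adding the stored offset to the
 * base of a struct rte_eth_stats and reading a uint64_t; "i" is a
 * hypothetical index into the table:
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0) {
 *		uint64_t v = *(const uint64_t *)RTE_PTR_ADD(&stats,
 *				eth_dev_stats_strings[i].offset);
 *		printf("%s: %" PRIu64 "\n",
 *			eth_dev_stats_strings[i].name, v);
 *	}
 */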

#define RTE_RX_OFFLOAD_BIT2STR(_name) \
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name) \
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle the pure class filter (i.e. without any bus-level argument),
	 * from the future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * which is why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of the old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to the "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to the new syntax for use with the new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);

	rte_eth_trace_iterator_init(devargs_str);

	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not a pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in the middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device matches the bus part, need to check the ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL) {
			uint16_t id = eth_dev_to_id(iter->class_device);

			rte_eth_trace_iterator_next(iter, id);

			return id; /* match */
		}
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */

	rte_eth_trace_iterator_cleanup(iter);

	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
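
/*
 * Usage sketch for the iterator API above (illustrative only; the devargs
 * string is an arbitrary example). This is essentially the loop that the
 * RTE_ETH_FOREACH_MATCHING_DEV() macro from rte_ethdev.h expands to:
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (port_id = rte_eth_iterator_next(&iter);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iter))
 *			printf("matched port %u\n", port_id);
 *		rte_eth_iterator_cleanup(&iter);
 *	}
 *
 * Note that rte_eth_iterator_next() already performs the cleanup itself once
 * iteration is exhausted, so the explicit cleanup only matters when the loop
 * is left early.
 */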
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	rte_eth_trace_find_next(port_id);

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_of(port_id, parent);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	uint16_t ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	ret = rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);

	rte_eth_trace_find_next_sibling(port_id, ref_port_id, ret);

	return ret;
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	int is_valid;

	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		is_valid = 0;
	else
		is_valid = 1;

	rte_ethdev_trace_is_valid_port(port_id, is_valid);

	return is_valid;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_owned_by(port_id, owner_id);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_new(*owner_id);

	return 0;
}
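
/*
 * Ownership lifecycle sketch (illustrative; "port_id" is assumed to be a
 * valid port, and rte_eth_dev_owner_set()/unset() are the public wrappers
 * declared in rte_ethdev.h and defined below):
 *
 *	uint64_t owner_id;
 *	struct rte_eth_dev_owner owner = { .name = "my_component" };
 *
 *	if (rte_eth_dev_owner_new(&owner_id) == 0) {
 *		owner.id = owner_id;
 *		if (rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *			... use the port exclusively; other components
 *			... iterating with RTE_ETH_FOREACH_DEV skip it ...
 *			rte_eth_dev_owner_unset(port_id, owner_id);
 *		}
 *	}
 */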
static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_set(port_id, owner, ret);

	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_unset(port_id, owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
					sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_delete(owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_get(port_id, owner);

	return 0;
}

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	int socket_id = SOCKET_ID_ANY;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_errno = EINVAL;
	} else {
		socket_id = rte_eth_devices[port_id].data->numa_node;
		if (socket_id == SOCKET_ID_ANY)
			rte_errno = 0;
	}

	rte_ethdev_trace_socket_id(port_id, socket_id);

	return socket_id;
}
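
/*
 * Caller-side note on rte_eth_dev_socket_id() (illustrative): SOCKET_ID_ANY
 * is -1, which is also what an invalid port returns, so the two cases are
 * told apart through rte_errno, which the function sets in both -1 paths:
 *
 *	int socket = rte_eth_dev_socket_id(port_id);
 *	if (socket == SOCKET_ID_ANY && rte_errno == EINVAL) {
 *		... invalid port ...
 *	} else if (socket == SOCKET_ID_ANY) {
 *		... port is valid but its NUMA node is unknown ...
 *	}
 */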
void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	void *ctx;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	ctx = rte_eth_devices[port_id].security_ctx;

	rte_ethdev_trace_get_sec_ctx(port_id, ctx);

	return ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	rte_ethdev_trace_count_avail(count);

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	rte_ethdev_trace_count_total(count);

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by a VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);

	rte_ethdev_trace_get_name_by_port(port_id, name);

	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;

			rte_ethdev_trace_get_port_by_name(name, *port_id);

			return 0;
		}

	return -ENODEV;
}

int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Rx queue_id=%u of device with port_id=%u\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been set up\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}
static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Tx queue_id=%u of device with port_id=%u\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been set up\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));

	rte_ethdev_trace_rx_queue_start(port_id, rx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

	rte_ethdev_trace_rx_queue_stop(port_id, rx_queue_id, ret);

	return ret;
}
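
/*
 * Usage sketch for per-queue start (illustrative): a queue set up with
 * rx_conf->rx_deferred_start = 1 is left stopped by rte_eth_dev_start() and
 * has to be started explicitly, and only after the port itself is started,
 * per the check above:
 *
 *	ret = rte_eth_dev_start(port_id);
 *	if (ret == 0)
 *		ret = rte_eth_dev_rx_queue_start(port_id, queue_id);
 */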
int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));

	rte_ethdev_trace_tx_queue_start(port_id, tx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

	rte_ethdev_trace_tx_queue_stop(port_id, tx_queue_id, ret);

	return ret;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	uint32_t ret;

	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		ret = duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
		break;
	case RTE_ETH_SPEED_NUM_100M:
		ret = duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
		break;
	case RTE_ETH_SPEED_NUM_1G:
		ret = RTE_ETH_LINK_SPEED_1G;
		break;
	case RTE_ETH_SPEED_NUM_2_5G:
		ret = RTE_ETH_LINK_SPEED_2_5G;
		break;
	case RTE_ETH_SPEED_NUM_5G:
		ret = RTE_ETH_LINK_SPEED_5G;
		break;
	case RTE_ETH_SPEED_NUM_10G:
		ret = RTE_ETH_LINK_SPEED_10G;
		break;
	case RTE_ETH_SPEED_NUM_20G:
		ret = RTE_ETH_LINK_SPEED_20G;
		break;
	case RTE_ETH_SPEED_NUM_25G:
		ret = RTE_ETH_LINK_SPEED_25G;
		break;
	case RTE_ETH_SPEED_NUM_40G:
		ret = RTE_ETH_LINK_SPEED_40G;
		break;
	case RTE_ETH_SPEED_NUM_50G:
		ret = RTE_ETH_LINK_SPEED_50G;
		break;
	case RTE_ETH_SPEED_NUM_56G:
		ret = RTE_ETH_LINK_SPEED_56G;
		break;
	case RTE_ETH_SPEED_NUM_100G:
		ret = RTE_ETH_LINK_SPEED_100G;
		break;
	case RTE_ETH_SPEED_NUM_200G:
		ret = RTE_ETH_LINK_SPEED_200G;
		break;
	case RTE_ETH_SPEED_NUM_400G:
		ret = RTE_ETH_LINK_SPEED_400G;
		break;
	default:
		ret = 0;
	}

	rte_eth_trace_speed_bitflag(speed, duplex, ret);

	return ret;
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_rx_offload_name(offload, name);

	return name;
}
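
/*
 * Decoding sketch (illustrative): the lookups above and below match one
 * single-bit flag at a time, so a combined offload mask has to be walked
 * bit by bit; "mask" is a hypothetical local:
 *
 *	uint64_t mask = dev->data->dev_conf.rxmode.offloads;
 *
 *	while (mask != 0) {
 *		uint64_t flag = RTE_BIT64(__builtin_ctzll(mask));
 *
 *		printf("%s\n", rte_eth_dev_rx_offload_name(flag));
 *		mask &= ~flag;
 *	}
 */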
const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_tx_offload_name(offload, name);

	return name;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_capability_name(capability, name);

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				" %u != %u is not allowed\n",
				port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"> max allowed value %u\n", port_id, config_size,
			dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"< min allowed value %u\n", port_id, config_size,
			(unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation is successful.
 *   - (-EINVAL) if a requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
		uint64_t set_offloads, const char *offload_type,
		const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if an offload could not be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}
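
/*
 * Worked example for the validation above (illustrative numbers): with the
 * default Ethernet overhead of RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN =
 * 14 + 4 bytes, an MTU of 1500 gives frame_size = 1500 + 18 = 1518 bytes
 * (RTE_ETHER_MAX_LEN). A driver reporting max_rx_pktlen = 9618 and
 * max_mtu = 9600 instead yields overhead_len = 9618 - 9600 = 18, i.e. the
 * per-device overhead (VLAN tags etc.) is taken from the driver whenever
 * it reports one.
 */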
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure(), to avoid any non-anticipated behaviour.
	 * It is set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store the original config, as rollback is required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, so copy it before the dev_info get.
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If the number of queues specified by the application for both Rx
	 * and Tx is zero, use the driver's preferred values. This cannot be
	 * done individually, as it is valid for either Tx or Rx (but not
	 * both) to be zero. If the driver does not provide any preferred
	 * values, fall back on the EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports the requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
1314 */ 1315 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 1316 uint32_t max_rx_pktlen; 1317 uint32_t overhead_len; 1318 1319 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1320 dev_info.max_mtu); 1321 max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len; 1322 if (dev_conf->rxmode.max_lro_pkt_size == 0) 1323 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 1324 ret = eth_dev_check_lro_pkt_size(port_id, 1325 dev->data->dev_conf.rxmode.max_lro_pkt_size, 1326 max_rx_pktlen, 1327 dev_info.max_lro_pkt_size); 1328 if (ret != 0) 1329 goto rollback; 1330 } 1331 1332 /* Any requested offloading must be within its device capabilities */ 1333 if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) != 1334 dev_conf->rxmode.offloads) { 1335 RTE_ETHDEV_LOG(ERR, 1336 "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads " 1337 "capabilities 0x%"PRIx64" in %s()\n", 1338 port_id, dev_conf->rxmode.offloads, 1339 dev_info.rx_offload_capa, 1340 __func__); 1341 ret = -EINVAL; 1342 goto rollback; 1343 } 1344 if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) != 1345 dev_conf->txmode.offloads) { 1346 RTE_ETHDEV_LOG(ERR, 1347 "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads " 1348 "capabilities 0x%"PRIx64" in %s()\n", 1349 port_id, dev_conf->txmode.offloads, 1350 dev_info.tx_offload_capa, 1351 __func__); 1352 ret = -EINVAL; 1353 goto rollback; 1354 } 1355 1356 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 1357 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf); 1358 1359 /* Check that device supports requested rss hash functions. */ 1360 if ((dev_info.flow_type_rss_offloads | 1361 dev_conf->rx_adv_conf.rss_conf.rss_hf) != 1362 dev_info.flow_type_rss_offloads) { 1363 RTE_ETHDEV_LOG(ERR, 1364 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 1365 port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf, 1366 dev_info.flow_type_rss_offloads); 1367 ret = -EINVAL; 1368 goto rollback; 1369 } 1370 1371 /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */ 1372 if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) && 1373 (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) { 1374 RTE_ETHDEV_LOG(ERR, 1375 "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n", 1376 port_id, 1377 rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH)); 1378 ret = -EINVAL; 1379 goto rollback; 1380 } 1381 1382 /* 1383 * Setup new number of Rx/Tx queues and reconfigure device. 1384 */ 1385 diag = eth_dev_rx_queue_config(dev, nb_rx_q); 1386 if (diag != 0) { 1387 RTE_ETHDEV_LOG(ERR, 1388 "Port%u eth_dev_rx_queue_config = %d\n", 1389 port_id, diag); 1390 ret = diag; 1391 goto rollback; 1392 } 1393 1394 diag = eth_dev_tx_queue_config(dev, nb_tx_q); 1395 if (diag != 0) { 1396 RTE_ETHDEV_LOG(ERR, 1397 "Port%u eth_dev_tx_queue_config = %d\n", 1398 port_id, diag); 1399 eth_dev_rx_queue_config(dev, 0); 1400 ret = diag; 1401 goto rollback; 1402 } 1403 1404 diag = (*dev->dev_ops->dev_configure)(dev); 1405 if (diag != 0) { 1406 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n", 1407 port_id, diag); 1408 ret = eth_err(port_id, diag); 1409 goto reset_queues; 1410 } 1411 1412 /* Initialize Rx profiling if enabled at compilation time. 
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}
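
/*
 * Configuration sketch (illustrative; the offload and RSS choices are
 * arbitrary examples, not requirements of this API):
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = {
 *			.mq_mode = RTE_ETH_MQ_RX_RSS,
 *			.offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM,
 *		},
 *		.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP,
 *	};
 *
 *	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
 *
 * On any failure the function above restores the previous configuration
 * (and MTU) before returning, so a failed call leaves the port unchanged.
 */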
static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use the callbacks directly since we don't need the port_id check
	 * and would like to bypass the same-value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use the callbacks directly since we don't need the port_id check
	 * and would like to bypass the same-value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore the MAC now if the device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		if (*dev->dev_ops->link_update == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret == 0)
		dev->data->dev_started = 0;
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}
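
/*
 * Teardown sketch (illustrative): rte_eth_dev_close() below refuses to close
 * a started port in the primary process, so the usual shutdown order is:
 *
 *	ret = rte_eth_dev_stop(port_id);
 *	if (ret == 0)
 *		ret = rte_eth_dev_close(port_id);
 */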
int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_up == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));

	rte_ethdev_trace_set_link_up(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_down == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));

	rte_ethdev_trace_set_link_down(port_id, ret);

	return ret;
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*
	 * A secondary process needs to close the device to release its
	 * process-private resources. But a secondary process should not be
	 * obliged to wait for the device to stop before closing the ethdev.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
	    dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_reset == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = eth_err(port_id, dev->dev_ops->dev_reset(dev));

	rte_ethdev_trace_reset(port_id, ret);

	return ret;
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	if (*dev->dev_ops->is_removed == NULL)
		return 0;

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	rte_ethdev_trace_is_removed(port_id, ret);

	return ret;
}
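
/*
 * Pool creation sketch (illustrative sizes): rte_pktmbuf_pool_create() fills
 * in the struct rte_pktmbuf_pool_private area that rte_eth_check_rx_mempool()
 * below relies on, so pools created through it pass the private-data check:
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool",
 *			8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *			rte_socket_id());
 */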
1790 */ 1791 if (mp->private_data_size < 1792 sizeof(struct rte_pktmbuf_pool_private)) { 1793 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 1794 mp->name, mp->private_data_size, 1795 (unsigned int) 1796 sizeof(struct rte_pktmbuf_pool_private)); 1797 return -ENOSPC; 1798 } 1799 data_room_size = rte_pktmbuf_data_room_size(mp); 1800 if (data_room_size < offset + min_length) { 1801 RTE_ETHDEV_LOG(ERR, 1802 "%s mbuf_data_room_size %u < %u (%u + %u)\n", 1803 mp->name, data_room_size, 1804 offset + min_length, offset, min_length); 1805 return -EINVAL; 1806 } 1807 return 0; 1808 } 1809 1810 static int 1811 eth_dev_buffer_split_get_supported_hdrs_helper(uint16_t port_id, uint32_t **ptypes) 1812 { 1813 int cnt; 1814 1815 cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0); 1816 if (cnt <= 0) 1817 return cnt; 1818 1819 *ptypes = malloc(sizeof(uint32_t) * cnt); 1820 if (*ptypes == NULL) 1821 return -ENOMEM; 1822 1823 cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, *ptypes, cnt); 1824 if (cnt <= 0) { 1825 free(*ptypes); 1826 *ptypes = NULL; 1827 } 1828 return cnt; 1829 } 1830 1831 static int 1832 rte_eth_rx_queue_check_split(uint16_t port_id, 1833 const struct rte_eth_rxseg_split *rx_seg, 1834 uint16_t n_seg, uint32_t *mbp_buf_size, 1835 const struct rte_eth_dev_info *dev_info) 1836 { 1837 const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; 1838 struct rte_mempool *mp_first; 1839 uint32_t offset_mask; 1840 uint16_t seg_idx; 1841 int ret = 0; 1842 int ptype_cnt; 1843 uint32_t *ptypes; 1844 uint32_t prev_proto_hdrs = RTE_PTYPE_UNKNOWN; 1845 int i; 1846 1847 if (n_seg > seg_capa->max_nseg) { 1848 RTE_ETHDEV_LOG(ERR, 1849 "Requested Rx segments %u exceed supported %u\n", 1850 n_seg, seg_capa->max_nseg); 1851 return -EINVAL; 1852 } 1853 /* 1854 * Check the sizes and offsets against buffer sizes 1855 * for each segment specified in extended configuration. 1856 */ 1857 mp_first = rx_seg[0].mp; 1858 offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1; 1859 1860 ptypes = NULL; 1861 ptype_cnt = eth_dev_buffer_split_get_supported_hdrs_helper(port_id, &ptypes); 1862 1863 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 1864 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 1865 uint32_t length = rx_seg[seg_idx].length; 1866 uint32_t offset = rx_seg[seg_idx].offset; 1867 uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr; 1868 1869 if (mpl == NULL) { 1870 RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); 1871 ret = -EINVAL; 1872 goto out; 1873 } 1874 if (seg_idx != 0 && mp_first != mpl && 1875 seg_capa->multi_pools == 0) { 1876 RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n"); 1877 ret = -ENOTSUP; 1878 goto out; 1879 } 1880 if (offset != 0) { 1881 if (seg_capa->offset_allowed == 0) { 1882 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n"); 1883 ret = -ENOTSUP; 1884 goto out; 1885 } 1886 if (offset & offset_mask) { 1887 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n", 1888 offset, 1889 seg_capa->offset_align_log2); 1890 ret = -EINVAL; 1891 goto out; 1892 } 1893 } 1894 1895 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 1896 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 1897 if (proto_hdr != 0) { 1898 /* Split based on protocol headers. 
			if (length != 0) {
				RTE_ETHDEV_LOG(ERR,
					"Do not set length split and protocol split within a segment\n");
				ret = -EINVAL;
				goto out;
			}
			if ((proto_hdr & prev_proto_hdrs) != 0) {
				RTE_ETHDEV_LOG(ERR,
					"Repeat with previous protocol headers or proto-split after length-based split\n");
				ret = -EINVAL;
				goto out;
			}
			if (ptype_cnt <= 0) {
				RTE_ETHDEV_LOG(ERR,
					"Port %u failed to get supported buffer split header protocols\n",
					port_id);
				ret = -ENOTSUP;
				goto out;
			}
			for (i = 0; i < ptype_cnt; i++) {
				if ((prev_proto_hdrs | proto_hdr) == ptypes[i])
					break;
			}
			if (i == ptype_cnt) {
				RTE_ETHDEV_LOG(ERR,
					"Requested Rx split header protocols 0x%x are not supported\n",
					proto_hdr);
				ret = -EINVAL;
				goto out;
			}
			prev_proto_hdrs |= proto_hdr;
		} else {
			/* Split at fixed length. */
			length = length != 0 ? length : *mbp_buf_size;
			prev_proto_hdrs = RTE_PTYPE_ALL_MASK;
		}

		ret = rte_eth_check_rx_mempool(mpl, offset, length);
		if (ret != 0)
			goto out;
	}
out:
	free(ptypes);
	return ret;
}

static int
rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools,
			uint16_t n_mempools, uint32_t *min_buf_size,
			const struct rte_eth_dev_info *dev_info)
{
	uint16_t pool_idx;
	int ret;

	if (n_mempools > dev_info->max_rx_mempools) {
		RTE_ETHDEV_LOG(ERR,
			"Too many Rx mempools %u vs maximum %u\n",
			n_mempools, dev_info->max_rx_mempools);
		return -EINVAL;
	}

	for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) {
		struct rte_mempool *mp = rx_mempools[pool_idx];

		if (mp == NULL) {
			RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n");
			return -EINVAL;
		}

		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
					       dev_info->min_rx_bufsize);
		if (ret != 0)
			return ret;

		*min_buf_size = RTE_MIN(*min_buf_size,
					rte_pktmbuf_data_room_size(mp));
	}

	return 0;
}
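
/*
 * Buffer split sketch (illustrative; "hdr_mp" and "pay_mp" are hypothetical
 * mempools): with RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT enabled, a queue can place
 * the first bytes of each packet in one pool and the remainder in another
 * (a length of 0 means the rest of the packet). The mp argument passed to
 * rte_eth_rx_queue_setup() must then be NULL, per the single-source check in
 * that function:
 *
 *	union rte_eth_rxseg segs[2];
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	segs[0].split = (struct rte_eth_rxseg_split){ .mp = hdr_mp, .length = 128 };
 *	segs[1].split = (struct rte_eth_rxseg_split){ .mp = pay_mp };
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	rxconf.rx_seg = segs;
 *	rxconf.rx_nseg = 2;
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *			&rxconf, NULL);
 */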
int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint64_t rx_offloads;
	uint32_t mbp_buf_size = UINT32_MAX;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->rx_queue_setup == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rx_offloads = dev->data->dev_conf.rxmode.offloads;
	if (rx_conf != NULL)
		rx_offloads |= rx_conf->offloads;

	/* Ensure that we have one and only one source of Rx buffers */
	if ((mp != NULL) +
	    (rx_conf != NULL && rx_conf->rx_nseg > 0) +
	    (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) {
		RTE_ETHDEV_LOG(ERR,
			"Ambiguous Rx mempools configuration\n");
		return -EINVAL;
	}

	if (mp != NULL) {
		/* Single pool configuration check. */
		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
					       dev_info.min_rx_bufsize);
		if (ret != 0)
			return ret;

		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
	} else if (rx_conf != NULL && rx_conf->rx_nseg > 0) {
		const struct rte_eth_rxseg_split *rx_seg;
		uint16_t n_seg;

		/* Extended multi-segment configuration check. */
		if (rx_conf->rx_seg == NULL) {
			RTE_ETHDEV_LOG(ERR,
				"Memory pool is null and no multi-segment configuration provided\n");
			return -EINVAL;
		}

		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
		n_seg = rx_conf->rx_nseg;

		if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
			ret = rte_eth_rx_queue_check_split(port_id, rx_seg, n_seg,
							   &mbp_buf_size,
							   &dev_info);
			if (ret != 0)
				return ret;
		} else {
			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
			return -EINVAL;
		}
	} else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) {
		/* Extended multi-pool configuration check. */
		if (rx_conf->rx_mempools == NULL) {
			RTE_ETHDEV_LOG(ERR, "Memory pools array is null\n");
			return -EINVAL;
		}

		ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools,
						      rx_conf->rx_nmempool,
						      &mbp_buf_size,
						      &dev_info);
		if (ret != 0)
			return ret;
	} else {
		RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n");
		return -EINVAL;
	}

	/* Use the default specified by the driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If the driver default is also zero, fall back on the EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
	    nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
	    nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
	    !(dev_info.dev_capa &
	      RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
	    (dev->data->rx_queue_state[rx_queue_id] !=
	     RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_rxq_release(dev, rx_queue_id);

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;

	/*
	 * If an offload has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to the underlying PMD only carries
	 * those offloads which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;

	/*
	 * Newly added offloads for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be of a per-queue type.
	 * A pure per-port offload can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offload can't
	 * be enabled for any queue as a newly added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
2127 */ 2128 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2129 local_conf.offloads) { 2130 RTE_ETHDEV_LOG(ERR, 2131 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2132 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2133 port_id, rx_queue_id, local_conf.offloads, 2134 dev_info.rx_queue_offload_capa, 2135 __func__); 2136 return -EINVAL; 2137 } 2138 2139 if (local_conf.share_group > 0 && 2140 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 2141 RTE_ETHDEV_LOG(ERR, 2142 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n", 2143 port_id, rx_queue_id, local_conf.share_group); 2144 return -EINVAL; 2145 } 2146 2147 /* 2148 * If LRO is enabled, check that the maximum aggregated packet 2149 * size is supported by the configured device. 2150 */ 2151 /* Get the real Ethernet overhead length */ 2152 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 2153 uint32_t overhead_len; 2154 uint32_t max_rx_pktlen; 2155 int ret; 2156 2157 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2158 dev_info.max_mtu); 2159 max_rx_pktlen = dev->data->mtu + overhead_len; 2160 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2161 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2162 ret = eth_dev_check_lro_pkt_size(port_id, 2163 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2164 max_rx_pktlen, 2165 dev_info.max_lro_pkt_size); 2166 if (ret != 0) 2167 return ret; 2168 } 2169 2170 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2171 socket_id, &local_conf, mp); 2172 if (!ret) { 2173 if (!dev->data->min_rx_buf_size || 2174 dev->data->min_rx_buf_size > mbp_buf_size) 2175 dev->data->min_rx_buf_size = mbp_buf_size; 2176 } 2177 2178 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2179 rx_conf, ret); 2180 return eth_err(port_id, ret); 2181 } 2182 2183 int 2184 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2185 uint16_t nb_rx_desc, 2186 const struct rte_eth_hairpin_conf *conf) 2187 { 2188 int ret; 2189 struct rte_eth_dev *dev; 2190 struct rte_eth_hairpin_cap cap; 2191 int i; 2192 int count; 2193 2194 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2195 dev = &rte_eth_devices[port_id]; 2196 2197 if (rx_queue_id >= dev->data->nb_rx_queues) { 2198 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2199 return -EINVAL; 2200 } 2201 2202 if (conf == NULL) { 2203 RTE_ETHDEV_LOG(ERR, 2204 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2205 port_id); 2206 return -EINVAL; 2207 } 2208 2209 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2210 if (ret != 0) 2211 return ret; 2212 if (*dev->dev_ops->rx_hairpin_queue_setup == NULL) 2213 return -ENOTSUP; 2214 /* if nb_rx_desc is zero use max number of desc from the driver. 
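	 *
	 * Illustrative single-port hairpin setup (a sketch; the queue ids
	 * are assumptions): Rx queue 1 is peered with Tx queue 1 of the
	 * same port, and nb_rx_desc == 0 selects the driver maximum below:
	 *
	 *   struct rte_eth_hairpin_conf hp_conf = {
	 *           .peer_count = 1,
	 *           .peers[0] = { .port = port_id, .queue = 1 },
	 *   };
	 *   ret = rte_eth_rx_hairpin_queue_setup(port_id, 1, 0, &hp_conf);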
 */
	if (nb_rx_desc == 0)
		nb_rx_desc = cap.max_nb_desc;
	if (nb_rx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
			nb_rx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_rx_2_tx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
			conf->peer_count, cap.max_rx_2_tx);
		return -EINVAL;
	}
	if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use locked device memory for Rx queue, which is not supported\n");
		return -EINVAL;
	}
	if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use DPDK memory for Rx queue, which is not supported\n");
		return -EINVAL;
	}
	if (conf->use_locked_device_memory && conf->use_rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use mutually exclusive memory settings for Rx queue\n");
		return -EINVAL;
	}
	if (conf->force_memory &&
	    !conf->use_locked_device_memory &&
	    !conf->use_rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to force Rx queue memory settings, but none is set\n");
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
			       cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	eth_dev_rxq_release(dev, rx_queue_id);
	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
						      nb_rx_desc, conf);
	if (ret == 0)
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	ret = eth_err(port_id, ret);

	rte_eth_trace_rx_hairpin_queue_setup(port_id, rx_queue_id, nb_rx_desc,
					     conf, ret);

	return ret;
}

int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->tx_queue_setup == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Use default specified by driver, if nb_tx_desc is zero */
	if (nb_tx_desc == 0) {
		nb_tx_desc = dev_info.default_txportconf.ring_size;
		/* If driver default is zero, fall back on EAL default */
		if (nb_tx_desc == 0)
			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	}
	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
			dev_info.tx_desc_lim.nb_min,
			dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
	    !(dev_info.dev_capa &
	      RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
	    (dev->data->tx_queue_state[tx_queue_id] !=
	     RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_txq_release(dev, tx_queue_id);

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	local_conf = *tx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, tx_queue_id, local_conf.offloads,
			dev_info.tx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}

int
rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
			       uint16_t nb_tx_desc,
			       const struct rte_eth_hairpin_conf *conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_hairpin_cap cap;
	int i;
	int count;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
			port_id);
		return -EINVAL;
	}

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;
	if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
		return -ENOTSUP;
	/* if nb_tx_desc is zero use max number of desc from the driver.
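	 *
	 * To complete a hairpin pair (a sketch; queue ids are assumptions),
	 * the Tx side mirrors the Rx setup with the peer pointing back at
	 * the Rx hairpin queue:
	 *
	 *   struct rte_eth_hairpin_conf hp_conf = {
	 *           .peer_count = 1,
	 *           .peers[0] = { .port = port_id, .queue = 1 },
	 *   };
	 *   ret = rte_eth_tx_hairpin_queue_setup(port_id, 1, 0, &hp_conf);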
 */
	if (nb_tx_desc == 0)
		nb_tx_desc = cap.max_nb_desc;
	if (nb_tx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
			nb_tx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_tx_2_rx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
			conf->peer_count, cap.max_tx_2_rx);
		return -EINVAL;
	}
	if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use locked device memory for Tx queue, which is not supported\n");
		return -EINVAL;
	}
	if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use DPDK memory for Tx queue, which is not supported\n");
		return -EINVAL;
	}
	if (conf->use_locked_device_memory && conf->use_rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use mutually exclusive memory settings for Tx queue\n");
		return -EINVAL;
	}
	if (conf->force_memory &&
	    !conf->use_locked_device_memory &&
	    !conf->use_rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to force Tx queue memory settings, but none is set\n");
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
			       cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	eth_dev_txq_release(dev, tx_queue_id);
	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
		(dev, tx_queue_id, nb_tx_desc, conf);
	if (ret == 0)
		dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	ret = eth_err(port_id, ret);

	rte_eth_trace_tx_hairpin_queue_setup(port_id, tx_queue_id, nb_tx_desc,
					     conf, ret);

	return ret;
}

int
rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
	dev = &rte_eth_devices[tx_port];

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
		return -EBUSY;
	}

	if (*dev->dev_ops->hairpin_bind == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
	if (ret != 0)
		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
			       " to Rx %d (%d - all ports)\n",
			       tx_port, rx_port, RTE_MAX_ETHPORTS);

	rte_eth_trace_hairpin_bind(tx_port, rx_port, ret);

	return ret;
}

int
rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
	dev = &rte_eth_devices[tx_port];

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
		return -EBUSY;
	}

	if (*dev->dev_ops->hairpin_unbind == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
	if (ret != 0)
RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2523 " from Rx %d (%d - all ports)\n", 2524 tx_port, rx_port, RTE_MAX_ETHPORTS); 2525 2526 rte_eth_trace_hairpin_unbind(tx_port, rx_port, ret); 2527 2528 return ret; 2529 } 2530 2531 int 2532 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2533 size_t len, uint32_t direction) 2534 { 2535 struct rte_eth_dev *dev; 2536 int ret; 2537 2538 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2539 dev = &rte_eth_devices[port_id]; 2540 2541 if (peer_ports == NULL) { 2542 RTE_ETHDEV_LOG(ERR, 2543 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2544 port_id); 2545 return -EINVAL; 2546 } 2547 2548 if (len == 0) { 2549 RTE_ETHDEV_LOG(ERR, 2550 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2551 port_id); 2552 return -EINVAL; 2553 } 2554 2555 if (*dev->dev_ops->hairpin_get_peer_ports == NULL) 2556 return -ENOTSUP; 2557 2558 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2559 len, direction); 2560 if (ret < 0) 2561 RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2562 port_id, direction ? "Rx" : "Tx"); 2563 2564 rte_eth_trace_hairpin_get_peer_ports(port_id, peer_ports, len, 2565 direction, ret); 2566 2567 return ret; 2568 } 2569 2570 void 2571 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2572 void *userdata __rte_unused) 2573 { 2574 rte_pktmbuf_free_bulk(pkts, unsent); 2575 2576 rte_eth_trace_tx_buffer_drop_callback((void **)pkts, unsent); 2577 } 2578 2579 void 2580 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2581 void *userdata) 2582 { 2583 uint64_t *count = userdata; 2584 2585 rte_pktmbuf_free_bulk(pkts, unsent); 2586 *count += unsent; 2587 2588 rte_eth_trace_tx_buffer_count_callback((void **)pkts, unsent, *count); 2589 } 2590 2591 int 2592 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2593 buffer_tx_error_fn cbfn, void *userdata) 2594 { 2595 if (buffer == NULL) { 2596 RTE_ETHDEV_LOG(ERR, 2597 "Cannot set Tx buffer error callback to NULL buffer\n"); 2598 return -EINVAL; 2599 } 2600 2601 buffer->error_callback = cbfn; 2602 buffer->error_userdata = userdata; 2603 2604 rte_eth_trace_tx_buffer_set_err_callback(buffer); 2605 2606 return 0; 2607 } 2608 2609 int 2610 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2611 { 2612 int ret = 0; 2613 2614 if (buffer == NULL) { 2615 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2616 return -EINVAL; 2617 } 2618 2619 buffer->size = size; 2620 if (buffer->error_callback == NULL) { 2621 ret = rte_eth_tx_buffer_set_err_callback( 2622 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2623 } 2624 2625 rte_eth_trace_tx_buffer_init(buffer, size, ret); 2626 2627 return ret; 2628 } 2629 2630 int 2631 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2632 { 2633 struct rte_eth_dev *dev; 2634 int ret; 2635 2636 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2637 dev = &rte_eth_devices[port_id]; 2638 2639 if (*dev->dev_ops->tx_done_cleanup == NULL) 2640 return -ENOTSUP; 2641 2642 /* Call driver to free pending mbufs. 
*/ 2643 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2644 free_cnt); 2645 ret = eth_err(port_id, ret); 2646 2647 rte_eth_trace_tx_done_cleanup(port_id, queue_id, free_cnt, ret); 2648 2649 return ret; 2650 } 2651 2652 int 2653 rte_eth_promiscuous_enable(uint16_t port_id) 2654 { 2655 struct rte_eth_dev *dev; 2656 int diag = 0; 2657 2658 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2659 dev = &rte_eth_devices[port_id]; 2660 2661 if (dev->data->promiscuous == 1) 2662 return 0; 2663 2664 if (*dev->dev_ops->promiscuous_enable == NULL) 2665 return -ENOTSUP; 2666 2667 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2668 dev->data->promiscuous = (diag == 0) ? 1 : 0; 2669 2670 diag = eth_err(port_id, diag); 2671 2672 rte_eth_trace_promiscuous_enable(port_id, dev->data->promiscuous, 2673 diag); 2674 2675 return diag; 2676 } 2677 2678 int 2679 rte_eth_promiscuous_disable(uint16_t port_id) 2680 { 2681 struct rte_eth_dev *dev; 2682 int diag = 0; 2683 2684 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2685 dev = &rte_eth_devices[port_id]; 2686 2687 if (dev->data->promiscuous == 0) 2688 return 0; 2689 2690 if (*dev->dev_ops->promiscuous_disable == NULL) 2691 return -ENOTSUP; 2692 2693 dev->data->promiscuous = 0; 2694 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2695 if (diag != 0) 2696 dev->data->promiscuous = 1; 2697 2698 diag = eth_err(port_id, diag); 2699 2700 rte_eth_trace_promiscuous_disable(port_id, dev->data->promiscuous, 2701 diag); 2702 2703 return diag; 2704 } 2705 2706 int 2707 rte_eth_promiscuous_get(uint16_t port_id) 2708 { 2709 struct rte_eth_dev *dev; 2710 2711 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2712 dev = &rte_eth_devices[port_id]; 2713 2714 rte_eth_trace_promiscuous_get(port_id, dev->data->promiscuous); 2715 2716 return dev->data->promiscuous; 2717 } 2718 2719 int 2720 rte_eth_allmulticast_enable(uint16_t port_id) 2721 { 2722 struct rte_eth_dev *dev; 2723 int diag; 2724 2725 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2726 dev = &rte_eth_devices[port_id]; 2727 2728 if (dev->data->all_multicast == 1) 2729 return 0; 2730 2731 if (*dev->dev_ops->allmulticast_enable == NULL) 2732 return -ENOTSUP; 2733 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2734 dev->data->all_multicast = (diag == 0) ? 
1 : 0; 2735 2736 diag = eth_err(port_id, diag); 2737 2738 rte_eth_trace_allmulticast_enable(port_id, dev->data->all_multicast, 2739 diag); 2740 2741 return diag; 2742 } 2743 2744 int 2745 rte_eth_allmulticast_disable(uint16_t port_id) 2746 { 2747 struct rte_eth_dev *dev; 2748 int diag; 2749 2750 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2751 dev = &rte_eth_devices[port_id]; 2752 2753 if (dev->data->all_multicast == 0) 2754 return 0; 2755 2756 if (*dev->dev_ops->allmulticast_disable == NULL) 2757 return -ENOTSUP; 2758 dev->data->all_multicast = 0; 2759 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2760 if (diag != 0) 2761 dev->data->all_multicast = 1; 2762 2763 diag = eth_err(port_id, diag); 2764 2765 rte_eth_trace_allmulticast_disable(port_id, dev->data->all_multicast, 2766 diag); 2767 2768 return diag; 2769 } 2770 2771 int 2772 rte_eth_allmulticast_get(uint16_t port_id) 2773 { 2774 struct rte_eth_dev *dev; 2775 2776 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2777 dev = &rte_eth_devices[port_id]; 2778 2779 rte_eth_trace_allmulticast_get(port_id, dev->data->all_multicast); 2780 2781 return dev->data->all_multicast; 2782 } 2783 2784 int 2785 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2786 { 2787 struct rte_eth_dev *dev; 2788 2789 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2790 dev = &rte_eth_devices[port_id]; 2791 2792 if (eth_link == NULL) { 2793 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2794 port_id); 2795 return -EINVAL; 2796 } 2797 2798 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2799 rte_eth_linkstatus_get(dev, eth_link); 2800 else { 2801 if (*dev->dev_ops->link_update == NULL) 2802 return -ENOTSUP; 2803 (*dev->dev_ops->link_update)(dev, 1); 2804 *eth_link = dev->data->dev_link; 2805 } 2806 2807 rte_eth_trace_link_get(port_id, eth_link); 2808 2809 return 0; 2810 } 2811 2812 int 2813 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2814 { 2815 struct rte_eth_dev *dev; 2816 2817 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2818 dev = &rte_eth_devices[port_id]; 2819 2820 if (eth_link == NULL) { 2821 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2822 port_id); 2823 return -EINVAL; 2824 } 2825 2826 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2827 rte_eth_linkstatus_get(dev, eth_link); 2828 else { 2829 if (*dev->dev_ops->link_update == NULL) 2830 return -ENOTSUP; 2831 (*dev->dev_ops->link_update)(dev, 0); 2832 *eth_link = dev->data->dev_link; 2833 } 2834 2835 rte_eth_trace_link_get_nowait(port_id, eth_link); 2836 2837 return 0; 2838 } 2839 2840 const char * 2841 rte_eth_link_speed_to_str(uint32_t link_speed) 2842 { 2843 const char *ret; 2844 2845 switch (link_speed) { 2846 case RTE_ETH_SPEED_NUM_NONE: 2847 ret = "None"; 2848 break; 2849 case RTE_ETH_SPEED_NUM_10M: 2850 ret = "10 Mbps"; 2851 break; 2852 case RTE_ETH_SPEED_NUM_100M: 2853 ret = "100 Mbps"; 2854 break; 2855 case RTE_ETH_SPEED_NUM_1G: 2856 ret = "1 Gbps"; 2857 break; 2858 case RTE_ETH_SPEED_NUM_2_5G: 2859 ret = "2.5 Gbps"; 2860 break; 2861 case RTE_ETH_SPEED_NUM_5G: 2862 ret = "5 Gbps"; 2863 break; 2864 case RTE_ETH_SPEED_NUM_10G: 2865 ret = "10 Gbps"; 2866 break; 2867 case RTE_ETH_SPEED_NUM_20G: 2868 ret = "20 Gbps"; 2869 break; 2870 case RTE_ETH_SPEED_NUM_25G: 2871 ret = "25 Gbps"; 2872 break; 2873 case RTE_ETH_SPEED_NUM_40G: 2874 ret = "40 Gbps"; 2875 break; 2876 case RTE_ETH_SPEED_NUM_50G: 2877 ret = "50 Gbps"; 2878 break; 2879 case RTE_ETH_SPEED_NUM_56G: 2880 ret 
= "56 Gbps"; 2881 break; 2882 case RTE_ETH_SPEED_NUM_100G: 2883 ret = "100 Gbps"; 2884 break; 2885 case RTE_ETH_SPEED_NUM_200G: 2886 ret = "200 Gbps"; 2887 break; 2888 case RTE_ETH_SPEED_NUM_400G: 2889 ret = "400 Gbps"; 2890 break; 2891 case RTE_ETH_SPEED_NUM_UNKNOWN: 2892 ret = "Unknown"; 2893 break; 2894 default: 2895 ret = "Invalid"; 2896 } 2897 2898 rte_eth_trace_link_speed_to_str(link_speed, ret); 2899 2900 return ret; 2901 } 2902 2903 int 2904 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2905 { 2906 int ret; 2907 2908 if (str == NULL) { 2909 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2910 return -EINVAL; 2911 } 2912 2913 if (len == 0) { 2914 RTE_ETHDEV_LOG(ERR, 2915 "Cannot convert link to string with zero size\n"); 2916 return -EINVAL; 2917 } 2918 2919 if (eth_link == NULL) { 2920 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2921 return -EINVAL; 2922 } 2923 2924 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 2925 ret = snprintf(str, len, "Link down"); 2926 else 2927 ret = snprintf(str, len, "Link up at %s %s %s", 2928 rte_eth_link_speed_to_str(eth_link->link_speed), 2929 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 2930 "FDX" : "HDX", 2931 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 2932 "Autoneg" : "Fixed"); 2933 2934 rte_eth_trace_link_to_str(len, eth_link, str, ret); 2935 2936 return ret; 2937 } 2938 2939 int 2940 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2941 { 2942 struct rte_eth_dev *dev; 2943 int ret; 2944 2945 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2946 dev = &rte_eth_devices[port_id]; 2947 2948 if (stats == NULL) { 2949 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2950 port_id); 2951 return -EINVAL; 2952 } 2953 2954 memset(stats, 0, sizeof(*stats)); 2955 2956 if (*dev->dev_ops->stats_get == NULL) 2957 return -ENOTSUP; 2958 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2959 ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2960 2961 rte_eth_trace_stats_get(port_id, stats, ret); 2962 2963 return ret; 2964 } 2965 2966 int 2967 rte_eth_stats_reset(uint16_t port_id) 2968 { 2969 struct rte_eth_dev *dev; 2970 int ret; 2971 2972 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2973 dev = &rte_eth_devices[port_id]; 2974 2975 if (*dev->dev_ops->stats_reset == NULL) 2976 return -ENOTSUP; 2977 ret = (*dev->dev_ops->stats_reset)(dev); 2978 if (ret != 0) 2979 return eth_err(port_id, ret); 2980 2981 dev->data->rx_mbuf_alloc_failed = 0; 2982 2983 rte_eth_trace_stats_reset(port_id); 2984 2985 return 0; 2986 } 2987 2988 static inline int 2989 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2990 { 2991 uint16_t nb_rxqs, nb_txqs; 2992 int count; 2993 2994 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2995 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2996 2997 count = RTE_NB_STATS; 2998 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2999 count += nb_rxqs * RTE_NB_RXQ_STATS; 3000 count += nb_txqs * RTE_NB_TXQ_STATS; 3001 } 3002 3003 return count; 3004 } 3005 3006 static int 3007 eth_dev_get_xstats_count(uint16_t port_id) 3008 { 3009 struct rte_eth_dev *dev; 3010 int count; 3011 3012 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3013 dev = &rte_eth_devices[port_id]; 3014 if (dev->dev_ops->xstats_get_names != NULL) { 3015 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 3016 if (count < 0) 3017 return eth_err(port_id, count); 3018 } else 
3019 count = 0; 3020 3021 3022 count += eth_dev_get_xstats_basic_count(dev); 3023 3024 return count; 3025 } 3026 3027 int 3028 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 3029 uint64_t *id) 3030 { 3031 int cnt_xstats, idx_xstat; 3032 3033 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3034 3035 if (xstat_name == NULL) { 3036 RTE_ETHDEV_LOG(ERR, 3037 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 3038 port_id); 3039 return -ENOMEM; 3040 } 3041 3042 if (id == NULL) { 3043 RTE_ETHDEV_LOG(ERR, 3044 "Cannot get ethdev port %u xstats ID to NULL\n", 3045 port_id); 3046 return -ENOMEM; 3047 } 3048 3049 /* Get count */ 3050 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 3051 if (cnt_xstats < 0) { 3052 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 3053 return -ENODEV; 3054 } 3055 3056 /* Get id-name lookup table */ 3057 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 3058 3059 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 3060 port_id, xstats_names, cnt_xstats, NULL)) { 3061 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 3062 return -1; 3063 } 3064 3065 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 3066 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 3067 *id = idx_xstat; 3068 3069 rte_eth_trace_xstats_get_id_by_name(port_id, 3070 xstat_name, *id); 3071 3072 return 0; 3073 }; 3074 } 3075 3076 return -EINVAL; 3077 } 3078 3079 /* retrieve basic stats names */ 3080 static int 3081 eth_basic_stats_get_names(struct rte_eth_dev *dev, 3082 struct rte_eth_xstat_name *xstats_names) 3083 { 3084 int cnt_used_entries = 0; 3085 uint32_t idx, id_queue; 3086 uint16_t num_q; 3087 3088 for (idx = 0; idx < RTE_NB_STATS; idx++) { 3089 strlcpy(xstats_names[cnt_used_entries].name, 3090 eth_dev_stats_strings[idx].name, 3091 sizeof(xstats_names[0].name)); 3092 cnt_used_entries++; 3093 } 3094 3095 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3096 return cnt_used_entries; 3097 3098 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3099 for (id_queue = 0; id_queue < num_q; id_queue++) { 3100 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 3101 snprintf(xstats_names[cnt_used_entries].name, 3102 sizeof(xstats_names[0].name), 3103 "rx_q%u_%s", 3104 id_queue, eth_dev_rxq_stats_strings[idx].name); 3105 cnt_used_entries++; 3106 } 3107 3108 } 3109 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3110 for (id_queue = 0; id_queue < num_q; id_queue++) { 3111 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 3112 snprintf(xstats_names[cnt_used_entries].name, 3113 sizeof(xstats_names[0].name), 3114 "tx_q%u_%s", 3115 id_queue, eth_dev_txq_stats_strings[idx].name); 3116 cnt_used_entries++; 3117 } 3118 } 3119 return cnt_used_entries; 3120 } 3121 3122 /* retrieve ethdev extended statistics names */ 3123 int 3124 rte_eth_xstats_get_names_by_id(uint16_t port_id, 3125 struct rte_eth_xstat_name *xstats_names, unsigned int size, 3126 uint64_t *ids) 3127 { 3128 struct rte_eth_xstat_name *xstats_names_copy; 3129 unsigned int no_basic_stat_requested = 1; 3130 unsigned int no_ext_stat_requested = 1; 3131 unsigned int expected_entries; 3132 unsigned int basic_count; 3133 struct rte_eth_dev *dev; 3134 unsigned int i; 3135 int ret; 3136 3137 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3138 dev = &rte_eth_devices[port_id]; 3139 3140 basic_count = eth_dev_get_xstats_basic_count(dev); 3141 ret = eth_dev_get_xstats_count(port_id); 3142 if (ret < 0) 3143 return ret; 3144 
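	/*
	 * Note: in the id space visible to callers, basic stats occupy
	 * ids [0, basic_count) and driver-specific xstats follow from
	 * basic_count onward; expected_entries below sizes that combined
	 * space.
	 */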
expected_entries = (unsigned int)ret; 3145 3146 /* Return max number of stats if no ids given */ 3147 if (!ids) { 3148 if (!xstats_names) 3149 return expected_entries; 3150 else if (xstats_names && size < expected_entries) 3151 return expected_entries; 3152 } 3153 3154 if (ids && !xstats_names) 3155 return -EINVAL; 3156 3157 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3158 uint64_t ids_copy[size]; 3159 3160 for (i = 0; i < size; i++) { 3161 if (ids[i] < basic_count) { 3162 no_basic_stat_requested = 0; 3163 break; 3164 } 3165 3166 /* 3167 * Convert ids to xstats ids that PMD knows. 3168 * ids known by user are basic + extended stats. 3169 */ 3170 ids_copy[i] = ids[i] - basic_count; 3171 } 3172 3173 if (no_basic_stat_requested) 3174 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3175 ids_copy, xstats_names, size); 3176 } 3177 3178 /* Retrieve all stats */ 3179 if (!ids) { 3180 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3181 expected_entries); 3182 if (num_stats < 0 || num_stats > (int)expected_entries) 3183 return num_stats; 3184 else 3185 return expected_entries; 3186 } 3187 3188 xstats_names_copy = calloc(expected_entries, 3189 sizeof(struct rte_eth_xstat_name)); 3190 3191 if (!xstats_names_copy) { 3192 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3193 return -ENOMEM; 3194 } 3195 3196 if (ids) { 3197 for (i = 0; i < size; i++) { 3198 if (ids[i] >= basic_count) { 3199 no_ext_stat_requested = 0; 3200 break; 3201 } 3202 } 3203 } 3204 3205 /* Fill xstats_names_copy structure */ 3206 if (ids && no_ext_stat_requested) { 3207 eth_basic_stats_get_names(dev, xstats_names_copy); 3208 } else { 3209 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3210 expected_entries); 3211 if (ret < 0) { 3212 free(xstats_names_copy); 3213 return ret; 3214 } 3215 } 3216 3217 /* Filter stats */ 3218 for (i = 0; i < size; i++) { 3219 if (ids[i] >= expected_entries) { 3220 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3221 free(xstats_names_copy); 3222 return -1; 3223 } 3224 xstats_names[i] = xstats_names_copy[ids[i]]; 3225 3226 rte_eth_trace_xstats_get_names_by_id(port_id, &xstats_names[i], 3227 ids[i]); 3228 } 3229 3230 free(xstats_names_copy); 3231 return size; 3232 } 3233 3234 int 3235 rte_eth_xstats_get_names(uint16_t port_id, 3236 struct rte_eth_xstat_name *xstats_names, 3237 unsigned int size) 3238 { 3239 struct rte_eth_dev *dev; 3240 int cnt_used_entries; 3241 int cnt_expected_entries; 3242 int cnt_driver_entries; 3243 int i; 3244 3245 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3246 if (xstats_names == NULL || cnt_expected_entries < 0 || 3247 (int)size < cnt_expected_entries) 3248 return cnt_expected_entries; 3249 3250 /* port_id checked in eth_dev_get_xstats_count() */ 3251 dev = &rte_eth_devices[port_id]; 3252 3253 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3254 3255 if (dev->dev_ops->xstats_get_names != NULL) { 3256 /* If there are any driver-specific xstats, append them 3257 * to end of list. 
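		 *
		 * Typical two-call pattern from an application (a sketch,
		 * error handling omitted): size the table first, then
		 * fetch the names:
		 *
		 *   int n = rte_eth_xstats_get_names(port_id, NULL, 0);
		 *   struct rte_eth_xstat_name *names =
		 *           malloc(n * sizeof(*names));
		 *   n = rte_eth_xstats_get_names(port_id, names, n);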
 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return eth_err(port_id, cnt_driver_entries);
		cnt_used_entries += cnt_driver_entries;
	}

	for (i = 0; i < cnt_used_entries; i++)
		rte_eth_trace_xstats_get_names(port_id, i, &xstats_names[i],
					       size, cnt_used_entries);

	return cnt_used_entries;
}

static int
eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_stats eth_stats;
	unsigned int count = 0, i, q;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	ret = rte_eth_stats_get(port_id, &eth_stats);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
		return count;

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}
	return count;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
			 uint64_t *values, unsigned int size)
{
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int num_xstats_filled;
	unsigned int basic_count;
	uint16_t expected_entries;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (uint16_t)ret;
	struct rte_eth_xstat xstats[expected_entries];
	basic_count = eth_dev_get_xstats_basic_count(dev);

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!values)
			return expected_entries;
		else if (values && size < expected_entries)
			return expected_entries;
	}

	if (ids && !values)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
		unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
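			 *
			 * Worked example (assuming basic_count == 10): a
			 * caller-visible id of 12 names the PMD's xstat
			 * index 2, so ids_copy[] gets 2.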
3378 */ 3379 ids_copy[i] = ids[i] - basic_count; 3380 } 3381 3382 if (no_basic_stat_requested) 3383 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3384 values, size); 3385 } 3386 3387 if (ids) { 3388 for (i = 0; i < size; i++) { 3389 if (ids[i] >= basic_count) { 3390 no_ext_stat_requested = 0; 3391 break; 3392 } 3393 } 3394 } 3395 3396 /* Fill the xstats structure */ 3397 if (ids && no_ext_stat_requested) 3398 ret = eth_basic_stats_get(port_id, xstats); 3399 else 3400 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3401 3402 if (ret < 0) 3403 return ret; 3404 num_xstats_filled = (unsigned int)ret; 3405 3406 /* Return all stats */ 3407 if (!ids) { 3408 for (i = 0; i < num_xstats_filled; i++) 3409 values[i] = xstats[i].value; 3410 return expected_entries; 3411 } 3412 3413 /* Filter stats */ 3414 for (i = 0; i < size; i++) { 3415 if (ids[i] >= expected_entries) { 3416 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3417 return -1; 3418 } 3419 values[i] = xstats[ids[i]].value; 3420 } 3421 3422 rte_eth_trace_xstats_get_by_id(port_id, ids, values, size); 3423 3424 return size; 3425 } 3426 3427 int 3428 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3429 unsigned int n) 3430 { 3431 struct rte_eth_dev *dev; 3432 unsigned int count, i; 3433 signed int xcount = 0; 3434 int ret; 3435 3436 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3437 if (xstats == NULL && n > 0) 3438 return -EINVAL; 3439 dev = &rte_eth_devices[port_id]; 3440 3441 count = eth_dev_get_xstats_basic_count(dev); 3442 3443 /* implemented by the driver */ 3444 if (dev->dev_ops->xstats_get != NULL) { 3445 /* Retrieve the xstats from the driver at the end of the 3446 * xstats struct. 3447 */ 3448 xcount = (*dev->dev_ops->xstats_get)(dev, 3449 (n > count) ? xstats + count : NULL, 3450 (n > count) ? 
n - count : 0); 3451 3452 if (xcount < 0) 3453 return eth_err(port_id, xcount); 3454 } 3455 3456 if (n < count + xcount || xstats == NULL) 3457 return count + xcount; 3458 3459 /* now fill the xstats structure */ 3460 ret = eth_basic_stats_get(port_id, xstats); 3461 if (ret < 0) 3462 return ret; 3463 count = ret; 3464 3465 for (i = 0; i < count; i++) 3466 xstats[i].id = i; 3467 /* add an offset to driver-specific stats */ 3468 for ( ; i < count + xcount; i++) 3469 xstats[i].id += count; 3470 3471 for (i = 0; i < n; i++) 3472 rte_eth_trace_xstats_get(port_id, xstats[i]); 3473 3474 return count + xcount; 3475 } 3476 3477 /* reset ethdev extended statistics */ 3478 int 3479 rte_eth_xstats_reset(uint16_t port_id) 3480 { 3481 struct rte_eth_dev *dev; 3482 3483 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3484 dev = &rte_eth_devices[port_id]; 3485 3486 /* implemented by the driver */ 3487 if (dev->dev_ops->xstats_reset != NULL) { 3488 int ret = eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3489 3490 rte_eth_trace_xstats_reset(port_id, ret); 3491 3492 return ret; 3493 } 3494 3495 /* fallback to default */ 3496 return rte_eth_stats_reset(port_id); 3497 } 3498 3499 static int 3500 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3501 uint8_t stat_idx, uint8_t is_rx) 3502 { 3503 struct rte_eth_dev *dev; 3504 3505 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3506 dev = &rte_eth_devices[port_id]; 3507 3508 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3509 return -EINVAL; 3510 3511 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3512 return -EINVAL; 3513 3514 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3515 return -EINVAL; 3516 3517 if (*dev->dev_ops->queue_stats_mapping_set == NULL) 3518 return -ENOTSUP; 3519 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3520 } 3521 3522 int 3523 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3524 uint8_t stat_idx) 3525 { 3526 int ret; 3527 3528 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3529 tx_queue_id, 3530 stat_idx, STAT_QMAP_TX)); 3531 3532 rte_ethdev_trace_set_tx_queue_stats_mapping(port_id, tx_queue_id, 3533 stat_idx, ret); 3534 3535 return ret; 3536 } 3537 3538 int 3539 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3540 uint8_t stat_idx) 3541 { 3542 int ret; 3543 3544 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3545 rx_queue_id, 3546 stat_idx, STAT_QMAP_RX)); 3547 3548 rte_ethdev_trace_set_rx_queue_stats_mapping(port_id, rx_queue_id, 3549 stat_idx, ret); 3550 3551 return ret; 3552 } 3553 3554 int 3555 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3556 { 3557 struct rte_eth_dev *dev; 3558 int ret; 3559 3560 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3561 dev = &rte_eth_devices[port_id]; 3562 3563 if (fw_version == NULL && fw_size > 0) { 3564 RTE_ETHDEV_LOG(ERR, 3565 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3566 port_id); 3567 return -EINVAL; 3568 } 3569 3570 if (*dev->dev_ops->fw_version_get == NULL) 3571 return -ENOTSUP; 3572 ret = eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3573 fw_version, fw_size)); 3574 3575 rte_ethdev_trace_fw_version_get(port_id, fw_version, fw_size, ret); 3576 3577 return ret; 3578 } 3579 3580 int 3581 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3582 { 3583 struct rte_eth_dev *dev; 3584 const struct rte_eth_desc_lim lim = { 3585 
.nb_max = UINT16_MAX, 3586 .nb_min = 0, 3587 .nb_align = 1, 3588 .nb_seg_max = UINT16_MAX, 3589 .nb_mtu_seg_max = UINT16_MAX, 3590 }; 3591 int diag; 3592 3593 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3594 dev = &rte_eth_devices[port_id]; 3595 3596 if (dev_info == NULL) { 3597 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3598 port_id); 3599 return -EINVAL; 3600 } 3601 3602 /* 3603 * Init dev_info before port_id check since caller does not have 3604 * return status and does not know if get is successful or not. 3605 */ 3606 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3607 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3608 3609 dev_info->rx_desc_lim = lim; 3610 dev_info->tx_desc_lim = lim; 3611 dev_info->device = dev->device; 3612 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3613 RTE_ETHER_CRC_LEN; 3614 dev_info->max_mtu = UINT16_MAX; 3615 3616 if (*dev->dev_ops->dev_infos_get == NULL) 3617 return -ENOTSUP; 3618 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3619 if (diag != 0) { 3620 /* Cleanup already filled in device information */ 3621 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3622 return eth_err(port_id, diag); 3623 } 3624 3625 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3626 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3627 RTE_MAX_QUEUES_PER_PORT); 3628 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3629 RTE_MAX_QUEUES_PER_PORT); 3630 3631 dev_info->driver_name = dev->device->driver->name; 3632 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3633 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3634 3635 dev_info->dev_flags = &dev->data->dev_flags; 3636 3637 rte_ethdev_trace_info_get(port_id, dev_info); 3638 3639 return 0; 3640 } 3641 3642 int 3643 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3644 { 3645 struct rte_eth_dev *dev; 3646 3647 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3648 dev = &rte_eth_devices[port_id]; 3649 3650 if (dev_conf == NULL) { 3651 RTE_ETHDEV_LOG(ERR, 3652 "Cannot get ethdev port %u configuration to NULL\n", 3653 port_id); 3654 return -EINVAL; 3655 } 3656 3657 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3658 3659 rte_ethdev_trace_conf_get(port_id, dev_conf); 3660 3661 return 0; 3662 } 3663 3664 int 3665 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3666 uint32_t *ptypes, int num) 3667 { 3668 int i, j; 3669 struct rte_eth_dev *dev; 3670 const uint32_t *all_ptypes; 3671 3672 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3673 dev = &rte_eth_devices[port_id]; 3674 3675 if (ptypes == NULL && num > 0) { 3676 RTE_ETHDEV_LOG(ERR, 3677 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3678 port_id); 3679 return -EINVAL; 3680 } 3681 3682 if (*dev->dev_ops->dev_supported_ptypes_get == NULL) 3683 return 0; 3684 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3685 3686 if (!all_ptypes) 3687 return 0; 3688 3689 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3690 if (all_ptypes[i] & ptype_mask) { 3691 if (j < num) { 3692 ptypes[j] = all_ptypes[i]; 3693 3694 rte_ethdev_trace_get_supported_ptypes(port_id, 3695 j, num, ptypes[j]); 3696 } 3697 j++; 3698 } 3699 3700 return j; 3701 } 3702 3703 int 3704 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3705 uint32_t *set_ptypes, unsigned int num) 3706 { 3707 const uint32_t valid_ptype_masks[] = { 3708 RTE_PTYPE_L2_MASK, 
3709 RTE_PTYPE_L3_MASK, 3710 RTE_PTYPE_L4_MASK, 3711 RTE_PTYPE_TUNNEL_MASK, 3712 RTE_PTYPE_INNER_L2_MASK, 3713 RTE_PTYPE_INNER_L3_MASK, 3714 RTE_PTYPE_INNER_L4_MASK, 3715 }; 3716 const uint32_t *all_ptypes; 3717 struct rte_eth_dev *dev; 3718 uint32_t unused_mask; 3719 unsigned int i, j; 3720 int ret; 3721 3722 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3723 dev = &rte_eth_devices[port_id]; 3724 3725 if (num > 0 && set_ptypes == NULL) { 3726 RTE_ETHDEV_LOG(ERR, 3727 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3728 port_id); 3729 return -EINVAL; 3730 } 3731 3732 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3733 *dev->dev_ops->dev_ptypes_set == NULL) { 3734 ret = 0; 3735 goto ptype_unknown; 3736 } 3737 3738 if (ptype_mask == 0) { 3739 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3740 ptype_mask); 3741 goto ptype_unknown; 3742 } 3743 3744 unused_mask = ptype_mask; 3745 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3746 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3747 if (mask && mask != valid_ptype_masks[i]) { 3748 ret = -EINVAL; 3749 goto ptype_unknown; 3750 } 3751 unused_mask &= ~valid_ptype_masks[i]; 3752 } 3753 3754 if (unused_mask) { 3755 ret = -EINVAL; 3756 goto ptype_unknown; 3757 } 3758 3759 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3760 if (all_ptypes == NULL) { 3761 ret = 0; 3762 goto ptype_unknown; 3763 } 3764 3765 /* 3766 * Accommodate as many set_ptypes as possible. If the supplied 3767 * set_ptypes array is insufficient fill it partially. 3768 */ 3769 for (i = 0, j = 0; set_ptypes != NULL && 3770 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3771 if (ptype_mask & all_ptypes[i]) { 3772 if (j < num - 1) { 3773 set_ptypes[j] = all_ptypes[i]; 3774 3775 rte_ethdev_trace_set_ptypes(port_id, j, num, 3776 set_ptypes[j]); 3777 3778 j++; 3779 continue; 3780 } 3781 break; 3782 } 3783 } 3784 3785 if (set_ptypes != NULL && j < num) 3786 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3787 3788 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3789 3790 ptype_unknown: 3791 if (num > 0) 3792 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3793 3794 return ret; 3795 } 3796 3797 int 3798 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3799 unsigned int num) 3800 { 3801 int32_t ret; 3802 struct rte_eth_dev *dev; 3803 struct rte_eth_dev_info dev_info; 3804 3805 if (ma == NULL) { 3806 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3807 return -EINVAL; 3808 } 3809 3810 /* will check for us that port_id is a valid one */ 3811 ret = rte_eth_dev_info_get(port_id, &dev_info); 3812 if (ret != 0) 3813 return ret; 3814 3815 dev = &rte_eth_devices[port_id]; 3816 num = RTE_MIN(dev_info.max_mac_addrs, num); 3817 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3818 3819 rte_eth_trace_macaddrs_get(port_id, num); 3820 3821 return num; 3822 } 3823 3824 int 3825 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3826 { 3827 struct rte_eth_dev *dev; 3828 3829 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3830 dev = &rte_eth_devices[port_id]; 3831 3832 if (mac_addr == NULL) { 3833 RTE_ETHDEV_LOG(ERR, 3834 "Cannot get ethdev port %u MAC address to NULL\n", 3835 port_id); 3836 return -EINVAL; 3837 } 3838 3839 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3840 3841 rte_eth_trace_macaddr_get(port_id, mac_addr); 3842 3843 return 0; 3844 } 3845 3846 int 3847 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3848 { 3849 struct rte_eth_dev *dev; 3850 3851 
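	/*
	 * Illustrative round-trip (a sketch): read the current MTU and
	 * grow it; rte_eth_dev_set_mtu() validates the new value against
	 * the driver's min_mtu/max_mtu limits:
	 *
	 *   uint16_t mtu;
	 *   if (rte_eth_dev_get_mtu(port_id, &mtu) == 0)
	 *           ret = rte_eth_dev_set_mtu(port_id, mtu + 500);
	 */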
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3852 dev = &rte_eth_devices[port_id]; 3853 3854 if (mtu == NULL) { 3855 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3856 port_id); 3857 return -EINVAL; 3858 } 3859 3860 *mtu = dev->data->mtu; 3861 3862 rte_ethdev_trace_get_mtu(port_id, *mtu); 3863 3864 return 0; 3865 } 3866 3867 int 3868 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3869 { 3870 int ret; 3871 struct rte_eth_dev_info dev_info; 3872 struct rte_eth_dev *dev; 3873 3874 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3875 dev = &rte_eth_devices[port_id]; 3876 if (*dev->dev_ops->mtu_set == NULL) 3877 return -ENOTSUP; 3878 3879 /* 3880 * Check if the device supports dev_infos_get, if it does not 3881 * skip min_mtu/max_mtu validation here as this requires values 3882 * that are populated within the call to rte_eth_dev_info_get() 3883 * which relies on dev->dev_ops->dev_infos_get. 3884 */ 3885 if (*dev->dev_ops->dev_infos_get != NULL) { 3886 ret = rte_eth_dev_info_get(port_id, &dev_info); 3887 if (ret != 0) 3888 return ret; 3889 3890 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3891 if (ret != 0) 3892 return ret; 3893 } 3894 3895 if (dev->data->dev_configured == 0) { 3896 RTE_ETHDEV_LOG(ERR, 3897 "Port %u must be configured before MTU set\n", 3898 port_id); 3899 return -EINVAL; 3900 } 3901 3902 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3903 if (ret == 0) 3904 dev->data->mtu = mtu; 3905 3906 ret = eth_err(port_id, ret); 3907 3908 rte_ethdev_trace_set_mtu(port_id, mtu, ret); 3909 3910 return ret; 3911 } 3912 3913 int 3914 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3915 { 3916 struct rte_eth_dev *dev; 3917 int ret; 3918 3919 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3920 dev = &rte_eth_devices[port_id]; 3921 3922 if (!(dev->data->dev_conf.rxmode.offloads & 3923 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 3924 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 3925 port_id); 3926 return -ENOSYS; 3927 } 3928 3929 if (vlan_id > 4095) { 3930 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3931 port_id, vlan_id); 3932 return -EINVAL; 3933 } 3934 if (*dev->dev_ops->vlan_filter_set == NULL) 3935 return -ENOTSUP; 3936 3937 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3938 if (ret == 0) { 3939 struct rte_vlan_filter_conf *vfc; 3940 int vidx; 3941 int vbit; 3942 3943 vfc = &dev->data->vlan_filter_conf; 3944 vidx = vlan_id / 64; 3945 vbit = vlan_id % 64; 3946 3947 if (on) 3948 vfc->ids[vidx] |= RTE_BIT64(vbit); 3949 else 3950 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 3951 } 3952 3953 ret = eth_err(port_id, ret); 3954 3955 rte_ethdev_trace_vlan_filter(port_id, vlan_id, on, ret); 3956 3957 return ret; 3958 } 3959 3960 int 3961 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3962 int on) 3963 { 3964 struct rte_eth_dev *dev; 3965 3966 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3967 dev = &rte_eth_devices[port_id]; 3968 3969 if (rx_queue_id >= dev->data->nb_rx_queues) { 3970 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3971 return -EINVAL; 3972 } 3973 3974 if (*dev->dev_ops->vlan_strip_queue_set == NULL) 3975 return -ENOTSUP; 3976 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3977 3978 rte_ethdev_trace_set_vlan_strip_on_queue(port_id, rx_queue_id, on); 3979 3980 return 0; 3981 } 3982 3983 int 3984 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3985 enum rte_vlan_type vlan_type, 3986 uint16_t tpid) 3987 { 3988 struct rte_eth_dev *dev; 3989 int 
ret; 3990 3991 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3992 dev = &rte_eth_devices[port_id]; 3993 3994 if (*dev->dev_ops->vlan_tpid_set == NULL) 3995 return -ENOTSUP; 3996 ret = eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3997 tpid)); 3998 3999 rte_ethdev_trace_set_vlan_ether_type(port_id, vlan_type, tpid, ret); 4000 4001 return ret; 4002 } 4003 4004 int 4005 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 4006 { 4007 struct rte_eth_dev_info dev_info; 4008 struct rte_eth_dev *dev; 4009 int ret = 0; 4010 int mask = 0; 4011 int cur, org = 0; 4012 uint64_t orig_offloads; 4013 uint64_t dev_offloads; 4014 uint64_t new_offloads; 4015 4016 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4017 dev = &rte_eth_devices[port_id]; 4018 4019 /* save original values in case of failure */ 4020 orig_offloads = dev->data->dev_conf.rxmode.offloads; 4021 dev_offloads = orig_offloads; 4022 4023 /* check which option changed by application */ 4024 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 4025 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 4026 if (cur != org) { 4027 if (cur) 4028 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4029 else 4030 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4031 mask |= RTE_ETH_VLAN_STRIP_MASK; 4032 } 4033 4034 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 4035 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 4036 if (cur != org) { 4037 if (cur) 4038 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4039 else 4040 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4041 mask |= RTE_ETH_VLAN_FILTER_MASK; 4042 } 4043 4044 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 4045 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 4046 if (cur != org) { 4047 if (cur) 4048 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4049 else 4050 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4051 mask |= RTE_ETH_VLAN_EXTEND_MASK; 4052 } 4053 4054 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 4055 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 4056 if (cur != org) { 4057 if (cur) 4058 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4059 else 4060 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4061 mask |= RTE_ETH_QINQ_STRIP_MASK; 4062 } 4063 4064 /*no change*/ 4065 if (mask == 0) 4066 return ret; 4067 4068 ret = rte_eth_dev_info_get(port_id, &dev_info); 4069 if (ret != 0) 4070 return ret; 4071 4072 /* Rx VLAN offloading must be within its device capabilities */ 4073 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 4074 new_offloads = dev_offloads & ~orig_offloads; 4075 RTE_ETHDEV_LOG(ERR, 4076 "Ethdev port_id=%u requested new added VLAN offloads " 4077 "0x%" PRIx64 " must be within Rx offloads capabilities " 4078 "0x%" PRIx64 " in %s()\n", 4079 port_id, new_offloads, dev_info.rx_offload_capa, 4080 __func__); 4081 return -EINVAL; 4082 } 4083 4084 if (*dev->dev_ops->vlan_offload_set == NULL) 4085 return -ENOTSUP; 4086 dev->data->dev_conf.rxmode.offloads = dev_offloads; 4087 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 4088 if (ret) { 4089 /* hit an error restore original values */ 4090 dev->data->dev_conf.rxmode.offloads = orig_offloads; 4091 } 4092 4093 ret = eth_err(port_id, ret); 4094 4095 rte_ethdev_trace_set_vlan_offload(port_id, offload_mask, ret); 4096 4097 return ret; 4098 } 4099 4100 int 4101 rte_eth_dev_get_vlan_offload(uint16_t port_id) 4102 { 4103 struct rte_eth_dev *dev; 4104 uint64_t *dev_offloads; 4105 int ret = 0; 4106 4107 
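	/*
	 * The returned mask uses the same RTE_ETH_*_OFFLOAD flags that
	 * rte_eth_dev_set_vlan_offload() consumes, so a read-modify-write
	 * is straightforward (a sketch):
	 *
	 *   int mask = rte_eth_dev_get_vlan_offload(port_id);
	 *   mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
	 *   ret = rte_eth_dev_set_vlan_offload(port_id, mask);
	 */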
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4108 dev = &rte_eth_devices[port_id]; 4109 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 4110 4111 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 4112 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 4113 4114 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 4115 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 4116 4117 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 4118 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 4119 4120 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 4121 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 4122 4123 rte_ethdev_trace_get_vlan_offload(port_id, ret); 4124 4125 return ret; 4126 } 4127 4128 int 4129 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 4130 { 4131 struct rte_eth_dev *dev; 4132 int ret; 4133 4134 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4135 dev = &rte_eth_devices[port_id]; 4136 4137 if (*dev->dev_ops->vlan_pvid_set == NULL) 4138 return -ENOTSUP; 4139 ret = eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 4140 4141 rte_ethdev_trace_set_vlan_pvid(port_id, pvid, on, ret); 4142 4143 return ret; 4144 } 4145 4146 int 4147 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4148 { 4149 struct rte_eth_dev *dev; 4150 int ret; 4151 4152 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4153 dev = &rte_eth_devices[port_id]; 4154 4155 if (fc_conf == NULL) { 4156 RTE_ETHDEV_LOG(ERR, 4157 "Cannot get ethdev port %u flow control config to NULL\n", 4158 port_id); 4159 return -EINVAL; 4160 } 4161 4162 if (*dev->dev_ops->flow_ctrl_get == NULL) 4163 return -ENOTSUP; 4164 memset(fc_conf, 0, sizeof(*fc_conf)); 4165 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 4166 4167 rte_ethdev_trace_flow_ctrl_get(port_id, fc_conf, ret); 4168 4169 return ret; 4170 } 4171 4172 int 4173 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4174 { 4175 struct rte_eth_dev *dev; 4176 int ret; 4177 4178 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4179 dev = &rte_eth_devices[port_id]; 4180 4181 if (fc_conf == NULL) { 4182 RTE_ETHDEV_LOG(ERR, 4183 "Cannot set ethdev port %u flow control from NULL config\n", 4184 port_id); 4185 return -EINVAL; 4186 } 4187 4188 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 4189 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 4190 return -EINVAL; 4191 } 4192 4193 if (*dev->dev_ops->flow_ctrl_set == NULL) 4194 return -ENOTSUP; 4195 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 4196 4197 rte_ethdev_trace_flow_ctrl_set(port_id, fc_conf, ret); 4198 4199 return ret; 4200 } 4201 4202 int 4203 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 4204 struct rte_eth_pfc_conf *pfc_conf) 4205 { 4206 struct rte_eth_dev *dev; 4207 int ret; 4208 4209 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4210 dev = &rte_eth_devices[port_id]; 4211 4212 if (pfc_conf == NULL) { 4213 RTE_ETHDEV_LOG(ERR, 4214 "Cannot set ethdev port %u priority flow control from NULL config\n", 4215 port_id); 4216 return -EINVAL; 4217 } 4218 4219 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 4220 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 4221 return -EINVAL; 4222 } 4223 4224 /* High water, low water validation are device specific */ 4225 if (*dev->dev_ops->priority_flow_ctrl_set == NULL) 4226 return -ENOTSUP; 4227 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 4228 (dev, pfc_conf)); 4229 4230 rte_ethdev_trace_priority_flow_ctrl_set(port_id, 
pfc_conf, ret); 4231 4232 return ret; 4233 } 4234 4235 static int 4236 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4237 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4238 { 4239 if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) || 4240 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4241 if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) { 4242 RTE_ETHDEV_LOG(ERR, 4243 "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n", 4244 pfc_queue_conf->rx_pause.tx_qid, 4245 dev_info->nb_tx_queues); 4246 return -EINVAL; 4247 } 4248 4249 if (pfc_queue_conf->rx_pause.tc >= tc_max) { 4250 RTE_ETHDEV_LOG(ERR, 4251 "PFC TC not in range for Rx pause requested:%d max:%d\n", 4252 pfc_queue_conf->rx_pause.tc, tc_max); 4253 return -EINVAL; 4254 } 4255 } 4256 4257 return 0; 4258 } 4259 4260 static int 4261 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4262 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4263 { 4264 if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) || 4265 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4266 if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) { 4267 RTE_ETHDEV_LOG(ERR, 4268 "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n", 4269 pfc_queue_conf->tx_pause.rx_qid, 4270 dev_info->nb_rx_queues); 4271 return -EINVAL; 4272 } 4273 4274 if (pfc_queue_conf->tx_pause.tc >= tc_max) { 4275 RTE_ETHDEV_LOG(ERR, 4276 "PFC TC not in range for Tx pause requested:%d max:%d\n", 4277 pfc_queue_conf->tx_pause.tc, tc_max); 4278 return -EINVAL; 4279 } 4280 } 4281 4282 return 0; 4283 } 4284 4285 int 4286 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, 4287 struct rte_eth_pfc_queue_info *pfc_queue_info) 4288 { 4289 struct rte_eth_dev *dev; 4290 int ret; 4291 4292 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4293 dev = &rte_eth_devices[port_id]; 4294 4295 if (pfc_queue_info == NULL) { 4296 RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n", 4297 port_id); 4298 return -EINVAL; 4299 } 4300 4301 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL) 4302 return -ENOTSUP; 4303 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 4304 (dev, pfc_queue_info)); 4305 4306 rte_ethdev_trace_priority_flow_ctrl_queue_info_get(port_id, 4307 pfc_queue_info, ret); 4308 4309 return ret; 4310 } 4311 4312 int 4313 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, 4314 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4315 { 4316 struct rte_eth_pfc_queue_info pfc_info; 4317 struct rte_eth_dev_info dev_info; 4318 struct rte_eth_dev *dev; 4319 int ret; 4320 4321 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4322 dev = &rte_eth_devices[port_id]; 4323 4324 if (pfc_queue_conf == NULL) { 4325 RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n", 4326 port_id); 4327 return -EINVAL; 4328 } 4329 4330 ret = rte_eth_dev_info_get(port_id, &dev_info); 4331 if (ret != 0) 4332 return ret; 4333 4334 ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info); 4335 if (ret != 0) 4336 return ret; 4337 4338 if (pfc_info.tc_max == 0) { 4339 RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n", 4340 port_id); 4341 return -ENOTSUP; 4342 } 4343 4344 /* Check requested mode supported or not */ 4345 if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE && 4346 pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) { 4347 RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n", 4348 port_id); 4349 return -EINVAL; 4350 } 4351 4352 if 
(pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE && 4353 pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) { 4354 RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n", 4355 port_id); 4356 return -EINVAL; 4357 } 4358 4359 /* Validate Rx pause parameters */ 4360 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4361 pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) { 4362 ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max, 4363 pfc_queue_conf); 4364 if (ret != 0) 4365 return ret; 4366 } 4367 4368 /* Validate Tx pause parameters */ 4369 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4370 pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) { 4371 ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max, 4372 pfc_queue_conf); 4373 if (ret != 0) 4374 return ret; 4375 } 4376 4377 if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL) 4378 return -ENOTSUP; 4379 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_config) 4380 (dev, pfc_queue_conf)); 4381 4382 rte_ethdev_trace_priority_flow_ctrl_queue_configure(port_id, 4383 pfc_queue_conf, ret); 4384 4385 return ret; 4386 } 4387 4388 static int 4389 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 4390 uint16_t reta_size) 4391 { 4392 uint16_t i, num; 4393 4394 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 4395 for (i = 0; i < num; i++) { 4396 if (reta_conf[i].mask) 4397 return 0; 4398 } 4399 4400 return -EINVAL; 4401 } 4402 4403 static int 4404 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 4405 uint16_t reta_size, 4406 uint16_t max_rxq) 4407 { 4408 uint16_t i, idx, shift; 4409 4410 if (max_rxq == 0) { 4411 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4412 return -EINVAL; 4413 } 4414 4415 for (i = 0; i < reta_size; i++) { 4416 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4417 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4418 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 4419 (reta_conf[idx].reta[shift] >= max_rxq)) { 4420 RTE_ETHDEV_LOG(ERR, 4421 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4422 idx, shift, 4423 reta_conf[idx].reta[shift], max_rxq); 4424 return -EINVAL; 4425 } 4426 } 4427 4428 return 0; 4429 } 4430 4431 int 4432 rte_eth_dev_rss_reta_update(uint16_t port_id, 4433 struct rte_eth_rss_reta_entry64 *reta_conf, 4434 uint16_t reta_size) 4435 { 4436 enum rte_eth_rx_mq_mode mq_mode; 4437 struct rte_eth_dev *dev; 4438 int ret; 4439 4440 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4441 dev = &rte_eth_devices[port_id]; 4442 4443 if (reta_conf == NULL) { 4444 RTE_ETHDEV_LOG(ERR, 4445 "Cannot update ethdev port %u RSS RETA to NULL\n", 4446 port_id); 4447 return -EINVAL; 4448 } 4449 4450 if (reta_size == 0) { 4451 RTE_ETHDEV_LOG(ERR, 4452 "Cannot update ethdev port %u RSS RETA with zero size\n", 4453 port_id); 4454 return -EINVAL; 4455 } 4456 4457 /* Check mask bits */ 4458 ret = eth_check_reta_mask(reta_conf, reta_size); 4459 if (ret < 0) 4460 return ret; 4461 4462 /* Check entry value */ 4463 ret = eth_check_reta_entry(reta_conf, reta_size, 4464 dev->data->nb_rx_queues); 4465 if (ret < 0) 4466 return ret; 4467 4468 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4469 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4470 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4471 return -ENOTSUP; 4472 } 4473 4474 if (*dev->dev_ops->reta_update == NULL) 4475 return -ENOTSUP; 4476 ret = eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4477 reta_size)); 4478 4479 rte_ethdev_trace_rss_reta_update(port_id, reta_conf, reta_size, ret); 4480 4481 return 
ret; 4482 } 4483 4484 int 4485 rte_eth_dev_rss_reta_query(uint16_t port_id, 4486 struct rte_eth_rss_reta_entry64 *reta_conf, 4487 uint16_t reta_size) 4488 { 4489 struct rte_eth_dev *dev; 4490 int ret; 4491 4492 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4493 dev = &rte_eth_devices[port_id]; 4494 4495 if (reta_conf == NULL) { 4496 RTE_ETHDEV_LOG(ERR, 4497 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4498 port_id); 4499 return -EINVAL; 4500 } 4501 4502 /* Check mask bits */ 4503 ret = eth_check_reta_mask(reta_conf, reta_size); 4504 if (ret < 0) 4505 return ret; 4506 4507 if (*dev->dev_ops->reta_query == NULL) 4508 return -ENOTSUP; 4509 ret = eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4510 reta_size)); 4511 4512 rte_ethdev_trace_rss_reta_query(port_id, reta_conf, reta_size, ret); 4513 4514 return ret; 4515 } 4516 4517 int 4518 rte_eth_dev_rss_hash_update(uint16_t port_id, 4519 struct rte_eth_rss_conf *rss_conf) 4520 { 4521 struct rte_eth_dev *dev; 4522 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4523 enum rte_eth_rx_mq_mode mq_mode; 4524 int ret; 4525 4526 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4527 dev = &rte_eth_devices[port_id]; 4528 4529 if (rss_conf == NULL) { 4530 RTE_ETHDEV_LOG(ERR, 4531 "Cannot update ethdev port %u RSS hash from NULL config\n", 4532 port_id); 4533 return -EINVAL; 4534 } 4535 4536 ret = rte_eth_dev_info_get(port_id, &dev_info); 4537 if (ret != 0) 4538 return ret; 4539 4540 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4541 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4542 dev_info.flow_type_rss_offloads) { 4543 RTE_ETHDEV_LOG(ERR, 4544 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4545 port_id, rss_conf->rss_hf, 4546 dev_info.flow_type_rss_offloads); 4547 return -EINVAL; 4548 } 4549 4550 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4551 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4552 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4553 return -ENOTSUP; 4554 } 4555 4556 if (*dev->dev_ops->rss_hash_update == NULL) 4557 return -ENOTSUP; 4558 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4559 rss_conf)); 4560 4561 rte_ethdev_trace_rss_hash_update(port_id, rss_conf, ret); 4562 4563 return ret; 4564 } 4565 4566 int 4567 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4568 struct rte_eth_rss_conf *rss_conf) 4569 { 4570 struct rte_eth_dev *dev; 4571 int ret; 4572 4573 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4574 dev = &rte_eth_devices[port_id]; 4575 4576 if (rss_conf == NULL) { 4577 RTE_ETHDEV_LOG(ERR, 4578 "Cannot get ethdev port %u RSS hash config to NULL\n", 4579 port_id); 4580 return -EINVAL; 4581 } 4582 4583 if (*dev->dev_ops->rss_hash_conf_get == NULL) 4584 return -ENOTSUP; 4585 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4586 rss_conf)); 4587 4588 rte_ethdev_trace_rss_hash_conf_get(port_id, rss_conf, ret); 4589 4590 return ret; 4591 } 4592 4593 int 4594 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4595 struct rte_eth_udp_tunnel *udp_tunnel) 4596 { 4597 struct rte_eth_dev *dev; 4598 int ret; 4599 4600 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4601 dev = &rte_eth_devices[port_id]; 4602 4603 if (udp_tunnel == NULL) { 4604 RTE_ETHDEV_LOG(ERR, 4605 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4606 port_id); 4607 return -EINVAL; 4608 } 4609 4610 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4611 RTE_ETHDEV_LOG(ERR, "Invalid tunnel 
type\n"); 4612 return -EINVAL; 4613 } 4614 4615 if (*dev->dev_ops->udp_tunnel_port_add == NULL) 4616 return -ENOTSUP; 4617 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4618 udp_tunnel)); 4619 4620 rte_ethdev_trace_udp_tunnel_port_add(port_id, udp_tunnel, ret); 4621 4622 return ret; 4623 } 4624 4625 int 4626 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4627 struct rte_eth_udp_tunnel *udp_tunnel) 4628 { 4629 struct rte_eth_dev *dev; 4630 int ret; 4631 4632 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4633 dev = &rte_eth_devices[port_id]; 4634 4635 if (udp_tunnel == NULL) { 4636 RTE_ETHDEV_LOG(ERR, 4637 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4638 port_id); 4639 return -EINVAL; 4640 } 4641 4642 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4643 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4644 return -EINVAL; 4645 } 4646 4647 if (*dev->dev_ops->udp_tunnel_port_del == NULL) 4648 return -ENOTSUP; 4649 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4650 udp_tunnel)); 4651 4652 rte_ethdev_trace_udp_tunnel_port_delete(port_id, udp_tunnel, ret); 4653 4654 return ret; 4655 } 4656 4657 int 4658 rte_eth_led_on(uint16_t port_id) 4659 { 4660 struct rte_eth_dev *dev; 4661 int ret; 4662 4663 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4664 dev = &rte_eth_devices[port_id]; 4665 4666 if (*dev->dev_ops->dev_led_on == NULL) 4667 return -ENOTSUP; 4668 ret = eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4669 4670 rte_eth_trace_led_on(port_id, ret); 4671 4672 return ret; 4673 } 4674 4675 int 4676 rte_eth_led_off(uint16_t port_id) 4677 { 4678 struct rte_eth_dev *dev; 4679 int ret; 4680 4681 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4682 dev = &rte_eth_devices[port_id]; 4683 4684 if (*dev->dev_ops->dev_led_off == NULL) 4685 return -ENOTSUP; 4686 ret = eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4687 4688 rte_eth_trace_led_off(port_id, ret); 4689 4690 return ret; 4691 } 4692 4693 int 4694 rte_eth_fec_get_capability(uint16_t port_id, 4695 struct rte_eth_fec_capa *speed_fec_capa, 4696 unsigned int num) 4697 { 4698 struct rte_eth_dev *dev; 4699 int ret; 4700 4701 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4702 dev = &rte_eth_devices[port_id]; 4703 4704 if (speed_fec_capa == NULL && num > 0) { 4705 RTE_ETHDEV_LOG(ERR, 4706 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4707 port_id); 4708 return -EINVAL; 4709 } 4710 4711 if (*dev->dev_ops->fec_get_capability == NULL) 4712 return -ENOTSUP; 4713 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4714 4715 rte_eth_trace_fec_get_capability(port_id, speed_fec_capa, num, ret); 4716 4717 return ret; 4718 } 4719 4720 int 4721 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4722 { 4723 struct rte_eth_dev *dev; 4724 int ret; 4725 4726 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4727 dev = &rte_eth_devices[port_id]; 4728 4729 if (fec_capa == NULL) { 4730 RTE_ETHDEV_LOG(ERR, 4731 "Cannot get ethdev port %u current FEC mode to NULL\n", 4732 port_id); 4733 return -EINVAL; 4734 } 4735 4736 if (*dev->dev_ops->fec_get == NULL) 4737 return -ENOTSUP; 4738 ret = eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4739 4740 rte_eth_trace_fec_get(port_id, fec_capa, ret); 4741 4742 return ret; 4743 } 4744 4745 int 4746 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4747 { 4748 struct rte_eth_dev *dev; 4749 int ret; 4750 4751 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4752 dev 
= &rte_eth_devices[port_id]; 4753 4754 if (*dev->dev_ops->fec_set == NULL) 4755 return -ENOTSUP; 4756 ret = eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4757 4758 rte_eth_trace_fec_set(port_id, fec_capa, ret); 4759 4760 return ret; 4761 } 4762 4763 /* 4764 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4765 * an empty spot. 4766 */ 4767 static int 4768 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4769 { 4770 struct rte_eth_dev_info dev_info; 4771 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4772 unsigned i; 4773 int ret; 4774 4775 ret = rte_eth_dev_info_get(port_id, &dev_info); 4776 if (ret != 0) 4777 return -1; 4778 4779 for (i = 0; i < dev_info.max_mac_addrs; i++) 4780 if (memcmp(addr, &dev->data->mac_addrs[i], 4781 RTE_ETHER_ADDR_LEN) == 0) 4782 return i; 4783 4784 return -1; 4785 } 4786 4787 static const struct rte_ether_addr null_mac_addr; 4788 4789 int 4790 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4791 uint32_t pool) 4792 { 4793 struct rte_eth_dev *dev; 4794 int index; 4795 uint64_t pool_mask; 4796 int ret; 4797 4798 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4799 dev = &rte_eth_devices[port_id]; 4800 4801 if (addr == NULL) { 4802 RTE_ETHDEV_LOG(ERR, 4803 "Cannot add ethdev port %u MAC address from NULL address\n", 4804 port_id); 4805 return -EINVAL; 4806 } 4807 4808 if (*dev->dev_ops->mac_addr_add == NULL) 4809 return -ENOTSUP; 4810 4811 if (rte_is_zero_ether_addr(addr)) { 4812 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4813 port_id); 4814 return -EINVAL; 4815 } 4816 if (pool >= RTE_ETH_64_POOLS) { 4817 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 4818 return -EINVAL; 4819 } 4820 4821 index = eth_dev_get_mac_addr_index(port_id, addr); 4822 if (index < 0) { 4823 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4824 if (index < 0) { 4825 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4826 port_id); 4827 return -ENOSPC; 4828 } 4829 } else { 4830 pool_mask = dev->data->mac_pool_sel[index]; 4831 4832 /* Check if both MAC address and pool is already there, and do nothing */ 4833 if (pool_mask & RTE_BIT64(pool)) 4834 return 0; 4835 } 4836 4837 /* Update NIC */ 4838 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4839 4840 if (ret == 0) { 4841 /* Update address in NIC data structure */ 4842 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4843 4844 /* Update pool bitmap in NIC data structure */ 4845 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4846 } 4847 4848 ret = eth_err(port_id, ret); 4849 4850 rte_ethdev_trace_mac_addr_add(port_id, addr, pool, ret); 4851 4852 return ret; 4853 } 4854 4855 int 4856 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4857 { 4858 struct rte_eth_dev *dev; 4859 int index; 4860 4861 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4862 dev = &rte_eth_devices[port_id]; 4863 4864 if (addr == NULL) { 4865 RTE_ETHDEV_LOG(ERR, 4866 "Cannot remove ethdev port %u MAC address from NULL address\n", 4867 port_id); 4868 return -EINVAL; 4869 } 4870 4871 if (*dev->dev_ops->mac_addr_remove == NULL) 4872 return -ENOTSUP; 4873 4874 index = eth_dev_get_mac_addr_index(port_id, addr); 4875 if (index == 0) { 4876 RTE_ETHDEV_LOG(ERR, 4877 "Port %u: Cannot remove default MAC address\n", 4878 port_id); 4879 return -EADDRINUSE; 4880 } else if (index < 0) 4881 return 0; /* Do nothing if address wasn't found */ 4882 4883 /* Update NIC */ 4884 
(*dev->dev_ops->mac_addr_remove)(dev, index); 4885 4886 /* Update address in NIC data structure */ 4887 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4888 4889 /* reset pool bitmap */ 4890 dev->data->mac_pool_sel[index] = 0; 4891 4892 rte_ethdev_trace_mac_addr_remove(port_id, addr); 4893 4894 return 0; 4895 } 4896 4897 int 4898 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4899 { 4900 struct rte_eth_dev *dev; 4901 int index; 4902 int ret; 4903 4904 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4905 dev = &rte_eth_devices[port_id]; 4906 4907 if (addr == NULL) { 4908 RTE_ETHDEV_LOG(ERR, 4909 "Cannot set ethdev port %u default MAC address from NULL address\n", 4910 port_id); 4911 return -EINVAL; 4912 } 4913 4914 if (!rte_is_valid_assigned_ether_addr(addr)) 4915 return -EINVAL; 4916 4917 if (*dev->dev_ops->mac_addr_set == NULL) 4918 return -ENOTSUP; 4919 4920 /* Keep address unique in dev->data->mac_addrs[]. */ 4921 index = eth_dev_get_mac_addr_index(port_id, addr); 4922 if (index > 0) { 4923 RTE_ETHDEV_LOG(ERR, 4924 "New default address for port %u was already in the address list. Please remove it first.\n", 4925 port_id); 4926 return -EEXIST; 4927 } 4928 4929 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4930 if (ret < 0) 4931 return ret; 4932 4933 /* Update default address in NIC data structure */ 4934 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4935 4936 rte_ethdev_trace_default_mac_addr_set(port_id, addr); 4937 4938 return 0; 4939 } 4940 4941 4942 /* 4943 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4944 * an empty spot. 4945 */ 4946 static int 4947 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4948 const struct rte_ether_addr *addr) 4949 { 4950 struct rte_eth_dev_info dev_info; 4951 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4952 unsigned i; 4953 int ret; 4954 4955 ret = rte_eth_dev_info_get(port_id, &dev_info); 4956 if (ret != 0) 4957 return -1; 4958 4959 if (!dev->data->hash_mac_addrs) 4960 return -1; 4961 4962 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4963 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4964 RTE_ETHER_ADDR_LEN) == 0) 4965 return i; 4966 4967 return -1; 4968 } 4969 4970 int 4971 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4972 uint8_t on) 4973 { 4974 int index; 4975 int ret; 4976 struct rte_eth_dev *dev; 4977 4978 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4979 dev = &rte_eth_devices[port_id]; 4980 4981 if (addr == NULL) { 4982 RTE_ETHDEV_LOG(ERR, 4983 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4984 port_id); 4985 return -EINVAL; 4986 } 4987 4988 if (rte_is_zero_ether_addr(addr)) { 4989 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4990 port_id); 4991 return -EINVAL; 4992 } 4993 4994 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4995 /* Check if it's already there, and do nothing */ 4996 if ((index >= 0) && on) 4997 return 0; 4998 4999 if (index < 0) { 5000 if (!on) { 5001 RTE_ETHDEV_LOG(ERR, 5002 "Port %u: the MAC address was not set in UTA\n", 5003 port_id); 5004 return -EINVAL; 5005 } 5006 5007 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 5008 if (index < 0) { 5009 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 5010 port_id); 5011 return -ENOSPC; 5012 } 5013 } 5014 5015 if (*dev->dev_ops->uc_hash_table_set == NULL) 5016 return -ENOTSUP; 5017 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 5018 if (ret == 0) { 
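/*
 * Usage sketch (illustrative only): replacing a port's default MAC address
 * with rte_eth_dev_default_mac_addr_set() above. On -EEXIST the address is
 * removed from the list first, matching the "remove it first" error path.
 * "app_port_id" and the locally administered "new_mac" are assumptions.
 *
 *	struct rte_ether_addr new_mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *	};
 *
 *	if (rte_eth_dev_default_mac_addr_set(app_port_id, &new_mac) == -EEXIST) {
 *		rte_eth_dev_mac_addr_remove(app_port_id, &new_mac);
 *		rte_eth_dev_default_mac_addr_set(app_port_id, &new_mac);
 *	}
 */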
5019 /* Update address in NIC data structure */ 5020 if (on) 5021 rte_ether_addr_copy(addr, 5022 &dev->data->hash_mac_addrs[index]); 5023 else 5024 rte_ether_addr_copy(&null_mac_addr, 5025 &dev->data->hash_mac_addrs[index]); 5026 } 5027 5028 ret = eth_err(port_id, ret); 5029 5030 rte_ethdev_trace_uc_hash_table_set(port_id, on, ret); 5031 5032 return ret; 5033 } 5034 5035 int 5036 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 5037 { 5038 struct rte_eth_dev *dev; 5039 int ret; 5040 5041 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5042 dev = &rte_eth_devices[port_id]; 5043 5044 if (*dev->dev_ops->uc_all_hash_table_set == NULL) 5045 return -ENOTSUP; 5046 ret = eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, on)); 5047 5048 rte_ethdev_trace_uc_all_hash_table_set(port_id, on, ret); 5049 5050 return ret; 5051 } 5052 5053 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 5054 uint32_t tx_rate) 5055 { 5056 struct rte_eth_dev *dev; 5057 struct rte_eth_dev_info dev_info; 5058 struct rte_eth_link link; 5059 int ret; 5060 5061 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5062 dev = &rte_eth_devices[port_id]; 5063 5064 ret = rte_eth_dev_info_get(port_id, &dev_info); 5065 if (ret != 0) 5066 return ret; 5067 5068 link = dev->data->dev_link; 5069 5070 if (queue_idx > dev_info.max_tx_queues) { 5071 RTE_ETHDEV_LOG(ERR, 5072 "Set queue rate limit:port %u: invalid queue ID=%u\n", 5073 port_id, queue_idx); 5074 return -EINVAL; 5075 } 5076 5077 if (tx_rate > link.link_speed) { 5078 RTE_ETHDEV_LOG(ERR, 5079 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 5080 tx_rate, link.link_speed); 5081 return -EINVAL; 5082 } 5083 5084 if (*dev->dev_ops->set_queue_rate_limit == NULL) 5085 return -ENOTSUP; 5086 ret = eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 5087 queue_idx, tx_rate)); 5088 5089 rte_eth_trace_set_queue_rate_limit(port_id, queue_idx, tx_rate, ret); 5090 5091 return ret; 5092 } 5093 5094 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, 5095 uint8_t avail_thresh) 5096 { 5097 struct rte_eth_dev *dev; 5098 int ret; 5099 5100 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5101 dev = &rte_eth_devices[port_id]; 5102 5103 if (queue_id > dev->data->nb_rx_queues) { 5104 RTE_ETHDEV_LOG(ERR, 5105 "Set queue avail thresh: port %u: invalid queue ID=%u.\n", 5106 port_id, queue_id); 5107 return -EINVAL; 5108 } 5109 5110 if (avail_thresh > 99) { 5111 RTE_ETHDEV_LOG(ERR, 5112 "Set queue avail thresh: port %u: threshold should be <= 99.\n", 5113 port_id); 5114 return -EINVAL; 5115 } 5116 if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL) 5117 return -ENOTSUP; 5118 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev, 5119 queue_id, avail_thresh)); 5120 5121 rte_eth_trace_rx_avail_thresh_set(port_id, queue_id, avail_thresh, ret); 5122 5123 return ret; 5124 } 5125 5126 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, 5127 uint8_t *avail_thresh) 5128 { 5129 struct rte_eth_dev *dev; 5130 int ret; 5131 5132 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5133 dev = &rte_eth_devices[port_id]; 5134 5135 if (queue_id == NULL) 5136 return -EINVAL; 5137 if (*queue_id >= dev->data->nb_rx_queues) 5138 *queue_id = 0; 5139 5140 if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL) 5141 return -ENOTSUP; 5142 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev, 5143 queue_id, avail_thresh)); 5144 5145 rte_eth_trace_rx_avail_thresh_query(port_id, 
*queue_id, ret); 5146 5147 return ret; 5148 } 5149 5150 RTE_INIT(eth_dev_init_fp_ops) 5151 { 5152 uint32_t i; 5153 5154 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 5155 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 5156 } 5157 5158 RTE_INIT(eth_dev_init_cb_lists) 5159 { 5160 uint16_t i; 5161 5162 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 5163 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 5164 } 5165 5166 int 5167 rte_eth_dev_callback_register(uint16_t port_id, 5168 enum rte_eth_event_type event, 5169 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5170 { 5171 struct rte_eth_dev *dev; 5172 struct rte_eth_dev_callback *user_cb; 5173 uint16_t next_port; 5174 uint16_t last_port; 5175 5176 if (cb_fn == NULL) { 5177 RTE_ETHDEV_LOG(ERR, 5178 "Cannot register ethdev port %u callback from NULL\n", 5179 port_id); 5180 return -EINVAL; 5181 } 5182 5183 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5184 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 5185 return -EINVAL; 5186 } 5187 5188 if (port_id == RTE_ETH_ALL) { 5189 next_port = 0; 5190 last_port = RTE_MAX_ETHPORTS - 1; 5191 } else { 5192 next_port = last_port = port_id; 5193 } 5194 5195 rte_spinlock_lock(&eth_dev_cb_lock); 5196 5197 do { 5198 dev = &rte_eth_devices[next_port]; 5199 5200 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 5201 if (user_cb->cb_fn == cb_fn && 5202 user_cb->cb_arg == cb_arg && 5203 user_cb->event == event) { 5204 break; 5205 } 5206 } 5207 5208 /* create a new callback. */ 5209 if (user_cb == NULL) { 5210 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 5211 sizeof(struct rte_eth_dev_callback), 0); 5212 if (user_cb != NULL) { 5213 user_cb->cb_fn = cb_fn; 5214 user_cb->cb_arg = cb_arg; 5215 user_cb->event = event; 5216 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 5217 user_cb, next); 5218 } else { 5219 rte_spinlock_unlock(&eth_dev_cb_lock); 5220 rte_eth_dev_callback_unregister(port_id, event, 5221 cb_fn, cb_arg); 5222 return -ENOMEM; 5223 } 5224 5225 } 5226 } while (++next_port <= last_port); 5227 5228 rte_spinlock_unlock(&eth_dev_cb_lock); 5229 5230 rte_ethdev_trace_callback_register(port_id, event, cb_fn, cb_arg); 5231 5232 return 0; 5233 } 5234 5235 int 5236 rte_eth_dev_callback_unregister(uint16_t port_id, 5237 enum rte_eth_event_type event, 5238 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5239 { 5240 int ret; 5241 struct rte_eth_dev *dev; 5242 struct rte_eth_dev_callback *cb, *next; 5243 uint16_t next_port; 5244 uint16_t last_port; 5245 5246 if (cb_fn == NULL) { 5247 RTE_ETHDEV_LOG(ERR, 5248 "Cannot unregister ethdev port %u callback from NULL\n", 5249 port_id); 5250 return -EINVAL; 5251 } 5252 5253 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5254 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 5255 return -EINVAL; 5256 } 5257 5258 if (port_id == RTE_ETH_ALL) { 5259 next_port = 0; 5260 last_port = RTE_MAX_ETHPORTS - 1; 5261 } else { 5262 next_port = last_port = port_id; 5263 } 5264 5265 rte_spinlock_lock(&eth_dev_cb_lock); 5266 5267 do { 5268 dev = &rte_eth_devices[next_port]; 5269 ret = 0; 5270 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 5271 cb = next) { 5272 5273 next = TAILQ_NEXT(cb, next); 5274 5275 if (cb->cb_fn != cb_fn || cb->event != event || 5276 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 5277 continue; 5278 5279 /* 5280 * if this callback is not executing right now, 5281 * then remove it.
5282 */ 5283 if (cb->active == 0) { 5284 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 5285 rte_free(cb); 5286 } else { 5287 ret = -EAGAIN; 5288 } 5289 } 5290 } while (++next_port <= last_port); 5291 5292 rte_spinlock_unlock(&eth_dev_cb_lock); 5293 5294 rte_ethdev_trace_callback_unregister(port_id, event, cb_fn, cb_arg, 5295 ret); 5296 5297 return ret; 5298 } 5299 5300 int 5301 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 5302 { 5303 uint32_t vec; 5304 struct rte_eth_dev *dev; 5305 struct rte_intr_handle *intr_handle; 5306 uint16_t qid; 5307 int rc; 5308 5309 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5310 dev = &rte_eth_devices[port_id]; 5311 5312 if (!dev->intr_handle) { 5313 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5314 return -ENOTSUP; 5315 } 5316 5317 intr_handle = dev->intr_handle; 5318 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5319 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5320 return -EPERM; 5321 } 5322 5323 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 5324 vec = rte_intr_vec_list_index_get(intr_handle, qid); 5325 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5326 5327 rte_ethdev_trace_rx_intr_ctl(port_id, qid, epfd, op, data, rc); 5328 5329 if (rc && rc != -EEXIST) { 5330 RTE_ETHDEV_LOG(ERR, 5331 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5332 port_id, qid, op, epfd, vec); 5333 } 5334 } 5335 5336 return 0; 5337 } 5338 5339 int 5340 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 5341 { 5342 struct rte_intr_handle *intr_handle; 5343 struct rte_eth_dev *dev; 5344 unsigned int efd_idx; 5345 uint32_t vec; 5346 int fd; 5347 5348 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 5349 dev = &rte_eth_devices[port_id]; 5350 5351 if (queue_id >= dev->data->nb_rx_queues) { 5352 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5353 return -1; 5354 } 5355 5356 if (!dev->intr_handle) { 5357 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5358 return -1; 5359 } 5360 5361 intr_handle = dev->intr_handle; 5362 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5363 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5364 return -1; 5365 } 5366 5367 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5368 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
5369 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 5370 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 5371 5372 rte_ethdev_trace_rx_intr_ctl_q_get_fd(port_id, queue_id, fd); 5373 5374 return fd; 5375 } 5376 5377 int 5378 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 5379 int epfd, int op, void *data) 5380 { 5381 uint32_t vec; 5382 struct rte_eth_dev *dev; 5383 struct rte_intr_handle *intr_handle; 5384 int rc; 5385 5386 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5387 dev = &rte_eth_devices[port_id]; 5388 5389 if (queue_id >= dev->data->nb_rx_queues) { 5390 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5391 return -EINVAL; 5392 } 5393 5394 if (!dev->intr_handle) { 5395 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5396 return -ENOTSUP; 5397 } 5398 5399 intr_handle = dev->intr_handle; 5400 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5401 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5402 return -EPERM; 5403 } 5404 5405 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5406 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5407 5408 rte_ethdev_trace_rx_intr_ctl_q(port_id, queue_id, epfd, op, data, rc); 5409 5410 if (rc && rc != -EEXIST) { 5411 RTE_ETHDEV_LOG(ERR, 5412 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5413 port_id, queue_id, op, epfd, vec); 5414 return rc; 5415 } 5416 5417 return 0; 5418 } 5419 5420 int 5421 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5422 uint16_t queue_id) 5423 { 5424 struct rte_eth_dev *dev; 5425 int ret; 5426 5427 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5428 dev = &rte_eth_devices[port_id]; 5429 5430 ret = eth_dev_validate_rx_queue(dev, queue_id); 5431 if (ret != 0) 5432 return ret; 5433 5434 if (*dev->dev_ops->rx_queue_intr_enable == NULL) 5435 return -ENOTSUP; 5436 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5437 5438 rte_ethdev_trace_rx_intr_enable(port_id, queue_id, ret); 5439 5440 return ret; 5441 } 5442 5443 int 5444 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5445 uint16_t queue_id) 5446 { 5447 struct rte_eth_dev *dev; 5448 int ret; 5449 5450 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5451 dev = &rte_eth_devices[port_id]; 5452 5453 ret = eth_dev_validate_rx_queue(dev, queue_id); 5454 if (ret != 0) 5455 return ret; 5456 5457 if (*dev->dev_ops->rx_queue_intr_disable == NULL) 5458 return -ENOTSUP; 5459 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5460 5461 rte_ethdev_trace_rx_intr_disable(port_id, queue_id, ret); 5462 5463 return ret; 5464 } 5465 5466 5467 const struct rte_eth_rxtx_callback * 5468 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5469 rte_rx_callback_fn fn, void *user_param) 5470 { 5471 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5472 rte_errno = ENOTSUP; 5473 return NULL; 5474 #endif 5475 struct rte_eth_dev *dev; 5476 5477 /* check input parameters */ 5478 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5479 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5480 rte_errno = EINVAL; 5481 return NULL; 5482 } 5483 dev = &rte_eth_devices[port_id]; 5484 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5485 rte_errno = EINVAL; 5486 return NULL; 5487 } 5488 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5489 5490 if (cb == NULL) { 5491 rte_errno = ENOMEM; 5492 return NULL; 5493 } 5494 5495 cb->fn.rx = fn; 5496 cb->param = user_param; 5497 5498 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5499 /* Add the callbacks in fifo order.
*/ 5500 struct rte_eth_rxtx_callback *tail = 5501 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5502 5503 if (!tail) { 5504 /* Stores to cb->fn and cb->param should complete before 5505 * cb is visible to data plane. 5506 */ 5507 __atomic_store_n( 5508 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5509 cb, __ATOMIC_RELEASE); 5510 5511 } else { 5512 while (tail->next) 5513 tail = tail->next; 5514 /* Stores to cb->fn and cb->param should complete before 5515 * cb is visible to data plane. 5516 */ 5517 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5518 } 5519 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5520 5521 rte_eth_trace_add_rx_callback(port_id, queue_id, fn, user_param, cb); 5522 5523 return cb; 5524 } 5525 5526 const struct rte_eth_rxtx_callback * 5527 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5528 rte_rx_callback_fn fn, void *user_param) 5529 { 5530 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5531 rte_errno = ENOTSUP; 5532 return NULL; 5533 #endif 5534 /* check input parameters */ 5535 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5536 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5537 rte_errno = EINVAL; 5538 return NULL; 5539 } 5540 5541 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5542 5543 if (cb == NULL) { 5544 rte_errno = ENOMEM; 5545 return NULL; 5546 } 5547 5548 cb->fn.rx = fn; 5549 cb->param = user_param; 5550 5551 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5552 /* Add the callbacks at first position */ 5553 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5554 /* Stores to cb->fn, cb->param and cb->next should complete before 5555 * cb is visible to data plane threads. 5556 */ 5557 __atomic_store_n( 5558 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5559 cb, __ATOMIC_RELEASE); 5560 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5561 5562 rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param, 5563 cb); 5564 5565 return cb; 5566 } 5567 5568 const struct rte_eth_rxtx_callback * 5569 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5570 rte_tx_callback_fn fn, void *user_param) 5571 { 5572 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5573 rte_errno = ENOTSUP; 5574 return NULL; 5575 #endif 5576 struct rte_eth_dev *dev; 5577 5578 /* check input parameters */ 5579 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5580 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5581 rte_errno = EINVAL; 5582 return NULL; 5583 } 5584 5585 dev = &rte_eth_devices[port_id]; 5586 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5587 rte_errno = EINVAL; 5588 return NULL; 5589 } 5590 5591 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5592 5593 if (cb == NULL) { 5594 rte_errno = ENOMEM; 5595 return NULL; 5596 } 5597 5598 cb->fn.tx = fn; 5599 cb->param = user_param; 5600 5601 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5602 /* Add the callbacks in fifo order. */ 5603 struct rte_eth_rxtx_callback *tail = 5604 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5605 5606 if (!tail) { 5607 /* Stores to cb->fn and cb->param should complete before 5608 * cb is visible to data plane. 5609 */ 5610 __atomic_store_n( 5611 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5612 cb, __ATOMIC_RELEASE); 5613 5614 } else { 5615 while (tail->next) 5616 tail = tail->next; 5617 /* Stores to cb->fn and cb->param should complete before 5618 * cb is visible to data plane.
5619 */ 5620 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5621 } 5622 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5623 5624 rte_eth_trace_add_tx_callback(port_id, queue_id, fn, user_param, cb); 5625 5626 return cb; 5627 } 5628 5629 int 5630 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5631 const struct rte_eth_rxtx_callback *user_cb) 5632 { 5633 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5634 return -ENOTSUP; 5635 #endif 5636 /* Check input parameters. */ 5637 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5638 if (user_cb == NULL || 5639 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5640 return -EINVAL; 5641 5642 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5643 struct rte_eth_rxtx_callback *cb; 5644 struct rte_eth_rxtx_callback **prev_cb; 5645 int ret = -EINVAL; 5646 5647 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5648 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5649 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5650 cb = *prev_cb; 5651 if (cb == user_cb) { 5652 /* Remove the user cb from the callback list. */ 5653 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5654 ret = 0; 5655 break; 5656 } 5657 } 5658 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5659 5660 rte_eth_trace_remove_rx_callback(port_id, queue_id, user_cb, ret); 5661 5662 return ret; 5663 } 5664 5665 int 5666 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5667 const struct rte_eth_rxtx_callback *user_cb) 5668 { 5669 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5670 return -ENOTSUP; 5671 #endif 5672 /* Check input parameters. */ 5673 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5674 if (user_cb == NULL || 5675 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5676 return -EINVAL; 5677 5678 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5679 int ret = -EINVAL; 5680 struct rte_eth_rxtx_callback *cb; 5681 struct rte_eth_rxtx_callback **prev_cb; 5682 5683 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5684 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5685 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5686 cb = *prev_cb; 5687 if (cb == user_cb) { 5688 /* Remove the user cb from the callback list.
*/ 5689 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5690 ret = 0; 5691 break; 5692 } 5693 } 5694 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5695 5696 rte_eth_trace_remove_tx_callback(port_id, queue_id, user_cb, ret); 5697 5698 return ret; 5699 } 5700 5701 int 5702 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5703 struct rte_eth_rxq_info *qinfo) 5704 { 5705 struct rte_eth_dev *dev; 5706 5707 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5708 dev = &rte_eth_devices[port_id]; 5709 5710 if (queue_id >= dev->data->nb_rx_queues) { 5711 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5712 return -EINVAL; 5713 } 5714 5715 if (qinfo == NULL) { 5716 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5717 port_id, queue_id); 5718 return -EINVAL; 5719 } 5720 5721 if (dev->data->rx_queues == NULL || 5722 dev->data->rx_queues[queue_id] == NULL) { 5723 RTE_ETHDEV_LOG(ERR, 5724 "Rx queue %"PRIu16" of device with port_id=%" 5725 PRIu16" has not been setup\n", 5726 queue_id, port_id); 5727 return -EINVAL; 5728 } 5729 5730 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5731 RTE_ETHDEV_LOG(INFO, 5732 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5733 queue_id, port_id); 5734 return -EINVAL; 5735 } 5736 5737 if (*dev->dev_ops->rxq_info_get == NULL) 5738 return -ENOTSUP; 5739 5740 memset(qinfo, 0, sizeof(*qinfo)); 5741 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5742 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5743 5744 rte_eth_trace_rx_queue_info_get(port_id, queue_id, qinfo); 5745 5746 return 0; 5747 } 5748 5749 int 5750 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5751 struct rte_eth_txq_info *qinfo) 5752 { 5753 struct rte_eth_dev *dev; 5754 5755 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5756 dev = &rte_eth_devices[port_id]; 5757 5758 if (queue_id >= dev->data->nb_tx_queues) { 5759 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5760 return -EINVAL; 5761 } 5762 5763 if (qinfo == NULL) { 5764 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5765 port_id, queue_id); 5766 return -EINVAL; 5767 } 5768 5769 if (dev->data->tx_queues == NULL || 5770 dev->data->tx_queues[queue_id] == NULL) { 5771 RTE_ETHDEV_LOG(ERR, 5772 "Tx queue %"PRIu16" of device with port_id=%" 5773 PRIu16" has not been setup\n", 5774 queue_id, port_id); 5775 return -EINVAL; 5776 } 5777 5778 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5779 RTE_ETHDEV_LOG(INFO, 5780 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5781 queue_id, port_id); 5782 return -EINVAL; 5783 } 5784 5785 if (*dev->dev_ops->txq_info_get == NULL) 5786 return -ENOTSUP; 5787 5788 memset(qinfo, 0, sizeof(*qinfo)); 5789 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5790 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5791 5792 rte_eth_trace_tx_queue_info_get(port_id, queue_id, qinfo); 5793 5794 return 0; 5795 } 5796 5797 int 5798 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5799 struct rte_eth_burst_mode *mode) 5800 { 5801 struct rte_eth_dev *dev; 5802 int ret; 5803 5804 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5805 dev = &rte_eth_devices[port_id]; 5806 5807 if (queue_id >= dev->data->nb_rx_queues) { 5808 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5809 return -EINVAL; 5810 } 5811 5812 if (mode == NULL) { 5813 RTE_ETHDEV_LOG(ERR, 5814 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5815 port_id, queue_id); 5816 return -EINVAL; 5817 } 5818 5819 if (*dev->dev_ops->rx_burst_mode_get == NULL) 5820 return -ENOTSUP; 5821 memset(mode, 0, sizeof(*mode)); 5822 ret = eth_err(port_id, 5823 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5824 5825 rte_eth_trace_rx_burst_mode_get(port_id, queue_id, mode, ret); 5826 5827 return ret; 5828 } 5829 5830 int 5831 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5832 struct rte_eth_burst_mode *mode) 5833 { 5834 struct rte_eth_dev *dev; 5835 int ret; 5836 5837 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5838 dev = &rte_eth_devices[port_id]; 5839 5840 if (queue_id >= dev->data->nb_tx_queues) { 5841 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5842 return -EINVAL; 5843 } 5844 5845 if (mode == NULL) { 5846 RTE_ETHDEV_LOG(ERR, 5847 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5848 port_id, queue_id); 5849 return -EINVAL; 5850 } 5851 5852 if (*dev->dev_ops->tx_burst_mode_get == NULL) 5853 return -ENOTSUP; 5854 memset(mode, 0, sizeof(*mode)); 5855 ret = eth_err(port_id, 5856 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5857 5858 rte_eth_trace_tx_burst_mode_get(port_id, queue_id, mode, ret); 5859 5860 return ret; 5861 } 5862 5863 int 5864 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5865 struct rte_power_monitor_cond *pmc) 5866 { 5867 struct rte_eth_dev *dev; 5868 int ret; 5869 5870 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5871 dev = &rte_eth_devices[port_id]; 5872 5873 if (queue_id >= dev->data->nb_rx_queues) { 5874 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5875 return -EINVAL; 5876 } 5877 5878 if (pmc == NULL) { 5879 RTE_ETHDEV_LOG(ERR, 5880 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5881 port_id, queue_id); 5882 return -EINVAL; 5883 } 5884 5885 if (*dev->dev_ops->get_monitor_addr == NULL) 5886 return -ENOTSUP; 5887 ret = eth_err(port_id, 5888 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5889 5890 rte_eth_trace_get_monitor_addr(port_id, queue_id, pmc, ret); 5891 5892 return ret; 5893 } 5894 5895 int 5896 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5897 struct rte_ether_addr *mc_addr_set, 5898 uint32_t nb_mc_addr) 5899 { 5900 struct rte_eth_dev *dev; 5901 int ret; 5902 5903 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5904 dev = &rte_eth_devices[port_id]; 5905 5906 if (*dev->dev_ops->set_mc_addr_list == NULL) 5907 return -ENOTSUP; 5908 ret = eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5909 mc_addr_set, nb_mc_addr)); 5910 5911 rte_ethdev_trace_set_mc_addr_list(port_id, mc_addr_set, nb_mc_addr, 5912 ret); 5913 5914 return ret; 5915 } 5916 5917 int 5918 rte_eth_timesync_enable(uint16_t port_id) 5919 { 5920 struct rte_eth_dev *dev; 5921 int ret; 5922 5923 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5924 dev = &rte_eth_devices[port_id]; 5925 5926 if (*dev->dev_ops->timesync_enable == NULL) 5927 return -ENOTSUP; 5928 ret = eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5929 5930 rte_eth_trace_timesync_enable(port_id, ret); 5931 5932 return ret; 5933 } 5934 5935 int 5936 rte_eth_timesync_disable(uint16_t port_id) 5937 { 5938 struct rte_eth_dev *dev; 5939 int ret; 5940 5941 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5942 dev = &rte_eth_devices[port_id]; 5943 5944 if (*dev->dev_ops->timesync_disable == NULL) 5945 return -ENOTSUP; 5946 ret = eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5947 5948 
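/*
 * Usage sketch (illustrative only): a minimal IEEE 1588 sequence built on
 * the timesync calls in this file. "app_port_id" is an assumed port whose
 * PMD supports PTP timestamping and on which a PTP frame has already been
 * received; the flags argument of rte_eth_timesync_read_rx_timestamp() is
 * device specific and left at 0, and -500 is an arbitrary example
 * correction in nanoseconds.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(app_port_id);
 *	if (rte_eth_timesync_read_rx_timestamp(app_port_id, &ts, 0) == 0)
 *		printf("PTP Rx timestamp: %ld.%09ld\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 *	rte_eth_timesync_adjust_time(app_port_id, -500);
 *	rte_eth_timesync_disable(app_port_id);
 */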
rte_eth_trace_timesync_disable(port_id, ret); 5949 5950 return ret; 5951 } 5952 5953 int 5954 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5955 uint32_t flags) 5956 { 5957 struct rte_eth_dev *dev; 5958 int ret; 5959 5960 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5961 dev = &rte_eth_devices[port_id]; 5962 5963 if (timestamp == NULL) { 5964 RTE_ETHDEV_LOG(ERR, 5965 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5966 port_id); 5967 return -EINVAL; 5968 } 5969 5970 if (*dev->dev_ops->timesync_read_rx_timestamp == NULL) 5971 return -ENOTSUP; 5972 5973 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5974 (dev, timestamp, flags)); 5975 5976 rte_eth_trace_timesync_read_rx_timestamp(port_id, timestamp, flags, 5977 ret); 5978 5979 return ret; 5980 } 5981 5982 int 5983 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5984 struct timespec *timestamp) 5985 { 5986 struct rte_eth_dev *dev; 5987 int ret; 5988 5989 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5990 dev = &rte_eth_devices[port_id]; 5991 5992 if (timestamp == NULL) { 5993 RTE_ETHDEV_LOG(ERR, 5994 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5995 port_id); 5996 return -EINVAL; 5997 } 5998 5999 if (*dev->dev_ops->timesync_read_tx_timestamp == NULL) 6000 return -ENOTSUP; 6001 6002 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 6003 (dev, timestamp)); 6004 6005 rte_eth_trace_timesync_read_tx_timestamp(port_id, timestamp, ret); 6006 6007 return ret; 6008 6009 } 6010 6011 int 6012 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 6013 { 6014 struct rte_eth_dev *dev; 6015 int ret; 6016 6017 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6018 dev = &rte_eth_devices[port_id]; 6019 6020 if (*dev->dev_ops->timesync_adjust_time == NULL) 6021 return -ENOTSUP; 6022 ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 6023 6024 rte_eth_trace_timesync_adjust_time(port_id, delta, ret); 6025 6026 return ret; 6027 } 6028 6029 int 6030 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 6031 { 6032 struct rte_eth_dev *dev; 6033 int ret; 6034 6035 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6036 dev = &rte_eth_devices[port_id]; 6037 6038 if (timestamp == NULL) { 6039 RTE_ETHDEV_LOG(ERR, 6040 "Cannot read ethdev port %u timesync time to NULL\n", 6041 port_id); 6042 return -EINVAL; 6043 } 6044 6045 if (*dev->dev_ops->timesync_read_time == NULL) 6046 return -ENOTSUP; 6047 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 6048 timestamp)); 6049 6050 rte_eth_trace_timesync_read_time(port_id, timestamp, ret); 6051 6052 return ret; 6053 } 6054 6055 int 6056 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 6057 { 6058 struct rte_eth_dev *dev; 6059 int ret; 6060 6061 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6062 dev = &rte_eth_devices[port_id]; 6063 6064 if (timestamp == NULL) { 6065 RTE_ETHDEV_LOG(ERR, 6066 "Cannot write ethdev port %u timesync from NULL time\n", 6067 port_id); 6068 return -EINVAL; 6069 } 6070 6071 if (*dev->dev_ops->timesync_write_time == NULL) 6072 return -ENOTSUP; 6073 ret = eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 6074 timestamp)); 6075 6076 rte_eth_trace_timesync_write_time(port_id, timestamp, ret); 6077 6078 return ret; 6079 } 6080 6081 int 6082 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 6083 { 6084 struct rte_eth_dev *dev; 6085 int ret; 6086 6087 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 
-ENODEV); 6088 dev = &rte_eth_devices[port_id]; 6089 6090 if (clock == NULL) { 6091 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 6092 port_id); 6093 return -EINVAL; 6094 } 6095 6096 if (*dev->dev_ops->read_clock == NULL) 6097 return -ENOTSUP; 6098 ret = eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 6099 6100 rte_eth_trace_read_clock(port_id, clock, ret); 6101 6102 return ret; 6103 } 6104 6105 int 6106 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 6107 { 6108 struct rte_eth_dev *dev; 6109 int ret; 6110 6111 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6112 dev = &rte_eth_devices[port_id]; 6113 6114 if (info == NULL) { 6115 RTE_ETHDEV_LOG(ERR, 6116 "Cannot get ethdev port %u register info to NULL\n", 6117 port_id); 6118 return -EINVAL; 6119 } 6120 6121 if (*dev->dev_ops->get_reg == NULL) 6122 return -ENOTSUP; 6123 ret = eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 6124 6125 rte_ethdev_trace_get_reg_info(port_id, info, ret); 6126 6127 return ret; 6128 } 6129 6130 int 6131 rte_eth_dev_get_eeprom_length(uint16_t port_id) 6132 { 6133 struct rte_eth_dev *dev; 6134 int ret; 6135 6136 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6137 dev = &rte_eth_devices[port_id]; 6138 6139 if (*dev->dev_ops->get_eeprom_length == NULL) 6140 return -ENOTSUP; 6141 ret = eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 6142 6143 rte_ethdev_trace_get_eeprom_length(port_id, ret); 6144 6145 return ret; 6146 } 6147 6148 int 6149 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 6150 { 6151 struct rte_eth_dev *dev; 6152 int ret; 6153 6154 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6155 dev = &rte_eth_devices[port_id]; 6156 6157 if (info == NULL) { 6158 RTE_ETHDEV_LOG(ERR, 6159 "Cannot get ethdev port %u EEPROM info to NULL\n", 6160 port_id); 6161 return -EINVAL; 6162 } 6163 6164 if (*dev->dev_ops->get_eeprom == NULL) 6165 return -ENOTSUP; 6166 ret = eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 6167 6168 rte_ethdev_trace_get_eeprom(port_id, info, ret); 6169 6170 return ret; 6171 } 6172 6173 int 6174 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 6175 { 6176 struct rte_eth_dev *dev; 6177 int ret; 6178 6179 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6180 dev = &rte_eth_devices[port_id]; 6181 6182 if (info == NULL) { 6183 RTE_ETHDEV_LOG(ERR, 6184 "Cannot set ethdev port %u EEPROM from NULL info\n", 6185 port_id); 6186 return -EINVAL; 6187 } 6188 6189 if (*dev->dev_ops->set_eeprom == NULL) 6190 return -ENOTSUP; 6191 ret = eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 6192 6193 rte_ethdev_trace_set_eeprom(port_id, info, ret); 6194 6195 return ret; 6196 } 6197 6198 int 6199 rte_eth_dev_get_module_info(uint16_t port_id, 6200 struct rte_eth_dev_module_info *modinfo) 6201 { 6202 struct rte_eth_dev *dev; 6203 int ret; 6204 6205 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6206 dev = &rte_eth_devices[port_id]; 6207 6208 if (modinfo == NULL) { 6209 RTE_ETHDEV_LOG(ERR, 6210 "Cannot get ethdev port %u EEPROM module info to NULL\n", 6211 port_id); 6212 return -EINVAL; 6213 } 6214 6215 if (*dev->dev_ops->get_module_info == NULL) 6216 return -ENOTSUP; 6217 ret = (*dev->dev_ops->get_module_info)(dev, modinfo); 6218 6219 rte_ethdev_trace_get_module_info(port_id, modinfo, ret); 6220 6221 return ret; 6222 } 6223 6224 int 6225 rte_eth_dev_get_module_eeprom(uint16_t port_id, 6226 struct rte_dev_eeprom_info *info) 6227 { 6228 struct rte_eth_dev *dev; 6229 
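/*
 * Usage sketch (illustrative only): reading a plugged transceiver's EEPROM
 * with rte_eth_dev_get_module_info() above and this function. "app_port_id"
 * and the fixed-size "buf" are assumptions; real callers size the buffer
 * from the reported eeprom_len.
 *
 *	struct rte_eth_dev_module_info minfo;
 *	struct rte_dev_eeprom_info einfo;
 *	uint8_t buf[1024];
 *
 *	if (rte_eth_dev_get_module_info(app_port_id, &minfo) != 0)
 *		return;
 *	memset(&einfo, 0, sizeof(einfo));
 *	einfo.offset = 0;
 *	einfo.length = RTE_MIN(minfo.eeprom_len, (uint32_t)sizeof(buf));
 *	einfo.data = buf;
 *	if (rte_eth_dev_get_module_eeprom(app_port_id, &einfo) == 0)
 *		printf("module type %u, %u EEPROM bytes read\n",
 *		       minfo.type, einfo.length);
 */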
int ret; 6230 6231 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6232 dev = &rte_eth_devices[port_id]; 6233 6234 if (info == NULL) { 6235 RTE_ETHDEV_LOG(ERR, 6236 "Cannot get ethdev port %u module EEPROM info to NULL\n", 6237 port_id); 6238 return -EINVAL; 6239 } 6240 6241 if (info->data == NULL) { 6242 RTE_ETHDEV_LOG(ERR, 6243 "Cannot get ethdev port %u module EEPROM data to NULL\n", 6244 port_id); 6245 return -EINVAL; 6246 } 6247 6248 if (info->length == 0) { 6249 RTE_ETHDEV_LOG(ERR, 6250 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 6251 port_id); 6252 return -EINVAL; 6253 } 6254 6255 if (*dev->dev_ops->get_module_eeprom == NULL) 6256 return -ENOTSUP; 6257 ret = (*dev->dev_ops->get_module_eeprom)(dev, info); 6258 6259 rte_ethdev_trace_get_module_eeprom(port_id, info, ret); 6260 6261 return ret; 6262 } 6263 6264 int 6265 rte_eth_dev_get_dcb_info(uint16_t port_id, 6266 struct rte_eth_dcb_info *dcb_info) 6267 { 6268 struct rte_eth_dev *dev; 6269 int ret; 6270 6271 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6272 dev = &rte_eth_devices[port_id]; 6273 6274 if (dcb_info == NULL) { 6275 RTE_ETHDEV_LOG(ERR, 6276 "Cannot get ethdev port %u DCB info to NULL\n", 6277 port_id); 6278 return -EINVAL; 6279 } 6280 6281 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 6282 6283 if (*dev->dev_ops->get_dcb_info == NULL) 6284 return -ENOTSUP; 6285 ret = eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 6286 6287 rte_ethdev_trace_get_dcb_info(port_id, dcb_info, ret); 6288 6289 return ret; 6290 } 6291 6292 static void 6293 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 6294 const struct rte_eth_desc_lim *desc_lim) 6295 { 6296 if (desc_lim->nb_align != 0) 6297 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 6298 6299 if (desc_lim->nb_max != 0) 6300 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 6301 6302 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 6303 } 6304 6305 int 6306 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 6307 uint16_t *nb_rx_desc, 6308 uint16_t *nb_tx_desc) 6309 { 6310 struct rte_eth_dev_info dev_info; 6311 int ret; 6312 6313 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6314 6315 ret = rte_eth_dev_info_get(port_id, &dev_info); 6316 if (ret != 0) 6317 return ret; 6318 6319 if (nb_rx_desc != NULL) 6320 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 6321 6322 if (nb_tx_desc != NULL) 6323 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 6324 6325 rte_ethdev_trace_adjust_nb_rx_tx_desc(port_id); 6326 6327 return 0; 6328 } 6329 6330 int 6331 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 6332 struct rte_eth_hairpin_cap *cap) 6333 { 6334 struct rte_eth_dev *dev; 6335 int ret; 6336 6337 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6338 dev = &rte_eth_devices[port_id]; 6339 6340 if (cap == NULL) { 6341 RTE_ETHDEV_LOG(ERR, 6342 "Cannot get ethdev port %u hairpin capability to NULL\n", 6343 port_id); 6344 return -EINVAL; 6345 } 6346 6347 if (*dev->dev_ops->hairpin_cap_get == NULL) 6348 return -ENOTSUP; 6349 memset(cap, 0, sizeof(*cap)); 6350 ret = eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 6351 6352 rte_ethdev_trace_hairpin_capability_get(port_id, cap, ret); 6353 6354 return ret; 6355 } 6356 6357 int 6358 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 6359 { 6360 struct rte_eth_dev *dev; 6361 int ret; 6362 6363 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6364 dev = &rte_eth_devices[port_id]; 6365 6366 if (pool == NULL) { 6367 RTE_ETHDEV_LOG(ERR, 6368 
"Cannot test ethdev port %u mempool operation from NULL pool\n", 6369 port_id); 6370 return -EINVAL; 6371 } 6372 6373 if (*dev->dev_ops->pool_ops_supported == NULL) 6374 return 1; /* all pools are supported */ 6375 6376 ret = (*dev->dev_ops->pool_ops_supported)(dev, pool); 6377 6378 rte_ethdev_trace_pool_ops_supported(port_id, pool, ret); 6379 6380 return ret; 6381 } 6382 6383 static int 6384 eth_dev_handle_port_list(const char *cmd __rte_unused, 6385 const char *params __rte_unused, 6386 struct rte_tel_data *d) 6387 { 6388 int port_id; 6389 6390 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 6391 RTE_ETH_FOREACH_DEV(port_id) 6392 rte_tel_data_add_array_int(d, port_id); 6393 return 0; 6394 } 6395 6396 static void 6397 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 6398 const char *stat_name) 6399 { 6400 int q; 6401 struct rte_tel_data *q_data = rte_tel_data_alloc(); 6402 if (q_data == NULL) 6403 return; 6404 rte_tel_data_start_array(q_data, RTE_TEL_UINT_VAL); 6405 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 6406 rte_tel_data_add_array_uint(q_data, q_stats[q]); 6407 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 6408 } 6409 6410 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_uint(d, #s, stats.s) 6411 6412 static int 6413 eth_dev_handle_port_stats(const char *cmd __rte_unused, 6414 const char *params, 6415 struct rte_tel_data *d) 6416 { 6417 struct rte_eth_stats stats; 6418 int port_id, ret; 6419 6420 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6421 return -1; 6422 6423 port_id = atoi(params); 6424 if (!rte_eth_dev_is_valid_port(port_id)) 6425 return -1; 6426 6427 ret = rte_eth_stats_get(port_id, &stats); 6428 if (ret < 0) 6429 return -1; 6430 6431 rte_tel_data_start_dict(d); 6432 ADD_DICT_STAT(stats, ipackets); 6433 ADD_DICT_STAT(stats, opackets); 6434 ADD_DICT_STAT(stats, ibytes); 6435 ADD_DICT_STAT(stats, obytes); 6436 ADD_DICT_STAT(stats, imissed); 6437 ADD_DICT_STAT(stats, ierrors); 6438 ADD_DICT_STAT(stats, oerrors); 6439 ADD_DICT_STAT(stats, rx_nombuf); 6440 eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets"); 6441 eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets"); 6442 eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes"); 6443 eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes"); 6444 eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors"); 6445 6446 return 0; 6447 } 6448 6449 static int 6450 eth_dev_parse_hide_zero(const char *key, const char *value, void *extra_args) 6451 { 6452 RTE_SET_USED(key); 6453 6454 if (value == NULL) 6455 return -1; 6456 6457 if (strcmp(value, "true") == 0) 6458 *(bool *)extra_args = true; 6459 else if (strcmp(value, "false") == 0) 6460 *(bool *)extra_args = false; 6461 else 6462 return -1; 6463 6464 return 0; 6465 } 6466 6467 static int 6468 eth_dev_handle_port_xstats(const char *cmd __rte_unused, 6469 const char *params, 6470 struct rte_tel_data *d) 6471 { 6472 const char *const valid_keys[] = { "hide_zero", NULL }; 6473 struct rte_eth_xstat *eth_xstats; 6474 struct rte_eth_xstat_name *xstat_names; 6475 struct rte_kvargs *kvlist; 6476 int port_id, num_xstats; 6477 bool hide_zero = false; 6478 char *end_param; 6479 int i, ret; 6480 6481 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6482 return -1; 6483 6484 port_id = strtoul(params, &end_param, 0); 6485 if (!rte_eth_dev_is_valid_port(port_id)) 6486 return -1; 6487 6488 if (*end_param != '\0') { 6489 kvlist = rte_kvargs_parse(end_param, valid_keys); 6490 ret = 
static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
			   const char *params,
			   struct rte_tel_data *d)
{
	const char *const valid_keys[] = { "hide_zero", NULL };
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	struct rte_kvargs *kvlist;
	int port_id, num_xstats;
	bool hide_zero = false;
	char *end_param;
	int i, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	if (*end_param != '\0') {
		/* parse optional kvargs; anything unknown is only logged */
		kvlist = rte_kvargs_parse(end_param, valid_keys);
		if (kvlist == NULL ||
		    rte_kvargs_process(kvlist, NULL,
				eth_dev_parse_hide_zero, &hide_zero) != 0)
			RTE_ETHDEV_LOG(NOTICE,
				"Unknown extra parameters passed to ethdev telemetry command, ignoring\n");
		rte_kvargs_free(kvlist);
	}

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++) {
		if (hide_zero && eth_xstats[i].value == 0)
			continue;
		rte_tel_data_add_dict_uint(d, xstat_names[i].name,
					   eth_xstats[i].value);
	}
	free(eth_xstats);
	return 0;
}

#ifndef RTE_EXEC_ENV_WINDOWS
static int
eth_dev_handle_port_dump_priv(const char *cmd __rte_unused,
			      const char *params,
			      struct rte_tel_data *d)
{
	char *buf, *end_param;
	int port_id, ret;
	FILE *f;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -EINVAL;

	buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN);
	if (buf == NULL)
		return -ENOMEM;

	/* capture the driver's FILE-based dump into the telemetry buffer */
	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
	if (f == NULL) {
		free(buf);
		return -EINVAL;
	}

	ret = rte_eth_dev_priv_dump(port_id, f);
	fclose(f);
	if (ret == 0) {
		rte_tel_data_start_dict(d);
		rte_tel_data_string(d, buf);
	}

	free(buf);
	return 0;
}
#endif /* !RTE_EXEC_ENV_WINDOWS */
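/*
 * Application-side sketch (illustrative): the handler below reports what
 * rte_eth_link_get_nowait() returns; an application can poll the same
 * state without blocking. Port 0 is an example value.
 *
 *	struct rte_eth_link link;
 *
 *	if (rte_eth_link_get_nowait(0, &link) == 0 && link.link_status)
 *		printf("port 0 up, %u Mbps\n", link.link_speed);
 */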
static int
eth_dev_handle_port_link_status(const char *cmd __rte_unused,
				const char *params,
				struct rte_tel_data *d)
{
	static const char *status_str = "status";
	int ret, port_id;
	struct rte_eth_link link;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_link_get_nowait(port_id, &link);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	if (!link.link_status) {
		rte_tel_data_add_dict_string(d, status_str, "DOWN");
		return 0;
	}
	rte_tel_data_add_dict_string(d, status_str, "UP");
	rte_tel_data_add_dict_uint(d, "speed", link.link_speed);
	rte_tel_data_add_dict_string(d, "duplex",
			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
				"full-duplex" : "half-duplex");
	return 0;
}

static int
eth_dev_handle_port_info(const char *cmd __rte_unused,
			 const char *params,
			 struct rte_tel_data *d)
{
	struct rte_tel_data *rxq_state, *txq_state;
	char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev *eth_dev;
	char *end_param;
	int port_id, i;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");

	if (!rte_eth_dev_is_valid_port(port_id))
		return -EINVAL;

	eth_dev = &rte_eth_devices[port_id];

	rxq_state = rte_tel_data_alloc();
	if (rxq_state == NULL)
		return -ENOMEM;

	txq_state = rte_tel_data_alloc();
	if (txq_state == NULL) {
		rte_tel_data_free(rxq_state);
		return -ENOMEM;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
	rte_tel_data_add_dict_int(d, "state", eth_dev->state);
	rte_tel_data_add_dict_int(d, "nb_rx_queues",
			eth_dev->data->nb_rx_queues);
	rte_tel_data_add_dict_int(d, "nb_tx_queues",
			eth_dev->data->nb_tx_queues);
	rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id);
	rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu);
	rte_tel_data_add_dict_uint(d, "rx_mbuf_size_min",
			eth_dev->data->min_rx_buf_size);
	rte_ether_format_addr(mac_addr, sizeof(mac_addr),
			eth_dev->data->mac_addrs);
	rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
	rte_tel_data_add_dict_int(d, "promiscuous",
			eth_dev->data->promiscuous);
	rte_tel_data_add_dict_int(d, "scattered_rx",
			eth_dev->data->scattered_rx);
	rte_tel_data_add_dict_int(d, "all_multicast",
			eth_dev->data->all_multicast);
	rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started);
	rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro);
	rte_tel_data_add_dict_int(d, "dev_configured",
			eth_dev->data->dev_configured);

	rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL);
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
		rte_tel_data_add_array_int(rxq_state,
				eth_dev->data->rx_queue_state[i]);

	rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL);
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		rte_tel_data_add_array_int(txq_state,
				eth_dev->data->tx_queue_state[i]);

	rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0);
	rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0);
	rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node);
	rte_tel_data_add_dict_uint_hex(d, "dev_flags",
			eth_dev->data->dev_flags, 0);
	rte_tel_data_add_dict_uint_hex(d, "rx_offloads",
			eth_dev->data->dev_conf.rxmode.offloads, 0);
	rte_tel_data_add_dict_uint_hex(d, "tx_offloads",
			eth_dev->data->dev_conf.txmode.offloads, 0);
	rte_tel_data_add_dict_uint_hex(d, "ethdev_rss_hf",
			eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf, 0);

	return 0;
}
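/*
 * Example query for the handler above (illustrative): "/ethdev/info,0"
 * returns one dictionary with the name, state, queue counts, MAC address,
 * per-queue state arrays, and the offload/RSS masks in hex.
 */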
int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/* info == NULL is valid here: drivers then report only the
	 * number of representor ranges.
	 */
	if (*dev->dev_ops->representor_info_get == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));

	rte_eth_trace_representor_info_get(port_id, info, ret);

	return ret;
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (ID=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->rx_metadata_negotiate == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		      (*dev->dev_ops->rx_metadata_negotiate)(dev, features));

	rte_eth_trace_rx_metadata_negotiate(port_id, *features, ret);

	return ret;
}

int
rte_eth_ip_reassembly_capability_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *reassembly_capa)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot get IP reassembly capability\n",
			port_id);
		return -EINVAL;
	}

	if (reassembly_capa == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
		return -ENOTSUP;
	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));

	ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
					(dev, reassembly_capa));

	rte_eth_trace_ip_reassembly_capability_get(port_id, reassembly_capa,
						   ret);

	return ret;
}

int
rte_eth_ip_reassembly_conf_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot get IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
		return -ENOTSUP;
	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
	ret = eth_err(port_id,
		      (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_get(port_id, conf, ret);

	return ret;
}
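/*
 * Configuration flow sketch (illustrative): the capability reported above
 * bounds what may be passed to rte_eth_ip_reassembly_conf_set() below,
 * which must run after configure and before start. Port 0 is an example;
 * reusing the capability values directly is an assumption for brevity.
 *
 *	struct rte_eth_ip_reassembly_params capa;
 *
 *	if (rte_eth_ip_reassembly_capability_get(0, &capa) == 0)
 *		(void)rte_eth_ip_reassembly_conf_set(0, &capa);
 */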
int
rte_eth_ip_reassembly_conf_set(uint16_t port_id,
		const struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot set IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u started,\n"
			"cannot configure IP reassembly params.\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid IP reassembly configuration (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		      (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_set(port_id, conf, ret);

	return ret;
}

int
rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_dev_priv_dump == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
}

int
rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset, uint16_t num, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_rx_descriptor_dump == NULL)
		return -ENOTSUP;

	return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev,
			queue_id, offset, num, file));
}

int
rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset, uint16_t num, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_tx_descriptor_dump == NULL)
		return -ENOTSUP;

	return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev,
			queue_id, offset, num, file));
}

int
rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_types;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (ptypes == NULL && num > 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u supported header protocol types to NULL when array size is non zero\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL)
		return -ENOTSUP;
	all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev);

	if (all_types == NULL)
		return 0;

	for (i = 0, j = 0; all_types[i] != RTE_PTYPE_UNKNOWN; ++i) {
		if (j < num) {
			ptypes[j] = all_types[i];

			rte_eth_trace_buffer_split_get_supported_hdr_ptypes(
							port_id, j, ptypes[j]);
		}
		j++;
	}

	return j;
}
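/*
 * Query sketch (illustrative): rte_eth_buffer_split_get_supported_hdr_ptypes()
 * above follows the usual two-call pattern; a first call with (NULL, 0)
 * returns the number of supported header-split ptypes, a second call fills
 * a suitably sized array. Port 0 is an example value.
 *
 *	int n = rte_eth_buffer_split_get_supported_hdr_ptypes(0, NULL, 0);
 *
 *	if (n > 0) {
 *		uint32_t *types = malloc(n * sizeof(*types));
 *		if (types != NULL)
 *			n = rte_eth_buffer_split_get_supported_hdr_ptypes(0,
 *					types, n);
 *	}
 */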
int
rte_eth_dev_count_aggr_ports(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->count_aggr_ports == NULL)
		return 0;
	ret = eth_err(port_id, (*dev->dev_ops->count_aggr_ports)(dev));

	rte_eth_trace_count_aggr_ports(port_id, ret);

	return ret;
}

int
rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
				 uint8_t affinity)
{
	struct rte_eth_dev *dev;
	int aggr_ports;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->map_aggr_tx_affinity == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be configured before Tx affinity mapping\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	aggr_ports = rte_eth_dev_count_aggr_ports(port_id);
	if (aggr_ports == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u has no aggregated port\n",
			port_id);
		return -ENOTSUP;
	}

	if (affinity > aggr_ports) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u: invalid affinity %u, exceeds the number of aggregated ports %u\n",
			port_id, affinity, aggr_ports);
		return -EINVAL;
	}

	ret = eth_err(port_id, (*dev->dev_ops->map_aggr_tx_affinity)(dev,
			tx_queue_id, affinity));

	rte_eth_trace_map_aggr_tx_affinity(port_id, tx_queue_id, affinity, ret);

	return ret;
}

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id, hide_zero=true|false (optional, hides zero-valued xstats)");
#ifndef RTE_EXEC_ENV_WINDOWS
	rte_telemetry_register_cmd("/ethdev/dump_priv", eth_dev_handle_port_dump_priv,
			"Returns the private information dump for a port. Parameters: int port_id");
#endif
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
			"Returns the device info for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/module_eeprom", eth_dev_handle_port_module_eeprom,
			"Returns module EEPROM info with SFF specs. Parameters: int port_id");
}
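/*
 * Example session with the commands registered above, using the telemetry
 * client shipped in usertools/ (output shape illustrative):
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /ethdev/list
 *	{"/ethdev/list": [0, 1]}
 *	--> /ethdev/stats,0
 *	{"/ethdev/stats": {"ipackets": 0, "opackets": 0, ...}}
 */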