/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <bus_driver.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_kvargs.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev.h"
#include "rte_ethdev_trace_fp.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "ethdev_trace.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
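
/*
 * Usage sketch (illustrative only; the example_* name is a placeholder,
 * not part of the ethdev API): the tables above pair an xstats name with
 * the field offset inside struct rte_eth_stats, so a value is fetched by
 * adding the recorded offset to the base of a filled stats structure.
 */
static __rte_unused int
example_print_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;
	unsigned int i;
	int ret;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret != 0)
		return ret;
	for (i = 0; i < RTE_NB_STATS; i++) {
		/* read the counter through its recorded byte offset */
		const uint64_t *value = RTE_PTR_ADD(&stats,
				eth_dev_stats_strings[i].offset);

		printf("%s: %"PRIu64"\n",
			eth_dev_stats_strings[i].name, *value);
	}
	return 0;
}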

#define RTE_RX_OFFLOAD_BIT2STR(_name) \
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name) \
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};
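
/*
 * Usage sketch (illustrative only; example_* is a placeholder name):
 * decoding an offload bitmask one bit at a time against the name tables
 * above, the same way eth_dev_validate_offloads() below walks a mask.
 */
static __rte_unused void
example_log_rx_offload_names(uint64_t offloads)
{
	while (offloads != 0) {
		/* isolate the lowest set bit */
		uint64_t single = RTE_BIT64(__builtin_ctzll(offloads));

		printf("%s\n", rte_eth_dev_rx_offload_name(single));
		offloads &= ~single;
	}
}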

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle the pure class filter (i.e. without any bus-level argument)
	 * from the future new syntax.
	 * rte_devargs_parse() does not support the new syntax yet,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
	    (strcmp(iter->bus->name, "fslmc") == 0) ||
	    (strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);

	rte_eth_trace_iterator_init(devargs_str);

	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not a pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in the middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL) {
			uint16_t id = eth_dev_to_id(iter->class_device);

			rte_eth_trace_iterator_next(iter, id);

			return id; /* match */
		}
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */

	rte_eth_trace_iterator_cleanup(iter);

	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
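
/*
 * Usage sketch (illustrative only; example_* is a placeholder name): a
 * typical application loop over the iterator API above. The
 * RTE_ETH_FOREACH_MATCHING_DEV macro from rte_ethdev.h wraps the same
 * init/next sequence, and the iterator frees its resources automatically
 * once rte_eth_iterator_next() reports exhaustion.
 */
static __rte_unused void
example_iterate_matching_ports(const char *devargs_str)
{
	struct rte_dev_iterator iterator;
	uint16_t port_id;

	if (rte_eth_iterator_init(&iterator, devargs_str) != 0)
		return;
	for (port_id = rte_eth_iterator_next(&iterator);
	     port_id < RTE_MAX_ETHPORTS;
	     port_id = rte_eth_iterator_next(&iterator))
		printf("matched port %u\n", port_id);
}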

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	rte_eth_trace_find_next(port_id);

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_of(port_id, parent);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	uint16_t ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	ret = rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);

	rte_eth_trace_find_next_sibling(port_id, ref_port_id, ret);

	return ret;
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	int is_valid;

	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		is_valid = 0;
	else
		is_valid = 1;

	rte_ethdev_trace_is_valid_port(port_id, is_valid);

	return is_valid;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_owned_by(port_id, owner_id);

	return port_id;
}
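
/*
 * Usage sketch (illustrative only; example_* is a placeholder name):
 * rte_eth_find_next_owned_by() above is normally consumed through the
 * RTE_ETH_FOREACH_DEV_OWNED_BY macro from rte_ethdev.h.
 */
static __rte_unused uint16_t
example_count_ports_of_owner(uint64_t owner_id)
{
	uint16_t port_id;
	uint16_t count = 0;

	RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
		count++;
	return count;
}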

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_new(*owner_id);

	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_set(port_id, owner, ret);

	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_unset(port_id, owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
					sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_delete(owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_get(port_id, owner);

	return 0;
}
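
/*
 * Usage sketch (illustrative only; example_* is a placeholder name): the
 * usual ownership lifecycle built from the functions above - allocate an
 * owner ID, claim a port, and release it again when done.
 */
static __rte_unused int
example_take_port_ownership(uint16_t port_id)
{
	struct rte_eth_dev_owner owner = { .name = "example" };
	int ret;

	ret = rte_eth_dev_owner_new(&owner.id);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_owner_set(port_id, &owner);
	if (ret != 0)
		return ret;
	/* ... exclusive use of the port ... */
	return rte_eth_dev_owner_unset(port_id, owner.id);
}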

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	int socket_id = SOCKET_ID_ANY;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_errno = EINVAL;
	} else {
		socket_id = rte_eth_devices[port_id].data->numa_node;
		if (socket_id == SOCKET_ID_ANY)
			rte_errno = 0;
	}

	rte_ethdev_trace_socket_id(port_id, socket_id);

	return socket_id;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	void *ctx;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	ctx = rte_eth_devices[port_id].security_ctx;

	rte_ethdev_trace_get_sec_ctx(port_id, ctx);

	return ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	rte_ethdev_trace_count_avail(count);

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	rte_ethdev_trace_count_total(count);

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);

	rte_ethdev_trace_get_name_by_port(port_id, name);

	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;

			rte_ethdev_trace_get_port_by_name(name, *port_id);

			return 0;
		}

	return -ENODEV;
}

int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Rx queue_id=%u of device with port_id=%u\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Tx queue_id=%u of device with port_id=%u\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}
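
/*
 * Usage sketch (illustrative only; example_* is a placeholder name):
 * round-tripping between a port name and a port ID with the lookup
 * helpers above.
 */
static __rte_unused int
example_lookup_port_by_name(const char *name)
{
	char resolved[RTE_ETH_NAME_MAX_LEN];
	uint16_t port_id;
	int ret;

	ret = rte_eth_dev_get_port_by_name(name, &port_id);
	if (ret != 0)
		return ret;
	/* resolve the name back from the ID */
	return rte_eth_dev_get_name_by_port(port_id, resolved);
}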

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));

	rte_ethdev_trace_rx_queue_start(port_id, rx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

	rte_ethdev_trace_rx_queue_stop(port_id, rx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));

	rte_ethdev_trace_tx_queue_start(port_id, tx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

	rte_ethdev_trace_tx_queue_stop(port_id, tx_queue_id, ret);

	return ret;
}
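
/*
 * Usage sketch (illustrative only; example_* is a placeholder name):
 * restarting a single Rx queue with the per-queue start/stop API above;
 * the port itself must already be started. Queues set up with
 * rxconf->rx_deferred_start stay stopped across rte_eth_dev_start()
 * until started here explicitly.
 */
static __rte_unused int
example_restart_rx_queue(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret;
	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}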

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	uint32_t ret;

	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		ret = duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
		break;
	case RTE_ETH_SPEED_NUM_100M:
		ret = duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
		break;
	case RTE_ETH_SPEED_NUM_1G:
		ret = RTE_ETH_LINK_SPEED_1G;
		break;
	case RTE_ETH_SPEED_NUM_2_5G:
		ret = RTE_ETH_LINK_SPEED_2_5G;
		break;
	case RTE_ETH_SPEED_NUM_5G:
		ret = RTE_ETH_LINK_SPEED_5G;
		break;
	case RTE_ETH_SPEED_NUM_10G:
		ret = RTE_ETH_LINK_SPEED_10G;
		break;
	case RTE_ETH_SPEED_NUM_20G:
		ret = RTE_ETH_LINK_SPEED_20G;
		break;
	case RTE_ETH_SPEED_NUM_25G:
		ret = RTE_ETH_LINK_SPEED_25G;
		break;
	case RTE_ETH_SPEED_NUM_40G:
		ret = RTE_ETH_LINK_SPEED_40G;
		break;
	case RTE_ETH_SPEED_NUM_50G:
		ret = RTE_ETH_LINK_SPEED_50G;
		break;
	case RTE_ETH_SPEED_NUM_56G:
		ret = RTE_ETH_LINK_SPEED_56G;
		break;
	case RTE_ETH_SPEED_NUM_100G:
		ret = RTE_ETH_LINK_SPEED_100G;
		break;
	case RTE_ETH_SPEED_NUM_200G:
		ret = RTE_ETH_LINK_SPEED_200G;
		break;
	case RTE_ETH_SPEED_NUM_400G:
		ret = RTE_ETH_LINK_SPEED_400G;
		break;
	default:
		ret = 0;
	}

	rte_eth_trace_speed_bitflag(speed, duplex, ret);

	return ret;
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_rx_offload_name(offload, name);

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_tx_offload_name(offload, name);

	return name;
}
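
/*
 * Usage sketch (illustrative only; example_* is a placeholder name):
 * building a link_speeds mask for rte_eth_conf from the helper above;
 * OR-ing in RTE_ETH_LINK_SPEED_FIXED disables autonegotiation.
 */
static __rte_unused uint32_t
example_fixed_25g_link_speeds(void)
{
	return rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_25G,
			RTE_ETH_LINK_FULL_DUPLEX) | RTE_ETH_LINK_SPEED_FIXED;
}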

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_capability_name(capability, name);

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				" %u != %u is not allowed\n",
				port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"> max allowed value %u\n", port_id, config_size,
			dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"< min allowed value %u\n", port_id, config_size,
			(unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}
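
/*
 * Worked example for the XOR walk above: with req_offloads = A|B and
 * set_offloads = A|C, offloads_diff = B|C. Bit B (requested but not set)
 * makes the function fail with -EINVAL, while bit C (set but not
 * requested) is only reported at DEBUG level.
 */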

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		     uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}
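
/*
 * Worked example for the check above: with the default overhead of
 * RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) = 18 bytes, an MTU of
 * 1500 yields a frame size of 1518 bytes, which must lie between
 * RTE_ETHER_MIN_LEN and dev_info->max_rx_pktlen.
 */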

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure() to avoid any non-anticipated behaviour.
	 * It is set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}
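
/*
 * Usage note (illustrative): calling rte_eth_dev_configure(port_id, 0, 0,
 * &conf) lets the driver choose its preferred Rx/Tx queue counts, with
 * RTE_ETH_DEV_FALLBACK_RX_NBQUEUES/RTE_ETH_DEV_FALLBACK_TX_NBQUEUES as
 * the final fallback; passing 0 for only one of the two literally
 * requests zero queues of that type.
 */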

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		    struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore MAC now if device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		if (*dev->dev_ops->link_update == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}
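
/*
 * Usage sketch (illustrative only; example_* is a placeholder name): the
 * canonical bring-up order - configure the port, set up at least one Rx
 * and one Tx queue, then start. A zero descriptor count picks the driver
 * (or EAL) default ring size, and SOCKET_ID_ANY leaves NUMA placement to
 * the library.
 */
static __rte_unused int
example_bring_up_port(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_NONE },
	};
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port_id, 0, 0, SOCKET_ID_ANY, NULL, mp);
	if (ret != 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 0, SOCKET_ID_ANY, NULL);
	if (ret != 0)
		return ret;
	return rte_eth_dev_start(port_id);
}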

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret == 0)
		dev->data->dev_started = 0;
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_up == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));

	rte_ethdev_trace_set_link_up(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_down == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));

	rte_ethdev_trace_set_link_down(port_id, ret);

	return ret;
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*
	 * Secondary process needs to close device to release process private
	 * resources. But secondary process should not be obliged to wait
	 * for device stop before closing ethdev.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
	    dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_reset == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = eth_err(port_id, dev->dev_ops->dev_reset(dev));

	rte_ethdev_trace_reset(port_id, ret);

	return ret;
}
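
/*
 * Usage sketch (illustrative only; example_* is a placeholder name):
 * applications typically call rte_eth_dev_reset() from their
 * RTE_ETH_EVENT_INTR_RESET callback and then reconfigure and restart the
 * port, e.g. as in example_bring_up_port() above.
 */
static __rte_unused int
example_handle_port_reset(uint16_t port_id)
{
	int ret;

	ret = rte_eth_dev_reset(port_id);
	if (ret != 0)
		return ret;
	/* reconfigure queues and restart the port here */
	return 0;
}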

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	if (*dev->dev_ops->is_removed == NULL)
		return 0;

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	rte_ethdev_trace_is_removed(port_id, ret);

	return ret;
}

static int
rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
			 uint16_t min_length)
{
	uint16_t data_room_size;

	/*
	 * Check the size of the mbuf data buffer, this value
	 * must be provided in the private data of the memory pool.
	 * First check that the memory pool has valid private data.
	 */
	if (mp->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
			mp->name, mp->private_data_size,
			(unsigned int)
			sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	data_room_size = rte_pktmbuf_data_room_size(mp);
	if (data_room_size < offset + min_length) {
		RTE_ETHDEV_LOG(ERR,
			"%s mbuf_data_room_size %u < %u (%u + %u)\n",
			mp->name, data_room_size,
			offset + min_length, offset, min_length);
		return -EINVAL;
	}
	return 0;
}
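
/*
 * Worked example for the check above: a pool created with
 * rte_pktmbuf_pool_create(..., RTE_MBUF_DEFAULT_BUF_SIZE, ...) provides
 * a data room of 2048 bytes plus RTE_PKTMBUF_HEADROOM, so it passes for
 * any offset + min_length combination up to that data room size.
 */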
1790 */ 1791 if (mp->private_data_size < 1792 sizeof(struct rte_pktmbuf_pool_private)) { 1793 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 1794 mp->name, mp->private_data_size, 1795 (unsigned int) 1796 sizeof(struct rte_pktmbuf_pool_private)); 1797 return -ENOSPC; 1798 } 1799 data_room_size = rte_pktmbuf_data_room_size(mp); 1800 if (data_room_size < offset + min_length) { 1801 RTE_ETHDEV_LOG(ERR, 1802 "%s mbuf_data_room_size %u < %u (%u + %u)\n", 1803 mp->name, data_room_size, 1804 offset + min_length, offset, min_length); 1805 return -EINVAL; 1806 } 1807 return 0; 1808 } 1809 1810 static int 1811 eth_dev_buffer_split_get_supported_hdrs_helper(uint16_t port_id, uint32_t **ptypes) 1812 { 1813 int cnt; 1814 1815 cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0); 1816 if (cnt <= 0) 1817 return cnt; 1818 1819 *ptypes = malloc(sizeof(uint32_t) * cnt); 1820 if (*ptypes == NULL) 1821 return -ENOMEM; 1822 1823 cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, *ptypes, cnt); 1824 if (cnt <= 0) { 1825 free(*ptypes); 1826 *ptypes = NULL; 1827 } 1828 return cnt; 1829 } 1830 1831 static int 1832 rte_eth_rx_queue_check_split(uint16_t port_id, 1833 const struct rte_eth_rxseg_split *rx_seg, 1834 uint16_t n_seg, uint32_t *mbp_buf_size, 1835 const struct rte_eth_dev_info *dev_info) 1836 { 1837 const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; 1838 struct rte_mempool *mp_first; 1839 uint32_t offset_mask; 1840 uint16_t seg_idx; 1841 int ret = 0; 1842 int ptype_cnt; 1843 uint32_t *ptypes; 1844 uint32_t prev_proto_hdrs = RTE_PTYPE_UNKNOWN; 1845 int i; 1846 1847 if (n_seg > seg_capa->max_nseg) { 1848 RTE_ETHDEV_LOG(ERR, 1849 "Requested Rx segments %u exceed supported %u\n", 1850 n_seg, seg_capa->max_nseg); 1851 return -EINVAL; 1852 } 1853 /* 1854 * Check the sizes and offsets against buffer sizes 1855 * for each segment specified in extended configuration. 1856 */ 1857 mp_first = rx_seg[0].mp; 1858 offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1; 1859 1860 ptypes = NULL; 1861 ptype_cnt = eth_dev_buffer_split_get_supported_hdrs_helper(port_id, &ptypes); 1862 1863 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 1864 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 1865 uint32_t length = rx_seg[seg_idx].length; 1866 uint32_t offset = rx_seg[seg_idx].offset; 1867 uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr; 1868 1869 if (mpl == NULL) { 1870 RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); 1871 ret = -EINVAL; 1872 goto out; 1873 } 1874 if (seg_idx != 0 && mp_first != mpl && 1875 seg_capa->multi_pools == 0) { 1876 RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n"); 1877 ret = -ENOTSUP; 1878 goto out; 1879 } 1880 if (offset != 0) { 1881 if (seg_capa->offset_allowed == 0) { 1882 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n"); 1883 ret = -ENOTSUP; 1884 goto out; 1885 } 1886 if (offset & offset_mask) { 1887 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n", 1888 offset, 1889 seg_capa->offset_align_log2); 1890 ret = -EINVAL; 1891 goto out; 1892 } 1893 } 1894 1895 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 1896 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 1897 if (proto_hdr != 0) { 1898 /* Split based on protocol headers. 

static int
rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools,
				uint16_t n_mempools, uint32_t *min_buf_size,
				const struct rte_eth_dev_info *dev_info)
{
	uint16_t pool_idx;
	int ret;

	if (n_mempools > dev_info->max_rx_mempools) {
		RTE_ETHDEV_LOG(ERR,
			"Too many Rx mempools %u vs maximum %u\n",
			n_mempools, dev_info->max_rx_mempools);
		return -EINVAL;
	}

	for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) {
		struct rte_mempool *mp = rx_mempools[pool_idx];

		if (mp == NULL) {
			RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n");
			return -EINVAL;
		}

		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
					       dev_info->min_rx_bufsize);
		if (ret != 0)
			return ret;

		*min_buf_size = RTE_MIN(*min_buf_size,
				rte_pktmbuf_data_room_size(mp));
	}

	return 0;
}
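
/*
 * Usage note (illustrative): for the multi-pool model checked above, an
 * application fills rxconf->rx_mempools with e.g. a small-buffer and a
 * large-buffer pool, sets rxconf->rx_nmempool accordingly, and leaves
 * the mp argument of rte_eth_rx_queue_setup() NULL so the PMD can pick
 * a pool per received packet.
 */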

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint64_t rx_offloads;
	uint32_t mbp_buf_size = UINT32_MAX;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->rx_queue_setup == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rx_offloads = dev->data->dev_conf.rxmode.offloads;
	if (rx_conf != NULL)
		rx_offloads |= rx_conf->offloads;

	/* Ensure that we have one and only one source of Rx buffers */
	if ((mp != NULL) +
	    (rx_conf != NULL && rx_conf->rx_nseg > 0) +
	    (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) {
		RTE_ETHDEV_LOG(ERR,
			"Ambiguous Rx mempools configuration\n");
		return -EINVAL;
	}

	if (mp != NULL) {
		/* Single pool configuration check. */
		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
					       dev_info.min_rx_bufsize);
		if (ret != 0)
			return ret;

		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
	} else if (rx_conf != NULL && rx_conf->rx_nseg > 0) {
		const struct rte_eth_rxseg_split *rx_seg;
		uint16_t n_seg;

		/* Extended multi-segment configuration check. */
		if (rx_conf->rx_seg == NULL) {
			RTE_ETHDEV_LOG(ERR,
				"Memory pool is null and no multi-segment configuration provided\n");
			return -EINVAL;
		}

		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
		n_seg = rx_conf->rx_nseg;

		if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
			ret = rte_eth_rx_queue_check_split(port_id, rx_seg, n_seg,
							   &mbp_buf_size,
							   &dev_info);
			if (ret != 0)
				return ret;
		} else {
			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
			return -EINVAL;
		}
	} else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) {
		/* Extended multi-pool configuration check. */
		if (rx_conf->rx_mempools == NULL) {
			RTE_ETHDEV_LOG(ERR, "Memory pools array is null\n");
			return -EINVAL;
		}

		ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools,
						      rx_conf->rx_nmempool,
						      &mbp_buf_size,
						      &dev_info);
		if (ret != 0)
			return ret;
	} else {
		RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n");
		return -EINVAL;
	}

	/* Use default specified by driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
	    nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
	    nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
		(dev->data->rx_queue_state[rx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_rxq_release(dev, rx_queue_id);

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;

	/*
	 * Newly added offloads for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, rx_queue_id, local_conf.offloads,
			dev_info.rx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	if (local_conf.share_group > 0 &&
	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
			port_id, rx_queue_id, local_conf.share_group);
		return -EINVAL;
	}

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	/* Get the real Ethernet overhead length */
	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t overhead_len;
		uint32_t max_rx_pktlen;
		int ret;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->mtu + overhead_len;
		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			return ret;
	}

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, &local_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
		rx_conf, ret);
	return eth_err(port_id, ret);
}
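
/*
 * Usage sketch (illustrative only; example_* is a placeholder name):
 * enabling an extra per-queue offload on a single queue, which the check
 * above accepts only if the PMD reports the offload in
 * rx_queue_offload_capa.
 */
static __rte_unused int
example_setup_scatter_rx_queue(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	rxconf = dev_info.default_rxconf;
	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
	return rte_eth_rx_queue_setup(port_id, 0, 0, SOCKET_ID_ANY,
			&rxconf, mp);
}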
2127 */ 2128 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2129 local_conf.offloads) { 2130 RTE_ETHDEV_LOG(ERR, 2131 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2132 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2133 port_id, rx_queue_id, local_conf.offloads, 2134 dev_info.rx_queue_offload_capa, 2135 __func__); 2136 return -EINVAL; 2137 } 2138 2139 if (local_conf.share_group > 0 && 2140 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 2141 RTE_ETHDEV_LOG(ERR, 2142 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n", 2143 port_id, rx_queue_id, local_conf.share_group); 2144 return -EINVAL; 2145 } 2146 2147 /* 2148 * If LRO is enabled, check that the maximum aggregated packet 2149 * size is supported by the configured device. 2150 */ 2151 /* Get the real Ethernet overhead length */ 2152 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 2153 uint32_t overhead_len; 2154 uint32_t max_rx_pktlen; 2155 int ret; 2156 2157 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2158 dev_info.max_mtu); 2159 max_rx_pktlen = dev->data->mtu + overhead_len; 2160 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2161 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2162 ret = eth_dev_check_lro_pkt_size(port_id, 2163 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2164 max_rx_pktlen, 2165 dev_info.max_lro_pkt_size); 2166 if (ret != 0) 2167 return ret; 2168 } 2169 2170 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2171 socket_id, &local_conf, mp); 2172 if (!ret) { 2173 if (!dev->data->min_rx_buf_size || 2174 dev->data->min_rx_buf_size > mbp_buf_size) 2175 dev->data->min_rx_buf_size = mbp_buf_size; 2176 } 2177 2178 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2179 rx_conf, ret); 2180 return eth_err(port_id, ret); 2181 } 2182 2183 int 2184 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2185 uint16_t nb_rx_desc, 2186 const struct rte_eth_hairpin_conf *conf) 2187 { 2188 int ret; 2189 struct rte_eth_dev *dev; 2190 struct rte_eth_hairpin_cap cap; 2191 int i; 2192 int count; 2193 2194 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2195 dev = &rte_eth_devices[port_id]; 2196 2197 if (rx_queue_id >= dev->data->nb_rx_queues) { 2198 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2199 return -EINVAL; 2200 } 2201 2202 if (conf == NULL) { 2203 RTE_ETHDEV_LOG(ERR, 2204 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2205 port_id); 2206 return -EINVAL; 2207 } 2208 2209 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2210 if (ret != 0) 2211 return ret; 2212 if (*dev->dev_ops->rx_hairpin_queue_setup == NULL) 2213 return -ENOTSUP; 2214 /* if nb_rx_desc is zero use max number of desc from the driver. 
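 * For instance, with a PMD reporting cap.max_nb_desc = 8192
 * (illustrative value), a zero request yields 8192 descriptors,
 * while any request above the cap is rejected just below.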
*/ 2215 if (nb_rx_desc == 0) 2216 nb_rx_desc = cap.max_nb_desc; 2217 if (nb_rx_desc > cap.max_nb_desc) { 2218 RTE_ETHDEV_LOG(ERR, 2219 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n", 2220 nb_rx_desc, cap.max_nb_desc); 2221 return -EINVAL; 2222 } 2223 if (conf->peer_count > cap.max_rx_2_tx) { 2224 RTE_ETHDEV_LOG(ERR, 2225 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n", 2226 conf->peer_count, cap.max_rx_2_tx); 2227 return -EINVAL; 2228 } 2229 if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) { 2230 RTE_ETHDEV_LOG(ERR, 2231 "Attempt to use locked device memory for Rx queue, which is not supported\n"); 2232 return -EINVAL; 2233 } 2234 if (conf->use_rte_memory && !cap.rx_cap.rte_memory) { 2235 RTE_ETHDEV_LOG(ERR, 2236 "Attempt to use DPDK memory for Rx queue, which is not supported\n"); 2237 return -EINVAL; 2238 } 2239 if (conf->use_locked_device_memory && conf->use_rte_memory) { 2240 RTE_ETHDEV_LOG(ERR, 2241 "Attempt to use mutually exclusive memory settings for Rx queue\n"); 2242 return -EINVAL; 2243 } 2244 if (conf->force_memory && 2245 !conf->use_locked_device_memory && 2246 !conf->use_rte_memory) { 2247 RTE_ETHDEV_LOG(ERR, 2248 "Attempt to force Rx queue memory settings, but none is set\n"); 2249 return -EINVAL; 2250 } 2251 if (conf->peer_count == 0) { 2252 RTE_ETHDEV_LOG(ERR, 2253 "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n", 2254 conf->peer_count); 2255 return -EINVAL; 2256 } 2257 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 2258 cap.max_nb_queues != UINT16_MAX; i++) { 2259 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 2260 count++; 2261 } 2262 if (count > cap.max_nb_queues) { 2263 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n", 2264 cap.max_nb_queues); 2265 return -EINVAL; 2266 } 2267 if (dev->data->dev_started) 2268 return -EBUSY; 2269 eth_dev_rxq_release(dev, rx_queue_id); 2270 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 2271 nb_rx_desc, conf); 2272 if (ret == 0) 2273 dev->data->rx_queue_state[rx_queue_id] = 2274 RTE_ETH_QUEUE_STATE_HAIRPIN; 2275 ret = eth_err(port_id, ret); 2276 2277 rte_eth_trace_rx_hairpin_queue_setup(port_id, rx_queue_id, nb_rx_desc, 2278 conf, ret); 2279 2280 return ret; 2281 } 2282 2283 int 2284 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2285 uint16_t nb_tx_desc, unsigned int socket_id, 2286 const struct rte_eth_txconf *tx_conf) 2287 { 2288 struct rte_eth_dev *dev; 2289 struct rte_eth_dev_info dev_info; 2290 struct rte_eth_txconf local_conf; 2291 int ret; 2292 2293 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2294 dev = &rte_eth_devices[port_id]; 2295 2296 if (tx_queue_id >= dev->data->nb_tx_queues) { 2297 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2298 return -EINVAL; 2299 } 2300 2301 if (*dev->dev_ops->tx_queue_setup == NULL) 2302 return -ENOTSUP; 2303 2304 ret = rte_eth_dev_info_get(port_id, &dev_info); 2305 if (ret != 0) 2306 return ret; 2307 2308 /* Use default specified by driver, if nb_tx_desc is zero */ 2309 if (nb_tx_desc == 0) { 2310 nb_tx_desc = dev_info.default_txportconf.ring_size; 2311 /* If driver default is zero, fall back on EAL default */ 2312 if (nb_tx_desc == 0) 2313 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2314 } 2315 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2316 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2317 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2318 RTE_ETHDEV_LOG(ERR, 2319 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2320 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2321 dev_info.tx_desc_lim.nb_min, 2322 dev_info.tx_desc_lim.nb_align); 2323 return -EINVAL; 2324 } 2325 2326 if (dev->data->dev_started && 2327 !(dev_info.dev_capa & 2328 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2329 return -EBUSY; 2330 2331 if (dev->data->dev_started && 2332 (dev->data->tx_queue_state[tx_queue_id] != 2333 RTE_ETH_QUEUE_STATE_STOPPED)) 2334 return -EBUSY; 2335 2336 eth_dev_txq_release(dev, tx_queue_id); 2337 2338 if (tx_conf == NULL) 2339 tx_conf = &dev_info.default_txconf; 2340 2341 local_conf = *tx_conf; 2342 2343 /* 2344 * If an offloading has already been enabled in 2345 * rte_eth_dev_configure(), it has been enabled on all queues, 2346 * so there is no need to enable it in this queue again. 2347 * The local_conf.offloads input to underlying PMD only carries 2348 * those offloadings which are only enabled on this queue and 2349 * not enabled on all queues. 2350 */ 2351 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2352 2353 /* 2354 * New added offloadings for this queue are those not enabled in 2355 * rte_eth_dev_configure() and they must be per-queue type. 2356 * A pure per-port offloading can't be enabled on a queue while 2357 * disabled on another queue. A pure per-port offloading can't 2358 * be enabled for any queue as new added one if it hasn't been 2359 * enabled in rte_eth_dev_configure(). 2360 */ 2361 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2362 local_conf.offloads) { 2363 RTE_ETHDEV_LOG(ERR, 2364 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2365 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2366 port_id, tx_queue_id, local_conf.offloads, 2367 dev_info.tx_queue_offload_capa, 2368 __func__); 2369 return -EINVAL; 2370 } 2371 2372 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf); 2373 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2374 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2375 } 2376 2377 int 2378 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2379 uint16_t nb_tx_desc, 2380 const struct rte_eth_hairpin_conf *conf) 2381 { 2382 struct rte_eth_dev *dev; 2383 struct rte_eth_hairpin_cap cap; 2384 int i; 2385 int count; 2386 int ret; 2387 2388 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2389 dev = &rte_eth_devices[port_id]; 2390 2391 if (tx_queue_id >= dev->data->nb_tx_queues) { 2392 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2393 return -EINVAL; 2394 } 2395 2396 if (conf == NULL) { 2397 RTE_ETHDEV_LOG(ERR, 2398 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n", 2399 port_id); 2400 return -EINVAL; 2401 } 2402 2403 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2404 if (ret != 0) 2405 return ret; 2406 if (*dev->dev_ops->tx_hairpin_queue_setup == NULL) 2407 return -ENOTSUP; 2408 /* if nb_tx_desc is zero use max number of desc from the driver. */
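/*
 * Illustrative end-to-end hairpin flow between two ports (a sketch
 * under assumptions, not a sequence taken from this file; the
 * hairpin_conf fields named are real, the values are examples).
 * With hairpin_conf.manual_bind = 1 and hairpin_conf.tx_explicit
 * chosen to match the application model:
 *
 *   rte_eth_rx_hairpin_queue_setup(rx_port, qid, 0, &hairpin_conf);
 *   rte_eth_tx_hairpin_queue_setup(tx_port, qid, 0, &hairpin_conf);
 *   rte_eth_dev_start(rx_port);
 *   rte_eth_dev_start(tx_port);
 *   rte_eth_hairpin_bind(tx_port, rx_port);
 */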
2409 if (nb_tx_desc == 0) 2410 nb_tx_desc = cap.max_nb_desc; 2411 if (nb_tx_desc > cap.max_nb_desc) { 2412 RTE_ETHDEV_LOG(ERR, 2413 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n", 2414 nb_tx_desc, cap.max_nb_desc); 2415 return -EINVAL; 2416 } 2417 if (conf->peer_count > cap.max_tx_2_rx) { 2418 RTE_ETHDEV_LOG(ERR, 2419 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n", 2420 conf->peer_count, cap.max_tx_2_rx); 2421 return -EINVAL; 2422 } 2423 if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) { 2424 RTE_ETHDEV_LOG(ERR, 2425 "Attempt to use locked device memory for Tx queue, which is not supported\n"); 2426 return -EINVAL; 2427 } 2428 if (conf->use_rte_memory && !cap.tx_cap.rte_memory) { 2429 RTE_ETHDEV_LOG(ERR, 2430 "Attempt to use DPDK memory for Tx queue, which is not supported\n"); 2431 return -EINVAL; 2432 } 2433 if (conf->use_locked_device_memory && conf->use_rte_memory) { 2434 RTE_ETHDEV_LOG(ERR, 2435 "Attempt to use mutually exclusive memory settings for Tx queue\n"); 2436 return -EINVAL; 2437 } 2438 if (conf->force_memory && 2439 !conf->use_locked_device_memory && 2440 !conf->use_rte_memory) { 2441 RTE_ETHDEV_LOG(ERR, 2442 "Attempt to force Tx queue memory settings, but none is set\n"); 2443 return -EINVAL; 2444 } 2445 if (conf->peer_count == 0) { 2446 RTE_ETHDEV_LOG(ERR, 2447 "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n", 2448 conf->peer_count); 2449 return -EINVAL; 2450 } 2451 for (i = 0, count = 0; i < dev->data->nb_tx_queues && 2452 cap.max_nb_queues != UINT16_MAX; i++) { 2453 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) 2454 count++; 2455 } 2456 if (count > cap.max_nb_queues) { 2457 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n", 2458 cap.max_nb_queues); 2459 return -EINVAL; 2460 } 2461 if (dev->data->dev_started) 2462 return -EBUSY; 2463 eth_dev_txq_release(dev, tx_queue_id); 2464 ret = (*dev->dev_ops->tx_hairpin_queue_setup) 2465 (dev, tx_queue_id, nb_tx_desc, conf); 2466 if (ret == 0) 2467 dev->data->tx_queue_state[tx_queue_id] = 2468 RTE_ETH_QUEUE_STATE_HAIRPIN; 2469 ret = eth_err(port_id, ret); 2470 2471 rte_eth_trace_tx_hairpin_queue_setup(port_id, tx_queue_id, nb_tx_desc, 2472 conf, ret); 2473 2474 return ret; 2475 } 2476 2477 int 2478 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port) 2479 { 2480 struct rte_eth_dev *dev; 2481 int ret; 2482 2483 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2484 dev = &rte_eth_devices[tx_port]; 2485 2486 if (dev->data->dev_started == 0) { 2487 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port); 2488 return -EBUSY; 2489 } 2490 2491 if (*dev->dev_ops->hairpin_bind == NULL) 2492 return -ENOTSUP; 2493 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); 2494 if (ret != 0) 2495 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d" 2496 " to Rx %d (%d - all ports)\n", 2497 tx_port, rx_port, RTE_MAX_ETHPORTS); 2498 2499 rte_eth_trace_hairpin_bind(tx_port, rx_port, ret); 2500 2501 return ret; 2502 } 2503 2504 int 2505 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port) 2506 { 2507 struct rte_eth_dev *dev; 2508 int ret; 2509 2510 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2511 dev = &rte_eth_devices[tx_port]; 2512 2513 if (dev->data->dev_started == 0) { 2514 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port); 2515 return -EBUSY; 2516 } 2517 2518 if (*dev->dev_ops->hairpin_unbind == NULL) 2519 return -ENOTSUP; 2520 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); 2521 if (ret != 0) 2522
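/* in the log below, rx_port equal to RTE_MAX_ETHPORTS denotes all
 * bound Rx ports, hence the "(%d - all ports)" legend */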
RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2523 " from Rx %d (%d - all ports)\n", 2524 tx_port, rx_port, RTE_MAX_ETHPORTS); 2525 2526 rte_eth_trace_hairpin_unbind(tx_port, rx_port, ret); 2527 2528 return ret; 2529 } 2530 2531 int 2532 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2533 size_t len, uint32_t direction) 2534 { 2535 struct rte_eth_dev *dev; 2536 int ret; 2537 2538 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2539 dev = &rte_eth_devices[port_id]; 2540 2541 if (peer_ports == NULL) { 2542 RTE_ETHDEV_LOG(ERR, 2543 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2544 port_id); 2545 return -EINVAL; 2546 } 2547 2548 if (len == 0) { 2549 RTE_ETHDEV_LOG(ERR, 2550 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2551 port_id); 2552 return -EINVAL; 2553 } 2554 2555 if (*dev->dev_ops->hairpin_get_peer_ports == NULL) 2556 return -ENOTSUP; 2557 2558 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2559 len, direction); 2560 if (ret < 0) 2561 RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2562 port_id, direction ? "Rx" : "Tx"); 2563 2564 rte_eth_trace_hairpin_get_peer_ports(port_id, peer_ports, len, 2565 direction, ret); 2566 2567 return ret; 2568 } 2569 2570 void 2571 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2572 void *userdata __rte_unused) 2573 { 2574 rte_pktmbuf_free_bulk(pkts, unsent); 2575 2576 rte_eth_trace_tx_buffer_drop_callback((void **)pkts, unsent); 2577 } 2578 2579 void 2580 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2581 void *userdata) 2582 { 2583 uint64_t *count = userdata; 2584 2585 rte_pktmbuf_free_bulk(pkts, unsent); 2586 *count += unsent; 2587 2588 rte_eth_trace_tx_buffer_count_callback((void **)pkts, unsent, *count); 2589 } 2590 2591 int 2592 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2593 buffer_tx_error_fn cbfn, void *userdata) 2594 { 2595 if (buffer == NULL) { 2596 RTE_ETHDEV_LOG(ERR, 2597 "Cannot set Tx buffer error callback to NULL buffer\n"); 2598 return -EINVAL; 2599 } 2600 2601 buffer->error_callback = cbfn; 2602 buffer->error_userdata = userdata; 2603 2604 rte_eth_trace_tx_buffer_set_err_callback(buffer); 2605 2606 return 0; 2607 } 2608 2609 int 2610 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2611 { 2612 int ret = 0; 2613 2614 if (buffer == NULL) { 2615 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2616 return -EINVAL; 2617 } 2618 2619 buffer->size = size; 2620 if (buffer->error_callback == NULL) { 2621 ret = rte_eth_tx_buffer_set_err_callback( 2622 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2623 } 2624 2625 rte_eth_trace_tx_buffer_init(buffer, size, ret); 2626 2627 return ret; 2628 } 2629 2630 int 2631 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2632 { 2633 struct rte_eth_dev *dev; 2634 int ret; 2635 2636 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2637 dev = &rte_eth_devices[port_id]; 2638 2639 if (*dev->dev_ops->tx_done_cleanup == NULL) 2640 return -ENOTSUP; 2641 2642 /* Call driver to free pending mbufs. 
*/ 2643 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2644 free_cnt); 2645 ret = eth_err(port_id, ret); 2646 2647 rte_eth_trace_tx_done_cleanup(port_id, queue_id, free_cnt, ret); 2648 2649 return ret; 2650 } 2651 2652 int 2653 rte_eth_promiscuous_enable(uint16_t port_id) 2654 { 2655 struct rte_eth_dev *dev; 2656 int diag = 0; 2657 2658 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2659 dev = &rte_eth_devices[port_id]; 2660 2661 if (dev->data->promiscuous == 1) 2662 return 0; 2663 2664 if (*dev->dev_ops->promiscuous_enable == NULL) 2665 return -ENOTSUP; 2666 2667 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2668 dev->data->promiscuous = (diag == 0) ? 1 : 0; 2669 2670 diag = eth_err(port_id, diag); 2671 2672 rte_eth_trace_promiscuous_enable(port_id, dev->data->promiscuous, 2673 diag); 2674 2675 return diag; 2676 } 2677 2678 int 2679 rte_eth_promiscuous_disable(uint16_t port_id) 2680 { 2681 struct rte_eth_dev *dev; 2682 int diag = 0; 2683 2684 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2685 dev = &rte_eth_devices[port_id]; 2686 2687 if (dev->data->promiscuous == 0) 2688 return 0; 2689 2690 if (*dev->dev_ops->promiscuous_disable == NULL) 2691 return -ENOTSUP; 2692 2693 dev->data->promiscuous = 0; 2694 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2695 if (diag != 0) 2696 dev->data->promiscuous = 1; 2697 2698 diag = eth_err(port_id, diag); 2699 2700 rte_eth_trace_promiscuous_disable(port_id, dev->data->promiscuous, 2701 diag); 2702 2703 return diag; 2704 } 2705 2706 int 2707 rte_eth_promiscuous_get(uint16_t port_id) 2708 { 2709 struct rte_eth_dev *dev; 2710 2711 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2712 dev = &rte_eth_devices[port_id]; 2713 2714 rte_eth_trace_promiscuous_get(port_id, dev->data->promiscuous); 2715 2716 return dev->data->promiscuous; 2717 } 2718 2719 int 2720 rte_eth_allmulticast_enable(uint16_t port_id) 2721 { 2722 struct rte_eth_dev *dev; 2723 int diag; 2724 2725 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2726 dev = &rte_eth_devices[port_id]; 2727 2728 if (dev->data->all_multicast == 1) 2729 return 0; 2730 2731 if (*dev->dev_ops->allmulticast_enable == NULL) 2732 return -ENOTSUP; 2733 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2734 dev->data->all_multicast = (diag == 0) ? 
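/* as with promiscuous mode above, the flag records whether the
 * driver call succeeded */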
1 : 0; 2735 2736 diag = eth_err(port_id, diag); 2737 2738 rte_eth_trace_allmulticast_enable(port_id, dev->data->all_multicast, 2739 diag); 2740 2741 return diag; 2742 } 2743 2744 int 2745 rte_eth_allmulticast_disable(uint16_t port_id) 2746 { 2747 struct rte_eth_dev *dev; 2748 int diag; 2749 2750 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2751 dev = &rte_eth_devices[port_id]; 2752 2753 if (dev->data->all_multicast == 0) 2754 return 0; 2755 2756 if (*dev->dev_ops->allmulticast_disable == NULL) 2757 return -ENOTSUP; 2758 dev->data->all_multicast = 0; 2759 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2760 if (diag != 0) 2761 dev->data->all_multicast = 1; 2762 2763 diag = eth_err(port_id, diag); 2764 2765 rte_eth_trace_allmulticast_disable(port_id, dev->data->all_multicast, 2766 diag); 2767 2768 return diag; 2769 } 2770 2771 int 2772 rte_eth_allmulticast_get(uint16_t port_id) 2773 { 2774 struct rte_eth_dev *dev; 2775 2776 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2777 dev = &rte_eth_devices[port_id]; 2778 2779 rte_eth_trace_allmulticast_get(port_id, dev->data->all_multicast); 2780 2781 return dev->data->all_multicast; 2782 } 2783 2784 int 2785 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2786 { 2787 struct rte_eth_dev *dev; 2788 2789 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2790 dev = &rte_eth_devices[port_id]; 2791 2792 if (eth_link == NULL) { 2793 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2794 port_id); 2795 return -EINVAL; 2796 } 2797 2798 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2799 rte_eth_linkstatus_get(dev, eth_link); 2800 else { 2801 if (*dev->dev_ops->link_update == NULL) 2802 return -ENOTSUP; 2803 (*dev->dev_ops->link_update)(dev, 1); 2804 *eth_link = dev->data->dev_link; 2805 } 2806 2807 rte_eth_trace_link_get(port_id, eth_link); 2808 2809 return 0; 2810 } 2811 2812 int 2813 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2814 { 2815 struct rte_eth_dev *dev; 2816 2817 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2818 dev = &rte_eth_devices[port_id]; 2819 2820 if (eth_link == NULL) { 2821 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2822 port_id); 2823 return -EINVAL; 2824 } 2825 2826 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2827 rte_eth_linkstatus_get(dev, eth_link); 2828 else { 2829 if (*dev->dev_ops->link_update == NULL) 2830 return -ENOTSUP; 2831 (*dev->dev_ops->link_update)(dev, 0); 2832 *eth_link = dev->data->dev_link; 2833 } 2834 2835 rte_eth_trace_link_get_nowait(port_id, eth_link); 2836 2837 return 0; 2838 } 2839 2840 const char * 2841 rte_eth_link_speed_to_str(uint32_t link_speed) 2842 { 2843 const char *ret; 2844 2845 switch (link_speed) { 2846 case RTE_ETH_SPEED_NUM_NONE: 2847 ret = "None"; 2848 break; 2849 case RTE_ETH_SPEED_NUM_10M: 2850 ret = "10 Mbps"; 2851 break; 2852 case RTE_ETH_SPEED_NUM_100M: 2853 ret = "100 Mbps"; 2854 break; 2855 case RTE_ETH_SPEED_NUM_1G: 2856 ret = "1 Gbps"; 2857 break; 2858 case RTE_ETH_SPEED_NUM_2_5G: 2859 ret = "2.5 Gbps"; 2860 break; 2861 case RTE_ETH_SPEED_NUM_5G: 2862 ret = "5 Gbps"; 2863 break; 2864 case RTE_ETH_SPEED_NUM_10G: 2865 ret = "10 Gbps"; 2866 break; 2867 case RTE_ETH_SPEED_NUM_20G: 2868 ret = "20 Gbps"; 2869 break; 2870 case RTE_ETH_SPEED_NUM_25G: 2871 ret = "25 Gbps"; 2872 break; 2873 case RTE_ETH_SPEED_NUM_40G: 2874 ret = "40 Gbps"; 2875 break; 2876 case RTE_ETH_SPEED_NUM_50G: 2877 ret = "50 Gbps"; 2878 break; 2879 case RTE_ETH_SPEED_NUM_56G: 2880 ret 
= "56 Gbps"; 2881 break; 2882 case RTE_ETH_SPEED_NUM_100G: 2883 ret = "100 Gbps"; 2884 break; 2885 case RTE_ETH_SPEED_NUM_200G: 2886 ret = "200 Gbps"; 2887 break; 2888 case RTE_ETH_SPEED_NUM_400G: 2889 ret = "400 Gbps"; 2890 break; 2891 case RTE_ETH_SPEED_NUM_UNKNOWN: 2892 ret = "Unknown"; 2893 break; 2894 default: 2895 ret = "Invalid"; 2896 } 2897 2898 rte_eth_trace_link_speed_to_str(link_speed, ret); 2899 2900 return ret; 2901 } 2902 2903 int 2904 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2905 { 2906 int ret; 2907 2908 if (str == NULL) { 2909 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2910 return -EINVAL; 2911 } 2912 2913 if (len == 0) { 2914 RTE_ETHDEV_LOG(ERR, 2915 "Cannot convert link to string with zero size\n"); 2916 return -EINVAL; 2917 } 2918 2919 if (eth_link == NULL) { 2920 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2921 return -EINVAL; 2922 } 2923 2924 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 2925 ret = snprintf(str, len, "Link down"); 2926 else 2927 ret = snprintf(str, len, "Link up at %s %s %s", 2928 rte_eth_link_speed_to_str(eth_link->link_speed), 2929 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 2930 "FDX" : "HDX", 2931 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 2932 "Autoneg" : "Fixed"); 2933 2934 rte_eth_trace_link_to_str(len, eth_link, str, ret); 2935 2936 return ret; 2937 } 2938 2939 int 2940 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2941 { 2942 struct rte_eth_dev *dev; 2943 int ret; 2944 2945 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2946 dev = &rte_eth_devices[port_id]; 2947 2948 if (stats == NULL) { 2949 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2950 port_id); 2951 return -EINVAL; 2952 } 2953 2954 memset(stats, 0, sizeof(*stats)); 2955 2956 if (*dev->dev_ops->stats_get == NULL) 2957 return -ENOTSUP; 2958 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2959 ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2960 2961 rte_eth_trace_stats_get(port_id, stats, ret); 2962 2963 return ret; 2964 } 2965 2966 int 2967 rte_eth_stats_reset(uint16_t port_id) 2968 { 2969 struct rte_eth_dev *dev; 2970 int ret; 2971 2972 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2973 dev = &rte_eth_devices[port_id]; 2974 2975 if (*dev->dev_ops->stats_reset == NULL) 2976 return -ENOTSUP; 2977 ret = (*dev->dev_ops->stats_reset)(dev); 2978 if (ret != 0) 2979 return eth_err(port_id, ret); 2980 2981 dev->data->rx_mbuf_alloc_failed = 0; 2982 2983 rte_eth_trace_stats_reset(port_id); 2984 2985 return 0; 2986 } 2987 2988 static inline int 2989 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2990 { 2991 uint16_t nb_rxqs, nb_txqs; 2992 int count; 2993 2994 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2995 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2996 2997 count = RTE_NB_STATS; 2998 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2999 count += nb_rxqs * RTE_NB_RXQ_STATS; 3000 count += nb_txqs * RTE_NB_TXQ_STATS; 3001 } 3002 3003 return count; 3004 } 3005 3006 static int 3007 eth_dev_get_xstats_count(uint16_t port_id) 3008 { 3009 struct rte_eth_dev *dev; 3010 int count; 3011 3012 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3013 dev = &rte_eth_devices[port_id]; 3014 if (dev->dev_ops->xstats_get_names != NULL) { 3015 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 3016 if (count < 0) 3017 return eth_err(port_id, count); 3018 } else 
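/* no driver callback: only the basic stats are counted */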
3019 count = 0; 3020 3021 3022 count += eth_dev_get_xstats_basic_count(dev); 3023 3024 return count; 3025 } 3026 3027 int 3028 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 3029 uint64_t *id) 3030 { 3031 int cnt_xstats, idx_xstat; 3032 3033 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3034 3035 if (xstat_name == NULL) { 3036 RTE_ETHDEV_LOG(ERR, 3037 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 3038 port_id); 3039 return -ENOMEM; 3040 } 3041 3042 if (id == NULL) { 3043 RTE_ETHDEV_LOG(ERR, 3044 "Cannot get ethdev port %u xstats ID to NULL\n", 3045 port_id); 3046 return -ENOMEM; 3047 } 3048 3049 /* Get count */ 3050 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 3051 if (cnt_xstats < 0) { 3052 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 3053 return -ENODEV; 3054 } 3055 3056 /* Get id-name lookup table */ 3057 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 3058 3059 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 3060 port_id, xstats_names, cnt_xstats, NULL)) { 3061 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 3062 return -1; 3063 } 3064 3065 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 3066 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 3067 *id = idx_xstat; 3068 3069 rte_eth_trace_xstats_get_id_by_name(port_id, 3070 xstat_name, *id); 3071 3072 return 0; 3073 }; 3074 } 3075 3076 return -EINVAL; 3077 } 3078 3079 /* retrieve basic stats names */ 3080 static int 3081 eth_basic_stats_get_names(struct rte_eth_dev *dev, 3082 struct rte_eth_xstat_name *xstats_names) 3083 { 3084 int cnt_used_entries = 0; 3085 uint32_t idx, id_queue; 3086 uint16_t num_q; 3087 3088 for (idx = 0; idx < RTE_NB_STATS; idx++) { 3089 strlcpy(xstats_names[cnt_used_entries].name, 3090 eth_dev_stats_strings[idx].name, 3091 sizeof(xstats_names[0].name)); 3092 cnt_used_entries++; 3093 } 3094 3095 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3096 return cnt_used_entries; 3097 3098 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3099 for (id_queue = 0; id_queue < num_q; id_queue++) { 3100 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 3101 snprintf(xstats_names[cnt_used_entries].name, 3102 sizeof(xstats_names[0].name), 3103 "rx_q%u_%s", 3104 id_queue, eth_dev_rxq_stats_strings[idx].name); 3105 cnt_used_entries++; 3106 } 3107 3108 } 3109 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3110 for (id_queue = 0; id_queue < num_q; id_queue++) { 3111 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 3112 snprintf(xstats_names[cnt_used_entries].name, 3113 sizeof(xstats_names[0].name), 3114 "tx_q%u_%s", 3115 id_queue, eth_dev_txq_stats_strings[idx].name); 3116 cnt_used_entries++; 3117 } 3118 } 3119 return cnt_used_entries; 3120 } 3121 3122 /* retrieve ethdev extended statistics names */ 3123 int 3124 rte_eth_xstats_get_names_by_id(uint16_t port_id, 3125 struct rte_eth_xstat_name *xstats_names, unsigned int size, 3126 uint64_t *ids) 3127 { 3128 struct rte_eth_xstat_name *xstats_names_copy; 3129 unsigned int no_basic_stat_requested = 1; 3130 unsigned int no_ext_stat_requested = 1; 3131 unsigned int expected_entries; 3132 unsigned int basic_count; 3133 struct rte_eth_dev *dev; 3134 unsigned int i; 3135 int ret; 3136 3137 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3138 dev = &rte_eth_devices[port_id]; 3139 3140 basic_count = eth_dev_get_xstats_basic_count(dev); 3141 ret = eth_dev_get_xstats_count(port_id); 3142 if (ret < 0) 3143 return ret; 3144 
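/*
 * Callers typically size their arrays with the same two-step pattern
 * used elsewhere in this file (illustrative sketch):
 *
 *   int nb = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
 *   struct rte_eth_xstat_name names[nb];
 *   rte_eth_xstats_get_names_by_id(port_id, names, nb, NULL);
 */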
expected_entries = (unsigned int)ret; 3145 3146 /* Return max number of stats if no ids given */ 3147 if (!ids) { 3148 if (!xstats_names) 3149 return expected_entries; 3150 else if (xstats_names && size < expected_entries) 3151 return expected_entries; 3152 } 3153 3154 if (ids && !xstats_names) 3155 return -EINVAL; 3156 3157 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3158 uint64_t ids_copy[size]; 3159 3160 for (i = 0; i < size; i++) { 3161 if (ids[i] < basic_count) { 3162 no_basic_stat_requested = 0; 3163 break; 3164 } 3165 3166 /* 3167 * Convert ids to xstats ids that PMD knows. 3168 * ids known by user are basic + extended stats. 3169 */ 3170 ids_copy[i] = ids[i] - basic_count; 3171 } 3172 3173 if (no_basic_stat_requested) 3174 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3175 ids_copy, xstats_names, size); 3176 } 3177 3178 /* Retrieve all stats */ 3179 if (!ids) { 3180 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3181 expected_entries); 3182 if (num_stats < 0 || num_stats > (int)expected_entries) 3183 return num_stats; 3184 else 3185 return expected_entries; 3186 } 3187 3188 xstats_names_copy = calloc(expected_entries, 3189 sizeof(struct rte_eth_xstat_name)); 3190 3191 if (!xstats_names_copy) { 3192 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3193 return -ENOMEM; 3194 } 3195 3196 if (ids) { 3197 for (i = 0; i < size; i++) { 3198 if (ids[i] >= basic_count) { 3199 no_ext_stat_requested = 0; 3200 break; 3201 } 3202 } 3203 } 3204 3205 /* Fill xstats_names_copy structure */ 3206 if (ids && no_ext_stat_requested) { 3207 eth_basic_stats_get_names(dev, xstats_names_copy); 3208 } else { 3209 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3210 expected_entries); 3211 if (ret < 0) { 3212 free(xstats_names_copy); 3213 return ret; 3214 } 3215 } 3216 3217 /* Filter stats */ 3218 for (i = 0; i < size; i++) { 3219 if (ids[i] >= expected_entries) { 3220 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3221 free(xstats_names_copy); 3222 return -1; 3223 } 3224 xstats_names[i] = xstats_names_copy[ids[i]]; 3225 3226 rte_eth_trace_xstats_get_names_by_id(port_id, &xstats_names[i], 3227 ids[i]); 3228 } 3229 3230 free(xstats_names_copy); 3231 return size; 3232 } 3233 3234 int 3235 rte_eth_xstats_get_names(uint16_t port_id, 3236 struct rte_eth_xstat_name *xstats_names, 3237 unsigned int size) 3238 { 3239 struct rte_eth_dev *dev; 3240 int cnt_used_entries; 3241 int cnt_expected_entries; 3242 int cnt_driver_entries; 3243 int i; 3244 3245 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3246 if (xstats_names == NULL || cnt_expected_entries < 0 || 3247 (int)size < cnt_expected_entries) 3248 return cnt_expected_entries; 3249 3250 /* port_id checked in eth_dev_get_xstats_count() */ 3251 dev = &rte_eth_devices[port_id]; 3252 3253 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3254 3255 if (dev->dev_ops->xstats_get_names != NULL) { 3256 /* If there are any driver-specific xstats, append them 3257 * to end of list. 
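 * Driver entries then occupy indices starting at cnt_used_entries;
 * rte_eth_xstats_get() lays out values in the same order, so ids
 * remain consistent between names and values.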
3258 */ 3259 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 3260 dev, 3261 xstats_names + cnt_used_entries, 3262 size - cnt_used_entries); 3263 if (cnt_driver_entries < 0) 3264 return eth_err(port_id, cnt_driver_entries); 3265 cnt_used_entries += cnt_driver_entries; 3266 } 3267 3268 for (i = 0; i < cnt_used_entries; i++) 3269 rte_eth_trace_xstats_get_names(port_id, i, &xstats_names[i], 3270 size, cnt_used_entries); 3271 3272 return cnt_used_entries; 3273 } 3274 3275 3276 static int 3277 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 3278 { 3279 struct rte_eth_dev *dev; 3280 struct rte_eth_stats eth_stats; 3281 unsigned int count = 0, i, q; 3282 uint64_t val, *stats_ptr; 3283 uint16_t nb_rxqs, nb_txqs; 3284 int ret; 3285 3286 ret = rte_eth_stats_get(port_id, &eth_stats); 3287 if (ret < 0) 3288 return ret; 3289 3290 dev = &rte_eth_devices[port_id]; 3291 3292 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3293 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3294 3295 /* global stats */ 3296 for (i = 0; i < RTE_NB_STATS; i++) { 3297 stats_ptr = RTE_PTR_ADD(&eth_stats, 3298 eth_dev_stats_strings[i].offset); 3299 val = *stats_ptr; 3300 xstats[count++].value = val; 3301 } 3302 3303 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3304 return count; 3305 3306 /* per-rxq stats */ 3307 for (q = 0; q < nb_rxqs; q++) { 3308 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 3309 stats_ptr = RTE_PTR_ADD(&eth_stats, 3310 eth_dev_rxq_stats_strings[i].offset + 3311 q * sizeof(uint64_t)); 3312 val = *stats_ptr; 3313 xstats[count++].value = val; 3314 } 3315 } 3316 3317 /* per-txq stats */ 3318 for (q = 0; q < nb_txqs; q++) { 3319 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 3320 stats_ptr = RTE_PTR_ADD(&eth_stats, 3321 eth_dev_txq_stats_strings[i].offset + 3322 q * sizeof(uint64_t)); 3323 val = *stats_ptr; 3324 xstats[count++].value = val; 3325 } 3326 } 3327 return count; 3328 } 3329 3330 /* retrieve ethdev extended statistics */ 3331 int 3332 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 3333 uint64_t *values, unsigned int size) 3334 { 3335 unsigned int no_basic_stat_requested = 1; 3336 unsigned int no_ext_stat_requested = 1; 3337 unsigned int num_xstats_filled; 3338 unsigned int basic_count; 3339 uint16_t expected_entries; 3340 struct rte_eth_dev *dev; 3341 unsigned int i; 3342 int ret; 3343 3344 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3345 dev = &rte_eth_devices[port_id]; 3346 3347 ret = eth_dev_get_xstats_count(port_id); 3348 if (ret < 0) 3349 return ret; 3350 expected_entries = (uint16_t)ret; 3351 struct rte_eth_xstat xstats[expected_entries]; 3352 basic_count = eth_dev_get_xstats_basic_count(dev); 3353 3354 /* Return max number of stats if no ids given */ 3355 if (!ids) { 3356 if (!values) 3357 return expected_entries; 3358 else if (values && size < expected_entries) 3359 return expected_entries; 3360 } 3361 3362 if (ids && !values) 3363 return -EINVAL; 3364 3365 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 3366 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); 3367 uint64_t ids_copy[size]; 3368 3369 for (i = 0; i < size; i++) { 3370 if (ids[i] < basic_count) { 3371 no_basic_stat_requested = 0; 3372 break; 3373 } 3374 3375 /* 3376 * Convert ids to xstats ids that PMD knows. 3377 * ids known by user are basic + extended stats.
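 * E.g. with 8 basic stats (illustrative count), user id 10 maps to
 * driver id 2.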
3378 */ 3379 ids_copy[i] = ids[i] - basic_count; 3380 } 3381 3382 if (no_basic_stat_requested) 3383 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3384 values, size); 3385 } 3386 3387 if (ids) { 3388 for (i = 0; i < size; i++) { 3389 if (ids[i] >= basic_count) { 3390 no_ext_stat_requested = 0; 3391 break; 3392 } 3393 } 3394 } 3395 3396 /* Fill the xstats structure */ 3397 if (ids && no_ext_stat_requested) 3398 ret = eth_basic_stats_get(port_id, xstats); 3399 else 3400 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3401 3402 if (ret < 0) 3403 return ret; 3404 num_xstats_filled = (unsigned int)ret; 3405 3406 /* Return all stats */ 3407 if (!ids) { 3408 for (i = 0; i < num_xstats_filled; i++) 3409 values[i] = xstats[i].value; 3410 return expected_entries; 3411 } 3412 3413 /* Filter stats */ 3414 for (i = 0; i < size; i++) { 3415 if (ids[i] >= expected_entries) { 3416 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3417 return -1; 3418 } 3419 values[i] = xstats[ids[i]].value; 3420 } 3421 3422 rte_eth_trace_xstats_get_by_id(port_id, ids, values, size); 3423 3424 return size; 3425 } 3426 3427 int 3428 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3429 unsigned int n) 3430 { 3431 struct rte_eth_dev *dev; 3432 unsigned int count, i; 3433 signed int xcount = 0; 3434 int ret; 3435 3436 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3437 if (xstats == NULL && n > 0) 3438 return -EINVAL; 3439 dev = &rte_eth_devices[port_id]; 3440 3441 count = eth_dev_get_xstats_basic_count(dev); 3442 3443 /* implemented by the driver */ 3444 if (dev->dev_ops->xstats_get != NULL) { 3445 /* Retrieve the xstats from the driver at the end of the 3446 * xstats struct. 3447 */ 3448 xcount = (*dev->dev_ops->xstats_get)(dev, 3449 (n > count) ? xstats + count : NULL, 3450 (n > count) ? 
n - count : 0); 3451 3452 if (xcount < 0) 3453 return eth_err(port_id, xcount); 3454 } 3455 3456 if (n < count + xcount || xstats == NULL) 3457 return count + xcount; 3458 3459 /* now fill the xstats structure */ 3460 ret = eth_basic_stats_get(port_id, xstats); 3461 if (ret < 0) 3462 return ret; 3463 count = ret; 3464 3465 for (i = 0; i < count; i++) 3466 xstats[i].id = i; 3467 /* add an offset to driver-specific stats */ 3468 for ( ; i < count + xcount; i++) 3469 xstats[i].id += count; 3470 3471 for (i = 0; i < n; i++) 3472 rte_eth_trace_xstats_get(port_id, xstats[i]); 3473 3474 return count + xcount; 3475 } 3476 3477 /* reset ethdev extended statistics */ 3478 int 3479 rte_eth_xstats_reset(uint16_t port_id) 3480 { 3481 struct rte_eth_dev *dev; 3482 3483 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3484 dev = &rte_eth_devices[port_id]; 3485 3486 /* implemented by the driver */ 3487 if (dev->dev_ops->xstats_reset != NULL) { 3488 int ret = eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3489 3490 rte_eth_trace_xstats_reset(port_id, ret); 3491 3492 return ret; 3493 } 3494 3495 /* fallback to default */ 3496 return rte_eth_stats_reset(port_id); 3497 } 3498 3499 static int 3500 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3501 uint8_t stat_idx, uint8_t is_rx) 3502 { 3503 struct rte_eth_dev *dev; 3504 3505 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3506 dev = &rte_eth_devices[port_id]; 3507 3508 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3509 return -EINVAL; 3510 3511 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3512 return -EINVAL; 3513 3514 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3515 return -EINVAL; 3516 3517 if (*dev->dev_ops->queue_stats_mapping_set == NULL) 3518 return -ENOTSUP; 3519 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3520 } 3521 3522 int 3523 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3524 uint8_t stat_idx) 3525 { 3526 int ret; 3527 3528 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3529 tx_queue_id, 3530 stat_idx, STAT_QMAP_TX)); 3531 3532 rte_ethdev_trace_set_tx_queue_stats_mapping(port_id, tx_queue_id, 3533 stat_idx, ret); 3534 3535 return ret; 3536 } 3537 3538 int 3539 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3540 uint8_t stat_idx) 3541 { 3542 int ret; 3543 3544 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3545 rx_queue_id, 3546 stat_idx, STAT_QMAP_RX)); 3547 3548 rte_ethdev_trace_set_rx_queue_stats_mapping(port_id, rx_queue_id, 3549 stat_idx, ret); 3550 3551 return ret; 3552 } 3553 3554 int 3555 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3556 { 3557 struct rte_eth_dev *dev; 3558 int ret; 3559 3560 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3561 dev = &rte_eth_devices[port_id]; 3562 3563 if (fw_version == NULL && fw_size > 0) { 3564 RTE_ETHDEV_LOG(ERR, 3565 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3566 port_id); 3567 return -EINVAL; 3568 } 3569 3570 if (*dev->dev_ops->fw_version_get == NULL) 3571 return -ENOTSUP; 3572 ret = eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3573 fw_version, fw_size)); 3574 3575 rte_ethdev_trace_fw_version_get(port_id, fw_version, fw_size, ret); 3576 3577 return ret; 3578 } 3579 3580 int 3581 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3582 { 3583 struct rte_eth_dev *dev; 3584 const struct rte_eth_desc_lim lim = { 3585 
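/* permissive fallbacks; a PMD's dev_infos_get() below overwrites
 * them with its real limits where supported */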
.nb_max = UINT16_MAX, 3586 .nb_min = 0, 3587 .nb_align = 1, 3588 .nb_seg_max = UINT16_MAX, 3589 .nb_mtu_seg_max = UINT16_MAX, 3590 }; 3591 int diag; 3592 3593 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3594 dev = &rte_eth_devices[port_id]; 3595 3596 if (dev_info == NULL) { 3597 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3598 port_id); 3599 return -EINVAL; 3600 } 3601 3602 /* 3603 * Init dev_info before port_id check since caller does not have 3604 * return status and does not know if get is successful or not. 3605 */ 3606 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3607 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3608 3609 dev_info->rx_desc_lim = lim; 3610 dev_info->tx_desc_lim = lim; 3611 dev_info->device = dev->device; 3612 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3613 RTE_ETHER_CRC_LEN; 3614 dev_info->max_mtu = UINT16_MAX; 3615 3616 if (*dev->dev_ops->dev_infos_get == NULL) 3617 return -ENOTSUP; 3618 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3619 if (diag != 0) { 3620 /* Cleanup already filled in device information */ 3621 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3622 return eth_err(port_id, diag); 3623 } 3624 3625 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3626 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3627 RTE_MAX_QUEUES_PER_PORT); 3628 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3629 RTE_MAX_QUEUES_PER_PORT); 3630 3631 dev_info->driver_name = dev->device->driver->name; 3632 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3633 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3634 3635 dev_info->dev_flags = &dev->data->dev_flags; 3636 3637 rte_ethdev_trace_info_get(port_id, dev_info); 3638 3639 return 0; 3640 } 3641 3642 int 3643 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3644 { 3645 struct rte_eth_dev *dev; 3646 3647 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3648 dev = &rte_eth_devices[port_id]; 3649 3650 if (dev_conf == NULL) { 3651 RTE_ETHDEV_LOG(ERR, 3652 "Cannot get ethdev port %u configuration to NULL\n", 3653 port_id); 3654 return -EINVAL; 3655 } 3656 3657 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3658 3659 rte_ethdev_trace_conf_get(port_id, dev_conf); 3660 3661 return 0; 3662 } 3663 3664 int 3665 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3666 uint32_t *ptypes, int num) 3667 { 3668 int i, j; 3669 struct rte_eth_dev *dev; 3670 const uint32_t *all_ptypes; 3671 3672 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3673 dev = &rte_eth_devices[port_id]; 3674 3675 if (ptypes == NULL && num > 0) { 3676 RTE_ETHDEV_LOG(ERR, 3677 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3678 port_id); 3679 return -EINVAL; 3680 } 3681 3682 if (*dev->dev_ops->dev_supported_ptypes_get == NULL) 3683 return 0; 3684 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3685 3686 if (!all_ptypes) 3687 return 0; 3688 3689 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3690 if (all_ptypes[i] & ptype_mask) { 3691 if (j < num) { 3692 ptypes[j] = all_ptypes[i]; 3693 3694 rte_ethdev_trace_get_supported_ptypes(port_id, 3695 j, num, ptypes[j]); 3696 } 3697 j++; 3698 } 3699 3700 return j; 3701 } 3702 3703 int 3704 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3705 uint32_t *set_ptypes, unsigned int num) 3706 { 3707 const uint32_t valid_ptype_masks[] = { 3708 RTE_PTYPE_L2_MASK, 
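/* each entry is a full layer mask; ptype_mask must be an exact
 * union of entries from this table, as checked below */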
3709 RTE_PTYPE_L3_MASK, 3710 RTE_PTYPE_L4_MASK, 3711 RTE_PTYPE_TUNNEL_MASK, 3712 RTE_PTYPE_INNER_L2_MASK, 3713 RTE_PTYPE_INNER_L3_MASK, 3714 RTE_PTYPE_INNER_L4_MASK, 3715 }; 3716 const uint32_t *all_ptypes; 3717 struct rte_eth_dev *dev; 3718 uint32_t unused_mask; 3719 unsigned int i, j; 3720 int ret; 3721 3722 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3723 dev = &rte_eth_devices[port_id]; 3724 3725 if (num > 0 && set_ptypes == NULL) { 3726 RTE_ETHDEV_LOG(ERR, 3727 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3728 port_id); 3729 return -EINVAL; 3730 } 3731 3732 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3733 *dev->dev_ops->dev_ptypes_set == NULL) { 3734 ret = 0; 3735 goto ptype_unknown; 3736 } 3737 3738 if (ptype_mask == 0) { 3739 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3740 ptype_mask); 3741 goto ptype_unknown; 3742 } 3743 3744 unused_mask = ptype_mask; 3745 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3746 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3747 if (mask && mask != valid_ptype_masks[i]) { 3748 ret = -EINVAL; 3749 goto ptype_unknown; 3750 } 3751 unused_mask &= ~valid_ptype_masks[i]; 3752 } 3753 3754 if (unused_mask) { 3755 ret = -EINVAL; 3756 goto ptype_unknown; 3757 } 3758 3759 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3760 if (all_ptypes == NULL) { 3761 ret = 0; 3762 goto ptype_unknown; 3763 } 3764 3765 /* 3766 * Accommodate as many set_ptypes as possible. If the supplied 3767 * set_ptypes array is insufficient fill it partially. 3768 */ 3769 for (i = 0, j = 0; set_ptypes != NULL && 3770 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3771 if (ptype_mask & all_ptypes[i]) { 3772 if (j < num - 1) { 3773 set_ptypes[j] = all_ptypes[i]; 3774 3775 rte_ethdev_trace_set_ptypes(port_id, j, num, 3776 set_ptypes[j]); 3777 3778 j++; 3779 continue; 3780 } 3781 break; 3782 } 3783 } 3784 3785 if (set_ptypes != NULL && j < num) 3786 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3787 3788 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3789 3790 ptype_unknown: 3791 if (num > 0) 3792 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3793 3794 return ret; 3795 } 3796 3797 int 3798 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3799 unsigned int num) 3800 { 3801 int32_t ret; 3802 struct rte_eth_dev *dev; 3803 struct rte_eth_dev_info dev_info; 3804 3805 if (ma == NULL) { 3806 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3807 return -EINVAL; 3808 } 3809 3810 /* will check for us that port_id is a valid one */ 3811 ret = rte_eth_dev_info_get(port_id, &dev_info); 3812 if (ret != 0) 3813 return ret; 3814 3815 dev = &rte_eth_devices[port_id]; 3816 num = RTE_MIN(dev_info.max_mac_addrs, num); 3817 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3818 3819 rte_eth_trace_macaddrs_get(port_id, num); 3820 3821 return num; 3822 } 3823 3824 int 3825 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3826 { 3827 struct rte_eth_dev *dev; 3828 3829 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3830 dev = &rte_eth_devices[port_id]; 3831 3832 if (mac_addr == NULL) { 3833 RTE_ETHDEV_LOG(ERR, 3834 "Cannot get ethdev port %u MAC address to NULL\n", 3835 port_id); 3836 return -EINVAL; 3837 } 3838 3839 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3840 3841 rte_eth_trace_macaddr_get(port_id, mac_addr); 3842 3843 return 0; 3844 } 3845 3846 int 3847 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3848 { 3849 struct rte_eth_dev *dev; 3850 3851 
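/* returns the cached MTU; rte_eth_dev_set_mtu() below keeps this
 * cache in sync with the driver on success */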
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3852 dev = &rte_eth_devices[port_id]; 3853 3854 if (mtu == NULL) { 3855 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3856 port_id); 3857 return -EINVAL; 3858 } 3859 3860 *mtu = dev->data->mtu; 3861 3862 rte_ethdev_trace_get_mtu(port_id, *mtu); 3863 3864 return 0; 3865 } 3866 3867 int 3868 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3869 { 3870 int ret; 3871 struct rte_eth_dev_info dev_info; 3872 struct rte_eth_dev *dev; 3873 3874 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3875 dev = &rte_eth_devices[port_id]; 3876 if (*dev->dev_ops->mtu_set == NULL) 3877 return -ENOTSUP; 3878 3879 /* 3880 * Check if the device supports dev_infos_get, if it does not 3881 * skip min_mtu/max_mtu validation here as this requires values 3882 * that are populated within the call to rte_eth_dev_info_get() 3883 * which relies on dev->dev_ops->dev_infos_get. 3884 */ 3885 if (*dev->dev_ops->dev_infos_get != NULL) { 3886 ret = rte_eth_dev_info_get(port_id, &dev_info); 3887 if (ret != 0) 3888 return ret; 3889 3890 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3891 if (ret != 0) 3892 return ret; 3893 } 3894 3895 if (dev->data->dev_configured == 0) { 3896 RTE_ETHDEV_LOG(ERR, 3897 "Port %u must be configured before MTU set\n", 3898 port_id); 3899 return -EINVAL; 3900 } 3901 3902 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3903 if (ret == 0) 3904 dev->data->mtu = mtu; 3905 3906 ret = eth_err(port_id, ret); 3907 3908 rte_ethdev_trace_set_mtu(port_id, mtu, ret); 3909 3910 return ret; 3911 } 3912 3913 int 3914 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3915 { 3916 struct rte_eth_dev *dev; 3917 int ret; 3918 3919 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3920 dev = &rte_eth_devices[port_id]; 3921 3922 if (!(dev->data->dev_conf.rxmode.offloads & 3923 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 3924 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 3925 port_id); 3926 return -ENOSYS; 3927 } 3928 3929 if (vlan_id > 4095) { 3930 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3931 port_id, vlan_id); 3932 return -EINVAL; 3933 } 3934 if (*dev->dev_ops->vlan_filter_set == NULL) 3935 return -ENOTSUP; 3936 3937 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3938 if (ret == 0) { 3939 struct rte_vlan_filter_conf *vfc; 3940 int vidx; 3941 int vbit; 3942 3943 vfc = &dev->data->vlan_filter_conf; 3944 vidx = vlan_id / 64; 3945 vbit = vlan_id % 64; 3946 3947 if (on) 3948 vfc->ids[vidx] |= RTE_BIT64(vbit); 3949 else 3950 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 3951 } 3952 3953 ret = eth_err(port_id, ret); 3954 3955 rte_ethdev_trace_vlan_filter(port_id, vlan_id, on, ret); 3956 3957 return ret; 3958 } 3959 3960 int 3961 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3962 int on) 3963 { 3964 struct rte_eth_dev *dev; 3965 3966 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3967 dev = &rte_eth_devices[port_id]; 3968 3969 if (rx_queue_id >= dev->data->nb_rx_queues) { 3970 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3971 return -EINVAL; 3972 } 3973 3974 if (*dev->dev_ops->vlan_strip_queue_set == NULL) 3975 return -ENOTSUP; 3976 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3977 3978 rte_ethdev_trace_set_vlan_strip_on_queue(port_id, rx_queue_id, on); 3979 3980 return 0; 3981 } 3982 3983 int 3984 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3985 enum rte_vlan_type vlan_type, 3986 uint16_t tpid) 3987 { 3988 struct rte_eth_dev *dev; 3989 int 
ret; 3990 3991 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3992 dev = &rte_eth_devices[port_id]; 3993 3994 if (*dev->dev_ops->vlan_tpid_set == NULL) 3995 return -ENOTSUP; 3996 ret = eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3997 tpid)); 3998 3999 rte_ethdev_trace_set_vlan_ether_type(port_id, vlan_type, tpid, ret); 4000 4001 return ret; 4002 } 4003 4004 int 4005 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 4006 { 4007 struct rte_eth_dev_info dev_info; 4008 struct rte_eth_dev *dev; 4009 int ret = 0; 4010 int mask = 0; 4011 int cur, org = 0; 4012 uint64_t orig_offloads; 4013 uint64_t dev_offloads; 4014 uint64_t new_offloads; 4015 4016 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4017 dev = &rte_eth_devices[port_id]; 4018 4019 /* save original values in case of failure */ 4020 orig_offloads = dev->data->dev_conf.rxmode.offloads; 4021 dev_offloads = orig_offloads; 4022 4023 /* check which option changed by application */ 4024 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 4025 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 4026 if (cur != org) { 4027 if (cur) 4028 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4029 else 4030 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4031 mask |= RTE_ETH_VLAN_STRIP_MASK; 4032 } 4033 4034 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 4035 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 4036 if (cur != org) { 4037 if (cur) 4038 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4039 else 4040 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4041 mask |= RTE_ETH_VLAN_FILTER_MASK; 4042 } 4043 4044 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 4045 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 4046 if (cur != org) { 4047 if (cur) 4048 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4049 else 4050 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4051 mask |= RTE_ETH_VLAN_EXTEND_MASK; 4052 } 4053 4054 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 4055 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 4056 if (cur != org) { 4057 if (cur) 4058 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4059 else 4060 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4061 mask |= RTE_ETH_QINQ_STRIP_MASK; 4062 } 4063 4064 /*no change*/ 4065 if (mask == 0) 4066 return ret; 4067 4068 ret = rte_eth_dev_info_get(port_id, &dev_info); 4069 if (ret != 0) 4070 return ret; 4071 4072 /* Rx VLAN offloading must be within its device capabilities */ 4073 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 4074 new_offloads = dev_offloads & ~orig_offloads; 4075 RTE_ETHDEV_LOG(ERR, 4076 "Ethdev port_id=%u requested new added VLAN offloads " 4077 "0x%" PRIx64 " must be within Rx offloads capabilities " 4078 "0x%" PRIx64 " in %s()\n", 4079 port_id, new_offloads, dev_info.rx_offload_capa, 4080 __func__); 4081 return -EINVAL; 4082 } 4083 4084 if (*dev->dev_ops->vlan_offload_set == NULL) 4085 return -ENOTSUP; 4086 dev->data->dev_conf.rxmode.offloads = dev_offloads; 4087 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 4088 if (ret) { 4089 /* hit an error restore original values */ 4090 dev->data->dev_conf.rxmode.offloads = orig_offloads; 4091 } 4092 4093 ret = eth_err(port_id, ret); 4094 4095 rte_ethdev_trace_set_vlan_offload(port_id, offload_mask, ret); 4096 4097 return ret; 4098 } 4099 4100 int 4101 rte_eth_dev_get_vlan_offload(uint16_t port_id) 4102 { 4103 struct rte_eth_dev *dev; 4104 uint64_t *dev_offloads; 4105 int ret = 0; 4106 4107 
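/*
 * Read-modify-write sketch (illustrative): because
 * rte_eth_dev_set_vlan_offload() treats absent flags as "disable",
 * a caller adding stripping without clearing other VLAN offloads
 * can do:
 *
 *   int mask = rte_eth_dev_get_vlan_offload(port_id);
 *   mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
 *   rte_eth_dev_set_vlan_offload(port_id, mask);
 */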
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4108 dev = &rte_eth_devices[port_id]; 4109 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 4110 4111 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 4112 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 4113 4114 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 4115 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 4116 4117 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 4118 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 4119 4120 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 4121 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 4122 4123 rte_ethdev_trace_get_vlan_offload(port_id, ret); 4124 4125 return ret; 4126 } 4127 4128 int 4129 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 4130 { 4131 struct rte_eth_dev *dev; 4132 int ret; 4133 4134 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4135 dev = &rte_eth_devices[port_id]; 4136 4137 if (*dev->dev_ops->vlan_pvid_set == NULL) 4138 return -ENOTSUP; 4139 ret = eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 4140 4141 rte_ethdev_trace_set_vlan_pvid(port_id, pvid, on, ret); 4142 4143 return ret; 4144 } 4145 4146 int 4147 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4148 { 4149 struct rte_eth_dev *dev; 4150 int ret; 4151 4152 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4153 dev = &rte_eth_devices[port_id]; 4154 4155 if (fc_conf == NULL) { 4156 RTE_ETHDEV_LOG(ERR, 4157 "Cannot get ethdev port %u flow control config to NULL\n", 4158 port_id); 4159 return -EINVAL; 4160 } 4161 4162 if (*dev->dev_ops->flow_ctrl_get == NULL) 4163 return -ENOTSUP; 4164 memset(fc_conf, 0, sizeof(*fc_conf)); 4165 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 4166 4167 rte_ethdev_trace_flow_ctrl_get(port_id, fc_conf, ret); 4168 4169 return ret; 4170 } 4171 4172 int 4173 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4174 { 4175 struct rte_eth_dev *dev; 4176 int ret; 4177 4178 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4179 dev = &rte_eth_devices[port_id]; 4180 4181 if (fc_conf == NULL) { 4182 RTE_ETHDEV_LOG(ERR, 4183 "Cannot set ethdev port %u flow control from NULL config\n", 4184 port_id); 4185 return -EINVAL; 4186 } 4187 4188 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 4189 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 4190 return -EINVAL; 4191 } 4192 4193 if (*dev->dev_ops->flow_ctrl_set == NULL) 4194 return -ENOTSUP; 4195 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 4196 4197 rte_ethdev_trace_flow_ctrl_set(port_id, fc_conf, ret); 4198 4199 return ret; 4200 } 4201 4202 int 4203 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 4204 struct rte_eth_pfc_conf *pfc_conf) 4205 { 4206 struct rte_eth_dev *dev; 4207 int ret; 4208 4209 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4210 dev = &rte_eth_devices[port_id]; 4211 4212 if (pfc_conf == NULL) { 4213 RTE_ETHDEV_LOG(ERR, 4214 "Cannot set ethdev port %u priority flow control from NULL config\n", 4215 port_id); 4216 return -EINVAL; 4217 } 4218 4219 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 4220 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 4221 return -EINVAL; 4222 } 4223 4224 /* High water, low water validation are device specific */ 4225 if (*dev->dev_ops->priority_flow_ctrl_set == NULL) 4226 return -ENOTSUP; 4227 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 4228 (dev, pfc_conf)); 4229 4230 rte_ethdev_trace_priority_flow_ctrl_set(port_id, 
pfc_conf, ret); 4231 4232 return ret; 4233 } 4234 4235 static int 4236 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4237 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4238 { 4239 if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) || 4240 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4241 if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) { 4242 RTE_ETHDEV_LOG(ERR, 4243 "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n", 4244 pfc_queue_conf->rx_pause.tx_qid, 4245 dev_info->nb_tx_queues); 4246 return -EINVAL; 4247 } 4248 4249 if (pfc_queue_conf->rx_pause.tc >= tc_max) { 4250 RTE_ETHDEV_LOG(ERR, 4251 "PFC TC not in range for Rx pause requested:%d max:%d\n", 4252 pfc_queue_conf->rx_pause.tc, tc_max); 4253 return -EINVAL; 4254 } 4255 } 4256 4257 return 0; 4258 } 4259 4260 static int 4261 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4262 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4263 { 4264 if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) || 4265 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4266 if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) { 4267 RTE_ETHDEV_LOG(ERR, 4268 "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n", 4269 pfc_queue_conf->tx_pause.rx_qid, 4270 dev_info->nb_rx_queues); 4271 return -EINVAL; 4272 } 4273 4274 if (pfc_queue_conf->tx_pause.tc >= tc_max) { 4275 RTE_ETHDEV_LOG(ERR, 4276 "PFC TC not in range for Tx pause requested:%d max:%d\n", 4277 pfc_queue_conf->tx_pause.tc, tc_max); 4278 return -EINVAL; 4279 } 4280 } 4281 4282 return 0; 4283 } 4284 4285 int 4286 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, 4287 struct rte_eth_pfc_queue_info *pfc_queue_info) 4288 { 4289 struct rte_eth_dev *dev; 4290 int ret; 4291 4292 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4293 dev = &rte_eth_devices[port_id]; 4294 4295 if (pfc_queue_info == NULL) { 4296 RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n", 4297 port_id); 4298 return -EINVAL; 4299 } 4300 4301 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL) 4302 return -ENOTSUP; 4303 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 4304 (dev, pfc_queue_info)); 4305 4306 rte_ethdev_trace_priority_flow_ctrl_queue_info_get(port_id, 4307 pfc_queue_info, ret); 4308 4309 return ret; 4310 } 4311 4312 int 4313 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, 4314 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4315 { 4316 struct rte_eth_pfc_queue_info pfc_info; 4317 struct rte_eth_dev_info dev_info; 4318 struct rte_eth_dev *dev; 4319 int ret; 4320 4321 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4322 dev = &rte_eth_devices[port_id]; 4323 4324 if (pfc_queue_conf == NULL) { 4325 RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n", 4326 port_id); 4327 return -EINVAL; 4328 } 4329 4330 ret = rte_eth_dev_info_get(port_id, &dev_info); 4331 if (ret != 0) 4332 return ret; 4333 4334 ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info); 4335 if (ret != 0) 4336 return ret; 4337 4338 if (pfc_info.tc_max == 0) { 4339 RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n", 4340 port_id); 4341 return -ENOTSUP; 4342 } 4343 4344 /* Check requested mode supported or not */ 4345 if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE && 4346 pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) { 4347 RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n", 4348 port_id); 4349 return -EINVAL; 4350 } 4351 4352 if 
(pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE && 4353 pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) { 4354 RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n", 4355 port_id); 4356 return -EINVAL; 4357 } 4358 4359 /* Validate Rx pause parameters */ 4360 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4361 pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) { 4362 ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max, 4363 pfc_queue_conf); 4364 if (ret != 0) 4365 return ret; 4366 } 4367 4368 /* Validate Tx pause parameters */ 4369 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4370 pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) { 4371 ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max, 4372 pfc_queue_conf); 4373 if (ret != 0) 4374 return ret; 4375 } 4376 4377 if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL) 4378 return -ENOTSUP; 4379 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_config) 4380 (dev, pfc_queue_conf)); 4381 4382 rte_ethdev_trace_priority_flow_ctrl_queue_configure(port_id, 4383 pfc_queue_conf, ret); 4384 4385 return ret; 4386 } 4387 4388 static int 4389 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 4390 uint16_t reta_size) 4391 { 4392 uint16_t i, num; 4393 4394 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 4395 for (i = 0; i < num; i++) { 4396 if (reta_conf[i].mask) 4397 return 0; 4398 } 4399 4400 return -EINVAL; 4401 } 4402 4403 static int 4404 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 4405 uint16_t reta_size, 4406 uint16_t max_rxq) 4407 { 4408 uint16_t i, idx, shift; 4409 4410 if (max_rxq == 0) { 4411 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4412 return -EINVAL; 4413 } 4414 4415 for (i = 0; i < reta_size; i++) { 4416 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4417 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4418 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 4419 (reta_conf[idx].reta[shift] >= max_rxq)) { 4420 RTE_ETHDEV_LOG(ERR, 4421 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4422 idx, shift, 4423 reta_conf[idx].reta[shift], max_rxq); 4424 return -EINVAL; 4425 } 4426 } 4427 4428 return 0; 4429 } 4430 4431 int 4432 rte_eth_dev_rss_reta_update(uint16_t port_id, 4433 struct rte_eth_rss_reta_entry64 *reta_conf, 4434 uint16_t reta_size) 4435 { 4436 enum rte_eth_rx_mq_mode mq_mode; 4437 struct rte_eth_dev *dev; 4438 int ret; 4439 4440 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4441 dev = &rte_eth_devices[port_id]; 4442 4443 if (reta_conf == NULL) { 4444 RTE_ETHDEV_LOG(ERR, 4445 "Cannot update ethdev port %u RSS RETA to NULL\n", 4446 port_id); 4447 return -EINVAL; 4448 } 4449 4450 if (reta_size == 0) { 4451 RTE_ETHDEV_LOG(ERR, 4452 "Cannot update ethdev port %u RSS RETA with zero size\n", 4453 port_id); 4454 return -EINVAL; 4455 } 4456 4457 /* Check mask bits */ 4458 ret = eth_check_reta_mask(reta_conf, reta_size); 4459 if (ret < 0) 4460 return ret; 4461 4462 /* Check entry value */ 4463 ret = eth_check_reta_entry(reta_conf, reta_size, 4464 dev->data->nb_rx_queues); 4465 if (ret < 0) 4466 return ret; 4467 4468 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4469 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4470 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4471 return -ENOTSUP; 4472 } 4473 4474 if (*dev->dev_ops->reta_update == NULL) 4475 return -ENOTSUP; 4476 ret = eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4477 reta_size)); 4478 4479 rte_ethdev_trace_rss_reta_update(port_id, reta_conf, reta_size, ret); 4480 4481 return 
ret; 4482 } 4483 4484 int 4485 rte_eth_dev_rss_reta_query(uint16_t port_id, 4486 struct rte_eth_rss_reta_entry64 *reta_conf, 4487 uint16_t reta_size) 4488 { 4489 struct rte_eth_dev *dev; 4490 int ret; 4491 4492 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4493 dev = &rte_eth_devices[port_id]; 4494 4495 if (reta_conf == NULL) { 4496 RTE_ETHDEV_LOG(ERR, 4497 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4498 port_id); 4499 return -EINVAL; 4500 } 4501 4502 /* Check mask bits */ 4503 ret = eth_check_reta_mask(reta_conf, reta_size); 4504 if (ret < 0) 4505 return ret; 4506 4507 if (*dev->dev_ops->reta_query == NULL) 4508 return -ENOTSUP; 4509 ret = eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4510 reta_size)); 4511 4512 rte_ethdev_trace_rss_reta_query(port_id, reta_conf, reta_size, ret); 4513 4514 return ret; 4515 } 4516 4517 int 4518 rte_eth_dev_rss_hash_update(uint16_t port_id, 4519 struct rte_eth_rss_conf *rss_conf) 4520 { 4521 struct rte_eth_dev *dev; 4522 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4523 enum rte_eth_rx_mq_mode mq_mode; 4524 int ret; 4525 4526 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4527 dev = &rte_eth_devices[port_id]; 4528 4529 if (rss_conf == NULL) { 4530 RTE_ETHDEV_LOG(ERR, 4531 "Cannot update ethdev port %u RSS hash from NULL config\n", 4532 port_id); 4533 return -EINVAL; 4534 } 4535 4536 ret = rte_eth_dev_info_get(port_id, &dev_info); 4537 if (ret != 0) 4538 return ret; 4539 4540 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4541 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4542 dev_info.flow_type_rss_offloads) { 4543 RTE_ETHDEV_LOG(ERR, 4544 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4545 port_id, rss_conf->rss_hf, 4546 dev_info.flow_type_rss_offloads); 4547 return -EINVAL; 4548 } 4549 4550 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4551 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4552 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4553 return -ENOTSUP; 4554 } 4555 4556 if (*dev->dev_ops->rss_hash_update == NULL) 4557 return -ENOTSUP; 4558 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4559 rss_conf)); 4560 4561 rte_ethdev_trace_rss_hash_update(port_id, rss_conf, ret); 4562 4563 return ret; 4564 } 4565 4566 int 4567 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4568 struct rte_eth_rss_conf *rss_conf) 4569 { 4570 struct rte_eth_dev *dev; 4571 int ret; 4572 4573 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4574 dev = &rte_eth_devices[port_id]; 4575 4576 if (rss_conf == NULL) { 4577 RTE_ETHDEV_LOG(ERR, 4578 "Cannot get ethdev port %u RSS hash config to NULL\n", 4579 port_id); 4580 return -EINVAL; 4581 } 4582 4583 if (*dev->dev_ops->rss_hash_conf_get == NULL) 4584 return -ENOTSUP; 4585 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4586 rss_conf)); 4587 4588 rte_ethdev_trace_rss_hash_conf_get(port_id, rss_conf, ret); 4589 4590 return ret; 4591 } 4592 4593 int 4594 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4595 struct rte_eth_udp_tunnel *udp_tunnel) 4596 { 4597 struct rte_eth_dev *dev; 4598 int ret; 4599 4600 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4601 dev = &rte_eth_devices[port_id]; 4602 4603 if (udp_tunnel == NULL) { 4604 RTE_ETHDEV_LOG(ERR, 4605 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4606 port_id); 4607 return -EINVAL; 4608 } 4609 4610 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4611 RTE_ETHDEV_LOG(ERR, "Invalid tunnel 
type\n"); 4612 return -EINVAL; 4613 } 4614 4615 if (*dev->dev_ops->udp_tunnel_port_add == NULL) 4616 return -ENOTSUP; 4617 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4618 udp_tunnel)); 4619 4620 rte_ethdev_trace_udp_tunnel_port_add(port_id, udp_tunnel, ret); 4621 4622 return ret; 4623 } 4624 4625 int 4626 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4627 struct rte_eth_udp_tunnel *udp_tunnel) 4628 { 4629 struct rte_eth_dev *dev; 4630 int ret; 4631 4632 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4633 dev = &rte_eth_devices[port_id]; 4634 4635 if (udp_tunnel == NULL) { 4636 RTE_ETHDEV_LOG(ERR, 4637 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4638 port_id); 4639 return -EINVAL; 4640 } 4641 4642 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4643 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4644 return -EINVAL; 4645 } 4646 4647 if (*dev->dev_ops->udp_tunnel_port_del == NULL) 4648 return -ENOTSUP; 4649 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4650 udp_tunnel)); 4651 4652 rte_ethdev_trace_udp_tunnel_port_delete(port_id, udp_tunnel, ret); 4653 4654 return ret; 4655 } 4656 4657 int 4658 rte_eth_led_on(uint16_t port_id) 4659 { 4660 struct rte_eth_dev *dev; 4661 int ret; 4662 4663 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4664 dev = &rte_eth_devices[port_id]; 4665 4666 if (*dev->dev_ops->dev_led_on == NULL) 4667 return -ENOTSUP; 4668 ret = eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4669 4670 rte_eth_trace_led_on(port_id, ret); 4671 4672 return ret; 4673 } 4674 4675 int 4676 rte_eth_led_off(uint16_t port_id) 4677 { 4678 struct rte_eth_dev *dev; 4679 int ret; 4680 4681 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4682 dev = &rte_eth_devices[port_id]; 4683 4684 if (*dev->dev_ops->dev_led_off == NULL) 4685 return -ENOTSUP; 4686 ret = eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4687 4688 rte_eth_trace_led_off(port_id, ret); 4689 4690 return ret; 4691 } 4692 4693 int 4694 rte_eth_fec_get_capability(uint16_t port_id, 4695 struct rte_eth_fec_capa *speed_fec_capa, 4696 unsigned int num) 4697 { 4698 struct rte_eth_dev *dev; 4699 int ret; 4700 4701 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4702 dev = &rte_eth_devices[port_id]; 4703 4704 if (speed_fec_capa == NULL && num > 0) { 4705 RTE_ETHDEV_LOG(ERR, 4706 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4707 port_id); 4708 return -EINVAL; 4709 } 4710 4711 if (*dev->dev_ops->fec_get_capability == NULL) 4712 return -ENOTSUP; 4713 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4714 4715 rte_eth_trace_fec_get_capability(port_id, speed_fec_capa, num, ret); 4716 4717 return ret; 4718 } 4719 4720 int 4721 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4722 { 4723 struct rte_eth_dev *dev; 4724 int ret; 4725 4726 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4727 dev = &rte_eth_devices[port_id]; 4728 4729 if (fec_capa == NULL) { 4730 RTE_ETHDEV_LOG(ERR, 4731 "Cannot get ethdev port %u current FEC mode to NULL\n", 4732 port_id); 4733 return -EINVAL; 4734 } 4735 4736 if (*dev->dev_ops->fec_get == NULL) 4737 return -ENOTSUP; 4738 ret = eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4739 4740 rte_eth_trace_fec_get(port_id, fec_capa, ret); 4741 4742 return ret; 4743 } 4744 4745 int 4746 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4747 { 4748 struct rte_eth_dev *dev; 4749 int ret; 4750 4751 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4752 dev 
= &rte_eth_devices[port_id]; 4753 4754 if (*dev->dev_ops->fec_set == NULL) 4755 return -ENOTSUP; 4756 ret = eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4757 4758 rte_eth_trace_fec_set(port_id, fec_capa, ret); 4759 4760 return ret; 4761 } 4762 4763 /* 4764 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4765 * an empty spot. 4766 */ 4767 static int 4768 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4769 { 4770 struct rte_eth_dev_info dev_info; 4771 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4772 unsigned i; 4773 int ret; 4774 4775 ret = rte_eth_dev_info_get(port_id, &dev_info); 4776 if (ret != 0) 4777 return -1; 4778 4779 for (i = 0; i < dev_info.max_mac_addrs; i++) 4780 if (memcmp(addr, &dev->data->mac_addrs[i], 4781 RTE_ETHER_ADDR_LEN) == 0) 4782 return i; 4783 4784 return -1; 4785 } 4786 4787 static const struct rte_ether_addr null_mac_addr; 4788 4789 int 4790 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4791 uint32_t pool) 4792 { 4793 struct rte_eth_dev *dev; 4794 int index; 4795 uint64_t pool_mask; 4796 int ret; 4797 4798 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4799 dev = &rte_eth_devices[port_id]; 4800 4801 if (addr == NULL) { 4802 RTE_ETHDEV_LOG(ERR, 4803 "Cannot add ethdev port %u MAC address from NULL address\n", 4804 port_id); 4805 return -EINVAL; 4806 } 4807 4808 if (*dev->dev_ops->mac_addr_add == NULL) 4809 return -ENOTSUP; 4810 4811 if (rte_is_zero_ether_addr(addr)) { 4812 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4813 port_id); 4814 return -EINVAL; 4815 } 4816 if (pool >= RTE_ETH_64_POOLS) { 4817 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 4818 return -EINVAL; 4819 } 4820 4821 index = eth_dev_get_mac_addr_index(port_id, addr); 4822 if (index < 0) { 4823 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4824 if (index < 0) { 4825 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4826 port_id); 4827 return -ENOSPC; 4828 } 4829 } else { 4830 pool_mask = dev->data->mac_pool_sel[index]; 4831 4832 /* Check if both MAC address and pool are already there; if so, do nothing */ 4833 if (pool_mask & RTE_BIT64(pool)) 4834 return 0; 4835 } 4836 4837 /* Update NIC */ 4838 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4839 4840 if (ret == 0) { 4841 /* Update address in NIC data structure */ 4842 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4843 4844 /* Update pool bitmap in NIC data structure */ 4845 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4846 } 4847 4848 ret = eth_err(port_id, ret); 4849 4850 rte_ethdev_trace_mac_addr_add(port_id, addr, pool, ret); 4851 4852 return ret; 4853 } 4854 4855 int 4856 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4857 { 4858 struct rte_eth_dev *dev; 4859 int index; 4860 4861 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4862 dev = &rte_eth_devices[port_id]; 4863 4864 if (addr == NULL) { 4865 RTE_ETHDEV_LOG(ERR, 4866 "Cannot remove ethdev port %u MAC address from NULL address\n", 4867 port_id); 4868 return -EINVAL; 4869 } 4870 4871 if (*dev->dev_ops->mac_addr_remove == NULL) 4872 return -ENOTSUP; 4873 4874 index = eth_dev_get_mac_addr_index(port_id, addr); 4875 if (index == 0) { 4876 RTE_ETHDEV_LOG(ERR, 4877 "Port %u: Cannot remove default MAC address\n", 4878 port_id); 4879 return -EADDRINUSE; 4880 } else if (index < 0) 4881 return 0; /* Do nothing if address wasn't found */ 4882 4883 /* Update NIC */ 4884
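/*
 * Note: the mac_addr_remove driver op below returns void, so once the
 * address has been found in the shadow table, removal cannot fail at
 * this level. A minimal add/remove round trip (illustrative sketch; the
 * address value is hypothetical, error handling omitted):
 *
 *	struct rte_ether_addr mac = {{0x02, 0x00, 0x00, 0x00, 0x00, 0x01}};
 *
 *	rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 *	...
 *	rte_eth_dev_mac_addr_remove(port_id, &mac);
 */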
(*dev->dev_ops->mac_addr_remove)(dev, index); 4885 4886 /* Update address in NIC data structure */ 4887 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4888 4889 /* reset pool bitmap */ 4890 dev->data->mac_pool_sel[index] = 0; 4891 4892 rte_ethdev_trace_mac_addr_remove(port_id, addr); 4893 4894 return 0; 4895 } 4896 4897 int 4898 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4899 { 4900 struct rte_eth_dev *dev; 4901 int ret; 4902 4903 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4904 dev = &rte_eth_devices[port_id]; 4905 4906 if (addr == NULL) { 4907 RTE_ETHDEV_LOG(ERR, 4908 "Cannot set ethdev port %u default MAC address from NULL address\n", 4909 port_id); 4910 return -EINVAL; 4911 } 4912 4913 if (!rte_is_valid_assigned_ether_addr(addr)) 4914 return -EINVAL; 4915 4916 if (*dev->dev_ops->mac_addr_set == NULL) 4917 return -ENOTSUP; 4918 4919 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4920 if (ret < 0) 4921 return ret; 4922 4923 /* Update default address in NIC data structure */ 4924 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4925 4926 rte_ethdev_trace_default_mac_addr_set(port_id, addr); 4927 4928 return 0; 4929 } 4930 4931 4932 /* 4933 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4934 * an empty spot. 4935 */ 4936 static int 4937 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4938 const struct rte_ether_addr *addr) 4939 { 4940 struct rte_eth_dev_info dev_info; 4941 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4942 unsigned i; 4943 int ret; 4944 4945 ret = rte_eth_dev_info_get(port_id, &dev_info); 4946 if (ret != 0) 4947 return -1; 4948 4949 if (!dev->data->hash_mac_addrs) 4950 return -1; 4951 4952 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4953 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4954 RTE_ETHER_ADDR_LEN) == 0) 4955 return i; 4956 4957 return -1; 4958 } 4959 4960 int 4961 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4962 uint8_t on) 4963 { 4964 int index; 4965 int ret; 4966 struct rte_eth_dev *dev; 4967 4968 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4969 dev = &rte_eth_devices[port_id]; 4970 4971 if (addr == NULL) { 4972 RTE_ETHDEV_LOG(ERR, 4973 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4974 port_id); 4975 return -EINVAL; 4976 } 4977 4978 if (rte_is_zero_ether_addr(addr)) { 4979 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4980 port_id); 4981 return -EINVAL; 4982 } 4983 4984 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4985 /* Check if it's already there, and do nothing */ 4986 if ((index >= 0) && on) 4987 return 0; 4988 4989 if (index < 0) { 4990 if (!on) { 4991 RTE_ETHDEV_LOG(ERR, 4992 "Port %u: the MAC address was not set in UTA\n", 4993 port_id); 4994 return -EINVAL; 4995 } 4996 4997 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4998 if (index < 0) { 4999 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 5000 port_id); 5001 return -ENOSPC; 5002 } 5003 } 5004 5005 if (*dev->dev_ops->uc_hash_table_set == NULL) 5006 return -ENOTSUP; 5007 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 5008 if (ret == 0) { 5009 /* Update address in NIC data structure */ 5010 if (on) 5011 rte_ether_addr_copy(addr, 5012 &dev->data->hash_mac_addrs[index]); 5013 else 5014 rte_ether_addr_copy(&null_mac_addr, 5015 &dev->data->hash_mac_addrs[index]); 5016 } 5017 5018 ret = eth_err(port_id, ret); 5019 5020 rte_ethdev_trace_uc_hash_table_set(port_id, 
on, ret); 5021 5022 return ret; 5023 } 5024 5025 int 5026 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 5027 { 5028 struct rte_eth_dev *dev; 5029 int ret; 5030 5031 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5032 dev = &rte_eth_devices[port_id]; 5033 5034 if (*dev->dev_ops->uc_all_hash_table_set == NULL) 5035 return -ENOTSUP; 5036 ret = eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, on)); 5037 5038 rte_ethdev_trace_uc_all_hash_table_set(port_id, on, ret); 5039 5040 return ret; 5041 } 5042 5043 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 5044 uint32_t tx_rate) 5045 { 5046 struct rte_eth_dev *dev; 5047 struct rte_eth_dev_info dev_info; 5048 struct rte_eth_link link; 5049 int ret; 5050 5051 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5052 dev = &rte_eth_devices[port_id]; 5053 5054 ret = rte_eth_dev_info_get(port_id, &dev_info); 5055 if (ret != 0) 5056 return ret; 5057 5058 link = dev->data->dev_link; 5059 5060 if (queue_idx >= dev_info.max_tx_queues) { 5061 RTE_ETHDEV_LOG(ERR, 5062 "Set queue rate limit: port %u: invalid queue ID=%u\n", 5063 port_id, queue_idx); 5064 return -EINVAL; 5065 } 5066 5067 if (tx_rate > link.link_speed) { 5068 RTE_ETHDEV_LOG(ERR, 5069 "Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n", 5070 tx_rate, link.link_speed); 5071 return -EINVAL; 5072 } 5073 5074 if (*dev->dev_ops->set_queue_rate_limit == NULL) 5075 return -ENOTSUP; 5076 ret = eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 5077 queue_idx, tx_rate)); 5078 5079 rte_eth_trace_set_queue_rate_limit(port_id, queue_idx, tx_rate, ret); 5080 5081 return ret; 5082 } 5083 5084 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, 5085 uint8_t avail_thresh) 5086 { 5087 struct rte_eth_dev *dev; 5088 int ret; 5089 5090 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5091 dev = &rte_eth_devices[port_id]; 5092 5093 if (queue_id >= dev->data->nb_rx_queues) { 5094 RTE_ETHDEV_LOG(ERR, 5095 "Set queue avail thresh: port %u: invalid queue ID=%u.\n", 5096 port_id, queue_id); 5097 return -EINVAL; 5098 } 5099 5100 if (avail_thresh > 99) { 5101 RTE_ETHDEV_LOG(ERR, 5102 "Set queue avail thresh: port %u: threshold should be <= 99.\n", 5103 port_id); 5104 return -EINVAL; 5105 } 5106 if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL) 5107 return -ENOTSUP; 5108 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev, 5109 queue_id, avail_thresh)); 5110 5111 rte_eth_trace_rx_avail_thresh_set(port_id, queue_id, avail_thresh, ret); 5112 5113 return ret; 5114 } 5115 5116 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, 5117 uint8_t *avail_thresh) 5118 { 5119 struct rte_eth_dev *dev; 5120 int ret; 5121 5122 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5123 dev = &rte_eth_devices[port_id]; 5124 5125 if (queue_id == NULL) 5126 return -EINVAL; 5127 if (*queue_id >= dev->data->nb_rx_queues) 5128 *queue_id = 0; 5129 5130 if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL) 5131 return -ENOTSUP; 5132 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev, 5133 queue_id, avail_thresh)); 5134 5135 rte_eth_trace_rx_avail_thresh_query(port_id, *queue_id, ret); 5136 5137 return ret; 5138 } 5139 5140 RTE_INIT(eth_dev_init_fp_ops) 5141 { 5142 uint32_t i; 5143 5144 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 5145 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 5146 } 5147 5148 RTE_INIT(eth_dev_init_cb_lists) 5149 { 5150 uint16_t i; 5151 5152 for (i = 0; i <
RTE_MAX_ETHPORTS; i++) 5153 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 5154 } 5155 5156 int 5157 rte_eth_dev_callback_register(uint16_t port_id, 5158 enum rte_eth_event_type event, 5159 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5160 { 5161 struct rte_eth_dev *dev; 5162 struct rte_eth_dev_callback *user_cb; 5163 uint16_t next_port; 5164 uint16_t last_port; 5165 5166 if (cb_fn == NULL) { 5167 RTE_ETHDEV_LOG(ERR, 5168 "Cannot register ethdev port %u callback from NULL\n", 5169 port_id); 5170 return -EINVAL; 5171 } 5172 5173 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5174 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 5175 return -EINVAL; 5176 } 5177 5178 if (port_id == RTE_ETH_ALL) { 5179 next_port = 0; 5180 last_port = RTE_MAX_ETHPORTS - 1; 5181 } else { 5182 next_port = last_port = port_id; 5183 } 5184 5185 rte_spinlock_lock(&eth_dev_cb_lock); 5186 5187 do { 5188 dev = &rte_eth_devices[next_port]; 5189 5190 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 5191 if (user_cb->cb_fn == cb_fn && 5192 user_cb->cb_arg == cb_arg && 5193 user_cb->event == event) { 5194 break; 5195 } 5196 } 5197 5198 /* create a new callback. */ 5199 if (user_cb == NULL) { 5200 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 5201 sizeof(struct rte_eth_dev_callback), 0); 5202 if (user_cb != NULL) { 5203 user_cb->cb_fn = cb_fn; 5204 user_cb->cb_arg = cb_arg; 5205 user_cb->event = event; 5206 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 5207 user_cb, next); 5208 } else { 5209 rte_spinlock_unlock(&eth_dev_cb_lock); 5210 rte_eth_dev_callback_unregister(port_id, event, 5211 cb_fn, cb_arg); 5212 return -ENOMEM; 5213 } 5214 5215 } 5216 } while (++next_port <= last_port); 5217 5218 rte_spinlock_unlock(&eth_dev_cb_lock); 5219 5220 rte_ethdev_trace_callback_register(port_id, event, cb_fn, cb_arg); 5221 5222 return 0; 5223 } 5224 5225 int 5226 rte_eth_dev_callback_unregister(uint16_t port_id, 5227 enum rte_eth_event_type event, 5228 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5229 { 5230 int ret; 5231 struct rte_eth_dev *dev; 5232 struct rte_eth_dev_callback *cb, *next; 5233 uint16_t next_port; 5234 uint16_t last_port; 5235 5236 if (cb_fn == NULL) { 5237 RTE_ETHDEV_LOG(ERR, 5238 "Cannot unregister ethdev port %u callback from NULL\n", 5239 port_id); 5240 return -EINVAL; 5241 } 5242 5243 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5244 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 5245 return -EINVAL; 5246 } 5247 5248 if (port_id == RTE_ETH_ALL) { 5249 next_port = 0; 5250 last_port = RTE_MAX_ETHPORTS - 1; 5251 } else { 5252 next_port = last_port = port_id; 5253 } 5254 5255 rte_spinlock_lock(&eth_dev_cb_lock); 5256 5257 do { 5258 dev = &rte_eth_devices[next_port]; 5259 ret = 0; 5260 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 5261 cb = next) { 5262 5263 next = TAILQ_NEXT(cb, next); 5264 5265 if (cb->cb_fn != cb_fn || cb->event != event || 5266 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 5267 continue; 5268 5269 /* 5270 * if this callback is not executing right now, 5271 * then remove it.
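* If it is currently executing, it is left in place and -EAGAIN is
* reported below so that the caller can retry once the callback has
* finished.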
5272 */ 5273 if (cb->active == 0) { 5274 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 5275 rte_free(cb); 5276 } else { 5277 ret = -EAGAIN; 5278 } 5279 } 5280 } while (++next_port <= last_port); 5281 5282 rte_spinlock_unlock(&eth_dev_cb_lock); 5283 5284 rte_ethdev_trace_callback_unregister(port_id, event, cb_fn, cb_arg, 5285 ret); 5286 5287 return ret; 5288 } 5289 5290 int 5291 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 5292 { 5293 uint32_t vec; 5294 struct rte_eth_dev *dev; 5295 struct rte_intr_handle *intr_handle; 5296 uint16_t qid; 5297 int rc; 5298 5299 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5300 dev = &rte_eth_devices[port_id]; 5301 5302 if (!dev->intr_handle) { 5303 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5304 return -ENOTSUP; 5305 } 5306 5307 intr_handle = dev->intr_handle; 5308 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5309 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5310 return -EPERM; 5311 } 5312 5313 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 5314 vec = rte_intr_vec_list_index_get(intr_handle, qid); 5315 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5316 5317 rte_ethdev_trace_rx_intr_ctl(port_id, qid, epfd, op, data, rc); 5318 5319 if (rc && rc != -EEXIST) { 5320 RTE_ETHDEV_LOG(ERR, 5321 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5322 port_id, qid, op, epfd, vec); 5323 } 5324 } 5325 5326 return 0; 5327 } 5328 5329 int 5330 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 5331 { 5332 struct rte_intr_handle *intr_handle; 5333 struct rte_eth_dev *dev; 5334 unsigned int efd_idx; 5335 uint32_t vec; 5336 int fd; 5337 5338 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 5339 dev = &rte_eth_devices[port_id]; 5340 5341 if (queue_id >= dev->data->nb_rx_queues) { 5342 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5343 return -1; 5344 } 5345 5346 if (!dev->intr_handle) { 5347 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5348 return -1; 5349 } 5350 5351 intr_handle = dev->intr_handle; 5352 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5353 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5354 return -1; 5355 } 5356 5357 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5358 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
5359 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 5360 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 5361 5362 rte_ethdev_trace_rx_intr_ctl_q_get_fd(port_id, queue_id, fd); 5363 5364 return fd; 5365 } 5366 5367 int 5368 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 5369 int epfd, int op, void *data) 5370 { 5371 uint32_t vec; 5372 struct rte_eth_dev *dev; 5373 struct rte_intr_handle *intr_handle; 5374 int rc; 5375 5376 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5377 dev = &rte_eth_devices[port_id]; 5378 5379 if (queue_id >= dev->data->nb_rx_queues) { 5380 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5381 return -EINVAL; 5382 } 5383 5384 if (!dev->intr_handle) { 5385 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5386 return -ENOTSUP; 5387 } 5388 5389 intr_handle = dev->intr_handle; 5390 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5391 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5392 return -EPERM; 5393 } 5394 5395 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5396 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5397 5398 rte_ethdev_trace_rx_intr_ctl_q(port_id, queue_id, epfd, op, data, rc); 5399 5400 if (rc && rc != -EEXIST) { 5401 RTE_ETHDEV_LOG(ERR, 5402 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5403 port_id, queue_id, op, epfd, vec); 5404 return rc; 5405 } 5406 5407 return 0; 5408 } 5409 5410 int 5411 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5412 uint16_t queue_id) 5413 { 5414 struct rte_eth_dev *dev; 5415 int ret; 5416 5417 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5418 dev = &rte_eth_devices[port_id]; 5419 5420 ret = eth_dev_validate_rx_queue(dev, queue_id); 5421 if (ret != 0) 5422 return ret; 5423 5424 if (*dev->dev_ops->rx_queue_intr_enable == NULL) 5425 return -ENOTSUP; 5426 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5427 5428 rte_ethdev_trace_rx_intr_enable(port_id, queue_id, ret); 5429 5430 return ret; 5431 } 5432 5433 int 5434 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5435 uint16_t queue_id) 5436 { 5437 struct rte_eth_dev *dev; 5438 int ret; 5439 5440 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5441 dev = &rte_eth_devices[port_id]; 5442 5443 ret = eth_dev_validate_rx_queue(dev, queue_id); 5444 if (ret != 0) 5445 return ret; 5446 5447 if (*dev->dev_ops->rx_queue_intr_disable == NULL) 5448 return -ENOTSUP; 5449 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5450 5451 rte_ethdev_trace_rx_intr_disable(port_id, queue_id, ret); 5452 5453 return ret; 5454 } 5455 5456 5457 const struct rte_eth_rxtx_callback * 5458 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5459 rte_rx_callback_fn fn, void *user_param) 5460 { 5461 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5462 rte_errno = ENOTSUP; 5463 return NULL; 5464 #endif 5465 struct rte_eth_dev *dev; 5466 5467 /* check input parameters */ 5468 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5469 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5470 rte_errno = EINVAL; 5471 return NULL; 5472 } 5473 dev = &rte_eth_devices[port_id]; 5474 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5475 rte_errno = EINVAL; 5476 return NULL; 5477 } 5478 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5479 5480 if (cb == NULL) { 5481 rte_errno = ENOMEM; 5482 return NULL; 5483 } 5484 5485 cb->fn.rx = fn; 5486 cb->param = user_param; 5487 5488 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5489 /* Add the callbacks in fifo order.
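* New callbacks are appended at the tail of the per-queue list, so they
* run in the order in which they were registered.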
*/ 5490 struct rte_eth_rxtx_callback *tail = 5491 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5492 5493 if (!tail) { 5494 /* Stores to cb->fn and cb->param should complete before 5495 * cb is visible to data plane. 5496 */ 5497 __atomic_store_n( 5498 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5499 cb, __ATOMIC_RELEASE); 5500 5501 } else { 5502 while (tail->next) 5503 tail = tail->next; 5504 /* Stores to cb->fn and cb->param should complete before 5505 * cb is visible to data plane. 5506 */ 5507 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5508 } 5509 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5510 5511 rte_eth_trace_add_rx_callback(port_id, queue_id, fn, user_param, cb); 5512 5513 return cb; 5514 } 5515 5516 const struct rte_eth_rxtx_callback * 5517 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5518 rte_rx_callback_fn fn, void *user_param) 5519 { 5520 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5521 rte_errno = ENOTSUP; 5522 return NULL; 5523 #endif 5524 /* check input parameters */ 5525 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5526 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5527 rte_errno = EINVAL; 5528 return NULL; 5529 } 5530 5531 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5532 5533 if (cb == NULL) { 5534 rte_errno = ENOMEM; 5535 return NULL; 5536 } 5537 5538 cb->fn.rx = fn; 5539 cb->param = user_param; 5540 5541 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5542 /* Add the callbacks at first position */ 5543 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5544 /* Stores to cb->fn, cb->param and cb->next should complete before 5545 * cb is visible to data plane threads. 5546 */ 5547 __atomic_store_n( 5548 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5549 cb, __ATOMIC_RELEASE); 5550 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5551 5552 rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param, 5553 cb); 5554 5555 return cb; 5556 } 5557 5558 const struct rte_eth_rxtx_callback * 5559 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5560 rte_tx_callback_fn fn, void *user_param) 5561 { 5562 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5563 rte_errno = ENOTSUP; 5564 return NULL; 5565 #endif 5566 struct rte_eth_dev *dev; 5567 5568 /* check input parameters */ 5569 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5570 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5571 rte_errno = EINVAL; 5572 return NULL; 5573 } 5574 5575 dev = &rte_eth_devices[port_id]; 5576 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5577 rte_errno = EINVAL; 5578 return NULL; 5579 } 5580 5581 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5582 5583 if (cb == NULL) { 5584 rte_errno = ENOMEM; 5585 return NULL; 5586 } 5587 5588 cb->fn.tx = fn; 5589 cb->param = user_param; 5590 5591 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5592 /* Add the callbacks in fifo order. */ 5593 struct rte_eth_rxtx_callback *tail = 5594 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5595 5596 if (!tail) { 5597 /* Stores to cb->fn and cb->param should complete before 5598 * cb is visible to data plane.
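* The release store below publishes the fully initialised callback to
* data-plane threads that walk this list without taking the lock.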
5609 */ 5610 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5611 } 5612 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5613 5614 rte_eth_trace_add_tx_callback(port_id, queue_id, fn, user_param, cb); 5615 5616 return cb; 5617 } 5618 5619 int 5620 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5621 const struct rte_eth_rxtx_callback *user_cb) 5622 { 5623 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5624 return -ENOTSUP; 5625 #endif 5626 /* Check input parameters. */ 5627 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5628 if (user_cb == NULL || 5629 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5630 return -EINVAL; 5631 5632 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5633 struct rte_eth_rxtx_callback *cb; 5634 struct rte_eth_rxtx_callback **prev_cb; 5635 int ret = -EINVAL; 5636 5637 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5638 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5639 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5640 cb = *prev_cb; 5641 if (cb == user_cb) { 5642 /* Remove the user cb from the callback list. */ 5643 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5644 ret = 0; 5645 break; 5646 } 5647 } 5648 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5649 5650 rte_eth_trace_remove_rx_callback(port_id, queue_id, user_cb, ret); 5651 5652 return ret; 5653 } 5654 5655 int 5656 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5657 const struct rte_eth_rxtx_callback *user_cb) 5658 { 5659 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5660 return -ENOTSUP; 5661 #endif 5662 /* Check input parameters. */ 5663 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5664 if (user_cb == NULL || 5665 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5666 return -EINVAL; 5667 5668 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5669 int ret = -EINVAL; 5670 struct rte_eth_rxtx_callback *cb; 5671 struct rte_eth_rxtx_callback **prev_cb; 5672 5673 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5674 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5675 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5676 cb = *prev_cb; 5677 if (cb == user_cb) { 5678 /* Remove the user cb from the callback list.
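* The unlink is a relaxed store because the spinlock only serialises
* writers; the callback memory is not freed here, and the caller must
* make sure no data-plane thread is still using it before releasing it.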
*/ 5679 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5680 ret = 0; 5681 break; 5682 } 5683 } 5684 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5685 5686 rte_eth_trace_remove_tx_callback(port_id, queue_id, user_cb, ret); 5687 5688 return ret; 5689 } 5690 5691 int 5692 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5693 struct rte_eth_rxq_info *qinfo) 5694 { 5695 struct rte_eth_dev *dev; 5696 5697 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5698 dev = &rte_eth_devices[port_id]; 5699 5700 if (queue_id >= dev->data->nb_rx_queues) { 5701 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5702 return -EINVAL; 5703 } 5704 5705 if (qinfo == NULL) { 5706 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5707 port_id, queue_id); 5708 return -EINVAL; 5709 } 5710 5711 if (dev->data->rx_queues == NULL || 5712 dev->data->rx_queues[queue_id] == NULL) { 5713 RTE_ETHDEV_LOG(ERR, 5714 "Rx queue %"PRIu16" of device with port_id=%" 5715 PRIu16" has not been set up\n", 5716 queue_id, port_id); 5717 return -EINVAL; 5718 } 5719 5720 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5721 RTE_ETHDEV_LOG(INFO, 5722 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5723 queue_id, port_id); 5724 return -EINVAL; 5725 } 5726 5727 if (*dev->dev_ops->rxq_info_get == NULL) 5728 return -ENOTSUP; 5729 5730 memset(qinfo, 0, sizeof(*qinfo)); 5731 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5732 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5733 5734 rte_eth_trace_rx_queue_info_get(port_id, queue_id, qinfo); 5735 5736 return 0; 5737 } 5738 5739 int 5740 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5741 struct rte_eth_txq_info *qinfo) 5742 { 5743 struct rte_eth_dev *dev; 5744 5745 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5746 dev = &rte_eth_devices[port_id]; 5747 5748 if (queue_id >= dev->data->nb_tx_queues) { 5749 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5750 return -EINVAL; 5751 } 5752 5753 if (qinfo == NULL) { 5754 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5755 port_id, queue_id); 5756 return -EINVAL; 5757 } 5758 5759 if (dev->data->tx_queues == NULL || 5760 dev->data->tx_queues[queue_id] == NULL) { 5761 RTE_ETHDEV_LOG(ERR, 5762 "Tx queue %"PRIu16" of device with port_id=%" 5763 PRIu16" has not been set up\n", 5764 queue_id, port_id); 5765 return -EINVAL; 5766 } 5767 5768 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5769 RTE_ETHDEV_LOG(INFO, 5770 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5771 queue_id, port_id); 5772 return -EINVAL; 5773 } 5774 5775 if (*dev->dev_ops->txq_info_get == NULL) 5776 return -ENOTSUP; 5777 5778 memset(qinfo, 0, sizeof(*qinfo)); 5779 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5780 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5781 5782 rte_eth_trace_tx_queue_info_get(port_id, queue_id, qinfo); 5783 5784 return 0; 5785 } 5786 5787 int 5788 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5789 struct rte_eth_burst_mode *mode) 5790 { 5791 struct rte_eth_dev *dev; 5792 int ret; 5793 5794 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5795 dev = &rte_eth_devices[port_id]; 5796 5797 if (queue_id >= dev->data->nb_rx_queues) { 5798 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5799 return -EINVAL; 5800 } 5801 5802 if (mode == NULL) { 5803 RTE_ETHDEV_LOG(ERR, 5804 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5805 port_id, queue_id); 5806 return -EINVAL; 5807 } 5808 5809 if (*dev->dev_ops->rx_burst_mode_get == NULL) 5810 return -ENOTSUP; 5811 memset(mode, 0, sizeof(*mode)); 5812 ret = eth_err(port_id, 5813 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5814 5815 rte_eth_trace_rx_burst_mode_get(port_id, queue_id, mode, ret); 5816 5817 return ret; 5818 } 5819 5820 int 5821 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5822 struct rte_eth_burst_mode *mode) 5823 { 5824 struct rte_eth_dev *dev; 5825 int ret; 5826 5827 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5828 dev = &rte_eth_devices[port_id]; 5829 5830 if (queue_id >= dev->data->nb_tx_queues) { 5831 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5832 return -EINVAL; 5833 } 5834 5835 if (mode == NULL) { 5836 RTE_ETHDEV_LOG(ERR, 5837 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5838 port_id, queue_id); 5839 return -EINVAL; 5840 } 5841 5842 if (*dev->dev_ops->tx_burst_mode_get == NULL) 5843 return -ENOTSUP; 5844 memset(mode, 0, sizeof(*mode)); 5845 ret = eth_err(port_id, 5846 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5847 5848 rte_eth_trace_tx_burst_mode_get(port_id, queue_id, mode, ret); 5849 5850 return ret; 5851 } 5852 5853 int 5854 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5855 struct rte_power_monitor_cond *pmc) 5856 { 5857 struct rte_eth_dev *dev; 5858 int ret; 5859 5860 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5861 dev = &rte_eth_devices[port_id]; 5862 5863 if (queue_id >= dev->data->nb_rx_queues) { 5864 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5865 return -EINVAL; 5866 } 5867 5868 if (pmc == NULL) { 5869 RTE_ETHDEV_LOG(ERR, 5870 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5871 port_id, queue_id); 5872 return -EINVAL; 5873 } 5874 5875 if (*dev->dev_ops->get_monitor_addr == NULL) 5876 return -ENOTSUP; 5877 ret = eth_err(port_id, 5878 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5879 5880 rte_eth_trace_get_monitor_addr(port_id, queue_id, pmc, ret); 5881 5882 return ret; 5883 } 5884 5885 int 5886 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5887 struct rte_ether_addr *mc_addr_set, 5888 uint32_t nb_mc_addr) 5889 { 5890 struct rte_eth_dev *dev; 5891 int ret; 5892 5893 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5894 dev = &rte_eth_devices[port_id]; 5895 5896 if (*dev->dev_ops->set_mc_addr_list == NULL) 5897 return -ENOTSUP; 5898 ret = eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5899 mc_addr_set, nb_mc_addr)); 5900 5901 rte_ethdev_trace_set_mc_addr_list(port_id, mc_addr_set, nb_mc_addr, 5902 ret); 5903 5904 return ret; 5905 } 5906 5907 int 5908 rte_eth_timesync_enable(uint16_t port_id) 5909 { 5910 struct rte_eth_dev *dev; 5911 int ret; 5912 5913 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5914 dev = &rte_eth_devices[port_id]; 5915 5916 if (*dev->dev_ops->timesync_enable == NULL) 5917 return -ENOTSUP; 5918 ret = eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5919 5920 rte_eth_trace_timesync_enable(port_id, ret); 5921 5922 return ret; 5923 } 5924 5925 int 5926 rte_eth_timesync_disable(uint16_t port_id) 5927 { 5928 struct rte_eth_dev *dev; 5929 int ret; 5930 5931 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5932 dev = &rte_eth_devices[port_id]; 5933 5934 if (*dev->dev_ops->timesync_disable == NULL) 5935 return -ENOTSUP; 5936 ret = eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5937 5938 
rte_eth_trace_timesync_disable(port_id, ret); 5939 5940 return ret; 5941 } 5942 5943 int 5944 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5945 uint32_t flags) 5946 { 5947 struct rte_eth_dev *dev; 5948 int ret; 5949 5950 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5951 dev = &rte_eth_devices[port_id]; 5952 5953 if (timestamp == NULL) { 5954 RTE_ETHDEV_LOG(ERR, 5955 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5956 port_id); 5957 return -EINVAL; 5958 } 5959 5960 if (*dev->dev_ops->timesync_read_rx_timestamp == NULL) 5961 return -ENOTSUP; 5962 5963 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5964 (dev, timestamp, flags)); 5965 5966 rte_eth_trace_timesync_read_rx_timestamp(port_id, timestamp, flags, 5967 ret); 5968 5969 return ret; 5970 } 5971 5972 int 5973 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5974 struct timespec *timestamp) 5975 { 5976 struct rte_eth_dev *dev; 5977 int ret; 5978 5979 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5980 dev = &rte_eth_devices[port_id]; 5981 5982 if (timestamp == NULL) { 5983 RTE_ETHDEV_LOG(ERR, 5984 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5985 port_id); 5986 return -EINVAL; 5987 } 5988 5989 if (*dev->dev_ops->timesync_read_tx_timestamp == NULL) 5990 return -ENOTSUP; 5991 5992 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5993 (dev, timestamp)); 5994 5995 rte_eth_trace_timesync_read_tx_timestamp(port_id, timestamp, ret); 5996 5997 return ret; 5998 5999 } 6000 6001 int 6002 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 6003 { 6004 struct rte_eth_dev *dev; 6005 int ret; 6006 6007 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6008 dev = &rte_eth_devices[port_id]; 6009 6010 if (*dev->dev_ops->timesync_adjust_time == NULL) 6011 return -ENOTSUP; 6012 ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 6013 6014 rte_eth_trace_timesync_adjust_time(port_id, delta, ret); 6015 6016 return ret; 6017 } 6018 6019 int 6020 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 6021 { 6022 struct rte_eth_dev *dev; 6023 int ret; 6024 6025 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6026 dev = &rte_eth_devices[port_id]; 6027 6028 if (timestamp == NULL) { 6029 RTE_ETHDEV_LOG(ERR, 6030 "Cannot read ethdev port %u timesync time to NULL\n", 6031 port_id); 6032 return -EINVAL; 6033 } 6034 6035 if (*dev->dev_ops->timesync_read_time == NULL) 6036 return -ENOTSUP; 6037 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 6038 timestamp)); 6039 6040 rte_eth_trace_timesync_read_time(port_id, timestamp, ret); 6041 6042 return ret; 6043 } 6044 6045 int 6046 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 6047 { 6048 struct rte_eth_dev *dev; 6049 int ret; 6050 6051 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6052 dev = &rte_eth_devices[port_id]; 6053 6054 if (timestamp == NULL) { 6055 RTE_ETHDEV_LOG(ERR, 6056 "Cannot write ethdev port %u timesync from NULL time\n", 6057 port_id); 6058 return -EINVAL; 6059 } 6060 6061 if (*dev->dev_ops->timesync_write_time == NULL) 6062 return -ENOTSUP; 6063 ret = eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 6064 timestamp)); 6065 6066 rte_eth_trace_timesync_write_time(port_id, timestamp, ret); 6067 6068 return ret; 6069 } 6070 6071 int 6072 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 6073 { 6074 struct rte_eth_dev *dev; 6075 int ret; 6076 6077 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 
-ENODEV); 6078 dev = &rte_eth_devices[port_id]; 6079 6080 if (clock == NULL) { 6081 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 6082 port_id); 6083 return -EINVAL; 6084 } 6085 6086 if (*dev->dev_ops->read_clock == NULL) 6087 return -ENOTSUP; 6088 ret = eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 6089 6090 rte_eth_trace_read_clock(port_id, clock, ret); 6091 6092 return ret; 6093 } 6094 6095 int 6096 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 6097 { 6098 struct rte_eth_dev *dev; 6099 int ret; 6100 6101 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6102 dev = &rte_eth_devices[port_id]; 6103 6104 if (info == NULL) { 6105 RTE_ETHDEV_LOG(ERR, 6106 "Cannot get ethdev port %u register info to NULL\n", 6107 port_id); 6108 return -EINVAL; 6109 } 6110 6111 if (*dev->dev_ops->get_reg == NULL) 6112 return -ENOTSUP; 6113 ret = eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 6114 6115 rte_ethdev_trace_get_reg_info(port_id, info, ret); 6116 6117 return ret; 6118 } 6119 6120 int 6121 rte_eth_dev_get_eeprom_length(uint16_t port_id) 6122 { 6123 struct rte_eth_dev *dev; 6124 int ret; 6125 6126 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6127 dev = &rte_eth_devices[port_id]; 6128 6129 if (*dev->dev_ops->get_eeprom_length == NULL) 6130 return -ENOTSUP; 6131 ret = eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 6132 6133 rte_ethdev_trace_get_eeprom_length(port_id, ret); 6134 6135 return ret; 6136 } 6137 6138 int 6139 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 6140 { 6141 struct rte_eth_dev *dev; 6142 int ret; 6143 6144 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6145 dev = &rte_eth_devices[port_id]; 6146 6147 if (info == NULL) { 6148 RTE_ETHDEV_LOG(ERR, 6149 "Cannot get ethdev port %u EEPROM info to NULL\n", 6150 port_id); 6151 return -EINVAL; 6152 } 6153 6154 if (*dev->dev_ops->get_eeprom == NULL) 6155 return -ENOTSUP; 6156 ret = eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 6157 6158 rte_ethdev_trace_get_eeprom(port_id, info, ret); 6159 6160 return ret; 6161 } 6162 6163 int 6164 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 6165 { 6166 struct rte_eth_dev *dev; 6167 int ret; 6168 6169 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6170 dev = &rte_eth_devices[port_id]; 6171 6172 if (info == NULL) { 6173 RTE_ETHDEV_LOG(ERR, 6174 "Cannot set ethdev port %u EEPROM from NULL info\n", 6175 port_id); 6176 return -EINVAL; 6177 } 6178 6179 if (*dev->dev_ops->set_eeprom == NULL) 6180 return -ENOTSUP; 6181 ret = eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 6182 6183 rte_ethdev_trace_set_eeprom(port_id, info, ret); 6184 6185 return ret; 6186 } 6187 6188 int 6189 rte_eth_dev_get_module_info(uint16_t port_id, 6190 struct rte_eth_dev_module_info *modinfo) 6191 { 6192 struct rte_eth_dev *dev; 6193 int ret; 6194 6195 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6196 dev = &rte_eth_devices[port_id]; 6197 6198 if (modinfo == NULL) { 6199 RTE_ETHDEV_LOG(ERR, 6200 "Cannot get ethdev port %u EEPROM module info to NULL\n", 6201 port_id); 6202 return -EINVAL; 6203 } 6204 6205 if (*dev->dev_ops->get_module_info == NULL) 6206 return -ENOTSUP; 6207 ret = (*dev->dev_ops->get_module_info)(dev, modinfo); 6208 6209 rte_ethdev_trace_get_module_info(port_id, modinfo, ret); 6210 6211 return ret; 6212 } 6213 6214 int 6215 rte_eth_dev_get_module_eeprom(uint16_t port_id, 6216 struct rte_dev_eeprom_info *info) 6217 { 6218 struct rte_eth_dev *dev; 6219 
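/*
 * Typical call sequence (illustrative sketch, error handling omitted):
 * query the module type and EEPROM size first, then read the contents.
 *
 *	struct rte_eth_dev_module_info minfo;
 *	struct rte_dev_eeprom_info einfo = {0};
 *
 *	rte_eth_dev_get_module_info(port_id, &minfo);
 *	einfo.offset = 0;
 *	einfo.length = minfo.eeprom_len;
 *	einfo.data = malloc(minfo.eeprom_len);
 *	rte_eth_dev_get_module_eeprom(port_id, &einfo);
 */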
int ret; 6220 6221 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6222 dev = &rte_eth_devices[port_id]; 6223 6224 if (info == NULL) { 6225 RTE_ETHDEV_LOG(ERR, 6226 "Cannot get ethdev port %u module EEPROM info to NULL\n", 6227 port_id); 6228 return -EINVAL; 6229 } 6230 6231 if (info->data == NULL) { 6232 RTE_ETHDEV_LOG(ERR, 6233 "Cannot get ethdev port %u module EEPROM data to NULL\n", 6234 port_id); 6235 return -EINVAL; 6236 } 6237 6238 if (info->length == 0) { 6239 RTE_ETHDEV_LOG(ERR, 6240 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 6241 port_id); 6242 return -EINVAL; 6243 } 6244 6245 if (*dev->dev_ops->get_module_eeprom == NULL) 6246 return -ENOTSUP; 6247 ret = (*dev->dev_ops->get_module_eeprom)(dev, info); 6248 6249 rte_ethdev_trace_get_module_eeprom(port_id, info, ret); 6250 6251 return ret; 6252 } 6253 6254 int 6255 rte_eth_dev_get_dcb_info(uint16_t port_id, 6256 struct rte_eth_dcb_info *dcb_info) 6257 { 6258 struct rte_eth_dev *dev; 6259 int ret; 6260 6261 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6262 dev = &rte_eth_devices[port_id]; 6263 6264 if (dcb_info == NULL) { 6265 RTE_ETHDEV_LOG(ERR, 6266 "Cannot get ethdev port %u DCB info to NULL\n", 6267 port_id); 6268 return -EINVAL; 6269 } 6270 6271 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 6272 6273 if (*dev->dev_ops->get_dcb_info == NULL) 6274 return -ENOTSUP; 6275 ret = eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 6276 6277 rte_ethdev_trace_get_dcb_info(port_id, dcb_info, ret); 6278 6279 return ret; 6280 } 6281 6282 static void 6283 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 6284 const struct rte_eth_desc_lim *desc_lim) 6285 { 6286 if (desc_lim->nb_align != 0) 6287 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 6288 6289 if (desc_lim->nb_max != 0) 6290 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 6291 6292 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 6293 } 6294 6295 int 6296 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 6297 uint16_t *nb_rx_desc, 6298 uint16_t *nb_tx_desc) 6299 { 6300 struct rte_eth_dev_info dev_info; 6301 int ret; 6302 6303 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6304 6305 ret = rte_eth_dev_info_get(port_id, &dev_info); 6306 if (ret != 0) 6307 return ret; 6308 6309 if (nb_rx_desc != NULL) 6310 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 6311 6312 if (nb_tx_desc != NULL) 6313 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 6314 6315 rte_ethdev_trace_adjust_nb_rx_tx_desc(port_id); 6316 6317 return 0; 6318 } 6319 6320 int 6321 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 6322 struct rte_eth_hairpin_cap *cap) 6323 { 6324 struct rte_eth_dev *dev; 6325 int ret; 6326 6327 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6328 dev = &rte_eth_devices[port_id]; 6329 6330 if (cap == NULL) { 6331 RTE_ETHDEV_LOG(ERR, 6332 "Cannot get ethdev port %u hairpin capability to NULL\n", 6333 port_id); 6334 return -EINVAL; 6335 } 6336 6337 if (*dev->dev_ops->hairpin_cap_get == NULL) 6338 return -ENOTSUP; 6339 memset(cap, 0, sizeof(*cap)); 6340 ret = eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 6341 6342 rte_ethdev_trace_hairpin_capability_get(port_id, cap, ret); 6343 6344 return ret; 6345 } 6346 6347 int 6348 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 6349 { 6350 struct rte_eth_dev *dev; 6351 int ret; 6352 6353 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6354 dev = &rte_eth_devices[port_id]; 6355 6356 if (pool == NULL) { 6357 RTE_ETHDEV_LOG(ERR, 6358 
"Cannot test ethdev port %u mempool operation from NULL pool\n", 6359 port_id); 6360 return -EINVAL; 6361 } 6362 6363 if (*dev->dev_ops->pool_ops_supported == NULL) 6364 return 1; /* all pools are supported */ 6365 6366 ret = (*dev->dev_ops->pool_ops_supported)(dev, pool); 6367 6368 rte_ethdev_trace_pool_ops_supported(port_id, pool, ret); 6369 6370 return ret; 6371 } 6372 6373 static int 6374 eth_dev_handle_port_list(const char *cmd __rte_unused, 6375 const char *params __rte_unused, 6376 struct rte_tel_data *d) 6377 { 6378 int port_id; 6379 6380 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 6381 RTE_ETH_FOREACH_DEV(port_id) 6382 rte_tel_data_add_array_int(d, port_id); 6383 return 0; 6384 } 6385 6386 static void 6387 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 6388 const char *stat_name) 6389 { 6390 int q; 6391 struct rte_tel_data *q_data = rte_tel_data_alloc(); 6392 if (q_data == NULL) 6393 return; 6394 rte_tel_data_start_array(q_data, RTE_TEL_UINT_VAL); 6395 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 6396 rte_tel_data_add_array_uint(q_data, q_stats[q]); 6397 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 6398 } 6399 6400 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_uint(d, #s, stats.s) 6401 6402 static int 6403 eth_dev_handle_port_stats(const char *cmd __rte_unused, 6404 const char *params, 6405 struct rte_tel_data *d) 6406 { 6407 struct rte_eth_stats stats; 6408 int port_id, ret; 6409 6410 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6411 return -1; 6412 6413 port_id = atoi(params); 6414 if (!rte_eth_dev_is_valid_port(port_id)) 6415 return -1; 6416 6417 ret = rte_eth_stats_get(port_id, &stats); 6418 if (ret < 0) 6419 return -1; 6420 6421 rte_tel_data_start_dict(d); 6422 ADD_DICT_STAT(stats, ipackets); 6423 ADD_DICT_STAT(stats, opackets); 6424 ADD_DICT_STAT(stats, ibytes); 6425 ADD_DICT_STAT(stats, obytes); 6426 ADD_DICT_STAT(stats, imissed); 6427 ADD_DICT_STAT(stats, ierrors); 6428 ADD_DICT_STAT(stats, oerrors); 6429 ADD_DICT_STAT(stats, rx_nombuf); 6430 eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets"); 6431 eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets"); 6432 eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes"); 6433 eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes"); 6434 eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors"); 6435 6436 return 0; 6437 } 6438 6439 static int 6440 eth_dev_parse_hide_zero(const char *key, const char *value, void *extra_args) 6441 { 6442 RTE_SET_USED(key); 6443 6444 if (value == NULL) 6445 return -1; 6446 6447 if (strcmp(value, "true") == 0) 6448 *(bool *)extra_args = true; 6449 else if (strcmp(value, "false") == 0) 6450 *(bool *)extra_args = false; 6451 else 6452 return -1; 6453 6454 return 0; 6455 } 6456 6457 static int 6458 eth_dev_handle_port_xstats(const char *cmd __rte_unused, 6459 const char *params, 6460 struct rte_tel_data *d) 6461 { 6462 const char *const valid_keys[] = { "hide_zero", NULL }; 6463 struct rte_eth_xstat *eth_xstats; 6464 struct rte_eth_xstat_name *xstat_names; 6465 struct rte_kvargs *kvlist; 6466 int port_id, num_xstats; 6467 bool hide_zero = false; 6468 char *end_param; 6469 int i, ret; 6470 6471 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6472 return -1; 6473 6474 port_id = strtoul(params, &end_param, 0); 6475 if (!rte_eth_dev_is_valid_port(port_id)) 6476 return -1; 6477 6478 if (*end_param != '\0') { 6479 kvlist = rte_kvargs_parse(end_param, valid_keys); 6480 ret = 
static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	const char *const valid_keys[] = { "hide_zero", NULL };
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	struct rte_kvargs *kvlist;
	int port_id, num_xstats;
	bool hide_zero = false;
	char *end_param;
	int i, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	if (*end_param != '\0') {
		kvlist = rte_kvargs_parse(end_param, valid_keys);
		ret = rte_kvargs_process(kvlist, NULL,
				eth_dev_parse_hide_zero, &hide_zero);
		if (kvlist == NULL || ret != 0)
			RTE_ETHDEV_LOG(NOTICE,
				"Unknown extra parameters passed to ethdev telemetry command, ignoring\n");
		rte_kvargs_free(kvlist);
	}

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++) {
		if (hide_zero && eth_xstats[i].value == 0)
			continue;
		rte_tel_data_add_dict_uint(d, xstat_names[i].name,
				eth_xstats[i].value);
	}
	free(eth_xstats);
	return 0;
}

#ifndef RTE_EXEC_ENV_WINDOWS
static int
eth_dev_handle_port_dump_priv(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	char *buf, *end_param;
	int port_id, ret;
	FILE *f;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -EINVAL;

	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
	if (buf == NULL)
		return -ENOMEM;

	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
	if (f == NULL) {
		free(buf);
		return -EINVAL;
	}

	ret = rte_eth_dev_priv_dump(port_id, f);
	fclose(f);
	if (ret == 0) {
		rte_tel_data_start_dict(d);
		rte_tel_data_string(d, buf);
	}

	free(buf);
	return 0;
}
#endif /* !RTE_EXEC_ENV_WINDOWS */
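
/*
 * Illustrative only: eth_dev_handle_port_xstats() above accepts an
 * optional kvargs suffix after the port number, e.g.
 *
 *	--> /ethdev/xstats,0,hide_zero=true
 *
 * Values other than "true"/"false" make eth_dev_parse_hide_zero() fail,
 * which is logged as a NOTICE and otherwise ignored.
 */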
6596 "full-duplex" : "half-duplex"); 6597 return 0; 6598 } 6599 6600 static int 6601 eth_dev_handle_port_info(const char *cmd __rte_unused, 6602 const char *params, 6603 struct rte_tel_data *d) 6604 { 6605 struct rte_tel_data *rxq_state, *txq_state; 6606 char mac_addr[RTE_ETHER_ADDR_FMT_SIZE]; 6607 struct rte_eth_dev *eth_dev; 6608 char *end_param; 6609 int port_id, i; 6610 6611 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6612 return -1; 6613 6614 port_id = strtoul(params, &end_param, 0); 6615 if (*end_param != '\0') 6616 RTE_ETHDEV_LOG(NOTICE, 6617 "Extra parameters passed to ethdev telemetry command, ignoring"); 6618 6619 if (!rte_eth_dev_is_valid_port(port_id)) 6620 return -EINVAL; 6621 6622 eth_dev = &rte_eth_devices[port_id]; 6623 6624 rxq_state = rte_tel_data_alloc(); 6625 if (!rxq_state) 6626 return -ENOMEM; 6627 6628 txq_state = rte_tel_data_alloc(); 6629 if (!txq_state) { 6630 rte_tel_data_free(rxq_state); 6631 return -ENOMEM; 6632 } 6633 6634 rte_tel_data_start_dict(d); 6635 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 6636 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 6637 rte_tel_data_add_dict_int(d, "nb_rx_queues", 6638 eth_dev->data->nb_rx_queues); 6639 rte_tel_data_add_dict_int(d, "nb_tx_queues", 6640 eth_dev->data->nb_tx_queues); 6641 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 6642 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 6643 rte_tel_data_add_dict_uint(d, "rx_mbuf_size_min", 6644 eth_dev->data->min_rx_buf_size); 6645 rte_ether_format_addr(mac_addr, sizeof(mac_addr), 6646 eth_dev->data->mac_addrs); 6647 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 6648 rte_tel_data_add_dict_int(d, "promiscuous", 6649 eth_dev->data->promiscuous); 6650 rte_tel_data_add_dict_int(d, "scattered_rx", 6651 eth_dev->data->scattered_rx); 6652 rte_tel_data_add_dict_int(d, "all_multicast", 6653 eth_dev->data->all_multicast); 6654 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 6655 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 6656 rte_tel_data_add_dict_int(d, "dev_configured", 6657 eth_dev->data->dev_configured); 6658 6659 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 6660 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 6661 rte_tel_data_add_array_int(rxq_state, 6662 eth_dev->data->rx_queue_state[i]); 6663 6664 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 6665 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 6666 rte_tel_data_add_array_int(txq_state, 6667 eth_dev->data->tx_queue_state[i]); 6668 6669 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 6670 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 6671 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 6672 rte_tel_data_add_dict_uint_hex(d, "dev_flags", 6673 eth_dev->data->dev_flags, 0); 6674 rte_tel_data_add_dict_uint_hex(d, "rx_offloads", 6675 eth_dev->data->dev_conf.rxmode.offloads, 0); 6676 rte_tel_data_add_dict_uint_hex(d, "tx_offloads", 6677 eth_dev->data->dev_conf.txmode.offloads, 0); 6678 rte_tel_data_add_dict_uint_hex(d, "ethdev_rss_hf", 6679 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf, 0); 6680 6681 return 0; 6682 } 6683 6684 int 6685 rte_eth_representor_info_get(uint16_t port_id, 6686 struct rte_eth_representor_info *info) 6687 { 6688 struct rte_eth_dev *dev; 6689 int ret; 6690 6691 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6692 dev = &rte_eth_devices[port_id]; 6693 6694 if (*dev->dev_ops->representor_info_get == NULL) 6695 
int
rte_eth_representor_info_get(uint16_t port_id,
		struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->representor_info_get == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));

	rte_eth_trace_representor_info_get(port_id, info, ret);

	return ret;
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (ID=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->rx_metadata_negotiate == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		(*dev->dev_ops->rx_metadata_negotiate)(dev, features));

	rte_eth_trace_rx_metadata_negotiate(port_id, *features, ret);

	return ret;
}

int
rte_eth_ip_reassembly_capability_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *reassembly_capa)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot get IP reassembly capability\n",
			port_id);
		return -EINVAL;
	}

	if (reassembly_capa == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get reassembly capability to NULL\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
		return -ENOTSUP;
	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));

	ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
			(dev, reassembly_capa));

	rte_eth_trace_ip_reassembly_capability_get(port_id, reassembly_capa,
			ret);

	return ret;
}

int
rte_eth_ip_reassembly_conf_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot get IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
		return -ENOTSUP;
	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
	ret = eth_err(port_id,
		(*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_get(port_id, conf, ret);

	return ret;
}
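
/*
 * Illustrative usage sketch: the intended sequence is capability query,
 * then configuration, after rte_eth_dev_configure() but before
 * rte_eth_dev_start(), as enforced by the checks in the surrounding
 * functions. The timeout value below is an arbitrary example:
 *
 *	struct rte_eth_ip_reassembly_params capa;
 *
 *	if (rte_eth_ip_reassembly_capability_get(port_id, &capa) == 0) {
 *		struct rte_eth_ip_reassembly_params conf = capa;
 *
 *		conf.timeout_ms = 100;	// tighten from the device maximum
 *		(void)rte_eth_ip_reassembly_conf_set(port_id, &conf);
 *	}
 */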
6825 "cannot configure IP reassembly params.\n", 6826 port_id); 6827 return -EINVAL; 6828 } 6829 6830 if (conf == NULL) { 6831 RTE_ETHDEV_LOG(ERR, 6832 "Invalid IP reassembly configuration (NULL)\n"); 6833 return -EINVAL; 6834 } 6835 6836 if (*dev->dev_ops->ip_reassembly_conf_set == NULL) 6837 return -ENOTSUP; 6838 ret = eth_err(port_id, 6839 (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf)); 6840 6841 rte_eth_trace_ip_reassembly_conf_set(port_id, conf, ret); 6842 6843 return ret; 6844 } 6845 6846 int 6847 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file) 6848 { 6849 struct rte_eth_dev *dev; 6850 6851 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6852 dev = &rte_eth_devices[port_id]; 6853 6854 if (file == NULL) { 6855 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 6856 return -EINVAL; 6857 } 6858 6859 if (*dev->dev_ops->eth_dev_priv_dump == NULL) 6860 return -ENOTSUP; 6861 return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file)); 6862 } 6863 6864 int 6865 rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, 6866 uint16_t offset, uint16_t num, FILE *file) 6867 { 6868 struct rte_eth_dev *dev; 6869 6870 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6871 dev = &rte_eth_devices[port_id]; 6872 6873 if (queue_id >= dev->data->nb_rx_queues) { 6874 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 6875 return -EINVAL; 6876 } 6877 6878 if (file == NULL) { 6879 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 6880 return -EINVAL; 6881 } 6882 6883 if (*dev->dev_ops->eth_rx_descriptor_dump == NULL) 6884 return -ENOTSUP; 6885 6886 return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev, 6887 queue_id, offset, num, file)); 6888 } 6889 6890 int 6891 rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, 6892 uint16_t offset, uint16_t num, FILE *file) 6893 { 6894 struct rte_eth_dev *dev; 6895 6896 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6897 dev = &rte_eth_devices[port_id]; 6898 6899 if (queue_id >= dev->data->nb_tx_queues) { 6900 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 6901 return -EINVAL; 6902 } 6903 6904 if (file == NULL) { 6905 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 6906 return -EINVAL; 6907 } 6908 6909 if (*dev->dev_ops->eth_tx_descriptor_dump == NULL) 6910 return -ENOTSUP; 6911 6912 return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev, 6913 queue_id, offset, num, file)); 6914 } 6915 6916 int 6917 rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num) 6918 { 6919 int i, j; 6920 struct rte_eth_dev *dev; 6921 const uint32_t *all_types; 6922 6923 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6924 dev = &rte_eth_devices[port_id]; 6925 6926 if (ptypes == NULL && num > 0) { 6927 RTE_ETHDEV_LOG(ERR, 6928 "Cannot get ethdev port %u supported header protocol types to NULL when array size is non zero\n", 6929 port_id); 6930 return -EINVAL; 6931 } 6932 6933 if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL) 6934 return -ENOTSUP; 6935 all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev); 6936 6937 if (all_types == NULL) 6938 return 0; 6939 6940 for (i = 0, j = 0; all_types[i] != RTE_PTYPE_UNKNOWN; ++i) { 6941 if (j < num) { 6942 ptypes[j] = all_types[i]; 6943 6944 rte_eth_trace_buffer_split_get_supported_hdr_ptypes( 6945 port_id, j, ptypes[j]); 6946 } 6947 j++; 6948 } 6949 6950 return j; 6951 } 6952 6953 int rte_eth_dev_count_aggr_ports(uint16_t port_id) 6954 { 6955 struct rte_eth_dev *dev; 6956 int ret; 6957 6958 
int rte_eth_dev_count_aggr_ports(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->count_aggr_ports == NULL)
		return 0;
	ret = eth_err(port_id, (*dev->dev_ops->count_aggr_ports)(dev));

	rte_eth_trace_count_aggr_ports(port_id, ret);

	return ret;
}

int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
		uint8_t affinity)
{
	struct rte_eth_dev *dev;
	int aggr_ports;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->map_aggr_tx_affinity == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be configured before Tx affinity mapping\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	aggr_ports = rte_eth_dev_count_aggr_ports(port_id);
	if (aggr_ports == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u has no aggregated port\n",
			port_id);
		return -ENOTSUP;
	}

	if (affinity > aggr_ports) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u: affinity %u exceeds the number of aggregated ports %u\n",
			port_id, affinity, aggr_ports);
		return -EINVAL;
	}

	ret = eth_err(port_id, (*dev->dev_ops->map_aggr_tx_affinity)(dev,
			tx_queue_id, affinity));

	rte_eth_trace_map_aggr_tx_affinity(port_id, tx_queue_id, affinity, ret);

	return ret;
}

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id, hide_zero=true|false (optional, hides zero-valued xstats)");
#ifndef RTE_EXEC_ENV_WINDOWS
	rte_telemetry_register_cmd("/ethdev/dump_priv", eth_dev_handle_port_dump_priv,
			"Returns the private information dump for a port. Parameters: int port_id");
#endif
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
			"Returns the device info for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/module_eeprom", eth_dev_handle_port_module_eeprom,
			"Returns module EEPROM info with SFF specs. Parameters: int port_id");
}
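
/*
 * Note (illustrative): RTE_INIT() runs the registrations above from a
 * constructor at shared object load time, so the commands are in place
 * before the application reaches main(). They become reachable once
 * rte_eal_init() has created the telemetry socket (unless telemetry is
 * disabled), for example:
 *
 *	--> /ethdev/module_eeprom,0
 *
 * which reports the plugged module's EEPROM contents decoded per the
 * relevant SFF specification (see sff_telemetry.h).
 */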