1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2017 Intel Corporation 3 */ 4 5 #include <ctype.h> 6 #include <errno.h> 7 #include <inttypes.h> 8 #include <stdbool.h> 9 #include <stdint.h> 10 #include <stdio.h> 11 #include <stdlib.h> 12 #include <string.h> 13 #include <sys/queue.h> 14 15 #include <bus_driver.h> 16 #include <rte_log.h> 17 #include <rte_interrupts.h> 18 #include <rte_kvargs.h> 19 #include <rte_memcpy.h> 20 #include <rte_common.h> 21 #include <rte_mempool.h> 22 #include <rte_malloc.h> 23 #include <rte_mbuf.h> 24 #include <rte_errno.h> 25 #include <rte_spinlock.h> 26 #include <rte_string_fns.h> 27 #include <rte_class.h> 28 #include <rte_ether.h> 29 #include <rte_telemetry.h> 30 31 #include "rte_ethdev.h" 32 #include "rte_ethdev_trace_fp.h" 33 #include "ethdev_driver.h" 34 #include "ethdev_profile.h" 35 #include "ethdev_private.h" 36 #include "ethdev_trace.h" 37 #include "sff_telemetry.h" 38 39 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS]; 40 41 /* public fast-path API */ 42 struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS]; 43 44 /* spinlock for add/remove Rx callbacks */ 45 static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER; 46 47 /* spinlock for add/remove Tx callbacks */ 48 static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER; 49 50 /* store statistics names and its offset in stats structure */ 51 struct rte_eth_xstats_name_off { 52 char name[RTE_ETH_XSTATS_NAME_SIZE]; 53 unsigned offset; 54 }; 55 56 static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = { 57 {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)}, 58 {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)}, 59 {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)}, 60 {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)}, 61 {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)}, 62 {"rx_errors", offsetof(struct rte_eth_stats, ierrors)}, 63 {"tx_errors", offsetof(struct rte_eth_stats, oerrors)}, 64 {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats, 65 rx_nombuf)}, 66 }; 67 68 #define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings) 69 70 static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = { 71 {"packets", offsetof(struct rte_eth_stats, q_ipackets)}, 72 {"bytes", offsetof(struct rte_eth_stats, q_ibytes)}, 73 {"errors", offsetof(struct rte_eth_stats, q_errors)}, 74 }; 75 76 #define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings) 77 78 static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = { 79 {"packets", offsetof(struct rte_eth_stats, q_opackets)}, 80 {"bytes", offsetof(struct rte_eth_stats, q_obytes)}, 81 }; 82 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings) 83 84 #define RTE_RX_OFFLOAD_BIT2STR(_name) \ 85 { RTE_ETH_RX_OFFLOAD_##_name, #_name } 86 87 static const struct { 88 uint64_t offload; 89 const char *name; 90 } eth_dev_rx_offload_names[] = { 91 RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP), 92 RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM), 93 RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM), 94 RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM), 95 RTE_RX_OFFLOAD_BIT2STR(TCP_LRO), 96 RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP), 97 RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), 98 RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP), 99 RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER), 100 RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND), 101 RTE_RX_OFFLOAD_BIT2STR(SCATTER), 102 RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP), 103 RTE_RX_OFFLOAD_BIT2STR(SECURITY), 104 RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC), 105 RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM), 106 RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM), 107 RTE_RX_OFFLOAD_BIT2STR(RSS_HASH), 108 RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT), 109 }; 110 111 #undef RTE_RX_OFFLOAD_BIT2STR 112 #undef RTE_ETH_RX_OFFLOAD_BIT2STR 113 114 #define RTE_TX_OFFLOAD_BIT2STR(_name) \ 115 { RTE_ETH_TX_OFFLOAD_##_name, #_name } 116 117 static const struct { 118 uint64_t offload; 119 const char *name; 120 } eth_dev_tx_offload_names[] = { 121 RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT), 122 RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM), 123 RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM), 124 RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM), 125 RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM), 126 RTE_TX_OFFLOAD_BIT2STR(TCP_TSO), 127 RTE_TX_OFFLOAD_BIT2STR(UDP_TSO), 128 RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), 129 RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT), 130 RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO), 131 RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO), 132 RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO), 133 RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO), 134 RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT), 135 RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE), 136 RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS), 137 RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE), 138 RTE_TX_OFFLOAD_BIT2STR(SECURITY), 139 RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO), 140 RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO), 141 RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM), 142 RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP), 143 }; 144 145 #undef RTE_TX_OFFLOAD_BIT2STR 146 147 static const struct { 148 uint64_t offload; 149 const char *name; 150 } rte_eth_dev_capa_names[] = { 151 {RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"}, 152 {RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"}, 153 {RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"}, 154 {RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"}, 155 {RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"}, 156 }; 157 158 enum { 159 STAT_QMAP_TX = 0, 160 STAT_QMAP_RX 161 }; 162 163 int 164 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str) 165 { 166 int ret; 167 struct rte_devargs devargs; 168 const char *bus_param_key; 169 char *bus_str = NULL; 170 char *cls_str = NULL; 171 int str_size; 172 173 if (iter == NULL) { 174 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n"); 175 return -EINVAL; 176 } 177 178 if (devargs_str == NULL) { 179 RTE_ETHDEV_LOG(ERR, 180 "Cannot initialize iterator from NULL device description string\n"); 181 return -EINVAL; 182 } 183 184 memset(iter, 0, sizeof(*iter)); 185 memset(&devargs, 0, sizeof(devargs)); 186 187 /* 188 * The devargs string may use various syntaxes: 189 * - 0000:08:00.0,representor=[1-3] 190 * - pci:0000:06:00.0,representor=[0,5] 191 * - class=eth,mac=00:11:22:33:44:55 192 * - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z 193 */ 194 195 /* 196 * Handle pure class filter (i.e. without any bus-level argument), 197 * from future new syntax. 198 * rte_devargs_parse() is not yet supporting the new syntax, 199 * that's why this simple case is temporarily parsed here. 200 */ 201 #define iter_anybus_str "class=eth," 202 if (strncmp(devargs_str, iter_anybus_str, 203 strlen(iter_anybus_str)) == 0) { 204 iter->cls_str = devargs_str + strlen(iter_anybus_str); 205 goto end; 206 } 207 208 /* Split bus, device and parameters. */ 209 ret = rte_devargs_parse(&devargs, devargs_str); 210 if (ret != 0) 211 goto error; 212 213 /* 214 * Assume parameters of old syntax can match only at ethdev level. 215 * Extra parameters will be ignored, thanks to "+" prefix. 216 */ 217 str_size = strlen(devargs.args) + 2; 218 cls_str = malloc(str_size); 219 if (cls_str == NULL) { 220 ret = -ENOMEM; 221 goto error; 222 } 223 ret = snprintf(cls_str, str_size, "+%s", devargs.args); 224 if (ret != str_size - 1) { 225 ret = -EINVAL; 226 goto error; 227 } 228 iter->cls_str = cls_str; 229 230 iter->bus = devargs.bus; 231 if (iter->bus->dev_iterate == NULL) { 232 ret = -ENOTSUP; 233 goto error; 234 } 235 236 /* Convert bus args to new syntax for use with new API dev_iterate. */ 237 if ((strcmp(iter->bus->name, "vdev") == 0) || 238 (strcmp(iter->bus->name, "fslmc") == 0) || 239 (strcmp(iter->bus->name, "dpaa_bus") == 0)) { 240 bus_param_key = "name"; 241 } else if (strcmp(iter->bus->name, "pci") == 0) { 242 bus_param_key = "addr"; 243 } else { 244 ret = -ENOTSUP; 245 goto error; 246 } 247 str_size = strlen(bus_param_key) + strlen(devargs.name) + 2; 248 bus_str = malloc(str_size); 249 if (bus_str == NULL) { 250 ret = -ENOMEM; 251 goto error; 252 } 253 ret = snprintf(bus_str, str_size, "%s=%s", 254 bus_param_key, devargs.name); 255 if (ret != str_size - 1) { 256 ret = -EINVAL; 257 goto error; 258 } 259 iter->bus_str = bus_str; 260 261 end: 262 iter->cls = rte_class_find_by_name("eth"); 263 rte_devargs_reset(&devargs); 264 265 rte_eth_trace_iterator_init(devargs_str); 266 267 return 0; 268 269 error: 270 if (ret == -ENOTSUP) 271 RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n", 272 iter->bus->name); 273 rte_devargs_reset(&devargs); 274 free(bus_str); 275 free(cls_str); 276 return ret; 277 } 278 279 uint16_t 280 rte_eth_iterator_next(struct rte_dev_iterator *iter) 281 { 282 if (iter == NULL) { 283 RTE_ETHDEV_LOG(ERR, 284 "Cannot get next device from NULL iterator\n"); 285 return RTE_MAX_ETHPORTS; 286 } 287 288 if (iter->cls == NULL) /* invalid ethdev iterator */ 289 return RTE_MAX_ETHPORTS; 290 291 do { /* loop to try all matching rte_device */ 292 /* If not pure ethdev filter and */ 293 if (iter->bus != NULL && 294 /* not in middle of rte_eth_dev iteration, */ 295 iter->class_device == NULL) { 296 /* get next rte_device to try. */ 297 iter->device = iter->bus->dev_iterate( 298 iter->device, iter->bus_str, iter); 299 if (iter->device == NULL) 300 break; /* no more rte_device candidate */ 301 } 302 /* A device is matching bus part, need to check ethdev part. */ 303 iter->class_device = iter->cls->dev_iterate( 304 iter->class_device, iter->cls_str, iter); 305 if (iter->class_device != NULL) { 306 uint16_t id = eth_dev_to_id(iter->class_device); 307 308 rte_eth_trace_iterator_next(iter, id); 309 310 return id; /* match */ 311 } 312 } while (iter->bus != NULL); /* need to try next rte_device */ 313 314 /* No more ethdev port to iterate. */ 315 rte_eth_iterator_cleanup(iter); 316 return RTE_MAX_ETHPORTS; 317 } 318 319 void 320 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter) 321 { 322 if (iter == NULL) { 323 RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n"); 324 return; 325 } 326 327 if (iter->bus_str == NULL) 328 return; /* nothing to free in pure class filter */ 329 330 rte_eth_trace_iterator_cleanup(iter); 331 332 free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */ 333 free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */ 334 memset(iter, 0, sizeof(*iter)); 335 } 336 337 uint16_t 338 rte_eth_find_next(uint16_t port_id) 339 { 340 while (port_id < RTE_MAX_ETHPORTS && 341 rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED) 342 port_id++; 343 344 if (port_id >= RTE_MAX_ETHPORTS) 345 return RTE_MAX_ETHPORTS; 346 347 rte_eth_trace_find_next(port_id); 348 349 return port_id; 350 } 351 352 /* 353 * Macro to iterate over all valid ports for internal usage. 354 * Note: RTE_ETH_FOREACH_DEV is different because filtering owned ports. 355 */ 356 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \ 357 for (port_id = rte_eth_find_next(0); \ 358 port_id < RTE_MAX_ETHPORTS; \ 359 port_id = rte_eth_find_next(port_id + 1)) 360 361 uint16_t 362 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent) 363 { 364 port_id = rte_eth_find_next(port_id); 365 while (port_id < RTE_MAX_ETHPORTS && 366 rte_eth_devices[port_id].device != parent) 367 port_id = rte_eth_find_next(port_id + 1); 368 369 rte_eth_trace_find_next_of(port_id, parent); 370 371 return port_id; 372 } 373 374 uint16_t 375 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id) 376 { 377 uint16_t ret; 378 379 RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS); 380 ret = rte_eth_find_next_of(port_id, 381 rte_eth_devices[ref_port_id].device); 382 383 rte_eth_trace_find_next_sibling(port_id, ref_port_id, ret); 384 385 return ret; 386 } 387 388 static bool 389 eth_dev_is_allocated(const struct rte_eth_dev *ethdev) 390 { 391 return ethdev->data->name[0] != '\0'; 392 } 393 394 int 395 rte_eth_dev_is_valid_port(uint16_t port_id) 396 { 397 int is_valid; 398 399 if (port_id >= RTE_MAX_ETHPORTS || 400 (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)) 401 is_valid = 0; 402 else 403 is_valid = 1; 404 405 rte_ethdev_trace_is_valid_port(port_id, is_valid); 406 407 return is_valid; 408 } 409 410 static int 411 eth_is_valid_owner_id(uint64_t owner_id) 412 { 413 if (owner_id == RTE_ETH_DEV_NO_OWNER || 414 eth_dev_shared_data->next_owner_id <= owner_id) 415 return 0; 416 return 1; 417 } 418 419 uint64_t 420 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id) 421 { 422 port_id = rte_eth_find_next(port_id); 423 while (port_id < RTE_MAX_ETHPORTS && 424 rte_eth_devices[port_id].data->owner.id != owner_id) 425 port_id = rte_eth_find_next(port_id + 1); 426 427 rte_eth_trace_find_next_owned_by(port_id, owner_id); 428 429 return port_id; 430 } 431 432 int 433 rte_eth_dev_owner_new(uint64_t *owner_id) 434 { 435 if (owner_id == NULL) { 436 RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n"); 437 return -EINVAL; 438 } 439 440 eth_dev_shared_data_prepare(); 441 442 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 443 444 *owner_id = eth_dev_shared_data->next_owner_id++; 445 446 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 447 448 rte_ethdev_trace_owner_new(*owner_id); 449 450 return 0; 451 } 452 453 static int 454 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id, 455 const struct rte_eth_dev_owner *new_owner) 456 { 457 struct rte_eth_dev *ethdev = &rte_eth_devices[port_id]; 458 struct rte_eth_dev_owner *port_owner; 459 460 if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) { 461 RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n", 462 port_id); 463 return -ENODEV; 464 } 465 466 if (new_owner == NULL) { 467 RTE_ETHDEV_LOG(ERR, 468 "Cannot set ethdev port %u owner from NULL owner\n", 469 port_id); 470 return -EINVAL; 471 } 472 473 if (!eth_is_valid_owner_id(new_owner->id) && 474 !eth_is_valid_owner_id(old_owner_id)) { 475 RTE_ETHDEV_LOG(ERR, 476 "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n", 477 old_owner_id, new_owner->id); 478 return -EINVAL; 479 } 480 481 port_owner = &rte_eth_devices[port_id].data->owner; 482 if (port_owner->id != old_owner_id) { 483 RTE_ETHDEV_LOG(ERR, 484 "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n", 485 port_id, port_owner->name, port_owner->id); 486 return -EPERM; 487 } 488 489 /* can not truncate (same structure) */ 490 strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN); 491 492 port_owner->id = new_owner->id; 493 494 RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n", 495 port_id, new_owner->name, new_owner->id); 496 497 return 0; 498 } 499 500 int 501 rte_eth_dev_owner_set(const uint16_t port_id, 502 const struct rte_eth_dev_owner *owner) 503 { 504 int ret; 505 506 eth_dev_shared_data_prepare(); 507 508 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 509 510 ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner); 511 512 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 513 514 rte_ethdev_trace_owner_set(port_id, owner, ret); 515 516 return ret; 517 } 518 519 int 520 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id) 521 { 522 const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner) 523 {.id = RTE_ETH_DEV_NO_OWNER, .name = ""}; 524 int ret; 525 526 eth_dev_shared_data_prepare(); 527 528 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 529 530 ret = eth_dev_owner_set(port_id, owner_id, &new_owner); 531 532 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 533 534 rte_ethdev_trace_owner_unset(port_id, owner_id, ret); 535 536 return ret; 537 } 538 539 int 540 rte_eth_dev_owner_delete(const uint64_t owner_id) 541 { 542 uint16_t port_id; 543 int ret = 0; 544 545 eth_dev_shared_data_prepare(); 546 547 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 548 549 if (eth_is_valid_owner_id(owner_id)) { 550 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) { 551 struct rte_eth_dev_data *data = 552 rte_eth_devices[port_id].data; 553 if (data != NULL && data->owner.id == owner_id) 554 memset(&data->owner, 0, 555 sizeof(struct rte_eth_dev_owner)); 556 } 557 RTE_ETHDEV_LOG(NOTICE, 558 "All port owners owned by %016"PRIx64" identifier have removed\n", 559 owner_id); 560 } else { 561 RTE_ETHDEV_LOG(ERR, 562 "Invalid owner ID=%016"PRIx64"\n", 563 owner_id); 564 ret = -EINVAL; 565 } 566 567 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 568 569 rte_ethdev_trace_owner_delete(owner_id, ret); 570 571 return ret; 572 } 573 574 int 575 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner) 576 { 577 struct rte_eth_dev *ethdev; 578 579 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 580 ethdev = &rte_eth_devices[port_id]; 581 582 if (!eth_dev_is_allocated(ethdev)) { 583 RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n", 584 port_id); 585 return -ENODEV; 586 } 587 588 if (owner == NULL) { 589 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n", 590 port_id); 591 return -EINVAL; 592 } 593 594 eth_dev_shared_data_prepare(); 595 596 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 597 rte_memcpy(owner, ðdev->data->owner, sizeof(*owner)); 598 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 599 600 rte_ethdev_trace_owner_get(port_id, owner); 601 602 return 0; 603 } 604 605 int 606 rte_eth_dev_socket_id(uint16_t port_id) 607 { 608 int socket_id = SOCKET_ID_ANY; 609 610 if (!rte_eth_dev_is_valid_port(port_id)) { 611 rte_errno = EINVAL; 612 } else { 613 socket_id = rte_eth_devices[port_id].data->numa_node; 614 if (socket_id == SOCKET_ID_ANY) 615 rte_errno = 0; 616 } 617 618 rte_ethdev_trace_socket_id(port_id, socket_id); 619 620 return socket_id; 621 } 622 623 void * 624 rte_eth_dev_get_sec_ctx(uint16_t port_id) 625 { 626 void *ctx; 627 628 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL); 629 ctx = rte_eth_devices[port_id].security_ctx; 630 631 rte_ethdev_trace_get_sec_ctx(port_id, ctx); 632 633 return ctx; 634 } 635 636 uint16_t 637 rte_eth_dev_count_avail(void) 638 { 639 uint16_t p; 640 uint16_t count; 641 642 count = 0; 643 644 RTE_ETH_FOREACH_DEV(p) 645 count++; 646 647 rte_ethdev_trace_count_avail(count); 648 649 return count; 650 } 651 652 uint16_t 653 rte_eth_dev_count_total(void) 654 { 655 uint16_t port, count = 0; 656 657 RTE_ETH_FOREACH_VALID_DEV(port) 658 count++; 659 660 rte_ethdev_trace_count_total(count); 661 662 return count; 663 } 664 665 int 666 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name) 667 { 668 char *tmp; 669 670 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 671 672 if (name == NULL) { 673 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n", 674 port_id); 675 return -EINVAL; 676 } 677 678 /* shouldn't check 'rte_eth_devices[i].data', 679 * because it might be overwritten by VDEV PMD */ 680 tmp = eth_dev_shared_data->data[port_id].name; 681 strcpy(name, tmp); 682 683 rte_ethdev_trace_get_name_by_port(port_id, name); 684 685 return 0; 686 } 687 688 int 689 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) 690 { 691 uint16_t pid; 692 693 if (name == NULL) { 694 RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name"); 695 return -EINVAL; 696 } 697 698 if (port_id == NULL) { 699 RTE_ETHDEV_LOG(ERR, 700 "Cannot get port ID to NULL for %s\n", name); 701 return -EINVAL; 702 } 703 704 RTE_ETH_FOREACH_VALID_DEV(pid) 705 if (!strcmp(name, eth_dev_shared_data->data[pid].name)) { 706 *port_id = pid; 707 708 rte_ethdev_trace_get_port_by_name(name, *port_id); 709 710 return 0; 711 } 712 713 return -ENODEV; 714 } 715 716 int 717 eth_err(uint16_t port_id, int ret) 718 { 719 if (ret == 0) 720 return 0; 721 if (rte_eth_dev_is_removed(port_id)) 722 return -EIO; 723 return ret; 724 } 725 726 static int 727 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id) 728 { 729 uint16_t port_id; 730 731 if (rx_queue_id >= dev->data->nb_rx_queues) { 732 port_id = dev->data->port_id; 733 RTE_ETHDEV_LOG(ERR, 734 "Invalid Rx queue_id=%u of device with port_id=%u\n", 735 rx_queue_id, port_id); 736 return -EINVAL; 737 } 738 739 if (dev->data->rx_queues[rx_queue_id] == NULL) { 740 port_id = dev->data->port_id; 741 RTE_ETHDEV_LOG(ERR, 742 "Queue %u of device with port_id=%u has not been setup\n", 743 rx_queue_id, port_id); 744 return -EINVAL; 745 } 746 747 return 0; 748 } 749 750 static int 751 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id) 752 { 753 uint16_t port_id; 754 755 if (tx_queue_id >= dev->data->nb_tx_queues) { 756 port_id = dev->data->port_id; 757 RTE_ETHDEV_LOG(ERR, 758 "Invalid Tx queue_id=%u of device with port_id=%u\n", 759 tx_queue_id, port_id); 760 return -EINVAL; 761 } 762 763 if (dev->data->tx_queues[tx_queue_id] == NULL) { 764 port_id = dev->data->port_id; 765 RTE_ETHDEV_LOG(ERR, 766 "Queue %u of device with port_id=%u has not been setup\n", 767 tx_queue_id, port_id); 768 return -EINVAL; 769 } 770 771 return 0; 772 } 773 774 int 775 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id) 776 { 777 struct rte_eth_dev *dev; 778 int ret; 779 780 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 781 dev = &rte_eth_devices[port_id]; 782 783 if (!dev->data->dev_started) { 784 RTE_ETHDEV_LOG(ERR, 785 "Port %u must be started before start any queue\n", 786 port_id); 787 return -EINVAL; 788 } 789 790 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 791 if (ret != 0) 792 return ret; 793 794 if (*dev->dev_ops->rx_queue_start == NULL) 795 return -ENOTSUP; 796 797 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 798 RTE_ETHDEV_LOG(INFO, 799 "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 800 rx_queue_id, port_id); 801 return -EINVAL; 802 } 803 804 if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 805 RTE_ETHDEV_LOG(INFO, 806 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 807 rx_queue_id, port_id); 808 return 0; 809 } 810 811 ret = eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id)); 812 813 rte_ethdev_trace_rx_queue_start(port_id, rx_queue_id, ret); 814 815 return ret; 816 } 817 818 int 819 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id) 820 { 821 struct rte_eth_dev *dev; 822 int ret; 823 824 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 825 dev = &rte_eth_devices[port_id]; 826 827 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 828 if (ret != 0) 829 return ret; 830 831 if (*dev->dev_ops->rx_queue_stop == NULL) 832 return -ENOTSUP; 833 834 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 835 RTE_ETHDEV_LOG(INFO, 836 "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 837 rx_queue_id, port_id); 838 return -EINVAL; 839 } 840 841 if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 842 RTE_ETHDEV_LOG(INFO, 843 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 844 rx_queue_id, port_id); 845 return 0; 846 } 847 848 ret = eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id)); 849 850 rte_ethdev_trace_rx_queue_stop(port_id, rx_queue_id, ret); 851 852 return ret; 853 } 854 855 int 856 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id) 857 { 858 struct rte_eth_dev *dev; 859 int ret; 860 861 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 862 dev = &rte_eth_devices[port_id]; 863 864 if (!dev->data->dev_started) { 865 RTE_ETHDEV_LOG(ERR, 866 "Port %u must be started before start any queue\n", 867 port_id); 868 return -EINVAL; 869 } 870 871 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 872 if (ret != 0) 873 return ret; 874 875 if (*dev->dev_ops->tx_queue_start == NULL) 876 return -ENOTSUP; 877 878 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 879 RTE_ETHDEV_LOG(INFO, 880 "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 881 tx_queue_id, port_id); 882 return -EINVAL; 883 } 884 885 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 886 RTE_ETHDEV_LOG(INFO, 887 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 888 tx_queue_id, port_id); 889 return 0; 890 } 891 892 ret = eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id)); 893 894 rte_ethdev_trace_tx_queue_start(port_id, tx_queue_id, ret); 895 896 return ret; 897 } 898 899 int 900 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id) 901 { 902 struct rte_eth_dev *dev; 903 int ret; 904 905 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 906 dev = &rte_eth_devices[port_id]; 907 908 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 909 if (ret != 0) 910 return ret; 911 912 if (*dev->dev_ops->tx_queue_stop == NULL) 913 return -ENOTSUP; 914 915 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 916 RTE_ETHDEV_LOG(INFO, 917 "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 918 tx_queue_id, port_id); 919 return -EINVAL; 920 } 921 922 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 923 RTE_ETHDEV_LOG(INFO, 924 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 925 tx_queue_id, port_id); 926 return 0; 927 } 928 929 ret = eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); 930 931 rte_ethdev_trace_tx_queue_stop(port_id, tx_queue_id, ret); 932 933 return ret; 934 } 935 936 uint32_t 937 rte_eth_speed_bitflag(uint32_t speed, int duplex) 938 { 939 uint32_t ret; 940 941 switch (speed) { 942 case RTE_ETH_SPEED_NUM_10M: 943 ret = duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD; 944 break; 945 case RTE_ETH_SPEED_NUM_100M: 946 ret = duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD; 947 break; 948 case RTE_ETH_SPEED_NUM_1G: 949 ret = RTE_ETH_LINK_SPEED_1G; 950 break; 951 case RTE_ETH_SPEED_NUM_2_5G: 952 ret = RTE_ETH_LINK_SPEED_2_5G; 953 break; 954 case RTE_ETH_SPEED_NUM_5G: 955 ret = RTE_ETH_LINK_SPEED_5G; 956 break; 957 case RTE_ETH_SPEED_NUM_10G: 958 ret = RTE_ETH_LINK_SPEED_10G; 959 break; 960 case RTE_ETH_SPEED_NUM_20G: 961 ret = RTE_ETH_LINK_SPEED_20G; 962 break; 963 case RTE_ETH_SPEED_NUM_25G: 964 ret = RTE_ETH_LINK_SPEED_25G; 965 break; 966 case RTE_ETH_SPEED_NUM_40G: 967 ret = RTE_ETH_LINK_SPEED_40G; 968 break; 969 case RTE_ETH_SPEED_NUM_50G: 970 ret = RTE_ETH_LINK_SPEED_50G; 971 break; 972 case RTE_ETH_SPEED_NUM_56G: 973 ret = RTE_ETH_LINK_SPEED_56G; 974 break; 975 case RTE_ETH_SPEED_NUM_100G: 976 ret = RTE_ETH_LINK_SPEED_100G; 977 break; 978 case RTE_ETH_SPEED_NUM_200G: 979 ret = RTE_ETH_LINK_SPEED_200G; 980 break; 981 default: 982 ret = 0; 983 } 984 985 rte_eth_trace_speed_bitflag(speed, duplex, ret); 986 987 return ret; 988 } 989 990 const char * 991 rte_eth_dev_rx_offload_name(uint64_t offload) 992 { 993 const char *name = "UNKNOWN"; 994 unsigned int i; 995 996 for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) { 997 if (offload == eth_dev_rx_offload_names[i].offload) { 998 name = eth_dev_rx_offload_names[i].name; 999 break; 1000 } 1001 } 1002 1003 rte_ethdev_trace_rx_offload_name(offload, name); 1004 1005 return name; 1006 } 1007 1008 const char * 1009 rte_eth_dev_tx_offload_name(uint64_t offload) 1010 { 1011 const char *name = "UNKNOWN"; 1012 unsigned int i; 1013 1014 for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) { 1015 if (offload == eth_dev_tx_offload_names[i].offload) { 1016 name = eth_dev_tx_offload_names[i].name; 1017 break; 1018 } 1019 } 1020 1021 rte_ethdev_trace_tx_offload_name(offload, name); 1022 1023 return name; 1024 } 1025 1026 const char * 1027 rte_eth_dev_capability_name(uint64_t capability) 1028 { 1029 const char *name = "UNKNOWN"; 1030 unsigned int i; 1031 1032 for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) { 1033 if (capability == rte_eth_dev_capa_names[i].offload) { 1034 name = rte_eth_dev_capa_names[i].name; 1035 break; 1036 } 1037 } 1038 1039 rte_ethdev_trace_capability_name(capability, name); 1040 1041 return name; 1042 } 1043 1044 static inline int 1045 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size, 1046 uint32_t max_rx_pkt_len, uint32_t dev_info_size) 1047 { 1048 int ret = 0; 1049 1050 if (dev_info_size == 0) { 1051 if (config_size != max_rx_pkt_len) { 1052 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size" 1053 " %u != %u is not allowed\n", 1054 port_id, config_size, max_rx_pkt_len); 1055 ret = -EINVAL; 1056 } 1057 } else if (config_size > dev_info_size) { 1058 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1059 "> max allowed value %u\n", port_id, config_size, 1060 dev_info_size); 1061 ret = -EINVAL; 1062 } else if (config_size < RTE_ETHER_MIN_LEN) { 1063 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1064 "< min allowed value %u\n", port_id, config_size, 1065 (unsigned int)RTE_ETHER_MIN_LEN); 1066 ret = -EINVAL; 1067 } 1068 return ret; 1069 } 1070 1071 /* 1072 * Validate offloads that are requested through rte_eth_dev_configure against 1073 * the offloads successfully set by the Ethernet device. 1074 * 1075 * @param port_id 1076 * The port identifier of the Ethernet device. 1077 * @param req_offloads 1078 * The offloads that have been requested through `rte_eth_dev_configure`. 1079 * @param set_offloads 1080 * The offloads successfully set by the Ethernet device. 1081 * @param offload_type 1082 * The offload type i.e. Rx/Tx string. 1083 * @param offload_name 1084 * The function that prints the offload name. 1085 * @return 1086 * - (0) if validation successful. 1087 * - (-EINVAL) if requested offload has been silently disabled. 1088 * 1089 */ 1090 static int 1091 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads, 1092 uint64_t set_offloads, const char *offload_type, 1093 const char *(*offload_name)(uint64_t)) 1094 { 1095 uint64_t offloads_diff = req_offloads ^ set_offloads; 1096 uint64_t offload; 1097 int ret = 0; 1098 1099 while (offloads_diff != 0) { 1100 /* Check if any offload is requested but not enabled. */ 1101 offload = RTE_BIT64(__builtin_ctzll(offloads_diff)); 1102 if (offload & req_offloads) { 1103 RTE_ETHDEV_LOG(ERR, 1104 "Port %u failed to enable %s offload %s\n", 1105 port_id, offload_type, offload_name(offload)); 1106 ret = -EINVAL; 1107 } 1108 1109 /* Check if offload couldn't be disabled. */ 1110 if (offload & set_offloads) { 1111 RTE_ETHDEV_LOG(DEBUG, 1112 "Port %u %s offload %s is not requested but enabled\n", 1113 port_id, offload_type, offload_name(offload)); 1114 } 1115 1116 offloads_diff &= ~offload; 1117 } 1118 1119 return ret; 1120 } 1121 1122 static uint32_t 1123 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1124 { 1125 uint32_t overhead_len; 1126 1127 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1128 overhead_len = max_rx_pktlen - max_mtu; 1129 else 1130 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1131 1132 return overhead_len; 1133 } 1134 1135 /* rte_eth_dev_info_get() should be called prior to this function */ 1136 static int 1137 eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info, 1138 uint16_t mtu) 1139 { 1140 uint32_t overhead_len; 1141 uint32_t frame_size; 1142 1143 if (mtu < dev_info->min_mtu) { 1144 RTE_ETHDEV_LOG(ERR, 1145 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1146 mtu, dev_info->min_mtu, port_id); 1147 return -EINVAL; 1148 } 1149 if (mtu > dev_info->max_mtu) { 1150 RTE_ETHDEV_LOG(ERR, 1151 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1152 mtu, dev_info->max_mtu, port_id); 1153 return -EINVAL; 1154 } 1155 1156 overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen, 1157 dev_info->max_mtu); 1158 frame_size = mtu + overhead_len; 1159 if (frame_size < RTE_ETHER_MIN_LEN) { 1160 RTE_ETHDEV_LOG(ERR, 1161 "Frame size (%u) < min frame size (%u) for port_id %u\n", 1162 frame_size, RTE_ETHER_MIN_LEN, port_id); 1163 return -EINVAL; 1164 } 1165 1166 if (frame_size > dev_info->max_rx_pktlen) { 1167 RTE_ETHDEV_LOG(ERR, 1168 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1169 frame_size, dev_info->max_rx_pktlen, port_id); 1170 return -EINVAL; 1171 } 1172 1173 return 0; 1174 } 1175 1176 int 1177 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, 1178 const struct rte_eth_conf *dev_conf) 1179 { 1180 struct rte_eth_dev *dev; 1181 struct rte_eth_dev_info dev_info; 1182 struct rte_eth_conf orig_conf; 1183 int diag; 1184 int ret; 1185 uint16_t old_mtu; 1186 1187 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1188 dev = &rte_eth_devices[port_id]; 1189 1190 if (dev_conf == NULL) { 1191 RTE_ETHDEV_LOG(ERR, 1192 "Cannot configure ethdev port %u from NULL config\n", 1193 port_id); 1194 return -EINVAL; 1195 } 1196 1197 if (*dev->dev_ops->dev_configure == NULL) 1198 return -ENOTSUP; 1199 1200 if (dev->data->dev_started) { 1201 RTE_ETHDEV_LOG(ERR, 1202 "Port %u must be stopped to allow configuration\n", 1203 port_id); 1204 return -EBUSY; 1205 } 1206 1207 /* 1208 * Ensure that "dev_configured" is always 0 each time prepare to do 1209 * dev_configure() to avoid any non-anticipated behaviour. 1210 * And set to 1 when dev_configure() is executed successfully. 1211 */ 1212 dev->data->dev_configured = 0; 1213 1214 /* Store original config, as rollback required on failure */ 1215 memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf)); 1216 1217 /* 1218 * Copy the dev_conf parameter into the dev structure. 1219 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get 1220 */ 1221 if (dev_conf != &dev->data->dev_conf) 1222 memcpy(&dev->data->dev_conf, dev_conf, 1223 sizeof(dev->data->dev_conf)); 1224 1225 /* Backup mtu for rollback */ 1226 old_mtu = dev->data->mtu; 1227 1228 ret = rte_eth_dev_info_get(port_id, &dev_info); 1229 if (ret != 0) 1230 goto rollback; 1231 1232 /* If number of queues specified by application for both Rx and Tx is 1233 * zero, use driver preferred values. This cannot be done individually 1234 * as it is valid for either Tx or Rx (but not both) to be zero. 1235 * If driver does not provide any preferred valued, fall back on 1236 * EAL defaults. 1237 */ 1238 if (nb_rx_q == 0 && nb_tx_q == 0) { 1239 nb_rx_q = dev_info.default_rxportconf.nb_queues; 1240 if (nb_rx_q == 0) 1241 nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES; 1242 nb_tx_q = dev_info.default_txportconf.nb_queues; 1243 if (nb_tx_q == 0) 1244 nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES; 1245 } 1246 1247 if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) { 1248 RTE_ETHDEV_LOG(ERR, 1249 "Number of Rx queues requested (%u) is greater than max supported(%d)\n", 1250 nb_rx_q, RTE_MAX_QUEUES_PER_PORT); 1251 ret = -EINVAL; 1252 goto rollback; 1253 } 1254 1255 if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) { 1256 RTE_ETHDEV_LOG(ERR, 1257 "Number of Tx queues requested (%u) is greater than max supported(%d)\n", 1258 nb_tx_q, RTE_MAX_QUEUES_PER_PORT); 1259 ret = -EINVAL; 1260 goto rollback; 1261 } 1262 1263 /* 1264 * Check that the numbers of Rx and Tx queues are not greater 1265 * than the maximum number of Rx and Tx queues supported by the 1266 * configured device. 1267 */ 1268 if (nb_rx_q > dev_info.max_rx_queues) { 1269 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n", 1270 port_id, nb_rx_q, dev_info.max_rx_queues); 1271 ret = -EINVAL; 1272 goto rollback; 1273 } 1274 1275 if (nb_tx_q > dev_info.max_tx_queues) { 1276 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n", 1277 port_id, nb_tx_q, dev_info.max_tx_queues); 1278 ret = -EINVAL; 1279 goto rollback; 1280 } 1281 1282 /* Check that the device supports requested interrupts */ 1283 if ((dev_conf->intr_conf.lsc == 1) && 1284 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { 1285 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n", 1286 dev->device->driver->name); 1287 ret = -EINVAL; 1288 goto rollback; 1289 } 1290 if ((dev_conf->intr_conf.rmv == 1) && 1291 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { 1292 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n", 1293 dev->device->driver->name); 1294 ret = -EINVAL; 1295 goto rollback; 1296 } 1297 1298 if (dev_conf->rxmode.mtu == 0) 1299 dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU; 1300 1301 ret = eth_dev_validate_mtu(port_id, &dev_info, 1302 dev->data->dev_conf.rxmode.mtu); 1303 if (ret != 0) 1304 goto rollback; 1305 1306 dev->data->mtu = dev->data->dev_conf.rxmode.mtu; 1307 1308 /* 1309 * If LRO is enabled, check that the maximum aggregated packet 1310 * size is supported by the configured device. 1311 */ 1312 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 1313 uint32_t max_rx_pktlen; 1314 uint32_t overhead_len; 1315 1316 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1317 dev_info.max_mtu); 1318 max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len; 1319 if (dev_conf->rxmode.max_lro_pkt_size == 0) 1320 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 1321 ret = eth_dev_check_lro_pkt_size(port_id, 1322 dev->data->dev_conf.rxmode.max_lro_pkt_size, 1323 max_rx_pktlen, 1324 dev_info.max_lro_pkt_size); 1325 if (ret != 0) 1326 goto rollback; 1327 } 1328 1329 /* Any requested offloading must be within its device capabilities */ 1330 if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) != 1331 dev_conf->rxmode.offloads) { 1332 RTE_ETHDEV_LOG(ERR, 1333 "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads " 1334 "capabilities 0x%"PRIx64" in %s()\n", 1335 port_id, dev_conf->rxmode.offloads, 1336 dev_info.rx_offload_capa, 1337 __func__); 1338 ret = -EINVAL; 1339 goto rollback; 1340 } 1341 if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) != 1342 dev_conf->txmode.offloads) { 1343 RTE_ETHDEV_LOG(ERR, 1344 "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads " 1345 "capabilities 0x%"PRIx64" in %s()\n", 1346 port_id, dev_conf->txmode.offloads, 1347 dev_info.tx_offload_capa, 1348 __func__); 1349 ret = -EINVAL; 1350 goto rollback; 1351 } 1352 1353 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 1354 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf); 1355 1356 /* Check that device supports requested rss hash functions. */ 1357 if ((dev_info.flow_type_rss_offloads | 1358 dev_conf->rx_adv_conf.rss_conf.rss_hf) != 1359 dev_info.flow_type_rss_offloads) { 1360 RTE_ETHDEV_LOG(ERR, 1361 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 1362 port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf, 1363 dev_info.flow_type_rss_offloads); 1364 ret = -EINVAL; 1365 goto rollback; 1366 } 1367 1368 /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */ 1369 if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) && 1370 (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) { 1371 RTE_ETHDEV_LOG(ERR, 1372 "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n", 1373 port_id, 1374 rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH)); 1375 ret = -EINVAL; 1376 goto rollback; 1377 } 1378 1379 /* 1380 * Setup new number of Rx/Tx queues and reconfigure device. 1381 */ 1382 diag = eth_dev_rx_queue_config(dev, nb_rx_q); 1383 if (diag != 0) { 1384 RTE_ETHDEV_LOG(ERR, 1385 "Port%u eth_dev_rx_queue_config = %d\n", 1386 port_id, diag); 1387 ret = diag; 1388 goto rollback; 1389 } 1390 1391 diag = eth_dev_tx_queue_config(dev, nb_tx_q); 1392 if (diag != 0) { 1393 RTE_ETHDEV_LOG(ERR, 1394 "Port%u eth_dev_tx_queue_config = %d\n", 1395 port_id, diag); 1396 eth_dev_rx_queue_config(dev, 0); 1397 ret = diag; 1398 goto rollback; 1399 } 1400 1401 diag = (*dev->dev_ops->dev_configure)(dev); 1402 if (diag != 0) { 1403 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n", 1404 port_id, diag); 1405 ret = eth_err(port_id, diag); 1406 goto reset_queues; 1407 } 1408 1409 /* Initialize Rx profiling if enabled at compilation time. */ 1410 diag = __rte_eth_dev_profile_init(port_id, dev); 1411 if (diag != 0) { 1412 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n", 1413 port_id, diag); 1414 ret = eth_err(port_id, diag); 1415 goto reset_queues; 1416 } 1417 1418 /* Validate Rx offloads. */ 1419 diag = eth_dev_validate_offloads(port_id, 1420 dev_conf->rxmode.offloads, 1421 dev->data->dev_conf.rxmode.offloads, "Rx", 1422 rte_eth_dev_rx_offload_name); 1423 if (diag != 0) { 1424 ret = diag; 1425 goto reset_queues; 1426 } 1427 1428 /* Validate Tx offloads. */ 1429 diag = eth_dev_validate_offloads(port_id, 1430 dev_conf->txmode.offloads, 1431 dev->data->dev_conf.txmode.offloads, "Tx", 1432 rte_eth_dev_tx_offload_name); 1433 if (diag != 0) { 1434 ret = diag; 1435 goto reset_queues; 1436 } 1437 1438 dev->data->dev_configured = 1; 1439 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0); 1440 return 0; 1441 reset_queues: 1442 eth_dev_rx_queue_config(dev, 0); 1443 eth_dev_tx_queue_config(dev, 0); 1444 rollback: 1445 memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf)); 1446 if (old_mtu != dev->data->mtu) 1447 dev->data->mtu = old_mtu; 1448 1449 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret); 1450 return ret; 1451 } 1452 1453 static void 1454 eth_dev_mac_restore(struct rte_eth_dev *dev, 1455 struct rte_eth_dev_info *dev_info) 1456 { 1457 struct rte_ether_addr *addr; 1458 uint16_t i; 1459 uint32_t pool = 0; 1460 uint64_t pool_mask; 1461 1462 /* replay MAC address configuration including default MAC */ 1463 addr = &dev->data->mac_addrs[0]; 1464 if (*dev->dev_ops->mac_addr_set != NULL) 1465 (*dev->dev_ops->mac_addr_set)(dev, addr); 1466 else if (*dev->dev_ops->mac_addr_add != NULL) 1467 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); 1468 1469 if (*dev->dev_ops->mac_addr_add != NULL) { 1470 for (i = 1; i < dev_info->max_mac_addrs; i++) { 1471 addr = &dev->data->mac_addrs[i]; 1472 1473 /* skip zero address */ 1474 if (rte_is_zero_ether_addr(addr)) 1475 continue; 1476 1477 pool = 0; 1478 pool_mask = dev->data->mac_pool_sel[i]; 1479 1480 do { 1481 if (pool_mask & UINT64_C(1)) 1482 (*dev->dev_ops->mac_addr_add)(dev, 1483 addr, i, pool); 1484 pool_mask >>= 1; 1485 pool++; 1486 } while (pool_mask); 1487 } 1488 } 1489 } 1490 1491 static int 1492 eth_dev_config_restore(struct rte_eth_dev *dev, 1493 struct rte_eth_dev_info *dev_info, uint16_t port_id) 1494 { 1495 int ret; 1496 1497 if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)) 1498 eth_dev_mac_restore(dev, dev_info); 1499 1500 /* replay promiscuous configuration */ 1501 /* 1502 * use callbacks directly since we don't need port_id check and 1503 * would like to bypass the same value set 1504 */ 1505 if (rte_eth_promiscuous_get(port_id) == 1 && 1506 *dev->dev_ops->promiscuous_enable != NULL) { 1507 ret = eth_err(port_id, 1508 (*dev->dev_ops->promiscuous_enable)(dev)); 1509 if (ret != 0 && ret != -ENOTSUP) { 1510 RTE_ETHDEV_LOG(ERR, 1511 "Failed to enable promiscuous mode for device (port %u): %s\n", 1512 port_id, rte_strerror(-ret)); 1513 return ret; 1514 } 1515 } else if (rte_eth_promiscuous_get(port_id) == 0 && 1516 *dev->dev_ops->promiscuous_disable != NULL) { 1517 ret = eth_err(port_id, 1518 (*dev->dev_ops->promiscuous_disable)(dev)); 1519 if (ret != 0 && ret != -ENOTSUP) { 1520 RTE_ETHDEV_LOG(ERR, 1521 "Failed to disable promiscuous mode for device (port %u): %s\n", 1522 port_id, rte_strerror(-ret)); 1523 return ret; 1524 } 1525 } 1526 1527 /* replay all multicast configuration */ 1528 /* 1529 * use callbacks directly since we don't need port_id check and 1530 * would like to bypass the same value set 1531 */ 1532 if (rte_eth_allmulticast_get(port_id) == 1 && 1533 *dev->dev_ops->allmulticast_enable != NULL) { 1534 ret = eth_err(port_id, 1535 (*dev->dev_ops->allmulticast_enable)(dev)); 1536 if (ret != 0 && ret != -ENOTSUP) { 1537 RTE_ETHDEV_LOG(ERR, 1538 "Failed to enable allmulticast mode for device (port %u): %s\n", 1539 port_id, rte_strerror(-ret)); 1540 return ret; 1541 } 1542 } else if (rte_eth_allmulticast_get(port_id) == 0 && 1543 *dev->dev_ops->allmulticast_disable != NULL) { 1544 ret = eth_err(port_id, 1545 (*dev->dev_ops->allmulticast_disable)(dev)); 1546 if (ret != 0 && ret != -ENOTSUP) { 1547 RTE_ETHDEV_LOG(ERR, 1548 "Failed to disable allmulticast mode for device (port %u): %s\n", 1549 port_id, rte_strerror(-ret)); 1550 return ret; 1551 } 1552 } 1553 1554 return 0; 1555 } 1556 1557 int 1558 rte_eth_dev_start(uint16_t port_id) 1559 { 1560 struct rte_eth_dev *dev; 1561 struct rte_eth_dev_info dev_info; 1562 int diag; 1563 int ret, ret_stop; 1564 1565 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1566 dev = &rte_eth_devices[port_id]; 1567 1568 if (*dev->dev_ops->dev_start == NULL) 1569 return -ENOTSUP; 1570 1571 if (dev->data->dev_configured == 0) { 1572 RTE_ETHDEV_LOG(INFO, 1573 "Device with port_id=%"PRIu16" is not configured.\n", 1574 port_id); 1575 return -EINVAL; 1576 } 1577 1578 if (dev->data->dev_started != 0) { 1579 RTE_ETHDEV_LOG(INFO, 1580 "Device with port_id=%"PRIu16" already started\n", 1581 port_id); 1582 return 0; 1583 } 1584 1585 ret = rte_eth_dev_info_get(port_id, &dev_info); 1586 if (ret != 0) 1587 return ret; 1588 1589 /* Lets restore MAC now if device does not support live change */ 1590 if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR) 1591 eth_dev_mac_restore(dev, &dev_info); 1592 1593 diag = (*dev->dev_ops->dev_start)(dev); 1594 if (diag == 0) 1595 dev->data->dev_started = 1; 1596 else 1597 return eth_err(port_id, diag); 1598 1599 ret = eth_dev_config_restore(dev, &dev_info, port_id); 1600 if (ret != 0) { 1601 RTE_ETHDEV_LOG(ERR, 1602 "Error during restoring configuration for device (port %u): %s\n", 1603 port_id, rte_strerror(-ret)); 1604 ret_stop = rte_eth_dev_stop(port_id); 1605 if (ret_stop != 0) { 1606 RTE_ETHDEV_LOG(ERR, 1607 "Failed to stop device (port %u): %s\n", 1608 port_id, rte_strerror(-ret_stop)); 1609 } 1610 1611 return ret; 1612 } 1613 1614 if (dev->data->dev_conf.intr_conf.lsc == 0) { 1615 if (*dev->dev_ops->link_update == NULL) 1616 return -ENOTSUP; 1617 (*dev->dev_ops->link_update)(dev, 0); 1618 } 1619 1620 /* expose selection of PMD fast-path functions */ 1621 eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev); 1622 1623 rte_ethdev_trace_start(port_id); 1624 return 0; 1625 } 1626 1627 int 1628 rte_eth_dev_stop(uint16_t port_id) 1629 { 1630 struct rte_eth_dev *dev; 1631 int ret; 1632 1633 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1634 dev = &rte_eth_devices[port_id]; 1635 1636 if (*dev->dev_ops->dev_stop == NULL) 1637 return -ENOTSUP; 1638 1639 if (dev->data->dev_started == 0) { 1640 RTE_ETHDEV_LOG(INFO, 1641 "Device with port_id=%"PRIu16" already stopped\n", 1642 port_id); 1643 return 0; 1644 } 1645 1646 /* point fast-path functions to dummy ones */ 1647 eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id); 1648 1649 ret = (*dev->dev_ops->dev_stop)(dev); 1650 if (ret == 0) 1651 dev->data->dev_started = 0; 1652 rte_ethdev_trace_stop(port_id, ret); 1653 1654 return ret; 1655 } 1656 1657 int 1658 rte_eth_dev_set_link_up(uint16_t port_id) 1659 { 1660 struct rte_eth_dev *dev; 1661 int ret; 1662 1663 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1664 dev = &rte_eth_devices[port_id]; 1665 1666 if (*dev->dev_ops->dev_set_link_up == NULL) 1667 return -ENOTSUP; 1668 ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev)); 1669 1670 rte_ethdev_trace_set_link_up(port_id, ret); 1671 1672 return ret; 1673 } 1674 1675 int 1676 rte_eth_dev_set_link_down(uint16_t port_id) 1677 { 1678 struct rte_eth_dev *dev; 1679 int ret; 1680 1681 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1682 dev = &rte_eth_devices[port_id]; 1683 1684 if (*dev->dev_ops->dev_set_link_down == NULL) 1685 return -ENOTSUP; 1686 ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev)); 1687 1688 rte_ethdev_trace_set_link_down(port_id, ret); 1689 1690 return ret; 1691 } 1692 1693 int 1694 rte_eth_dev_close(uint16_t port_id) 1695 { 1696 struct rte_eth_dev *dev; 1697 int firsterr, binerr; 1698 int *lasterr = &firsterr; 1699 1700 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1701 dev = &rte_eth_devices[port_id]; 1702 1703 /* 1704 * Secondary process needs to close device to release process private 1705 * resources. But secondary process should not be obliged to wait 1706 * for device stop before closing ethdev. 1707 */ 1708 if (rte_eal_process_type() == RTE_PROC_PRIMARY && 1709 dev->data->dev_started) { 1710 RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n", 1711 port_id); 1712 return -EINVAL; 1713 } 1714 1715 if (*dev->dev_ops->dev_close == NULL) 1716 return -ENOTSUP; 1717 *lasterr = (*dev->dev_ops->dev_close)(dev); 1718 if (*lasterr != 0) 1719 lasterr = &binerr; 1720 1721 rte_ethdev_trace_close(port_id); 1722 *lasterr = rte_eth_dev_release_port(dev); 1723 1724 return firsterr; 1725 } 1726 1727 int 1728 rte_eth_dev_reset(uint16_t port_id) 1729 { 1730 struct rte_eth_dev *dev; 1731 int ret; 1732 1733 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1734 dev = &rte_eth_devices[port_id]; 1735 1736 if (*dev->dev_ops->dev_reset == NULL) 1737 return -ENOTSUP; 1738 1739 ret = rte_eth_dev_stop(port_id); 1740 if (ret != 0) { 1741 RTE_ETHDEV_LOG(ERR, 1742 "Failed to stop device (port %u) before reset: %s - ignore\n", 1743 port_id, rte_strerror(-ret)); 1744 } 1745 ret = eth_err(port_id, dev->dev_ops->dev_reset(dev)); 1746 1747 rte_ethdev_trace_reset(port_id, ret); 1748 1749 return ret; 1750 } 1751 1752 int 1753 rte_eth_dev_is_removed(uint16_t port_id) 1754 { 1755 struct rte_eth_dev *dev; 1756 int ret; 1757 1758 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); 1759 dev = &rte_eth_devices[port_id]; 1760 1761 if (dev->state == RTE_ETH_DEV_REMOVED) 1762 return 1; 1763 1764 if (*dev->dev_ops->is_removed == NULL) 1765 return 0; 1766 1767 ret = dev->dev_ops->is_removed(dev); 1768 if (ret != 0) 1769 /* Device is physically removed. */ 1770 dev->state = RTE_ETH_DEV_REMOVED; 1771 1772 rte_ethdev_trace_is_removed(port_id, ret); 1773 1774 return ret; 1775 } 1776 1777 static int 1778 rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset, 1779 uint16_t min_length) 1780 { 1781 uint16_t data_room_size; 1782 1783 /* 1784 * Check the size of the mbuf data buffer, this value 1785 * must be provided in the private data of the memory pool. 1786 * First check that the memory pool(s) has a valid private data. 1787 */ 1788 if (mp->private_data_size < 1789 sizeof(struct rte_pktmbuf_pool_private)) { 1790 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 1791 mp->name, mp->private_data_size, 1792 (unsigned int) 1793 sizeof(struct rte_pktmbuf_pool_private)); 1794 return -ENOSPC; 1795 } 1796 data_room_size = rte_pktmbuf_data_room_size(mp); 1797 if (data_room_size < offset + min_length) { 1798 RTE_ETHDEV_LOG(ERR, 1799 "%s mbuf_data_room_size %u < %u (%u + %u)\n", 1800 mp->name, data_room_size, 1801 offset + min_length, offset, min_length); 1802 return -EINVAL; 1803 } 1804 return 0; 1805 } 1806 1807 static int 1808 eth_dev_buffer_split_get_supported_hdrs_helper(uint16_t port_id, uint32_t **ptypes) 1809 { 1810 int cnt; 1811 1812 cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0); 1813 if (cnt <= 0) 1814 return cnt; 1815 1816 *ptypes = malloc(sizeof(uint32_t) * cnt); 1817 if (*ptypes == NULL) 1818 return -ENOMEM; 1819 1820 cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, *ptypes, cnt); 1821 if (cnt <= 0) { 1822 free(*ptypes); 1823 *ptypes = NULL; 1824 } 1825 return cnt; 1826 } 1827 1828 static int 1829 rte_eth_rx_queue_check_split(uint16_t port_id, 1830 const struct rte_eth_rxseg_split *rx_seg, 1831 uint16_t n_seg, uint32_t *mbp_buf_size, 1832 const struct rte_eth_dev_info *dev_info) 1833 { 1834 const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; 1835 struct rte_mempool *mp_first; 1836 uint32_t offset_mask; 1837 uint16_t seg_idx; 1838 int ret = 0; 1839 int ptype_cnt; 1840 uint32_t *ptypes; 1841 uint32_t prev_proto_hdrs = RTE_PTYPE_UNKNOWN; 1842 int i; 1843 1844 if (n_seg > seg_capa->max_nseg) { 1845 RTE_ETHDEV_LOG(ERR, 1846 "Requested Rx segments %u exceed supported %u\n", 1847 n_seg, seg_capa->max_nseg); 1848 return -EINVAL; 1849 } 1850 /* 1851 * Check the sizes and offsets against buffer sizes 1852 * for each segment specified in extended configuration. 1853 */ 1854 mp_first = rx_seg[0].mp; 1855 offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1; 1856 1857 ptypes = NULL; 1858 ptype_cnt = eth_dev_buffer_split_get_supported_hdrs_helper(port_id, &ptypes); 1859 1860 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 1861 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 1862 uint32_t length = rx_seg[seg_idx].length; 1863 uint32_t offset = rx_seg[seg_idx].offset; 1864 uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr; 1865 1866 if (mpl == NULL) { 1867 RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); 1868 ret = -EINVAL; 1869 goto out; 1870 } 1871 if (seg_idx != 0 && mp_first != mpl && 1872 seg_capa->multi_pools == 0) { 1873 RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n"); 1874 ret = -ENOTSUP; 1875 goto out; 1876 } 1877 if (offset != 0) { 1878 if (seg_capa->offset_allowed == 0) { 1879 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n"); 1880 ret = -ENOTSUP; 1881 goto out; 1882 } 1883 if (offset & offset_mask) { 1884 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n", 1885 offset, 1886 seg_capa->offset_align_log2); 1887 ret = -EINVAL; 1888 goto out; 1889 } 1890 } 1891 1892 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 1893 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 1894 if (proto_hdr != 0) { 1895 /* Split based on protocol headers. */ 1896 if (length != 0) { 1897 RTE_ETHDEV_LOG(ERR, 1898 "Do not set length split and protocol split within a segment\n" 1899 ); 1900 ret = -EINVAL; 1901 goto out; 1902 } 1903 if ((proto_hdr & prev_proto_hdrs) != 0) { 1904 RTE_ETHDEV_LOG(ERR, 1905 "Repeat with previous protocol headers or proto-split after length-based split\n" 1906 ); 1907 ret = -EINVAL; 1908 goto out; 1909 } 1910 if (ptype_cnt <= 0) { 1911 RTE_ETHDEV_LOG(ERR, 1912 "Port %u failed to get supported buffer split header protocols\n", 1913 port_id); 1914 ret = -ENOTSUP; 1915 goto out; 1916 } 1917 for (i = 0; i < ptype_cnt; i++) { 1918 if ((prev_proto_hdrs | proto_hdr) == ptypes[i]) 1919 break; 1920 } 1921 if (i == ptype_cnt) { 1922 RTE_ETHDEV_LOG(ERR, 1923 "Requested Rx split header protocols 0x%x is not supported.\n", 1924 proto_hdr); 1925 ret = -EINVAL; 1926 goto out; 1927 } 1928 prev_proto_hdrs |= proto_hdr; 1929 } else { 1930 /* Split at fixed length. */ 1931 length = length != 0 ? length : *mbp_buf_size; 1932 prev_proto_hdrs = RTE_PTYPE_ALL_MASK; 1933 } 1934 1935 ret = rte_eth_check_rx_mempool(mpl, offset, length); 1936 if (ret != 0) 1937 goto out; 1938 } 1939 out: 1940 free(ptypes); 1941 return ret; 1942 } 1943 1944 static int 1945 rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools, 1946 uint16_t n_mempools, uint32_t *min_buf_size, 1947 const struct rte_eth_dev_info *dev_info) 1948 { 1949 uint16_t pool_idx; 1950 int ret; 1951 1952 if (n_mempools > dev_info->max_rx_mempools) { 1953 RTE_ETHDEV_LOG(ERR, 1954 "Too many Rx mempools %u vs maximum %u\n", 1955 n_mempools, dev_info->max_rx_mempools); 1956 return -EINVAL; 1957 } 1958 1959 for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) { 1960 struct rte_mempool *mp = rx_mempools[pool_idx]; 1961 1962 if (mp == NULL) { 1963 RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n"); 1964 return -EINVAL; 1965 } 1966 1967 ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM, 1968 dev_info->min_rx_bufsize); 1969 if (ret != 0) 1970 return ret; 1971 1972 *min_buf_size = RTE_MIN(*min_buf_size, 1973 rte_pktmbuf_data_room_size(mp)); 1974 } 1975 1976 return 0; 1977 } 1978 1979 int 1980 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 1981 uint16_t nb_rx_desc, unsigned int socket_id, 1982 const struct rte_eth_rxconf *rx_conf, 1983 struct rte_mempool *mp) 1984 { 1985 int ret; 1986 uint64_t rx_offloads; 1987 uint32_t mbp_buf_size = UINT32_MAX; 1988 struct rte_eth_dev *dev; 1989 struct rte_eth_dev_info dev_info; 1990 struct rte_eth_rxconf local_conf; 1991 1992 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1993 dev = &rte_eth_devices[port_id]; 1994 1995 if (rx_queue_id >= dev->data->nb_rx_queues) { 1996 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 1997 return -EINVAL; 1998 } 1999 2000 if (*dev->dev_ops->rx_queue_setup == NULL) 2001 return -ENOTSUP; 2002 2003 ret = rte_eth_dev_info_get(port_id, &dev_info); 2004 if (ret != 0) 2005 return ret; 2006 2007 rx_offloads = dev->data->dev_conf.rxmode.offloads; 2008 if (rx_conf != NULL) 2009 rx_offloads |= rx_conf->offloads; 2010 2011 /* Ensure that we have one and only one source of Rx buffers */ 2012 if ((mp != NULL) + 2013 (rx_conf != NULL && rx_conf->rx_nseg > 0) + 2014 (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) { 2015 RTE_ETHDEV_LOG(ERR, 2016 "Ambiguous Rx mempools configuration\n"); 2017 return -EINVAL; 2018 } 2019 2020 if (mp != NULL) { 2021 /* Single pool configuration check. */ 2022 ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM, 2023 dev_info.min_rx_bufsize); 2024 if (ret != 0) 2025 return ret; 2026 2027 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2028 } else if (rx_conf != NULL && rx_conf->rx_nseg > 0) { 2029 const struct rte_eth_rxseg_split *rx_seg; 2030 uint16_t n_seg; 2031 2032 /* Extended multi-segment configuration check. */ 2033 if (rx_conf->rx_seg == NULL) { 2034 RTE_ETHDEV_LOG(ERR, 2035 "Memory pool is null and no multi-segment configuration provided\n"); 2036 return -EINVAL; 2037 } 2038 2039 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2040 n_seg = rx_conf->rx_nseg; 2041 2042 if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2043 ret = rte_eth_rx_queue_check_split(port_id, rx_seg, n_seg, 2044 &mbp_buf_size, 2045 &dev_info); 2046 if (ret != 0) 2047 return ret; 2048 } else { 2049 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2050 return -EINVAL; 2051 } 2052 } else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) { 2053 /* Extended multi-pool configuration check. */ 2054 if (rx_conf->rx_mempools == NULL) { 2055 RTE_ETHDEV_LOG(ERR, "Memory pools array is null\n"); 2056 return -EINVAL; 2057 } 2058 2059 ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools, 2060 rx_conf->rx_nmempool, 2061 &mbp_buf_size, 2062 &dev_info); 2063 if (ret != 0) 2064 return ret; 2065 } else { 2066 RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n"); 2067 return -EINVAL; 2068 } 2069 2070 /* Use default specified by driver, if nb_rx_desc is zero */ 2071 if (nb_rx_desc == 0) { 2072 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2073 /* If driver default is also zero, fall back on EAL default */ 2074 if (nb_rx_desc == 0) 2075 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2076 } 2077 2078 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2079 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2080 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2081 2082 RTE_ETHDEV_LOG(ERR, 2083 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2084 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2085 dev_info.rx_desc_lim.nb_min, 2086 dev_info.rx_desc_lim.nb_align); 2087 return -EINVAL; 2088 } 2089 2090 if (dev->data->dev_started && 2091 !(dev_info.dev_capa & 2092 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2093 return -EBUSY; 2094 2095 if (dev->data->dev_started && 2096 (dev->data->rx_queue_state[rx_queue_id] != 2097 RTE_ETH_QUEUE_STATE_STOPPED)) 2098 return -EBUSY; 2099 2100 eth_dev_rxq_release(dev, rx_queue_id); 2101 2102 if (rx_conf == NULL) 2103 rx_conf = &dev_info.default_rxconf; 2104 2105 local_conf = *rx_conf; 2106 2107 /* 2108 * If an offloading has already been enabled in 2109 * rte_eth_dev_configure(), it has been enabled on all queues, 2110 * so there is no need to enable it in this queue again. 2111 * The local_conf.offloads input to underlying PMD only carries 2112 * those offloadings which are only enabled on this queue and 2113 * not enabled on all queues. 2114 */ 2115 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2116 2117 /* 2118 * New added offloadings for this queue are those not enabled in 2119 * rte_eth_dev_configure() and they must be per-queue type. 2120 * A pure per-port offloading can't be enabled on a queue while 2121 * disabled on another queue. A pure per-port offloading can't 2122 * be enabled for any queue as new added one if it hasn't been 2123 * enabled in rte_eth_dev_configure(). 2124 */ 2125 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2126 local_conf.offloads) { 2127 RTE_ETHDEV_LOG(ERR, 2128 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2129 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2130 port_id, rx_queue_id, local_conf.offloads, 2131 dev_info.rx_queue_offload_capa, 2132 __func__); 2133 return -EINVAL; 2134 } 2135 2136 if (local_conf.share_group > 0 && 2137 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 2138 RTE_ETHDEV_LOG(ERR, 2139 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n", 2140 port_id, rx_queue_id, local_conf.share_group); 2141 return -EINVAL; 2142 } 2143 2144 /* 2145 * If LRO is enabled, check that the maximum aggregated packet 2146 * size is supported by the configured device. 2147 */ 2148 /* Get the real Ethernet overhead length */ 2149 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 2150 uint32_t overhead_len; 2151 uint32_t max_rx_pktlen; 2152 int ret; 2153 2154 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2155 dev_info.max_mtu); 2156 max_rx_pktlen = dev->data->mtu + overhead_len; 2157 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2158 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2159 ret = eth_dev_check_lro_pkt_size(port_id, 2160 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2161 max_rx_pktlen, 2162 dev_info.max_lro_pkt_size); 2163 if (ret != 0) 2164 return ret; 2165 } 2166 2167 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2168 socket_id, &local_conf, mp); 2169 if (!ret) { 2170 if (!dev->data->min_rx_buf_size || 2171 dev->data->min_rx_buf_size > mbp_buf_size) 2172 dev->data->min_rx_buf_size = mbp_buf_size; 2173 } 2174 2175 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2176 rx_conf, ret); 2177 return eth_err(port_id, ret); 2178 } 2179 2180 int 2181 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2182 uint16_t nb_rx_desc, 2183 const struct rte_eth_hairpin_conf *conf) 2184 { 2185 int ret; 2186 struct rte_eth_dev *dev; 2187 struct rte_eth_hairpin_cap cap; 2188 int i; 2189 int count; 2190 2191 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2192 dev = &rte_eth_devices[port_id]; 2193 2194 if (rx_queue_id >= dev->data->nb_rx_queues) { 2195 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2196 return -EINVAL; 2197 } 2198 2199 if (conf == NULL) { 2200 RTE_ETHDEV_LOG(ERR, 2201 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2202 port_id); 2203 return -EINVAL; 2204 } 2205 2206 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2207 if (ret != 0) 2208 return ret; 2209 if (*dev->dev_ops->rx_hairpin_queue_setup == NULL) 2210 return -ENOTSUP; 2211 /* if nb_rx_desc is zero use max number of desc from the driver. */ 2212 if (nb_rx_desc == 0) 2213 nb_rx_desc = cap.max_nb_desc; 2214 if (nb_rx_desc > cap.max_nb_desc) { 2215 RTE_ETHDEV_LOG(ERR, 2216 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu", 2217 nb_rx_desc, cap.max_nb_desc); 2218 return -EINVAL; 2219 } 2220 if (conf->peer_count > cap.max_rx_2_tx) { 2221 RTE_ETHDEV_LOG(ERR, 2222 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu", 2223 conf->peer_count, cap.max_rx_2_tx); 2224 return -EINVAL; 2225 } 2226 if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) { 2227 RTE_ETHDEV_LOG(ERR, 2228 "Attempt to use locked device memory for Rx queue, which is not supported"); 2229 return -EINVAL; 2230 } 2231 if (conf->use_rte_memory && !cap.rx_cap.rte_memory) { 2232 RTE_ETHDEV_LOG(ERR, 2233 "Attempt to use DPDK memory for Rx queue, which is not supported"); 2234 return -EINVAL; 2235 } 2236 if (conf->use_locked_device_memory && conf->use_rte_memory) { 2237 RTE_ETHDEV_LOG(ERR, 2238 "Attempt to use mutually exclusive memory settings for Rx queue"); 2239 return -EINVAL; 2240 } 2241 if (conf->force_memory && 2242 !conf->use_locked_device_memory && 2243 !conf->use_rte_memory) { 2244 RTE_ETHDEV_LOG(ERR, 2245 "Attempt to force Rx queue memory settings, but none is set"); 2246 return -EINVAL; 2247 } 2248 if (conf->peer_count == 0) { 2249 RTE_ETHDEV_LOG(ERR, 2250 "Invalid value for number of peers for Rx queue(=%u), should be: > 0", 2251 conf->peer_count); 2252 return -EINVAL; 2253 } 2254 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 2255 cap.max_nb_queues != UINT16_MAX; i++) { 2256 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 2257 count++; 2258 } 2259 if (count > cap.max_nb_queues) { 2260 RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d", 2261 cap.max_nb_queues); 2262 return -EINVAL; 2263 } 2264 if (dev->data->dev_started) 2265 return -EBUSY; 2266 eth_dev_rxq_release(dev, rx_queue_id); 2267 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 2268 nb_rx_desc, conf); 2269 if (ret == 0) 2270 dev->data->rx_queue_state[rx_queue_id] = 2271 RTE_ETH_QUEUE_STATE_HAIRPIN; 2272 ret = eth_err(port_id, ret); 2273 2274 rte_eth_trace_rx_hairpin_queue_setup(port_id, rx_queue_id, nb_rx_desc, 2275 conf, ret); 2276 2277 return ret; 2278 } 2279 2280 int 2281 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2282 uint16_t nb_tx_desc, unsigned int socket_id, 2283 const struct rte_eth_txconf *tx_conf) 2284 { 2285 struct rte_eth_dev *dev; 2286 struct rte_eth_dev_info dev_info; 2287 struct rte_eth_txconf local_conf; 2288 int ret; 2289 2290 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2291 dev = &rte_eth_devices[port_id]; 2292 2293 if (tx_queue_id >= dev->data->nb_tx_queues) { 2294 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2295 return -EINVAL; 2296 } 2297 2298 if (*dev->dev_ops->tx_queue_setup == NULL) 2299 return -ENOTSUP; 2300 2301 ret = rte_eth_dev_info_get(port_id, &dev_info); 2302 if (ret != 0) 2303 return ret; 2304 2305 /* Use default specified by driver, if nb_tx_desc is zero */ 2306 if (nb_tx_desc == 0) { 2307 nb_tx_desc = dev_info.default_txportconf.ring_size; 2308 /* If driver default is zero, fall back on EAL default */ 2309 if (nb_tx_desc == 0) 2310 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2311 } 2312 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2313 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2314 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2315 RTE_ETHDEV_LOG(ERR, 2316 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2317 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2318 dev_info.tx_desc_lim.nb_min, 2319 dev_info.tx_desc_lim.nb_align); 2320 return -EINVAL; 2321 } 2322 2323 if (dev->data->dev_started && 2324 !(dev_info.dev_capa & 2325 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2326 return -EBUSY; 2327 2328 if (dev->data->dev_started && 2329 (dev->data->tx_queue_state[tx_queue_id] != 2330 RTE_ETH_QUEUE_STATE_STOPPED)) 2331 return -EBUSY; 2332 2333 eth_dev_txq_release(dev, tx_queue_id); 2334 2335 if (tx_conf == NULL) 2336 tx_conf = &dev_info.default_txconf; 2337 2338 local_conf = *tx_conf; 2339 2340 /* 2341 * If an offloading has already been enabled in 2342 * rte_eth_dev_configure(), it has been enabled on all queues, 2343 * so there is no need to enable it in this queue again. 2344 * The local_conf.offloads input to underlying PMD only carries 2345 * those offloadings which are only enabled on this queue and 2346 * not enabled on all queues. 2347 */ 2348 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2349 2350 /* 2351 * New added offloadings for this queue are those not enabled in 2352 * rte_eth_dev_configure() and they must be per-queue type. 2353 * A pure per-port offloading can't be enabled on a queue while 2354 * disabled on another queue. A pure per-port offloading can't 2355 * be enabled for any queue as new added one if it hasn't been 2356 * enabled in rte_eth_dev_configure(). 2357 */ 2358 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2359 local_conf.offloads) { 2360 RTE_ETHDEV_LOG(ERR, 2361 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2362 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2363 port_id, tx_queue_id, local_conf.offloads, 2364 dev_info.tx_queue_offload_capa, 2365 __func__); 2366 return -EINVAL; 2367 } 2368 2369 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf); 2370 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2371 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2372 } 2373 2374 int 2375 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2376 uint16_t nb_tx_desc, 2377 const struct rte_eth_hairpin_conf *conf) 2378 { 2379 struct rte_eth_dev *dev; 2380 struct rte_eth_hairpin_cap cap; 2381 int i; 2382 int count; 2383 int ret; 2384 2385 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2386 dev = &rte_eth_devices[port_id]; 2387 2388 if (tx_queue_id >= dev->data->nb_tx_queues) { 2389 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2390 return -EINVAL; 2391 } 2392 2393 if (conf == NULL) { 2394 RTE_ETHDEV_LOG(ERR, 2395 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n", 2396 port_id); 2397 return -EINVAL; 2398 } 2399 2400 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2401 if (ret != 0) 2402 return ret; 2403 if (*dev->dev_ops->tx_hairpin_queue_setup == NULL) 2404 return -ENOTSUP; 2405 /* if nb_rx_desc is zero use max number of desc from the driver. */ 2406 if (nb_tx_desc == 0) 2407 nb_tx_desc = cap.max_nb_desc; 2408 if (nb_tx_desc > cap.max_nb_desc) { 2409 RTE_ETHDEV_LOG(ERR, 2410 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu", 2411 nb_tx_desc, cap.max_nb_desc); 2412 return -EINVAL; 2413 } 2414 if (conf->peer_count > cap.max_tx_2_rx) { 2415 RTE_ETHDEV_LOG(ERR, 2416 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu", 2417 conf->peer_count, cap.max_tx_2_rx); 2418 return -EINVAL; 2419 } 2420 if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) { 2421 RTE_ETHDEV_LOG(ERR, 2422 "Attempt to use locked device memory for Tx queue, which is not supported"); 2423 return -EINVAL; 2424 } 2425 if (conf->use_rte_memory && !cap.tx_cap.rte_memory) { 2426 RTE_ETHDEV_LOG(ERR, 2427 "Attempt to use DPDK memory for Tx queue, which is not supported"); 2428 return -EINVAL; 2429 } 2430 if (conf->use_locked_device_memory && conf->use_rte_memory) { 2431 RTE_ETHDEV_LOG(ERR, 2432 "Attempt to use mutually exclusive memory settings for Tx queue"); 2433 return -EINVAL; 2434 } 2435 if (conf->force_memory && 2436 !conf->use_locked_device_memory && 2437 !conf->use_rte_memory) { 2438 RTE_ETHDEV_LOG(ERR, 2439 "Attempt to force Tx queue memory settings, but none is set"); 2440 return -EINVAL; 2441 } 2442 if (conf->peer_count == 0) { 2443 RTE_ETHDEV_LOG(ERR, 2444 "Invalid value for number of peers for Tx queue(=%u), should be: > 0", 2445 conf->peer_count); 2446 return -EINVAL; 2447 } 2448 for (i = 0, count = 0; i < dev->data->nb_tx_queues && 2449 cap.max_nb_queues != UINT16_MAX; i++) { 2450 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) 2451 count++; 2452 } 2453 if (count > cap.max_nb_queues) { 2454 RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d", 2455 cap.max_nb_queues); 2456 return -EINVAL; 2457 } 2458 if (dev->data->dev_started) 2459 return -EBUSY; 2460 eth_dev_txq_release(dev, tx_queue_id); 2461 ret = (*dev->dev_ops->tx_hairpin_queue_setup) 2462 (dev, tx_queue_id, nb_tx_desc, conf); 2463 if (ret == 0) 2464 dev->data->tx_queue_state[tx_queue_id] = 2465 RTE_ETH_QUEUE_STATE_HAIRPIN; 2466 ret = eth_err(port_id, ret); 2467 2468 rte_eth_trace_tx_hairpin_queue_setup(port_id, tx_queue_id, nb_tx_desc, 2469 conf, ret); 2470 2471 return ret; 2472 } 2473 2474 int 2475 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port) 2476 { 2477 struct rte_eth_dev *dev; 2478 int ret; 2479 2480 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2481 dev = &rte_eth_devices[tx_port]; 2482 2483 if (dev->data->dev_started == 0) { 2484 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port); 2485 return -EBUSY; 2486 } 2487 2488 if (*dev->dev_ops->hairpin_bind == NULL) 2489 return -ENOTSUP; 2490 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); 2491 if (ret != 0) 2492 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d" 2493 " to Rx %d (%d - all ports)\n", 2494 tx_port, rx_port, RTE_MAX_ETHPORTS); 2495 2496 rte_eth_trace_hairpin_bind(tx_port, rx_port, ret); 2497 2498 return ret; 2499 } 2500 2501 int 2502 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port) 2503 { 2504 struct rte_eth_dev *dev; 2505 int ret; 2506 2507 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2508 dev = &rte_eth_devices[tx_port]; 2509 2510 if (dev->data->dev_started == 0) { 2511 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port); 2512 return -EBUSY; 2513 } 2514 2515 if (*dev->dev_ops->hairpin_unbind == NULL) 2516 return -ENOTSUP; 2517 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); 2518 if (ret != 0) 2519 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2520 " from Rx %d (%d - all ports)\n", 2521 tx_port, rx_port, RTE_MAX_ETHPORTS); 2522 2523 rte_eth_trace_hairpin_unbind(tx_port, rx_port, ret); 2524 2525 return ret; 2526 } 2527 2528 int 2529 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2530 size_t len, uint32_t direction) 2531 { 2532 struct rte_eth_dev *dev; 2533 int ret; 2534 2535 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2536 dev = &rte_eth_devices[port_id]; 2537 2538 if (peer_ports == NULL) { 2539 RTE_ETHDEV_LOG(ERR, 2540 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2541 port_id); 2542 return -EINVAL; 2543 } 2544 2545 if (len == 0) { 2546 RTE_ETHDEV_LOG(ERR, 2547 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2548 port_id); 2549 return -EINVAL; 2550 } 2551 2552 if (*dev->dev_ops->hairpin_get_peer_ports == NULL) 2553 return -ENOTSUP; 2554 2555 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2556 len, direction); 2557 if (ret < 0) 2558 RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2559 port_id, direction ? "Rx" : "Tx"); 2560 2561 rte_eth_trace_hairpin_get_peer_ports(port_id, peer_ports, len, 2562 direction, ret); 2563 2564 return ret; 2565 } 2566 2567 void 2568 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2569 void *userdata __rte_unused) 2570 { 2571 rte_pktmbuf_free_bulk(pkts, unsent); 2572 2573 rte_eth_trace_tx_buffer_drop_callback((void **)pkts, unsent); 2574 } 2575 2576 void 2577 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2578 void *userdata) 2579 { 2580 uint64_t *count = userdata; 2581 2582 rte_pktmbuf_free_bulk(pkts, unsent); 2583 *count += unsent; 2584 2585 rte_eth_trace_tx_buffer_count_callback((void **)pkts, unsent, *count); 2586 } 2587 2588 int 2589 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2590 buffer_tx_error_fn cbfn, void *userdata) 2591 { 2592 if (buffer == NULL) { 2593 RTE_ETHDEV_LOG(ERR, 2594 "Cannot set Tx buffer error callback to NULL buffer\n"); 2595 return -EINVAL; 2596 } 2597 2598 buffer->error_callback = cbfn; 2599 buffer->error_userdata = userdata; 2600 2601 rte_eth_trace_tx_buffer_set_err_callback(buffer); 2602 2603 return 0; 2604 } 2605 2606 int 2607 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2608 { 2609 int ret = 0; 2610 2611 if (buffer == NULL) { 2612 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2613 return -EINVAL; 2614 } 2615 2616 buffer->size = size; 2617 if (buffer->error_callback == NULL) { 2618 ret = rte_eth_tx_buffer_set_err_callback( 2619 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2620 } 2621 2622 rte_eth_trace_tx_buffer_init(buffer, size, ret); 2623 2624 return ret; 2625 } 2626 2627 int 2628 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2629 { 2630 struct rte_eth_dev *dev; 2631 int ret; 2632 2633 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2634 dev = &rte_eth_devices[port_id]; 2635 2636 if (*dev->dev_ops->tx_done_cleanup == NULL) 2637 return -ENOTSUP; 2638 2639 /* Call driver to free pending mbufs. */ 2640 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2641 free_cnt); 2642 ret = eth_err(port_id, ret); 2643 2644 rte_eth_trace_tx_done_cleanup(port_id, queue_id, free_cnt, ret); 2645 2646 return ret; 2647 } 2648 2649 int 2650 rte_eth_promiscuous_enable(uint16_t port_id) 2651 { 2652 struct rte_eth_dev *dev; 2653 int diag = 0; 2654 2655 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2656 dev = &rte_eth_devices[port_id]; 2657 2658 if (dev->data->promiscuous == 1) 2659 return 0; 2660 2661 if (*dev->dev_ops->promiscuous_enable == NULL) 2662 return -ENOTSUP; 2663 2664 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2665 dev->data->promiscuous = (diag == 0) ? 1 : 0; 2666 2667 diag = eth_err(port_id, diag); 2668 2669 rte_eth_trace_promiscuous_enable(port_id, dev->data->promiscuous, 2670 diag); 2671 2672 return diag; 2673 } 2674 2675 int 2676 rte_eth_promiscuous_disable(uint16_t port_id) 2677 { 2678 struct rte_eth_dev *dev; 2679 int diag = 0; 2680 2681 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2682 dev = &rte_eth_devices[port_id]; 2683 2684 if (dev->data->promiscuous == 0) 2685 return 0; 2686 2687 if (*dev->dev_ops->promiscuous_disable == NULL) 2688 return -ENOTSUP; 2689 2690 dev->data->promiscuous = 0; 2691 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2692 if (diag != 0) 2693 dev->data->promiscuous = 1; 2694 2695 diag = eth_err(port_id, diag); 2696 2697 rte_eth_trace_promiscuous_disable(port_id, dev->data->promiscuous, 2698 diag); 2699 2700 return diag; 2701 } 2702 2703 int 2704 rte_eth_promiscuous_get(uint16_t port_id) 2705 { 2706 struct rte_eth_dev *dev; 2707 2708 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2709 dev = &rte_eth_devices[port_id]; 2710 2711 rte_eth_trace_promiscuous_get(port_id, dev->data->promiscuous); 2712 2713 return dev->data->promiscuous; 2714 } 2715 2716 int 2717 rte_eth_allmulticast_enable(uint16_t port_id) 2718 { 2719 struct rte_eth_dev *dev; 2720 int diag; 2721 2722 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2723 dev = &rte_eth_devices[port_id]; 2724 2725 if (dev->data->all_multicast == 1) 2726 return 0; 2727 2728 if (*dev->dev_ops->allmulticast_enable == NULL) 2729 return -ENOTSUP; 2730 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2731 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2732 2733 diag = eth_err(port_id, diag); 2734 2735 rte_eth_trace_allmulticast_enable(port_id, dev->data->all_multicast, 2736 diag); 2737 2738 return diag; 2739 } 2740 2741 int 2742 rte_eth_allmulticast_disable(uint16_t port_id) 2743 { 2744 struct rte_eth_dev *dev; 2745 int diag; 2746 2747 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2748 dev = &rte_eth_devices[port_id]; 2749 2750 if (dev->data->all_multicast == 0) 2751 return 0; 2752 2753 if (*dev->dev_ops->allmulticast_disable == NULL) 2754 return -ENOTSUP; 2755 dev->data->all_multicast = 0; 2756 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2757 if (diag != 0) 2758 dev->data->all_multicast = 1; 2759 2760 diag = eth_err(port_id, diag); 2761 2762 rte_eth_trace_allmulticast_disable(port_id, dev->data->all_multicast, 2763 diag); 2764 2765 return diag; 2766 } 2767 2768 int 2769 rte_eth_allmulticast_get(uint16_t port_id) 2770 { 2771 struct rte_eth_dev *dev; 2772 2773 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2774 dev = &rte_eth_devices[port_id]; 2775 2776 rte_eth_trace_allmulticast_get(port_id, dev->data->all_multicast); 2777 2778 return dev->data->all_multicast; 2779 } 2780 2781 int 2782 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2783 { 2784 struct rte_eth_dev *dev; 2785 2786 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2787 dev = &rte_eth_devices[port_id]; 2788 2789 if (eth_link == NULL) { 2790 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2791 port_id); 2792 return -EINVAL; 2793 } 2794 2795 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2796 rte_eth_linkstatus_get(dev, eth_link); 2797 else { 2798 if (*dev->dev_ops->link_update == NULL) 2799 return -ENOTSUP; 2800 (*dev->dev_ops->link_update)(dev, 1); 2801 *eth_link = dev->data->dev_link; 2802 } 2803 2804 rte_eth_trace_link_get(port_id, eth_link); 2805 2806 return 0; 2807 } 2808 2809 int 2810 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2811 { 2812 struct rte_eth_dev *dev; 2813 2814 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2815 dev = &rte_eth_devices[port_id]; 2816 2817 if (eth_link == NULL) { 2818 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2819 port_id); 2820 return -EINVAL; 2821 } 2822 2823 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2824 rte_eth_linkstatus_get(dev, eth_link); 2825 else { 2826 if (*dev->dev_ops->link_update == NULL) 2827 return -ENOTSUP; 2828 (*dev->dev_ops->link_update)(dev, 0); 2829 *eth_link = dev->data->dev_link; 2830 } 2831 2832 rte_eth_trace_link_get_nowait(port_id, eth_link); 2833 2834 return 0; 2835 } 2836 2837 const char * 2838 rte_eth_link_speed_to_str(uint32_t link_speed) 2839 { 2840 const char *ret; 2841 2842 switch (link_speed) { 2843 case RTE_ETH_SPEED_NUM_NONE: 2844 ret = "None"; 2845 break; 2846 case RTE_ETH_SPEED_NUM_10M: 2847 ret = "10 Mbps"; 2848 break; 2849 case RTE_ETH_SPEED_NUM_100M: 2850 ret = "100 Mbps"; 2851 break; 2852 case RTE_ETH_SPEED_NUM_1G: 2853 ret = "1 Gbps"; 2854 break; 2855 case RTE_ETH_SPEED_NUM_2_5G: 2856 ret = "2.5 Gbps"; 2857 break; 2858 case RTE_ETH_SPEED_NUM_5G: 2859 ret = "5 Gbps"; 2860 break; 2861 case RTE_ETH_SPEED_NUM_10G: 2862 ret = "10 Gbps"; 2863 break; 2864 case RTE_ETH_SPEED_NUM_20G: 2865 ret = "20 Gbps"; 2866 break; 2867 case RTE_ETH_SPEED_NUM_25G: 2868 ret = "25 Gbps"; 2869 break; 2870 case RTE_ETH_SPEED_NUM_40G: 2871 ret = "40 Gbps"; 2872 break; 2873 case RTE_ETH_SPEED_NUM_50G: 2874 ret = "50 Gbps"; 2875 break; 2876 case RTE_ETH_SPEED_NUM_56G: 2877 ret = "56 Gbps"; 2878 break; 2879 case RTE_ETH_SPEED_NUM_100G: 2880 ret = "100 Gbps"; 2881 break; 2882 case RTE_ETH_SPEED_NUM_200G: 2883 ret = "200 Gbps"; 2884 break; 2885 case RTE_ETH_SPEED_NUM_UNKNOWN: 2886 ret = "Unknown"; 2887 break; 2888 default: 2889 ret = "Invalid"; 2890 } 2891 2892 rte_eth_trace_link_speed_to_str(link_speed, ret); 2893 2894 return ret; 2895 } 2896 2897 int 2898 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2899 { 2900 int ret; 2901 2902 if (str == NULL) { 2903 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2904 return -EINVAL; 2905 } 2906 2907 if (len == 0) { 2908 RTE_ETHDEV_LOG(ERR, 2909 "Cannot convert link to string with zero size\n"); 2910 return -EINVAL; 2911 } 2912 2913 if (eth_link == NULL) { 2914 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2915 return -EINVAL; 2916 } 2917 2918 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 2919 ret = snprintf(str, len, "Link down"); 2920 else 2921 ret = snprintf(str, len, "Link up at %s %s %s", 2922 rte_eth_link_speed_to_str(eth_link->link_speed), 2923 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 2924 "FDX" : "HDX", 2925 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 2926 "Autoneg" : "Fixed"); 2927 2928 rte_eth_trace_link_to_str(len, eth_link, str, ret); 2929 2930 return ret; 2931 } 2932 2933 int 2934 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2935 { 2936 struct rte_eth_dev *dev; 2937 int ret; 2938 2939 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2940 dev = &rte_eth_devices[port_id]; 2941 2942 if (stats == NULL) { 2943 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2944 port_id); 2945 return -EINVAL; 2946 } 2947 2948 memset(stats, 0, sizeof(*stats)); 2949 2950 if (*dev->dev_ops->stats_get == NULL) 2951 return -ENOTSUP; 2952 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2953 ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2954 2955 rte_eth_trace_stats_get(port_id, stats, ret); 2956 2957 return ret; 2958 } 2959 2960 int 2961 rte_eth_stats_reset(uint16_t port_id) 2962 { 2963 struct rte_eth_dev *dev; 2964 int ret; 2965 2966 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2967 dev = &rte_eth_devices[port_id]; 2968 2969 if (*dev->dev_ops->stats_reset == NULL) 2970 return -ENOTSUP; 2971 ret = (*dev->dev_ops->stats_reset)(dev); 2972 if (ret != 0) 2973 return eth_err(port_id, ret); 2974 2975 dev->data->rx_mbuf_alloc_failed = 0; 2976 2977 rte_eth_trace_stats_reset(port_id); 2978 2979 return 0; 2980 } 2981 2982 static inline int 2983 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2984 { 2985 uint16_t nb_rxqs, nb_txqs; 2986 int count; 2987 2988 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2989 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2990 2991 count = RTE_NB_STATS; 2992 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2993 count += nb_rxqs * RTE_NB_RXQ_STATS; 2994 count += nb_txqs * RTE_NB_TXQ_STATS; 2995 } 2996 2997 return count; 2998 } 2999 3000 static int 3001 eth_dev_get_xstats_count(uint16_t port_id) 3002 { 3003 struct rte_eth_dev *dev; 3004 int count; 3005 3006 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3007 dev = &rte_eth_devices[port_id]; 3008 if (dev->dev_ops->xstats_get_names != NULL) { 3009 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 3010 if (count < 0) 3011 return eth_err(port_id, count); 3012 } else 3013 count = 0; 3014 3015 3016 count += eth_dev_get_xstats_basic_count(dev); 3017 3018 return count; 3019 } 3020 3021 int 3022 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 3023 uint64_t *id) 3024 { 3025 int cnt_xstats, idx_xstat; 3026 3027 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3028 3029 if (xstat_name == NULL) { 3030 RTE_ETHDEV_LOG(ERR, 3031 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 3032 port_id); 3033 return -ENOMEM; 3034 } 3035 3036 if (id == NULL) { 3037 RTE_ETHDEV_LOG(ERR, 3038 "Cannot get ethdev port %u xstats ID to NULL\n", 3039 port_id); 3040 return -ENOMEM; 3041 } 3042 3043 /* Get count */ 3044 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 3045 if (cnt_xstats < 0) { 3046 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 3047 return -ENODEV; 3048 } 3049 3050 /* Get id-name lookup table */ 3051 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 3052 3053 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 3054 port_id, xstats_names, cnt_xstats, NULL)) { 3055 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 3056 return -1; 3057 } 3058 3059 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 3060 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 3061 *id = idx_xstat; 3062 3063 rte_eth_trace_xstats_get_id_by_name(port_id, 3064 xstat_name, *id); 3065 3066 return 0; 3067 }; 3068 } 3069 3070 return -EINVAL; 3071 } 3072 3073 /* retrieve basic stats names */ 3074 static int 3075 eth_basic_stats_get_names(struct rte_eth_dev *dev, 3076 struct rte_eth_xstat_name *xstats_names) 3077 { 3078 int cnt_used_entries = 0; 3079 uint32_t idx, id_queue; 3080 uint16_t num_q; 3081 3082 for (idx = 0; idx < RTE_NB_STATS; idx++) { 3083 strlcpy(xstats_names[cnt_used_entries].name, 3084 eth_dev_stats_strings[idx].name, 3085 sizeof(xstats_names[0].name)); 3086 cnt_used_entries++; 3087 } 3088 3089 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3090 return cnt_used_entries; 3091 3092 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3093 for (id_queue = 0; id_queue < num_q; id_queue++) { 3094 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 3095 snprintf(xstats_names[cnt_used_entries].name, 3096 sizeof(xstats_names[0].name), 3097 "rx_q%u_%s", 3098 id_queue, eth_dev_rxq_stats_strings[idx].name); 3099 cnt_used_entries++; 3100 } 3101 3102 } 3103 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3104 for (id_queue = 0; id_queue < num_q; id_queue++) { 3105 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 3106 snprintf(xstats_names[cnt_used_entries].name, 3107 sizeof(xstats_names[0].name), 3108 "tx_q%u_%s", 3109 id_queue, eth_dev_txq_stats_strings[idx].name); 3110 cnt_used_entries++; 3111 } 3112 } 3113 return cnt_used_entries; 3114 } 3115 3116 /* retrieve ethdev extended statistics names */ 3117 int 3118 rte_eth_xstats_get_names_by_id(uint16_t port_id, 3119 struct rte_eth_xstat_name *xstats_names, unsigned int size, 3120 uint64_t *ids) 3121 { 3122 struct rte_eth_xstat_name *xstats_names_copy; 3123 unsigned int no_basic_stat_requested = 1; 3124 unsigned int no_ext_stat_requested = 1; 3125 unsigned int expected_entries; 3126 unsigned int basic_count; 3127 struct rte_eth_dev *dev; 3128 unsigned int i; 3129 int ret; 3130 3131 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3132 dev = &rte_eth_devices[port_id]; 3133 3134 basic_count = eth_dev_get_xstats_basic_count(dev); 3135 ret = eth_dev_get_xstats_count(port_id); 3136 if (ret < 0) 3137 return ret; 3138 expected_entries = (unsigned int)ret; 3139 3140 /* Return max number of stats if no ids given */ 3141 if (!ids) { 3142 if (!xstats_names) 3143 return expected_entries; 3144 else if (xstats_names && size < expected_entries) 3145 return expected_entries; 3146 } 3147 3148 if (ids && !xstats_names) 3149 return -EINVAL; 3150 3151 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3152 uint64_t ids_copy[size]; 3153 3154 for (i = 0; i < size; i++) { 3155 if (ids[i] < basic_count) { 3156 no_basic_stat_requested = 0; 3157 break; 3158 } 3159 3160 /* 3161 * Convert ids to xstats ids that PMD knows. 3162 * ids known by user are basic + extended stats. 3163 */ 3164 ids_copy[i] = ids[i] - basic_count; 3165 } 3166 3167 if (no_basic_stat_requested) 3168 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3169 ids_copy, xstats_names, size); 3170 } 3171 3172 /* Retrieve all stats */ 3173 if (!ids) { 3174 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3175 expected_entries); 3176 if (num_stats < 0 || num_stats > (int)expected_entries) 3177 return num_stats; 3178 else 3179 return expected_entries; 3180 } 3181 3182 xstats_names_copy = calloc(expected_entries, 3183 sizeof(struct rte_eth_xstat_name)); 3184 3185 if (!xstats_names_copy) { 3186 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3187 return -ENOMEM; 3188 } 3189 3190 if (ids) { 3191 for (i = 0; i < size; i++) { 3192 if (ids[i] >= basic_count) { 3193 no_ext_stat_requested = 0; 3194 break; 3195 } 3196 } 3197 } 3198 3199 /* Fill xstats_names_copy structure */ 3200 if (ids && no_ext_stat_requested) { 3201 eth_basic_stats_get_names(dev, xstats_names_copy); 3202 } else { 3203 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3204 expected_entries); 3205 if (ret < 0) { 3206 free(xstats_names_copy); 3207 return ret; 3208 } 3209 } 3210 3211 /* Filter stats */ 3212 for (i = 0; i < size; i++) { 3213 if (ids[i] >= expected_entries) { 3214 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3215 free(xstats_names_copy); 3216 return -1; 3217 } 3218 xstats_names[i] = xstats_names_copy[ids[i]]; 3219 3220 rte_eth_trace_xstats_get_names_by_id(port_id, &xstats_names[i], 3221 ids[i]); 3222 } 3223 3224 free(xstats_names_copy); 3225 return size; 3226 } 3227 3228 int 3229 rte_eth_xstats_get_names(uint16_t port_id, 3230 struct rte_eth_xstat_name *xstats_names, 3231 unsigned int size) 3232 { 3233 struct rte_eth_dev *dev; 3234 int cnt_used_entries; 3235 int cnt_expected_entries; 3236 int cnt_driver_entries; 3237 int i; 3238 3239 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3240 if (xstats_names == NULL || cnt_expected_entries < 0 || 3241 (int)size < cnt_expected_entries) 3242 return cnt_expected_entries; 3243 3244 /* port_id checked in eth_dev_get_xstats_count() */ 3245 dev = &rte_eth_devices[port_id]; 3246 3247 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3248 3249 if (dev->dev_ops->xstats_get_names != NULL) { 3250 /* If there are any driver-specific xstats, append them 3251 * to end of list. 3252 */ 3253 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 3254 dev, 3255 xstats_names + cnt_used_entries, 3256 size - cnt_used_entries); 3257 if (cnt_driver_entries < 0) 3258 return eth_err(port_id, cnt_driver_entries); 3259 cnt_used_entries += cnt_driver_entries; 3260 } 3261 3262 for (i = 0; i < cnt_used_entries; i++) 3263 rte_eth_trace_xstats_get_names(port_id, i, xstats_names[i], 3264 size, cnt_used_entries); 3265 3266 return cnt_used_entries; 3267 } 3268 3269 3270 static int 3271 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 3272 { 3273 struct rte_eth_dev *dev; 3274 struct rte_eth_stats eth_stats; 3275 unsigned int count = 0, i, q; 3276 uint64_t val, *stats_ptr; 3277 uint16_t nb_rxqs, nb_txqs; 3278 int ret; 3279 3280 ret = rte_eth_stats_get(port_id, ð_stats); 3281 if (ret < 0) 3282 return ret; 3283 3284 dev = &rte_eth_devices[port_id]; 3285 3286 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3287 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3288 3289 /* global stats */ 3290 for (i = 0; i < RTE_NB_STATS; i++) { 3291 stats_ptr = RTE_PTR_ADD(ð_stats, 3292 eth_dev_stats_strings[i].offset); 3293 val = *stats_ptr; 3294 xstats[count++].value = val; 3295 } 3296 3297 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3298 return count; 3299 3300 /* per-rxq stats */ 3301 for (q = 0; q < nb_rxqs; q++) { 3302 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 3303 stats_ptr = RTE_PTR_ADD(ð_stats, 3304 eth_dev_rxq_stats_strings[i].offset + 3305 q * sizeof(uint64_t)); 3306 val = *stats_ptr; 3307 xstats[count++].value = val; 3308 } 3309 } 3310 3311 /* per-txq stats */ 3312 for (q = 0; q < nb_txqs; q++) { 3313 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 3314 stats_ptr = RTE_PTR_ADD(ð_stats, 3315 eth_dev_txq_stats_strings[i].offset + 3316 q * sizeof(uint64_t)); 3317 val = *stats_ptr; 3318 xstats[count++].value = val; 3319 } 3320 } 3321 return count; 3322 } 3323 3324 /* retrieve ethdev extended statistics */ 3325 int 3326 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 3327 uint64_t *values, unsigned int size) 3328 { 3329 unsigned int no_basic_stat_requested = 1; 3330 unsigned int no_ext_stat_requested = 1; 3331 unsigned int num_xstats_filled; 3332 unsigned int basic_count; 3333 uint16_t expected_entries; 3334 struct rte_eth_dev *dev; 3335 unsigned int i; 3336 int ret; 3337 3338 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3339 dev = &rte_eth_devices[port_id]; 3340 3341 ret = eth_dev_get_xstats_count(port_id); 3342 if (ret < 0) 3343 return ret; 3344 expected_entries = (uint16_t)ret; 3345 struct rte_eth_xstat xstats[expected_entries]; 3346 basic_count = eth_dev_get_xstats_basic_count(dev); 3347 3348 /* Return max number of stats if no ids given */ 3349 if (!ids) { 3350 if (!values) 3351 return expected_entries; 3352 else if (values && size < expected_entries) 3353 return expected_entries; 3354 } 3355 3356 if (ids && !values) 3357 return -EINVAL; 3358 3359 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 3360 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); 3361 uint64_t ids_copy[size]; 3362 3363 for (i = 0; i < size; i++) { 3364 if (ids[i] < basic_count) { 3365 no_basic_stat_requested = 0; 3366 break; 3367 } 3368 3369 /* 3370 * Convert ids to xstats ids that PMD knows. 3371 * ids known by user are basic + extended stats. 3372 */ 3373 ids_copy[i] = ids[i] - basic_count; 3374 } 3375 3376 if (no_basic_stat_requested) 3377 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3378 values, size); 3379 } 3380 3381 if (ids) { 3382 for (i = 0; i < size; i++) { 3383 if (ids[i] >= basic_count) { 3384 no_ext_stat_requested = 0; 3385 break; 3386 } 3387 } 3388 } 3389 3390 /* Fill the xstats structure */ 3391 if (ids && no_ext_stat_requested) 3392 ret = eth_basic_stats_get(port_id, xstats); 3393 else 3394 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3395 3396 if (ret < 0) 3397 return ret; 3398 num_xstats_filled = (unsigned int)ret; 3399 3400 /* Return all stats */ 3401 if (!ids) { 3402 for (i = 0; i < num_xstats_filled; i++) 3403 values[i] = xstats[i].value; 3404 return expected_entries; 3405 } 3406 3407 /* Filter stats */ 3408 for (i = 0; i < size; i++) { 3409 if (ids[i] >= expected_entries) { 3410 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3411 return -1; 3412 } 3413 values[i] = xstats[ids[i]].value; 3414 } 3415 3416 rte_eth_trace_xstats_get_by_id(port_id, ids, values, size); 3417 3418 return size; 3419 } 3420 3421 int 3422 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3423 unsigned int n) 3424 { 3425 struct rte_eth_dev *dev; 3426 unsigned int count, i; 3427 signed int xcount = 0; 3428 int ret; 3429 3430 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3431 if (xstats == NULL && n > 0) 3432 return -EINVAL; 3433 dev = &rte_eth_devices[port_id]; 3434 3435 count = eth_dev_get_xstats_basic_count(dev); 3436 3437 /* implemented by the driver */ 3438 if (dev->dev_ops->xstats_get != NULL) { 3439 /* Retrieve the xstats from the driver at the end of the 3440 * xstats struct. 3441 */ 3442 xcount = (*dev->dev_ops->xstats_get)(dev, 3443 (n > count) ? xstats + count : NULL, 3444 (n > count) ? n - count : 0); 3445 3446 if (xcount < 0) 3447 return eth_err(port_id, xcount); 3448 } 3449 3450 if (n < count + xcount || xstats == NULL) 3451 return count + xcount; 3452 3453 /* now fill the xstats structure */ 3454 ret = eth_basic_stats_get(port_id, xstats); 3455 if (ret < 0) 3456 return ret; 3457 count = ret; 3458 3459 for (i = 0; i < count; i++) 3460 xstats[i].id = i; 3461 /* add an offset to driver-specific stats */ 3462 for ( ; i < count + xcount; i++) 3463 xstats[i].id += count; 3464 3465 for (i = 0; i < n; i++) 3466 rte_eth_trace_xstats_get(port_id, xstats[i]); 3467 3468 return count + xcount; 3469 } 3470 3471 /* reset ethdev extended statistics */ 3472 int 3473 rte_eth_xstats_reset(uint16_t port_id) 3474 { 3475 struct rte_eth_dev *dev; 3476 3477 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3478 dev = &rte_eth_devices[port_id]; 3479 3480 /* implemented by the driver */ 3481 if (dev->dev_ops->xstats_reset != NULL) { 3482 int ret = eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3483 3484 rte_eth_trace_xstats_reset(port_id, ret); 3485 3486 return ret; 3487 } 3488 3489 /* fallback to default */ 3490 return rte_eth_stats_reset(port_id); 3491 } 3492 3493 static int 3494 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3495 uint8_t stat_idx, uint8_t is_rx) 3496 { 3497 struct rte_eth_dev *dev; 3498 3499 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3500 dev = &rte_eth_devices[port_id]; 3501 3502 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3503 return -EINVAL; 3504 3505 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3506 return -EINVAL; 3507 3508 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3509 return -EINVAL; 3510 3511 if (*dev->dev_ops->queue_stats_mapping_set == NULL) 3512 return -ENOTSUP; 3513 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3514 } 3515 3516 int 3517 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3518 uint8_t stat_idx) 3519 { 3520 int ret; 3521 3522 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3523 tx_queue_id, 3524 stat_idx, STAT_QMAP_TX)); 3525 3526 rte_ethdev_trace_set_tx_queue_stats_mapping(port_id, tx_queue_id, 3527 stat_idx, ret); 3528 3529 return ret; 3530 } 3531 3532 int 3533 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3534 uint8_t stat_idx) 3535 { 3536 int ret; 3537 3538 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3539 rx_queue_id, 3540 stat_idx, STAT_QMAP_RX)); 3541 3542 rte_ethdev_trace_set_rx_queue_stats_mapping(port_id, rx_queue_id, 3543 stat_idx, ret); 3544 3545 return ret; 3546 } 3547 3548 int 3549 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3550 { 3551 struct rte_eth_dev *dev; 3552 int ret; 3553 3554 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3555 dev = &rte_eth_devices[port_id]; 3556 3557 if (fw_version == NULL && fw_size > 0) { 3558 RTE_ETHDEV_LOG(ERR, 3559 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3560 port_id); 3561 return -EINVAL; 3562 } 3563 3564 if (*dev->dev_ops->fw_version_get == NULL) 3565 return -ENOTSUP; 3566 ret = eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3567 fw_version, fw_size)); 3568 3569 rte_ethdev_trace_fw_version_get(port_id, fw_version, fw_size, ret); 3570 3571 return ret; 3572 } 3573 3574 int 3575 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3576 { 3577 struct rte_eth_dev *dev; 3578 const struct rte_eth_desc_lim lim = { 3579 .nb_max = UINT16_MAX, 3580 .nb_min = 0, 3581 .nb_align = 1, 3582 .nb_seg_max = UINT16_MAX, 3583 .nb_mtu_seg_max = UINT16_MAX, 3584 }; 3585 int diag; 3586 3587 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3588 dev = &rte_eth_devices[port_id]; 3589 3590 if (dev_info == NULL) { 3591 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3592 port_id); 3593 return -EINVAL; 3594 } 3595 3596 /* 3597 * Init dev_info before port_id check since caller does not have 3598 * return status and does not know if get is successful or not. 3599 */ 3600 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3601 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3602 3603 dev_info->rx_desc_lim = lim; 3604 dev_info->tx_desc_lim = lim; 3605 dev_info->device = dev->device; 3606 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3607 RTE_ETHER_CRC_LEN; 3608 dev_info->max_mtu = UINT16_MAX; 3609 3610 if (*dev->dev_ops->dev_infos_get == NULL) 3611 return -ENOTSUP; 3612 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3613 if (diag != 0) { 3614 /* Cleanup already filled in device information */ 3615 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3616 return eth_err(port_id, diag); 3617 } 3618 3619 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3620 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3621 RTE_MAX_QUEUES_PER_PORT); 3622 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3623 RTE_MAX_QUEUES_PER_PORT); 3624 3625 dev_info->driver_name = dev->device->driver->name; 3626 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3627 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3628 3629 dev_info->dev_flags = &dev->data->dev_flags; 3630 3631 rte_ethdev_trace_info_get(port_id, dev_info); 3632 3633 return 0; 3634 } 3635 3636 int 3637 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3638 { 3639 struct rte_eth_dev *dev; 3640 3641 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3642 dev = &rte_eth_devices[port_id]; 3643 3644 if (dev_conf == NULL) { 3645 RTE_ETHDEV_LOG(ERR, 3646 "Cannot get ethdev port %u configuration to NULL\n", 3647 port_id); 3648 return -EINVAL; 3649 } 3650 3651 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3652 3653 rte_ethdev_trace_conf_get(port_id, dev_conf); 3654 3655 return 0; 3656 } 3657 3658 int 3659 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3660 uint32_t *ptypes, int num) 3661 { 3662 int i, j; 3663 struct rte_eth_dev *dev; 3664 const uint32_t *all_ptypes; 3665 3666 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3667 dev = &rte_eth_devices[port_id]; 3668 3669 if (ptypes == NULL && num > 0) { 3670 RTE_ETHDEV_LOG(ERR, 3671 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3672 port_id); 3673 return -EINVAL; 3674 } 3675 3676 if (*dev->dev_ops->dev_supported_ptypes_get == NULL) 3677 return 0; 3678 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3679 3680 if (!all_ptypes) 3681 return 0; 3682 3683 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3684 if (all_ptypes[i] & ptype_mask) { 3685 if (j < num) { 3686 ptypes[j] = all_ptypes[i]; 3687 3688 rte_ethdev_trace_get_supported_ptypes(port_id, 3689 j, num, ptypes[j]); 3690 } 3691 j++; 3692 } 3693 3694 return j; 3695 } 3696 3697 int 3698 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3699 uint32_t *set_ptypes, unsigned int num) 3700 { 3701 const uint32_t valid_ptype_masks[] = { 3702 RTE_PTYPE_L2_MASK, 3703 RTE_PTYPE_L3_MASK, 3704 RTE_PTYPE_L4_MASK, 3705 RTE_PTYPE_TUNNEL_MASK, 3706 RTE_PTYPE_INNER_L2_MASK, 3707 RTE_PTYPE_INNER_L3_MASK, 3708 RTE_PTYPE_INNER_L4_MASK, 3709 }; 3710 const uint32_t *all_ptypes; 3711 struct rte_eth_dev *dev; 3712 uint32_t unused_mask; 3713 unsigned int i, j; 3714 int ret; 3715 3716 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3717 dev = &rte_eth_devices[port_id]; 3718 3719 if (num > 0 && set_ptypes == NULL) { 3720 RTE_ETHDEV_LOG(ERR, 3721 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3722 port_id); 3723 return -EINVAL; 3724 } 3725 3726 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3727 *dev->dev_ops->dev_ptypes_set == NULL) { 3728 ret = 0; 3729 goto ptype_unknown; 3730 } 3731 3732 if (ptype_mask == 0) { 3733 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3734 ptype_mask); 3735 goto ptype_unknown; 3736 } 3737 3738 unused_mask = ptype_mask; 3739 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3740 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3741 if (mask && mask != valid_ptype_masks[i]) { 3742 ret = -EINVAL; 3743 goto ptype_unknown; 3744 } 3745 unused_mask &= ~valid_ptype_masks[i]; 3746 } 3747 3748 if (unused_mask) { 3749 ret = -EINVAL; 3750 goto ptype_unknown; 3751 } 3752 3753 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3754 if (all_ptypes == NULL) { 3755 ret = 0; 3756 goto ptype_unknown; 3757 } 3758 3759 /* 3760 * Accommodate as many set_ptypes as possible. If the supplied 3761 * set_ptypes array is insufficient fill it partially. 3762 */ 3763 for (i = 0, j = 0; set_ptypes != NULL && 3764 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3765 if (ptype_mask & all_ptypes[i]) { 3766 if (j < num - 1) { 3767 set_ptypes[j] = all_ptypes[i]; 3768 3769 rte_ethdev_trace_set_ptypes(port_id, j, num, 3770 set_ptypes[j]); 3771 3772 j++; 3773 continue; 3774 } 3775 break; 3776 } 3777 } 3778 3779 if (set_ptypes != NULL && j < num) 3780 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3781 3782 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3783 3784 ptype_unknown: 3785 if (num > 0) 3786 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3787 3788 return ret; 3789 } 3790 3791 int 3792 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3793 unsigned int num) 3794 { 3795 int32_t ret; 3796 struct rte_eth_dev *dev; 3797 struct rte_eth_dev_info dev_info; 3798 3799 if (ma == NULL) { 3800 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3801 return -EINVAL; 3802 } 3803 3804 /* will check for us that port_id is a valid one */ 3805 ret = rte_eth_dev_info_get(port_id, &dev_info); 3806 if (ret != 0) 3807 return ret; 3808 3809 dev = &rte_eth_devices[port_id]; 3810 num = RTE_MIN(dev_info.max_mac_addrs, num); 3811 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3812 3813 rte_eth_trace_macaddrs_get(port_id, num); 3814 3815 return num; 3816 } 3817 3818 int 3819 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3820 { 3821 struct rte_eth_dev *dev; 3822 3823 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3824 dev = &rte_eth_devices[port_id]; 3825 3826 if (mac_addr == NULL) { 3827 RTE_ETHDEV_LOG(ERR, 3828 "Cannot get ethdev port %u MAC address to NULL\n", 3829 port_id); 3830 return -EINVAL; 3831 } 3832 3833 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3834 3835 rte_eth_trace_macaddr_get(port_id, mac_addr); 3836 3837 return 0; 3838 } 3839 3840 int 3841 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3842 { 3843 struct rte_eth_dev *dev; 3844 3845 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3846 dev = &rte_eth_devices[port_id]; 3847 3848 if (mtu == NULL) { 3849 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3850 port_id); 3851 return -EINVAL; 3852 } 3853 3854 *mtu = dev->data->mtu; 3855 3856 rte_ethdev_trace_get_mtu(port_id, *mtu); 3857 3858 return 0; 3859 } 3860 3861 int 3862 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3863 { 3864 int ret; 3865 struct rte_eth_dev_info dev_info; 3866 struct rte_eth_dev *dev; 3867 3868 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3869 dev = &rte_eth_devices[port_id]; 3870 if (*dev->dev_ops->mtu_set == NULL) 3871 return -ENOTSUP; 3872 3873 /* 3874 * Check if the device supports dev_infos_get, if it does not 3875 * skip min_mtu/max_mtu validation here as this requires values 3876 * that are populated within the call to rte_eth_dev_info_get() 3877 * which relies on dev->dev_ops->dev_infos_get. 3878 */ 3879 if (*dev->dev_ops->dev_infos_get != NULL) { 3880 ret = rte_eth_dev_info_get(port_id, &dev_info); 3881 if (ret != 0) 3882 return ret; 3883 3884 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3885 if (ret != 0) 3886 return ret; 3887 } 3888 3889 if (dev->data->dev_configured == 0) { 3890 RTE_ETHDEV_LOG(ERR, 3891 "Port %u must be configured before MTU set\n", 3892 port_id); 3893 return -EINVAL; 3894 } 3895 3896 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3897 if (ret == 0) 3898 dev->data->mtu = mtu; 3899 3900 ret = eth_err(port_id, ret); 3901 3902 rte_ethdev_trace_set_mtu(port_id, mtu, ret); 3903 3904 return ret; 3905 } 3906 3907 int 3908 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3909 { 3910 struct rte_eth_dev *dev; 3911 int ret; 3912 3913 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3914 dev = &rte_eth_devices[port_id]; 3915 3916 if (!(dev->data->dev_conf.rxmode.offloads & 3917 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 3918 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 3919 port_id); 3920 return -ENOSYS; 3921 } 3922 3923 if (vlan_id > 4095) { 3924 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3925 port_id, vlan_id); 3926 return -EINVAL; 3927 } 3928 if (*dev->dev_ops->vlan_filter_set == NULL) 3929 return -ENOTSUP; 3930 3931 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3932 if (ret == 0) { 3933 struct rte_vlan_filter_conf *vfc; 3934 int vidx; 3935 int vbit; 3936 3937 vfc = &dev->data->vlan_filter_conf; 3938 vidx = vlan_id / 64; 3939 vbit = vlan_id % 64; 3940 3941 if (on) 3942 vfc->ids[vidx] |= RTE_BIT64(vbit); 3943 else 3944 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 3945 } 3946 3947 ret = eth_err(port_id, ret); 3948 3949 rte_ethdev_trace_vlan_filter(port_id, vlan_id, on, ret); 3950 3951 return ret; 3952 } 3953 3954 int 3955 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3956 int on) 3957 { 3958 struct rte_eth_dev *dev; 3959 3960 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3961 dev = &rte_eth_devices[port_id]; 3962 3963 if (rx_queue_id >= dev->data->nb_rx_queues) { 3964 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3965 return -EINVAL; 3966 } 3967 3968 if (*dev->dev_ops->vlan_strip_queue_set == NULL) 3969 return -ENOTSUP; 3970 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3971 3972 rte_ethdev_trace_set_vlan_strip_on_queue(port_id, rx_queue_id, on); 3973 3974 return 0; 3975 } 3976 3977 int 3978 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3979 enum rte_vlan_type vlan_type, 3980 uint16_t tpid) 3981 { 3982 struct rte_eth_dev *dev; 3983 int ret; 3984 3985 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3986 dev = &rte_eth_devices[port_id]; 3987 3988 if (*dev->dev_ops->vlan_tpid_set == NULL) 3989 return -ENOTSUP; 3990 ret = eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3991 tpid)); 3992 3993 rte_ethdev_trace_set_vlan_ether_type(port_id, vlan_type, tpid, ret); 3994 3995 return ret; 3996 } 3997 3998 int 3999 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 4000 { 4001 struct rte_eth_dev_info dev_info; 4002 struct rte_eth_dev *dev; 4003 int ret = 0; 4004 int mask = 0; 4005 int cur, org = 0; 4006 uint64_t orig_offloads; 4007 uint64_t dev_offloads; 4008 uint64_t new_offloads; 4009 4010 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4011 dev = &rte_eth_devices[port_id]; 4012 4013 /* save original values in case of failure */ 4014 orig_offloads = dev->data->dev_conf.rxmode.offloads; 4015 dev_offloads = orig_offloads; 4016 4017 /* check which option changed by application */ 4018 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 4019 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 4020 if (cur != org) { 4021 if (cur) 4022 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4023 else 4024 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4025 mask |= RTE_ETH_VLAN_STRIP_MASK; 4026 } 4027 4028 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 4029 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 4030 if (cur != org) { 4031 if (cur) 4032 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4033 else 4034 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4035 mask |= RTE_ETH_VLAN_FILTER_MASK; 4036 } 4037 4038 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 4039 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 4040 if (cur != org) { 4041 if (cur) 4042 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4043 else 4044 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4045 mask |= RTE_ETH_VLAN_EXTEND_MASK; 4046 } 4047 4048 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 4049 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 4050 if (cur != org) { 4051 if (cur) 4052 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4053 else 4054 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4055 mask |= RTE_ETH_QINQ_STRIP_MASK; 4056 } 4057 4058 /*no change*/ 4059 if (mask == 0) 4060 return ret; 4061 4062 ret = rte_eth_dev_info_get(port_id, &dev_info); 4063 if (ret != 0) 4064 return ret; 4065 4066 /* Rx VLAN offloading must be within its device capabilities */ 4067 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 4068 new_offloads = dev_offloads & ~orig_offloads; 4069 RTE_ETHDEV_LOG(ERR, 4070 "Ethdev port_id=%u requested new added VLAN offloads " 4071 "0x%" PRIx64 " must be within Rx offloads capabilities " 4072 "0x%" PRIx64 " in %s()\n", 4073 port_id, new_offloads, dev_info.rx_offload_capa, 4074 __func__); 4075 return -EINVAL; 4076 } 4077 4078 if (*dev->dev_ops->vlan_offload_set == NULL) 4079 return -ENOTSUP; 4080 dev->data->dev_conf.rxmode.offloads = dev_offloads; 4081 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 4082 if (ret) { 4083 /* hit an error restore original values */ 4084 dev->data->dev_conf.rxmode.offloads = orig_offloads; 4085 } 4086 4087 ret = eth_err(port_id, ret); 4088 4089 rte_ethdev_trace_set_vlan_offload(port_id, offload_mask, ret); 4090 4091 return ret; 4092 } 4093 4094 int 4095 rte_eth_dev_get_vlan_offload(uint16_t port_id) 4096 { 4097 struct rte_eth_dev *dev; 4098 uint64_t *dev_offloads; 4099 int ret = 0; 4100 4101 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4102 dev = &rte_eth_devices[port_id]; 4103 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 4104 4105 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 4106 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 4107 4108 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 4109 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 4110 4111 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 4112 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 4113 4114 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 4115 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 4116 4117 rte_ethdev_trace_get_vlan_offload(port_id, ret); 4118 4119 return ret; 4120 } 4121 4122 int 4123 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 4124 { 4125 struct rte_eth_dev *dev; 4126 int ret; 4127 4128 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4129 dev = &rte_eth_devices[port_id]; 4130 4131 if (*dev->dev_ops->vlan_pvid_set == NULL) 4132 return -ENOTSUP; 4133 ret = eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 4134 4135 rte_ethdev_trace_set_vlan_pvid(port_id, pvid, on, ret); 4136 4137 return ret; 4138 } 4139 4140 int 4141 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4142 { 4143 struct rte_eth_dev *dev; 4144 int ret; 4145 4146 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4147 dev = &rte_eth_devices[port_id]; 4148 4149 if (fc_conf == NULL) { 4150 RTE_ETHDEV_LOG(ERR, 4151 "Cannot get ethdev port %u flow control config to NULL\n", 4152 port_id); 4153 return -EINVAL; 4154 } 4155 4156 if (*dev->dev_ops->flow_ctrl_get == NULL) 4157 return -ENOTSUP; 4158 memset(fc_conf, 0, sizeof(*fc_conf)); 4159 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 4160 4161 rte_ethdev_trace_flow_ctrl_get(port_id, fc_conf, ret); 4162 4163 return ret; 4164 } 4165 4166 int 4167 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4168 { 4169 struct rte_eth_dev *dev; 4170 int ret; 4171 4172 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4173 dev = &rte_eth_devices[port_id]; 4174 4175 if (fc_conf == NULL) { 4176 RTE_ETHDEV_LOG(ERR, 4177 "Cannot set ethdev port %u flow control from NULL config\n", 4178 port_id); 4179 return -EINVAL; 4180 } 4181 4182 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 4183 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 4184 return -EINVAL; 4185 } 4186 4187 if (*dev->dev_ops->flow_ctrl_set == NULL) 4188 return -ENOTSUP; 4189 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 4190 4191 rte_ethdev_trace_flow_ctrl_set(port_id, fc_conf, ret); 4192 4193 return ret; 4194 } 4195 4196 int 4197 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 4198 struct rte_eth_pfc_conf *pfc_conf) 4199 { 4200 struct rte_eth_dev *dev; 4201 int ret; 4202 4203 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4204 dev = &rte_eth_devices[port_id]; 4205 4206 if (pfc_conf == NULL) { 4207 RTE_ETHDEV_LOG(ERR, 4208 "Cannot set ethdev port %u priority flow control from NULL config\n", 4209 port_id); 4210 return -EINVAL; 4211 } 4212 4213 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 4214 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 4215 return -EINVAL; 4216 } 4217 4218 /* High water, low water validation are device specific */ 4219 if (*dev->dev_ops->priority_flow_ctrl_set == NULL) 4220 return -ENOTSUP; 4221 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 4222 (dev, pfc_conf)); 4223 4224 rte_ethdev_trace_priority_flow_ctrl_set(port_id, pfc_conf, ret); 4225 4226 return ret; 4227 } 4228 4229 static int 4230 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4231 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4232 { 4233 if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) || 4234 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4235 if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) { 4236 RTE_ETHDEV_LOG(ERR, 4237 "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n", 4238 pfc_queue_conf->rx_pause.tx_qid, 4239 dev_info->nb_tx_queues); 4240 return -EINVAL; 4241 } 4242 4243 if (pfc_queue_conf->rx_pause.tc >= tc_max) { 4244 RTE_ETHDEV_LOG(ERR, 4245 "PFC TC not in range for Rx pause requested:%d max:%d\n", 4246 pfc_queue_conf->rx_pause.tc, tc_max); 4247 return -EINVAL; 4248 } 4249 } 4250 4251 return 0; 4252 } 4253 4254 static int 4255 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4256 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4257 { 4258 if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) || 4259 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4260 if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) { 4261 RTE_ETHDEV_LOG(ERR, 4262 "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n", 4263 pfc_queue_conf->tx_pause.rx_qid, 4264 dev_info->nb_rx_queues); 4265 return -EINVAL; 4266 } 4267 4268 if (pfc_queue_conf->tx_pause.tc >= tc_max) { 4269 RTE_ETHDEV_LOG(ERR, 4270 "PFC TC not in range for Tx pause requested:%d max:%d\n", 4271 pfc_queue_conf->tx_pause.tc, tc_max); 4272 return -EINVAL; 4273 } 4274 } 4275 4276 return 0; 4277 } 4278 4279 int 4280 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, 4281 struct rte_eth_pfc_queue_info *pfc_queue_info) 4282 { 4283 struct rte_eth_dev *dev; 4284 int ret; 4285 4286 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4287 dev = &rte_eth_devices[port_id]; 4288 4289 if (pfc_queue_info == NULL) { 4290 RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n", 4291 port_id); 4292 return -EINVAL; 4293 } 4294 4295 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL) 4296 return -ENOTSUP; 4297 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 4298 (dev, pfc_queue_info)); 4299 4300 rte_ethdev_trace_priority_flow_ctrl_queue_info_get(port_id, 4301 pfc_queue_info, ret); 4302 4303 return ret; 4304 } 4305 4306 int 4307 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, 4308 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4309 { 4310 struct rte_eth_pfc_queue_info pfc_info; 4311 struct rte_eth_dev_info dev_info; 4312 struct rte_eth_dev *dev; 4313 int ret; 4314 4315 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4316 dev = &rte_eth_devices[port_id]; 4317 4318 if (pfc_queue_conf == NULL) { 4319 RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n", 4320 port_id); 4321 return -EINVAL; 4322 } 4323 4324 ret = rte_eth_dev_info_get(port_id, &dev_info); 4325 if (ret != 0) 4326 return ret; 4327 4328 ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info); 4329 if (ret != 0) 4330 return ret; 4331 4332 if (pfc_info.tc_max == 0) { 4333 RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n", 4334 port_id); 4335 return -ENOTSUP; 4336 } 4337 4338 /* Check requested mode supported or not */ 4339 if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE && 4340 pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) { 4341 RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n", 4342 port_id); 4343 return -EINVAL; 4344 } 4345 4346 if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE && 4347 pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) { 4348 RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n", 4349 port_id); 4350 return -EINVAL; 4351 } 4352 4353 /* Validate Rx pause parameters */ 4354 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4355 pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) { 4356 ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max, 4357 pfc_queue_conf); 4358 if (ret != 0) 4359 return ret; 4360 } 4361 4362 /* Validate Tx pause parameters */ 4363 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4364 pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) { 4365 ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max, 4366 pfc_queue_conf); 4367 if (ret != 0) 4368 return ret; 4369 } 4370 4371 if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL) 4372 return -ENOTSUP; 4373 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_config) 4374 (dev, pfc_queue_conf)); 4375 4376 rte_ethdev_trace_priority_flow_ctrl_queue_configure(port_id, 4377 pfc_queue_conf, ret); 4378 4379 return ret; 4380 } 4381 4382 static int 4383 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 4384 uint16_t reta_size) 4385 { 4386 uint16_t i, num; 4387 4388 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 4389 for (i = 0; i < num; i++) { 4390 if (reta_conf[i].mask) 4391 return 0; 4392 } 4393 4394 return -EINVAL; 4395 } 4396 4397 static int 4398 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 4399 uint16_t reta_size, 4400 uint16_t max_rxq) 4401 { 4402 uint16_t i, idx, shift; 4403 4404 if (max_rxq == 0) { 4405 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4406 return -EINVAL; 4407 } 4408 4409 for (i = 0; i < reta_size; i++) { 4410 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4411 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4412 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 4413 (reta_conf[idx].reta[shift] >= max_rxq)) { 4414 RTE_ETHDEV_LOG(ERR, 4415 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4416 idx, shift, 4417 reta_conf[idx].reta[shift], max_rxq); 4418 return -EINVAL; 4419 } 4420 } 4421 4422 return 0; 4423 } 4424 4425 int 4426 rte_eth_dev_rss_reta_update(uint16_t port_id, 4427 struct rte_eth_rss_reta_entry64 *reta_conf, 4428 uint16_t reta_size) 4429 { 4430 enum rte_eth_rx_mq_mode mq_mode; 4431 struct rte_eth_dev *dev; 4432 int ret; 4433 4434 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4435 dev = &rte_eth_devices[port_id]; 4436 4437 if (reta_conf == NULL) { 4438 RTE_ETHDEV_LOG(ERR, 4439 "Cannot update ethdev port %u RSS RETA to NULL\n", 4440 port_id); 4441 return -EINVAL; 4442 } 4443 4444 if (reta_size == 0) { 4445 RTE_ETHDEV_LOG(ERR, 4446 "Cannot update ethdev port %u RSS RETA with zero size\n", 4447 port_id); 4448 return -EINVAL; 4449 } 4450 4451 /* Check mask bits */ 4452 ret = eth_check_reta_mask(reta_conf, reta_size); 4453 if (ret < 0) 4454 return ret; 4455 4456 /* Check entry value */ 4457 ret = eth_check_reta_entry(reta_conf, reta_size, 4458 dev->data->nb_rx_queues); 4459 if (ret < 0) 4460 return ret; 4461 4462 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4463 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4464 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4465 return -ENOTSUP; 4466 } 4467 4468 if (*dev->dev_ops->reta_update == NULL) 4469 return -ENOTSUP; 4470 ret = eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4471 reta_size)); 4472 4473 rte_ethdev_trace_rss_reta_update(port_id, reta_conf, reta_size, ret); 4474 4475 return ret; 4476 } 4477 4478 int 4479 rte_eth_dev_rss_reta_query(uint16_t port_id, 4480 struct rte_eth_rss_reta_entry64 *reta_conf, 4481 uint16_t reta_size) 4482 { 4483 struct rte_eth_dev *dev; 4484 int ret; 4485 4486 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4487 dev = &rte_eth_devices[port_id]; 4488 4489 if (reta_conf == NULL) { 4490 RTE_ETHDEV_LOG(ERR, 4491 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4492 port_id); 4493 return -EINVAL; 4494 } 4495 4496 /* Check mask bits */ 4497 ret = eth_check_reta_mask(reta_conf, reta_size); 4498 if (ret < 0) 4499 return ret; 4500 4501 if (*dev->dev_ops->reta_query == NULL) 4502 return -ENOTSUP; 4503 ret = eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4504 reta_size)); 4505 4506 rte_ethdev_trace_rss_reta_query(port_id, reta_conf, reta_size, ret); 4507 4508 return ret; 4509 } 4510 4511 int 4512 rte_eth_dev_rss_hash_update(uint16_t port_id, 4513 struct rte_eth_rss_conf *rss_conf) 4514 { 4515 struct rte_eth_dev *dev; 4516 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4517 enum rte_eth_rx_mq_mode mq_mode; 4518 int ret; 4519 4520 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4521 dev = &rte_eth_devices[port_id]; 4522 4523 if (rss_conf == NULL) { 4524 RTE_ETHDEV_LOG(ERR, 4525 "Cannot update ethdev port %u RSS hash from NULL config\n", 4526 port_id); 4527 return -EINVAL; 4528 } 4529 4530 ret = rte_eth_dev_info_get(port_id, &dev_info); 4531 if (ret != 0) 4532 return ret; 4533 4534 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4535 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4536 dev_info.flow_type_rss_offloads) { 4537 RTE_ETHDEV_LOG(ERR, 4538 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4539 port_id, rss_conf->rss_hf, 4540 dev_info.flow_type_rss_offloads); 4541 return -EINVAL; 4542 } 4543 4544 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4545 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4546 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4547 return -ENOTSUP; 4548 } 4549 4550 if (*dev->dev_ops->rss_hash_update == NULL) 4551 return -ENOTSUP; 4552 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4553 rss_conf)); 4554 4555 rte_ethdev_trace_rss_hash_update(port_id, rss_conf, ret); 4556 4557 return ret; 4558 } 4559 4560 int 4561 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4562 struct rte_eth_rss_conf *rss_conf) 4563 { 4564 struct rte_eth_dev *dev; 4565 int ret; 4566 4567 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4568 dev = &rte_eth_devices[port_id]; 4569 4570 if (rss_conf == NULL) { 4571 RTE_ETHDEV_LOG(ERR, 4572 "Cannot get ethdev port %u RSS hash config to NULL\n", 4573 port_id); 4574 return -EINVAL; 4575 } 4576 4577 if (*dev->dev_ops->rss_hash_conf_get == NULL) 4578 return -ENOTSUP; 4579 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4580 rss_conf)); 4581 4582 rte_ethdev_trace_rss_hash_conf_get(port_id, rss_conf, ret); 4583 4584 return ret; 4585 } 4586 4587 int 4588 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4589 struct rte_eth_udp_tunnel *udp_tunnel) 4590 { 4591 struct rte_eth_dev *dev; 4592 int ret; 4593 4594 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4595 dev = &rte_eth_devices[port_id]; 4596 4597 if (udp_tunnel == NULL) { 4598 RTE_ETHDEV_LOG(ERR, 4599 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4600 port_id); 4601 return -EINVAL; 4602 } 4603 4604 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4605 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4606 return -EINVAL; 4607 } 4608 4609 if (*dev->dev_ops->udp_tunnel_port_add == NULL) 4610 return -ENOTSUP; 4611 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4612 udp_tunnel)); 4613 4614 rte_ethdev_trace_udp_tunnel_port_add(port_id, udp_tunnel, ret); 4615 4616 return ret; 4617 } 4618 4619 int 4620 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4621 struct rte_eth_udp_tunnel *udp_tunnel) 4622 { 4623 struct rte_eth_dev *dev; 4624 int ret; 4625 4626 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4627 dev = &rte_eth_devices[port_id]; 4628 4629 if (udp_tunnel == NULL) { 4630 RTE_ETHDEV_LOG(ERR, 4631 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4632 port_id); 4633 return -EINVAL; 4634 } 4635 4636 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4637 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4638 return -EINVAL; 4639 } 4640 4641 if (*dev->dev_ops->udp_tunnel_port_del == NULL) 4642 return -ENOTSUP; 4643 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4644 udp_tunnel)); 4645 4646 rte_ethdev_trace_udp_tunnel_port_delete(port_id, udp_tunnel, ret); 4647 4648 return ret; 4649 } 4650 4651 int 4652 rte_eth_led_on(uint16_t port_id) 4653 { 4654 struct rte_eth_dev *dev; 4655 int ret; 4656 4657 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4658 dev = &rte_eth_devices[port_id]; 4659 4660 if (*dev->dev_ops->dev_led_on == NULL) 4661 return -ENOTSUP; 4662 ret = eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4663 4664 rte_eth_trace_led_on(port_id, ret); 4665 4666 return ret; 4667 } 4668 4669 int 4670 rte_eth_led_off(uint16_t port_id) 4671 { 4672 struct rte_eth_dev *dev; 4673 int ret; 4674 4675 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4676 dev = &rte_eth_devices[port_id]; 4677 4678 if (*dev->dev_ops->dev_led_off == NULL) 4679 return -ENOTSUP; 4680 ret = eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4681 4682 rte_eth_trace_led_off(port_id, ret); 4683 4684 return ret; 4685 } 4686 4687 int 4688 rte_eth_fec_get_capability(uint16_t port_id, 4689 struct rte_eth_fec_capa *speed_fec_capa, 4690 unsigned int num) 4691 { 4692 struct rte_eth_dev *dev; 4693 int ret; 4694 4695 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4696 dev = &rte_eth_devices[port_id]; 4697 4698 if (speed_fec_capa == NULL && num > 0) { 4699 RTE_ETHDEV_LOG(ERR, 4700 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4701 port_id); 4702 return -EINVAL; 4703 } 4704 4705 if (*dev->dev_ops->fec_get_capability == NULL) 4706 return -ENOTSUP; 4707 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4708 4709 rte_eth_trace_fec_get_capability(port_id, speed_fec_capa, num, ret); 4710 4711 return ret; 4712 } 4713 4714 int 4715 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4716 { 4717 struct rte_eth_dev *dev; 4718 int ret; 4719 4720 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4721 dev = &rte_eth_devices[port_id]; 4722 4723 if (fec_capa == NULL) { 4724 RTE_ETHDEV_LOG(ERR, 4725 "Cannot get ethdev port %u current FEC mode to NULL\n", 4726 port_id); 4727 return -EINVAL; 4728 } 4729 4730 if (*dev->dev_ops->fec_get == NULL) 4731 return -ENOTSUP; 4732 ret = eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4733 4734 rte_eth_trace_fec_get(port_id, fec_capa, ret); 4735 4736 return ret; 4737 } 4738 4739 int 4740 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4741 { 4742 struct rte_eth_dev *dev; 4743 int ret; 4744 4745 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4746 dev = &rte_eth_devices[port_id]; 4747 4748 if (*dev->dev_ops->fec_set == NULL) 4749 return -ENOTSUP; 4750 ret = eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4751 4752 rte_eth_trace_fec_set(port_id, fec_capa, ret); 4753 4754 return ret; 4755 } 4756 4757 /* 4758 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4759 * an empty spot. 4760 */ 4761 static int 4762 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4763 { 4764 struct rte_eth_dev_info dev_info; 4765 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4766 unsigned i; 4767 int ret; 4768 4769 ret = rte_eth_dev_info_get(port_id, &dev_info); 4770 if (ret != 0) 4771 return -1; 4772 4773 for (i = 0; i < dev_info.max_mac_addrs; i++) 4774 if (memcmp(addr, &dev->data->mac_addrs[i], 4775 RTE_ETHER_ADDR_LEN) == 0) 4776 return i; 4777 4778 return -1; 4779 } 4780 4781 static const struct rte_ether_addr null_mac_addr; 4782 4783 int 4784 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4785 uint32_t pool) 4786 { 4787 struct rte_eth_dev *dev; 4788 int index; 4789 uint64_t pool_mask; 4790 int ret; 4791 4792 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4793 dev = &rte_eth_devices[port_id]; 4794 4795 if (addr == NULL) { 4796 RTE_ETHDEV_LOG(ERR, 4797 "Cannot add ethdev port %u MAC address from NULL address\n", 4798 port_id); 4799 return -EINVAL; 4800 } 4801 4802 if (*dev->dev_ops->mac_addr_add == NULL) 4803 return -ENOTSUP; 4804 4805 if (rte_is_zero_ether_addr(addr)) { 4806 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4807 port_id); 4808 return -EINVAL; 4809 } 4810 if (pool >= RTE_ETH_64_POOLS) { 4811 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 4812 return -EINVAL; 4813 } 4814 4815 index = eth_dev_get_mac_addr_index(port_id, addr); 4816 if (index < 0) { 4817 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4818 if (index < 0) { 4819 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4820 port_id); 4821 return -ENOSPC; 4822 } 4823 } else { 4824 pool_mask = dev->data->mac_pool_sel[index]; 4825 4826 /* Check if both MAC address and pool is already there, and do nothing */ 4827 if (pool_mask & RTE_BIT64(pool)) 4828 return 0; 4829 } 4830 4831 /* Update NIC */ 4832 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4833 4834 if (ret == 0) { 4835 /* Update address in NIC data structure */ 4836 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4837 4838 /* Update pool bitmap in NIC data structure */ 4839 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4840 } 4841 4842 ret = eth_err(port_id, ret); 4843 4844 rte_ethdev_trace_mac_addr_add(port_id, addr, pool, ret); 4845 4846 return ret; 4847 } 4848 4849 int 4850 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4851 { 4852 struct rte_eth_dev *dev; 4853 int index; 4854 4855 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4856 dev = &rte_eth_devices[port_id]; 4857 4858 if (addr == NULL) { 4859 RTE_ETHDEV_LOG(ERR, 4860 "Cannot remove ethdev port %u MAC address from NULL address\n", 4861 port_id); 4862 return -EINVAL; 4863 } 4864 4865 if (*dev->dev_ops->mac_addr_remove == NULL) 4866 return -ENOTSUP; 4867 4868 index = eth_dev_get_mac_addr_index(port_id, addr); 4869 if (index == 0) { 4870 RTE_ETHDEV_LOG(ERR, 4871 "Port %u: Cannot remove default MAC address\n", 4872 port_id); 4873 return -EADDRINUSE; 4874 } else if (index < 0) 4875 return 0; /* Do nothing if address wasn't found */ 4876 4877 /* Update NIC */ 4878 (*dev->dev_ops->mac_addr_remove)(dev, index); 4879 4880 /* Update address in NIC data structure */ 4881 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4882 4883 /* reset pool bitmap */ 4884 dev->data->mac_pool_sel[index] = 0; 4885 4886 rte_ethdev_trace_mac_addr_remove(port_id, addr); 4887 4888 return 0; 4889 } 4890 4891 int 4892 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4893 { 4894 struct rte_eth_dev *dev; 4895 int ret; 4896 4897 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4898 dev = &rte_eth_devices[port_id]; 4899 4900 if (addr == NULL) { 4901 RTE_ETHDEV_LOG(ERR, 4902 "Cannot set ethdev port %u default MAC address from NULL address\n", 4903 port_id); 4904 return -EINVAL; 4905 } 4906 4907 if (!rte_is_valid_assigned_ether_addr(addr)) 4908 return -EINVAL; 4909 4910 if (*dev->dev_ops->mac_addr_set == NULL) 4911 return -ENOTSUP; 4912 4913 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4914 if (ret < 0) 4915 return ret; 4916 4917 /* Update default address in NIC data structure */ 4918 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4919 4920 rte_ethdev_trace_default_mac_addr_set(port_id, addr); 4921 4922 return 0; 4923 } 4924 4925 4926 /* 4927 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4928 * an empty spot. 4929 */ 4930 static int 4931 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4932 const struct rte_ether_addr *addr) 4933 { 4934 struct rte_eth_dev_info dev_info; 4935 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4936 unsigned i; 4937 int ret; 4938 4939 ret = rte_eth_dev_info_get(port_id, &dev_info); 4940 if (ret != 0) 4941 return -1; 4942 4943 if (!dev->data->hash_mac_addrs) 4944 return -1; 4945 4946 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4947 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4948 RTE_ETHER_ADDR_LEN) == 0) 4949 return i; 4950 4951 return -1; 4952 } 4953 4954 int 4955 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4956 uint8_t on) 4957 { 4958 int index; 4959 int ret; 4960 struct rte_eth_dev *dev; 4961 4962 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4963 dev = &rte_eth_devices[port_id]; 4964 4965 if (addr == NULL) { 4966 RTE_ETHDEV_LOG(ERR, 4967 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4968 port_id); 4969 return -EINVAL; 4970 } 4971 4972 if (rte_is_zero_ether_addr(addr)) { 4973 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4974 port_id); 4975 return -EINVAL; 4976 } 4977 4978 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4979 /* Check if it's already there, and do nothing */ 4980 if ((index >= 0) && on) 4981 return 0; 4982 4983 if (index < 0) { 4984 if (!on) { 4985 RTE_ETHDEV_LOG(ERR, 4986 "Port %u: the MAC address was not set in UTA\n", 4987 port_id); 4988 return -EINVAL; 4989 } 4990 4991 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4992 if (index < 0) { 4993 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4994 port_id); 4995 return -ENOSPC; 4996 } 4997 } 4998 4999 if (*dev->dev_ops->uc_hash_table_set == NULL) 5000 return -ENOTSUP; 5001 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 5002 if (ret == 0) { 5003 /* Update address in NIC data structure */ 5004 if (on) 5005 rte_ether_addr_copy(addr, 5006 &dev->data->hash_mac_addrs[index]); 5007 else 5008 rte_ether_addr_copy(&null_mac_addr, 5009 &dev->data->hash_mac_addrs[index]); 5010 } 5011 5012 ret = eth_err(port_id, ret); 5013 5014 rte_ethdev_trace_uc_hash_table_set(port_id, on, ret); 5015 5016 return ret; 5017 } 5018 5019 int 5020 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 5021 { 5022 struct rte_eth_dev *dev; 5023 int ret; 5024 5025 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5026 dev = &rte_eth_devices[port_id]; 5027 5028 if (*dev->dev_ops->uc_all_hash_table_set == NULL) 5029 return -ENOTSUP; 5030 ret = eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, on)); 5031 5032 rte_ethdev_trace_uc_all_hash_table_set(port_id, on, ret); 5033 5034 return ret; 5035 } 5036 5037 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 5038 uint32_t tx_rate) 5039 { 5040 struct rte_eth_dev *dev; 5041 struct rte_eth_dev_info dev_info; 5042 struct rte_eth_link link; 5043 int ret; 5044 5045 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5046 dev = &rte_eth_devices[port_id]; 5047 5048 ret = rte_eth_dev_info_get(port_id, &dev_info); 5049 if (ret != 0) 5050 return ret; 5051 5052 link = dev->data->dev_link; 5053 5054 if (queue_idx > dev_info.max_tx_queues) { 5055 RTE_ETHDEV_LOG(ERR, 5056 "Set queue rate limit:port %u: invalid queue ID=%u\n", 5057 port_id, queue_idx); 5058 return -EINVAL; 5059 } 5060 5061 if (tx_rate > link.link_speed) { 5062 RTE_ETHDEV_LOG(ERR, 5063 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 5064 tx_rate, link.link_speed); 5065 return -EINVAL; 5066 } 5067 5068 if (*dev->dev_ops->set_queue_rate_limit == NULL) 5069 return -ENOTSUP; 5070 ret = eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 5071 queue_idx, tx_rate)); 5072 5073 rte_eth_trace_set_queue_rate_limit(port_id, queue_idx, tx_rate, ret); 5074 5075 return ret; 5076 } 5077 5078 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, 5079 uint8_t avail_thresh) 5080 { 5081 struct rte_eth_dev *dev; 5082 int ret; 5083 5084 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5085 dev = &rte_eth_devices[port_id]; 5086 5087 if (queue_id > dev->data->nb_rx_queues) { 5088 RTE_ETHDEV_LOG(ERR, 5089 "Set queue avail thresh: port %u: invalid queue ID=%u.\n", 5090 port_id, queue_id); 5091 return -EINVAL; 5092 } 5093 5094 if (avail_thresh > 99) { 5095 RTE_ETHDEV_LOG(ERR, 5096 "Set queue avail thresh: port %u: threshold should be <= 99.\n", 5097 port_id); 5098 return -EINVAL; 5099 } 5100 if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL) 5101 return -ENOTSUP; 5102 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev, 5103 queue_id, avail_thresh)); 5104 5105 rte_eth_trace_rx_avail_thresh_set(port_id, queue_id, avail_thresh, ret); 5106 5107 return ret; 5108 } 5109 5110 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, 5111 uint8_t *avail_thresh) 5112 { 5113 struct rte_eth_dev *dev; 5114 int ret; 5115 5116 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5117 dev = &rte_eth_devices[port_id]; 5118 5119 if (queue_id == NULL) 5120 return -EINVAL; 5121 if (*queue_id >= dev->data->nb_rx_queues) 5122 *queue_id = 0; 5123 5124 if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL) 5125 return -ENOTSUP; 5126 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev, 5127 queue_id, avail_thresh)); 5128 5129 rte_eth_trace_rx_avail_thresh_query(port_id, *queue_id, ret); 5130 5131 return ret; 5132 } 5133 5134 RTE_INIT(eth_dev_init_fp_ops) 5135 { 5136 uint32_t i; 5137 5138 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 5139 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 5140 } 5141 5142 RTE_INIT(eth_dev_init_cb_lists) 5143 { 5144 uint16_t i; 5145 5146 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 5147 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 5148 } 5149 5150 int 5151 rte_eth_dev_callback_register(uint16_t port_id, 5152 enum rte_eth_event_type event, 5153 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5154 { 5155 struct rte_eth_dev *dev; 5156 struct rte_eth_dev_callback *user_cb; 5157 uint16_t next_port; 5158 uint16_t last_port; 5159 5160 if (cb_fn == NULL) { 5161 RTE_ETHDEV_LOG(ERR, 5162 "Cannot register ethdev port %u callback from NULL\n", 5163 port_id); 5164 return -EINVAL; 5165 } 5166 5167 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5168 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 5169 return -EINVAL; 5170 } 5171 5172 if (port_id == RTE_ETH_ALL) { 5173 next_port = 0; 5174 last_port = RTE_MAX_ETHPORTS - 1; 5175 } else { 5176 next_port = last_port = port_id; 5177 } 5178 5179 rte_spinlock_lock(ð_dev_cb_lock); 5180 5181 do { 5182 dev = &rte_eth_devices[next_port]; 5183 5184 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 5185 if (user_cb->cb_fn == cb_fn && 5186 user_cb->cb_arg == cb_arg && 5187 user_cb->event == event) { 5188 break; 5189 } 5190 } 5191 5192 /* create a new callback. */ 5193 if (user_cb == NULL) { 5194 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 5195 sizeof(struct rte_eth_dev_callback), 0); 5196 if (user_cb != NULL) { 5197 user_cb->cb_fn = cb_fn; 5198 user_cb->cb_arg = cb_arg; 5199 user_cb->event = event; 5200 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 5201 user_cb, next); 5202 } else { 5203 rte_spinlock_unlock(ð_dev_cb_lock); 5204 rte_eth_dev_callback_unregister(port_id, event, 5205 cb_fn, cb_arg); 5206 return -ENOMEM; 5207 } 5208 5209 } 5210 } while (++next_port <= last_port); 5211 5212 rte_spinlock_unlock(ð_dev_cb_lock); 5213 5214 rte_ethdev_trace_callback_register(port_id, event, cb_fn, cb_arg); 5215 5216 return 0; 5217 } 5218 5219 int 5220 rte_eth_dev_callback_unregister(uint16_t port_id, 5221 enum rte_eth_event_type event, 5222 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5223 { 5224 int ret; 5225 struct rte_eth_dev *dev; 5226 struct rte_eth_dev_callback *cb, *next; 5227 uint16_t next_port; 5228 uint16_t last_port; 5229 5230 if (cb_fn == NULL) { 5231 RTE_ETHDEV_LOG(ERR, 5232 "Cannot unregister ethdev port %u callback from NULL\n", 5233 port_id); 5234 return -EINVAL; 5235 } 5236 5237 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5238 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 5239 return -EINVAL; 5240 } 5241 5242 if (port_id == RTE_ETH_ALL) { 5243 next_port = 0; 5244 last_port = RTE_MAX_ETHPORTS - 1; 5245 } else { 5246 next_port = last_port = port_id; 5247 } 5248 5249 rte_spinlock_lock(ð_dev_cb_lock); 5250 5251 do { 5252 dev = &rte_eth_devices[next_port]; 5253 ret = 0; 5254 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 5255 cb = next) { 5256 5257 next = TAILQ_NEXT(cb, next); 5258 5259 if (cb->cb_fn != cb_fn || cb->event != event || 5260 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 5261 continue; 5262 5263 /* 5264 * if this callback is not executing right now, 5265 * then remove it. 5266 */ 5267 if (cb->active == 0) { 5268 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 5269 rte_free(cb); 5270 } else { 5271 ret = -EAGAIN; 5272 } 5273 } 5274 } while (++next_port <= last_port); 5275 5276 rte_spinlock_unlock(ð_dev_cb_lock); 5277 5278 rte_ethdev_trace_callback_unregister(port_id, event, cb_fn, cb_arg, 5279 ret); 5280 5281 return ret; 5282 } 5283 5284 int 5285 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 5286 { 5287 uint32_t vec; 5288 struct rte_eth_dev *dev; 5289 struct rte_intr_handle *intr_handle; 5290 uint16_t qid; 5291 int rc; 5292 5293 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5294 dev = &rte_eth_devices[port_id]; 5295 5296 if (!dev->intr_handle) { 5297 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5298 return -ENOTSUP; 5299 } 5300 5301 intr_handle = dev->intr_handle; 5302 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5303 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5304 return -EPERM; 5305 } 5306 5307 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 5308 vec = rte_intr_vec_list_index_get(intr_handle, qid); 5309 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5310 5311 rte_ethdev_trace_rx_intr_ctl(port_id, qid, epfd, op, data, rc); 5312 5313 if (rc && rc != -EEXIST) { 5314 RTE_ETHDEV_LOG(ERR, 5315 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5316 port_id, qid, op, epfd, vec); 5317 } 5318 } 5319 5320 return 0; 5321 } 5322 5323 int 5324 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 5325 { 5326 struct rte_intr_handle *intr_handle; 5327 struct rte_eth_dev *dev; 5328 unsigned int efd_idx; 5329 uint32_t vec; 5330 int fd; 5331 5332 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 5333 dev = &rte_eth_devices[port_id]; 5334 5335 if (queue_id >= dev->data->nb_rx_queues) { 5336 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5337 return -1; 5338 } 5339 5340 if (!dev->intr_handle) { 5341 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5342 return -1; 5343 } 5344 5345 intr_handle = dev->intr_handle; 5346 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5347 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5348 return -1; 5349 } 5350 5351 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5352 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 5353 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 5354 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 5355 5356 rte_ethdev_trace_rx_intr_ctl_q_get_fd(port_id, queue_id, fd); 5357 5358 return fd; 5359 } 5360 5361 int 5362 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 5363 int epfd, int op, void *data) 5364 { 5365 uint32_t vec; 5366 struct rte_eth_dev *dev; 5367 struct rte_intr_handle *intr_handle; 5368 int rc; 5369 5370 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5371 dev = &rte_eth_devices[port_id]; 5372 5373 if (queue_id >= dev->data->nb_rx_queues) { 5374 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5375 return -EINVAL; 5376 } 5377 5378 if (!dev->intr_handle) { 5379 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5380 return -ENOTSUP; 5381 } 5382 5383 intr_handle = dev->intr_handle; 5384 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5385 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5386 return -EPERM; 5387 } 5388 5389 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5390 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5391 5392 rte_ethdev_trace_rx_intr_ctl_q(port_id, queue_id, epfd, op, data, rc); 5393 5394 if (rc && rc != -EEXIST) { 5395 RTE_ETHDEV_LOG(ERR, 5396 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5397 port_id, queue_id, op, epfd, vec); 5398 return rc; 5399 } 5400 5401 return 0; 5402 } 5403 5404 int 5405 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5406 uint16_t queue_id) 5407 { 5408 struct rte_eth_dev *dev; 5409 int ret; 5410 5411 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5412 dev = &rte_eth_devices[port_id]; 5413 5414 ret = eth_dev_validate_rx_queue(dev, queue_id); 5415 if (ret != 0) 5416 return ret; 5417 5418 if (*dev->dev_ops->rx_queue_intr_enable == NULL) 5419 return -ENOTSUP; 5420 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5421 5422 rte_ethdev_trace_rx_intr_enable(port_id, queue_id, ret); 5423 5424 return ret; 5425 } 5426 5427 int 5428 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5429 uint16_t queue_id) 5430 { 5431 struct rte_eth_dev *dev; 5432 int ret; 5433 5434 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5435 dev = &rte_eth_devices[port_id]; 5436 5437 ret = eth_dev_validate_rx_queue(dev, queue_id); 5438 if (ret != 0) 5439 return ret; 5440 5441 if (*dev->dev_ops->rx_queue_intr_disable == NULL) 5442 return -ENOTSUP; 5443 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5444 5445 rte_ethdev_trace_rx_intr_disable(port_id, queue_id, ret); 5446 5447 return ret; 5448 } 5449 5450 5451 const struct rte_eth_rxtx_callback * 5452 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5453 rte_rx_callback_fn fn, void *user_param) 5454 { 5455 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5456 rte_errno = ENOTSUP; 5457 return NULL; 5458 #endif 5459 struct rte_eth_dev *dev; 5460 5461 /* check input parameters */ 5462 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5463 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5464 rte_errno = EINVAL; 5465 return NULL; 5466 } 5467 dev = &rte_eth_devices[port_id]; 5468 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5469 rte_errno = EINVAL; 5470 return NULL; 5471 } 5472 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5473 5474 if (cb == NULL) { 5475 rte_errno = ENOMEM; 5476 return NULL; 5477 } 5478 5479 cb->fn.rx = fn; 5480 cb->param = user_param; 5481 5482 rte_spinlock_lock(ð_dev_rx_cb_lock); 5483 /* Add the callbacks in fifo order. */ 5484 struct rte_eth_rxtx_callback *tail = 5485 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5486 5487 if (!tail) { 5488 /* Stores to cb->fn and cb->param should complete before 5489 * cb is visible to data plane. 5490 */ 5491 __atomic_store_n( 5492 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5493 cb, __ATOMIC_RELEASE); 5494 5495 } else { 5496 while (tail->next) 5497 tail = tail->next; 5498 /* Stores to cb->fn and cb->param should complete before 5499 * cb is visible to data plane. 5500 */ 5501 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5502 } 5503 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5504 5505 rte_eth_trace_add_rx_callback(port_id, queue_id, fn, user_param, cb); 5506 5507 return cb; 5508 } 5509 5510 const struct rte_eth_rxtx_callback * 5511 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5512 rte_rx_callback_fn fn, void *user_param) 5513 { 5514 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5515 rte_errno = ENOTSUP; 5516 return NULL; 5517 #endif 5518 /* check input parameters */ 5519 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5520 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5521 rte_errno = EINVAL; 5522 return NULL; 5523 } 5524 5525 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5526 5527 if (cb == NULL) { 5528 rte_errno = ENOMEM; 5529 return NULL; 5530 } 5531 5532 cb->fn.rx = fn; 5533 cb->param = user_param; 5534 5535 rte_spinlock_lock(ð_dev_rx_cb_lock); 5536 /* Add the callbacks at first position */ 5537 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5538 /* Stores to cb->fn, cb->param and cb->next should complete before 5539 * cb is visible to data plane threads. 5540 */ 5541 __atomic_store_n( 5542 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5543 cb, __ATOMIC_RELEASE); 5544 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5545 5546 rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param, 5547 cb); 5548 5549 return cb; 5550 } 5551 5552 const struct rte_eth_rxtx_callback * 5553 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5554 rte_tx_callback_fn fn, void *user_param) 5555 { 5556 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5557 rte_errno = ENOTSUP; 5558 return NULL; 5559 #endif 5560 struct rte_eth_dev *dev; 5561 5562 /* check input parameters */ 5563 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5564 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5565 rte_errno = EINVAL; 5566 return NULL; 5567 } 5568 5569 dev = &rte_eth_devices[port_id]; 5570 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5571 rte_errno = EINVAL; 5572 return NULL; 5573 } 5574 5575 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5576 5577 if (cb == NULL) { 5578 rte_errno = ENOMEM; 5579 return NULL; 5580 } 5581 5582 cb->fn.tx = fn; 5583 cb->param = user_param; 5584 5585 rte_spinlock_lock(ð_dev_tx_cb_lock); 5586 /* Add the callbacks in fifo order. */ 5587 struct rte_eth_rxtx_callback *tail = 5588 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5589 5590 if (!tail) { 5591 /* Stores to cb->fn and cb->param should complete before 5592 * cb is visible to data plane. 5593 */ 5594 __atomic_store_n( 5595 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5596 cb, __ATOMIC_RELEASE); 5597 5598 } else { 5599 while (tail->next) 5600 tail = tail->next; 5601 /* Stores to cb->fn and cb->param should complete before 5602 * cb is visible to data plane. 5603 */ 5604 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5605 } 5606 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5607 5608 rte_eth_trace_add_tx_callback(port_id, queue_id, fn, user_param, cb); 5609 5610 return cb; 5611 } 5612 5613 int 5614 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5615 const struct rte_eth_rxtx_callback *user_cb) 5616 { 5617 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5618 return -ENOTSUP; 5619 #endif 5620 /* Check input parameters. */ 5621 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5622 if (user_cb == NULL || 5623 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5624 return -EINVAL; 5625 5626 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5627 struct rte_eth_rxtx_callback *cb; 5628 struct rte_eth_rxtx_callback **prev_cb; 5629 int ret = -EINVAL; 5630 5631 rte_spinlock_lock(ð_dev_rx_cb_lock); 5632 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5633 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5634 cb = *prev_cb; 5635 if (cb == user_cb) { 5636 /* Remove the user cb from the callback list. */ 5637 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5638 ret = 0; 5639 break; 5640 } 5641 } 5642 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5643 5644 rte_eth_trace_remove_rx_callback(port_id, queue_id, user_cb, ret); 5645 5646 return ret; 5647 } 5648 5649 int 5650 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5651 const struct rte_eth_rxtx_callback *user_cb) 5652 { 5653 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5654 return -ENOTSUP; 5655 #endif 5656 /* Check input parameters. */ 5657 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5658 if (user_cb == NULL || 5659 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5660 return -EINVAL; 5661 5662 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5663 int ret = -EINVAL; 5664 struct rte_eth_rxtx_callback *cb; 5665 struct rte_eth_rxtx_callback **prev_cb; 5666 5667 rte_spinlock_lock(ð_dev_tx_cb_lock); 5668 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5669 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5670 cb = *prev_cb; 5671 if (cb == user_cb) { 5672 /* Remove the user cb from the callback list. */ 5673 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5674 ret = 0; 5675 break; 5676 } 5677 } 5678 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5679 5680 rte_eth_trace_remove_tx_callback(port_id, queue_id, user_cb, ret); 5681 5682 return ret; 5683 } 5684 5685 int 5686 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5687 struct rte_eth_rxq_info *qinfo) 5688 { 5689 struct rte_eth_dev *dev; 5690 5691 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5692 dev = &rte_eth_devices[port_id]; 5693 5694 if (queue_id >= dev->data->nb_rx_queues) { 5695 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5696 return -EINVAL; 5697 } 5698 5699 if (qinfo == NULL) { 5700 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5701 port_id, queue_id); 5702 return -EINVAL; 5703 } 5704 5705 if (dev->data->rx_queues == NULL || 5706 dev->data->rx_queues[queue_id] == NULL) { 5707 RTE_ETHDEV_LOG(ERR, 5708 "Rx queue %"PRIu16" of device with port_id=%" 5709 PRIu16" has not been setup\n", 5710 queue_id, port_id); 5711 return -EINVAL; 5712 } 5713 5714 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5715 RTE_ETHDEV_LOG(INFO, 5716 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5717 queue_id, port_id); 5718 return -EINVAL; 5719 } 5720 5721 if (*dev->dev_ops->rxq_info_get == NULL) 5722 return -ENOTSUP; 5723 5724 memset(qinfo, 0, sizeof(*qinfo)); 5725 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5726 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5727 5728 rte_eth_trace_rx_queue_info_get(port_id, queue_id, qinfo); 5729 5730 return 0; 5731 } 5732 5733 int 5734 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5735 struct rte_eth_txq_info *qinfo) 5736 { 5737 struct rte_eth_dev *dev; 5738 5739 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5740 dev = &rte_eth_devices[port_id]; 5741 5742 if (queue_id >= dev->data->nb_tx_queues) { 5743 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5744 return -EINVAL; 5745 } 5746 5747 if (qinfo == NULL) { 5748 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5749 port_id, queue_id); 5750 return -EINVAL; 5751 } 5752 5753 if (dev->data->tx_queues == NULL || 5754 dev->data->tx_queues[queue_id] == NULL) { 5755 RTE_ETHDEV_LOG(ERR, 5756 "Tx queue %"PRIu16" of device with port_id=%" 5757 PRIu16" has not been setup\n", 5758 queue_id, port_id); 5759 return -EINVAL; 5760 } 5761 5762 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5763 RTE_ETHDEV_LOG(INFO, 5764 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5765 queue_id, port_id); 5766 return -EINVAL; 5767 } 5768 5769 if (*dev->dev_ops->txq_info_get == NULL) 5770 return -ENOTSUP; 5771 5772 memset(qinfo, 0, sizeof(*qinfo)); 5773 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5774 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5775 5776 rte_eth_trace_tx_queue_info_get(port_id, queue_id, qinfo); 5777 5778 return 0; 5779 } 5780 5781 int 5782 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5783 struct rte_eth_burst_mode *mode) 5784 { 5785 struct rte_eth_dev *dev; 5786 int ret; 5787 5788 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5789 dev = &rte_eth_devices[port_id]; 5790 5791 if (queue_id >= dev->data->nb_rx_queues) { 5792 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5793 return -EINVAL; 5794 } 5795 5796 if (mode == NULL) { 5797 RTE_ETHDEV_LOG(ERR, 5798 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5799 port_id, queue_id); 5800 return -EINVAL; 5801 } 5802 5803 if (*dev->dev_ops->rx_burst_mode_get == NULL) 5804 return -ENOTSUP; 5805 memset(mode, 0, sizeof(*mode)); 5806 ret = eth_err(port_id, 5807 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5808 5809 rte_eth_trace_rx_burst_mode_get(port_id, queue_id, mode, ret); 5810 5811 return ret; 5812 } 5813 5814 int 5815 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5816 struct rte_eth_burst_mode *mode) 5817 { 5818 struct rte_eth_dev *dev; 5819 int ret; 5820 5821 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5822 dev = &rte_eth_devices[port_id]; 5823 5824 if (queue_id >= dev->data->nb_tx_queues) { 5825 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5826 return -EINVAL; 5827 } 5828 5829 if (mode == NULL) { 5830 RTE_ETHDEV_LOG(ERR, 5831 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5832 port_id, queue_id); 5833 return -EINVAL; 5834 } 5835 5836 if (*dev->dev_ops->tx_burst_mode_get == NULL) 5837 return -ENOTSUP; 5838 memset(mode, 0, sizeof(*mode)); 5839 ret = eth_err(port_id, 5840 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5841 5842 rte_eth_trace_tx_burst_mode_get(port_id, queue_id, mode, ret); 5843 5844 return ret; 5845 } 5846 5847 int 5848 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5849 struct rte_power_monitor_cond *pmc) 5850 { 5851 struct rte_eth_dev *dev; 5852 int ret; 5853 5854 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5855 dev = &rte_eth_devices[port_id]; 5856 5857 if (queue_id >= dev->data->nb_rx_queues) { 5858 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5859 return -EINVAL; 5860 } 5861 5862 if (pmc == NULL) { 5863 RTE_ETHDEV_LOG(ERR, 5864 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5865 port_id, queue_id); 5866 return -EINVAL; 5867 } 5868 5869 if (*dev->dev_ops->get_monitor_addr == NULL) 5870 return -ENOTSUP; 5871 ret = eth_err(port_id, 5872 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5873 5874 rte_eth_trace_get_monitor_addr(port_id, queue_id, pmc, ret); 5875 5876 return ret; 5877 } 5878 5879 int 5880 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5881 struct rte_ether_addr *mc_addr_set, 5882 uint32_t nb_mc_addr) 5883 { 5884 struct rte_eth_dev *dev; 5885 int ret; 5886 5887 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5888 dev = &rte_eth_devices[port_id]; 5889 5890 if (*dev->dev_ops->set_mc_addr_list == NULL) 5891 return -ENOTSUP; 5892 ret = eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5893 mc_addr_set, nb_mc_addr)); 5894 5895 rte_ethdev_trace_set_mc_addr_list(port_id, mc_addr_set, nb_mc_addr, 5896 ret); 5897 5898 return ret; 5899 } 5900 5901 int 5902 rte_eth_timesync_enable(uint16_t port_id) 5903 { 5904 struct rte_eth_dev *dev; 5905 int ret; 5906 5907 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5908 dev = &rte_eth_devices[port_id]; 5909 5910 if (*dev->dev_ops->timesync_enable == NULL) 5911 return -ENOTSUP; 5912 ret = eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5913 5914 rte_eth_trace_timesync_enable(port_id, ret); 5915 5916 return ret; 5917 } 5918 5919 int 5920 rte_eth_timesync_disable(uint16_t port_id) 5921 { 5922 struct rte_eth_dev *dev; 5923 int ret; 5924 5925 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5926 dev = &rte_eth_devices[port_id]; 5927 5928 if (*dev->dev_ops->timesync_disable == NULL) 5929 return -ENOTSUP; 5930 ret = eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5931 5932 rte_eth_trace_timesync_disable(port_id, ret); 5933 5934 return ret; 5935 } 5936 5937 int 5938 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5939 uint32_t flags) 5940 { 5941 struct rte_eth_dev *dev; 5942 int ret; 5943 5944 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5945 dev = &rte_eth_devices[port_id]; 5946 5947 if (timestamp == NULL) { 5948 RTE_ETHDEV_LOG(ERR, 5949 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5950 port_id); 5951 return -EINVAL; 5952 } 5953 5954 if (*dev->dev_ops->timesync_read_rx_timestamp == NULL) 5955 return -ENOTSUP; 5956 5957 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5958 (dev, timestamp, flags)); 5959 5960 rte_eth_trace_timesync_read_rx_timestamp(port_id, timestamp, flags, 5961 ret); 5962 5963 return ret; 5964 } 5965 5966 int 5967 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5968 struct timespec *timestamp) 5969 { 5970 struct rte_eth_dev *dev; 5971 int ret; 5972 5973 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5974 dev = &rte_eth_devices[port_id]; 5975 5976 if (timestamp == NULL) { 5977 RTE_ETHDEV_LOG(ERR, 5978 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5979 port_id); 5980 return -EINVAL; 5981 } 5982 5983 if (*dev->dev_ops->timesync_read_tx_timestamp == NULL) 5984 return -ENOTSUP; 5985 5986 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5987 (dev, timestamp)); 5988 5989 rte_eth_trace_timesync_read_tx_timestamp(port_id, timestamp, ret); 5990 5991 return ret; 5992 5993 } 5994 5995 int 5996 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5997 { 5998 struct rte_eth_dev *dev; 5999 int ret; 6000 6001 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6002 dev = &rte_eth_devices[port_id]; 6003 6004 if (*dev->dev_ops->timesync_adjust_time == NULL) 6005 return -ENOTSUP; 6006 ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 6007 6008 rte_eth_trace_timesync_adjust_time(port_id, delta, ret); 6009 6010 return ret; 6011 } 6012 6013 int 6014 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 6015 { 6016 struct rte_eth_dev *dev; 6017 int ret; 6018 6019 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6020 dev = &rte_eth_devices[port_id]; 6021 6022 if (timestamp == NULL) { 6023 RTE_ETHDEV_LOG(ERR, 6024 "Cannot read ethdev port %u timesync time to NULL\n", 6025 port_id); 6026 return -EINVAL; 6027 } 6028 6029 if (*dev->dev_ops->timesync_read_time == NULL) 6030 return -ENOTSUP; 6031 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 6032 timestamp)); 6033 6034 rte_eth_trace_timesync_read_time(port_id, timestamp, ret); 6035 6036 return ret; 6037 } 6038 6039 int 6040 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 6041 { 6042 struct rte_eth_dev *dev; 6043 int ret; 6044 6045 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6046 dev = &rte_eth_devices[port_id]; 6047 6048 if (timestamp == NULL) { 6049 RTE_ETHDEV_LOG(ERR, 6050 "Cannot write ethdev port %u timesync from NULL time\n", 6051 port_id); 6052 return -EINVAL; 6053 } 6054 6055 if (*dev->dev_ops->timesync_write_time == NULL) 6056 return -ENOTSUP; 6057 ret = eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 6058 timestamp)); 6059 6060 rte_eth_trace_timesync_write_time(port_id, timestamp, ret); 6061 6062 return ret; 6063 } 6064 6065 int 6066 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 6067 { 6068 struct rte_eth_dev *dev; 6069 int ret; 6070 6071 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6072 dev = &rte_eth_devices[port_id]; 6073 6074 if (clock == NULL) { 6075 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 6076 port_id); 6077 return -EINVAL; 6078 } 6079 6080 if (*dev->dev_ops->read_clock == NULL) 6081 return -ENOTSUP; 6082 ret = eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 6083 6084 rte_eth_trace_read_clock(port_id, clock, ret); 6085 6086 return ret; 6087 } 6088 6089 int 6090 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 6091 { 6092 struct rte_eth_dev *dev; 6093 int ret; 6094 6095 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6096 dev = &rte_eth_devices[port_id]; 6097 6098 if (info == NULL) { 6099 RTE_ETHDEV_LOG(ERR, 6100 "Cannot get ethdev port %u register info to NULL\n", 6101 port_id); 6102 return -EINVAL; 6103 } 6104 6105 if (*dev->dev_ops->get_reg == NULL) 6106 return -ENOTSUP; 6107 ret = eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 6108 6109 rte_ethdev_trace_get_reg_info(port_id, info, ret); 6110 6111 return ret; 6112 } 6113 6114 int 6115 rte_eth_dev_get_eeprom_length(uint16_t port_id) 6116 { 6117 struct rte_eth_dev *dev; 6118 int ret; 6119 6120 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6121 dev = &rte_eth_devices[port_id]; 6122 6123 if (*dev->dev_ops->get_eeprom_length == NULL) 6124 return -ENOTSUP; 6125 ret = eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 6126 6127 rte_ethdev_trace_get_eeprom_length(port_id, ret); 6128 6129 return ret; 6130 } 6131 6132 int 6133 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 6134 { 6135 struct rte_eth_dev *dev; 6136 int ret; 6137 6138 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6139 dev = &rte_eth_devices[port_id]; 6140 6141 if (info == NULL) { 6142 RTE_ETHDEV_LOG(ERR, 6143 "Cannot get ethdev port %u EEPROM info to NULL\n", 6144 port_id); 6145 return -EINVAL; 6146 } 6147 6148 if (*dev->dev_ops->get_eeprom == NULL) 6149 return -ENOTSUP; 6150 ret = eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 6151 6152 rte_ethdev_trace_get_eeprom(port_id, info, ret); 6153 6154 return ret; 6155 } 6156 6157 int 6158 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 6159 { 6160 struct rte_eth_dev *dev; 6161 int ret; 6162 6163 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6164 dev = &rte_eth_devices[port_id]; 6165 6166 if (info == NULL) { 6167 RTE_ETHDEV_LOG(ERR, 6168 "Cannot set ethdev port %u EEPROM from NULL info\n", 6169 port_id); 6170 return -EINVAL; 6171 } 6172 6173 if (*dev->dev_ops->set_eeprom == NULL) 6174 return -ENOTSUP; 6175 ret = eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 6176 6177 rte_ethdev_trace_set_eeprom(port_id, info, ret); 6178 6179 return ret; 6180 } 6181 6182 int 6183 rte_eth_dev_get_module_info(uint16_t port_id, 6184 struct rte_eth_dev_module_info *modinfo) 6185 { 6186 struct rte_eth_dev *dev; 6187 int ret; 6188 6189 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6190 dev = &rte_eth_devices[port_id]; 6191 6192 if (modinfo == NULL) { 6193 RTE_ETHDEV_LOG(ERR, 6194 "Cannot get ethdev port %u EEPROM module info to NULL\n", 6195 port_id); 6196 return -EINVAL; 6197 } 6198 6199 if (*dev->dev_ops->get_module_info == NULL) 6200 return -ENOTSUP; 6201 ret = (*dev->dev_ops->get_module_info)(dev, modinfo); 6202 6203 rte_ethdev_trace_get_module_info(port_id, modinfo, ret); 6204 6205 return ret; 6206 } 6207 6208 int 6209 rte_eth_dev_get_module_eeprom(uint16_t port_id, 6210 struct rte_dev_eeprom_info *info) 6211 { 6212 struct rte_eth_dev *dev; 6213 int ret; 6214 6215 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6216 dev = &rte_eth_devices[port_id]; 6217 6218 if (info == NULL) { 6219 RTE_ETHDEV_LOG(ERR, 6220 "Cannot get ethdev port %u module EEPROM info to NULL\n", 6221 port_id); 6222 return -EINVAL; 6223 } 6224 6225 if (info->data == NULL) { 6226 RTE_ETHDEV_LOG(ERR, 6227 "Cannot get ethdev port %u module EEPROM data to NULL\n", 6228 port_id); 6229 return -EINVAL; 6230 } 6231 6232 if (info->length == 0) { 6233 RTE_ETHDEV_LOG(ERR, 6234 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 6235 port_id); 6236 return -EINVAL; 6237 } 6238 6239 if (*dev->dev_ops->get_module_eeprom == NULL) 6240 return -ENOTSUP; 6241 ret = (*dev->dev_ops->get_module_eeprom)(dev, info); 6242 6243 rte_ethdev_trace_get_module_eeprom(port_id, info, ret); 6244 6245 return ret; 6246 } 6247 6248 int 6249 rte_eth_dev_get_dcb_info(uint16_t port_id, 6250 struct rte_eth_dcb_info *dcb_info) 6251 { 6252 struct rte_eth_dev *dev; 6253 int ret; 6254 6255 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6256 dev = &rte_eth_devices[port_id]; 6257 6258 if (dcb_info == NULL) { 6259 RTE_ETHDEV_LOG(ERR, 6260 "Cannot get ethdev port %u DCB info to NULL\n", 6261 port_id); 6262 return -EINVAL; 6263 } 6264 6265 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 6266 6267 if (*dev->dev_ops->get_dcb_info == NULL) 6268 return -ENOTSUP; 6269 ret = eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 6270 6271 rte_ethdev_trace_get_dcb_info(port_id, dcb_info, ret); 6272 6273 return ret; 6274 } 6275 6276 static void 6277 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 6278 const struct rte_eth_desc_lim *desc_lim) 6279 { 6280 if (desc_lim->nb_align != 0) 6281 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 6282 6283 if (desc_lim->nb_max != 0) 6284 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 6285 6286 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 6287 } 6288 6289 int 6290 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 6291 uint16_t *nb_rx_desc, 6292 uint16_t *nb_tx_desc) 6293 { 6294 struct rte_eth_dev_info dev_info; 6295 int ret; 6296 6297 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6298 6299 ret = rte_eth_dev_info_get(port_id, &dev_info); 6300 if (ret != 0) 6301 return ret; 6302 6303 if (nb_rx_desc != NULL) 6304 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 6305 6306 if (nb_tx_desc != NULL) 6307 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 6308 6309 rte_ethdev_trace_adjust_nb_rx_tx_desc(port_id); 6310 6311 return 0; 6312 } 6313 6314 int 6315 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 6316 struct rte_eth_hairpin_cap *cap) 6317 { 6318 struct rte_eth_dev *dev; 6319 int ret; 6320 6321 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6322 dev = &rte_eth_devices[port_id]; 6323 6324 if (cap == NULL) { 6325 RTE_ETHDEV_LOG(ERR, 6326 "Cannot get ethdev port %u hairpin capability to NULL\n", 6327 port_id); 6328 return -EINVAL; 6329 } 6330 6331 if (*dev->dev_ops->hairpin_cap_get == NULL) 6332 return -ENOTSUP; 6333 memset(cap, 0, sizeof(*cap)); 6334 ret = eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 6335 6336 rte_ethdev_trace_hairpin_capability_get(port_id, cap, ret); 6337 6338 return ret; 6339 } 6340 6341 int 6342 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 6343 { 6344 struct rte_eth_dev *dev; 6345 int ret; 6346 6347 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6348 dev = &rte_eth_devices[port_id]; 6349 6350 if (pool == NULL) { 6351 RTE_ETHDEV_LOG(ERR, 6352 "Cannot test ethdev port %u mempool operation from NULL pool\n", 6353 port_id); 6354 return -EINVAL; 6355 } 6356 6357 if (*dev->dev_ops->pool_ops_supported == NULL) 6358 return 1; /* all pools are supported */ 6359 6360 ret = (*dev->dev_ops->pool_ops_supported)(dev, pool); 6361 6362 rte_ethdev_trace_pool_ops_supported(port_id, pool, ret); 6363 6364 return ret; 6365 } 6366 6367 static int 6368 eth_dev_handle_port_list(const char *cmd __rte_unused, 6369 const char *params __rte_unused, 6370 struct rte_tel_data *d) 6371 { 6372 int port_id; 6373 6374 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 6375 RTE_ETH_FOREACH_DEV(port_id) 6376 rte_tel_data_add_array_int(d, port_id); 6377 return 0; 6378 } 6379 6380 static void 6381 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 6382 const char *stat_name) 6383 { 6384 int q; 6385 struct rte_tel_data *q_data = rte_tel_data_alloc(); 6386 if (q_data == NULL) 6387 return; 6388 rte_tel_data_start_array(q_data, RTE_TEL_UINT_VAL); 6389 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 6390 rte_tel_data_add_array_uint(q_data, q_stats[q]); 6391 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 6392 } 6393 6394 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_uint(d, #s, stats.s) 6395 6396 static int 6397 eth_dev_handle_port_stats(const char *cmd __rte_unused, 6398 const char *params, 6399 struct rte_tel_data *d) 6400 { 6401 struct rte_eth_stats stats; 6402 int port_id, ret; 6403 6404 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6405 return -1; 6406 6407 port_id = atoi(params); 6408 if (!rte_eth_dev_is_valid_port(port_id)) 6409 return -1; 6410 6411 ret = rte_eth_stats_get(port_id, &stats); 6412 if (ret < 0) 6413 return -1; 6414 6415 rte_tel_data_start_dict(d); 6416 ADD_DICT_STAT(stats, ipackets); 6417 ADD_DICT_STAT(stats, opackets); 6418 ADD_DICT_STAT(stats, ibytes); 6419 ADD_DICT_STAT(stats, obytes); 6420 ADD_DICT_STAT(stats, imissed); 6421 ADD_DICT_STAT(stats, ierrors); 6422 ADD_DICT_STAT(stats, oerrors); 6423 ADD_DICT_STAT(stats, rx_nombuf); 6424 eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets"); 6425 eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets"); 6426 eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes"); 6427 eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes"); 6428 eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors"); 6429 6430 return 0; 6431 } 6432 6433 static int 6434 eth_dev_parse_hide_zero(const char *key, const char *value, void *extra_args) 6435 { 6436 RTE_SET_USED(key); 6437 6438 if (value == NULL) 6439 return -1; 6440 6441 if (strcmp(value, "true") == 0) 6442 *(bool *)extra_args = true; 6443 else if (strcmp(value, "false") == 0) 6444 *(bool *)extra_args = false; 6445 else 6446 return -1; 6447 6448 return 0; 6449 } 6450 6451 static int 6452 eth_dev_handle_port_xstats(const char *cmd __rte_unused, 6453 const char *params, 6454 struct rte_tel_data *d) 6455 { 6456 const char *const valid_keys[] = { "hide_zero", NULL }; 6457 struct rte_eth_xstat *eth_xstats; 6458 struct rte_eth_xstat_name *xstat_names; 6459 struct rte_kvargs *kvlist; 6460 int port_id, num_xstats; 6461 bool hide_zero = false; 6462 char *end_param; 6463 int i, ret; 6464 6465 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6466 return -1; 6467 6468 port_id = strtoul(params, &end_param, 0); 6469 if (!rte_eth_dev_is_valid_port(port_id)) 6470 return -1; 6471 6472 if (*end_param != '\0') { 6473 kvlist = rte_kvargs_parse(end_param, valid_keys); 6474 ret = rte_kvargs_process(kvlist, NULL, eth_dev_parse_hide_zero, &hide_zero); 6475 if (kvlist == NULL || ret != 0) 6476 RTE_ETHDEV_LOG(NOTICE, 6477 "Unknown extra parameters passed to ethdev telemetry command, ignoring\n"); 6478 rte_kvargs_free(kvlist); 6479 } 6480 6481 num_xstats = rte_eth_xstats_get(port_id, NULL, 0); 6482 if (num_xstats < 0) 6483 return -1; 6484 6485 /* use one malloc for both names and stats */ 6486 eth_xstats = malloc((sizeof(struct rte_eth_xstat) + 6487 sizeof(struct rte_eth_xstat_name)) * num_xstats); 6488 if (eth_xstats == NULL) 6489 return -1; 6490 xstat_names = (void *)ð_xstats[num_xstats]; 6491 6492 ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats); 6493 if (ret < 0 || ret > num_xstats) { 6494 free(eth_xstats); 6495 return -1; 6496 } 6497 6498 ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats); 6499 if (ret < 0 || ret > num_xstats) { 6500 free(eth_xstats); 6501 return -1; 6502 } 6503 6504 rte_tel_data_start_dict(d); 6505 for (i = 0; i < num_xstats; i++) { 6506 if (hide_zero && eth_xstats[i].value == 0) 6507 continue; 6508 rte_tel_data_add_dict_uint(d, xstat_names[i].name, 6509 eth_xstats[i].value); 6510 } 6511 free(eth_xstats); 6512 return 0; 6513 } 6514 6515 #ifndef RTE_EXEC_ENV_WINDOWS 6516 static int 6517 eth_dev_handle_port_dump_priv(const char *cmd __rte_unused, 6518 const char *params, 6519 struct rte_tel_data *d) 6520 { 6521 char *buf, *end_param; 6522 int port_id, ret; 6523 FILE *f; 6524 6525 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6526 return -EINVAL; 6527 6528 port_id = strtoul(params, &end_param, 0); 6529 if (*end_param != '\0') 6530 RTE_ETHDEV_LOG(NOTICE, 6531 "Extra parameters passed to ethdev telemetry command, ignoring"); 6532 if (!rte_eth_dev_is_valid_port(port_id)) 6533 return -EINVAL; 6534 6535 buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN); 6536 if (buf == NULL) 6537 return -ENOMEM; 6538 6539 f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+"); 6540 if (f == NULL) { 6541 free(buf); 6542 return -EINVAL; 6543 } 6544 6545 ret = rte_eth_dev_priv_dump(port_id, f); 6546 fclose(f); 6547 if (ret == 0) { 6548 rte_tel_data_start_dict(d); 6549 rte_tel_data_string(d, buf); 6550 } 6551 6552 free(buf); 6553 return 0; 6554 } 6555 #endif /* !RTE_EXEC_ENV_WINDOWS */ 6556 6557 static int 6558 eth_dev_handle_port_link_status(const char *cmd __rte_unused, 6559 const char *params, 6560 struct rte_tel_data *d) 6561 { 6562 static const char *status_str = "status"; 6563 int ret, port_id; 6564 struct rte_eth_link link; 6565 char *end_param; 6566 6567 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6568 return -1; 6569 6570 port_id = strtoul(params, &end_param, 0); 6571 if (*end_param != '\0') 6572 RTE_ETHDEV_LOG(NOTICE, 6573 "Extra parameters passed to ethdev telemetry command, ignoring"); 6574 if (!rte_eth_dev_is_valid_port(port_id)) 6575 return -1; 6576 6577 ret = rte_eth_link_get_nowait(port_id, &link); 6578 if (ret < 0) 6579 return -1; 6580 6581 rte_tel_data_start_dict(d); 6582 if (!link.link_status) { 6583 rte_tel_data_add_dict_string(d, status_str, "DOWN"); 6584 return 0; 6585 } 6586 rte_tel_data_add_dict_string(d, status_str, "UP"); 6587 rte_tel_data_add_dict_uint(d, "speed", link.link_speed); 6588 rte_tel_data_add_dict_string(d, "duplex", 6589 (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 6590 "full-duplex" : "half-duplex"); 6591 return 0; 6592 } 6593 6594 static int 6595 eth_dev_handle_port_info(const char *cmd __rte_unused, 6596 const char *params, 6597 struct rte_tel_data *d) 6598 { 6599 struct rte_tel_data *rxq_state, *txq_state; 6600 char mac_addr[RTE_ETHER_ADDR_FMT_SIZE]; 6601 struct rte_eth_dev *eth_dev; 6602 char *end_param; 6603 int port_id, i; 6604 6605 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 6606 return -1; 6607 6608 port_id = strtoul(params, &end_param, 0); 6609 if (*end_param != '\0') 6610 RTE_ETHDEV_LOG(NOTICE, 6611 "Extra parameters passed to ethdev telemetry command, ignoring"); 6612 6613 if (!rte_eth_dev_is_valid_port(port_id)) 6614 return -EINVAL; 6615 6616 eth_dev = &rte_eth_devices[port_id]; 6617 6618 rxq_state = rte_tel_data_alloc(); 6619 if (!rxq_state) 6620 return -ENOMEM; 6621 6622 txq_state = rte_tel_data_alloc(); 6623 if (!txq_state) { 6624 rte_tel_data_free(rxq_state); 6625 return -ENOMEM; 6626 } 6627 6628 rte_tel_data_start_dict(d); 6629 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 6630 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 6631 rte_tel_data_add_dict_int(d, "nb_rx_queues", 6632 eth_dev->data->nb_rx_queues); 6633 rte_tel_data_add_dict_int(d, "nb_tx_queues", 6634 eth_dev->data->nb_tx_queues); 6635 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 6636 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 6637 rte_tel_data_add_dict_uint(d, "rx_mbuf_size_min", 6638 eth_dev->data->min_rx_buf_size); 6639 rte_tel_data_add_dict_uint(d, "rx_mbuf_alloc_fail", 6640 eth_dev->data->rx_mbuf_alloc_failed); 6641 rte_ether_format_addr(mac_addr, sizeof(mac_addr), 6642 eth_dev->data->mac_addrs); 6643 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 6644 rte_tel_data_add_dict_int(d, "promiscuous", 6645 eth_dev->data->promiscuous); 6646 rte_tel_data_add_dict_int(d, "scattered_rx", 6647 eth_dev->data->scattered_rx); 6648 rte_tel_data_add_dict_int(d, "all_multicast", 6649 eth_dev->data->all_multicast); 6650 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 6651 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 6652 rte_tel_data_add_dict_int(d, "dev_configured", 6653 eth_dev->data->dev_configured); 6654 6655 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 6656 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 6657 rte_tel_data_add_array_int(rxq_state, 6658 eth_dev->data->rx_queue_state[i]); 6659 6660 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 6661 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 6662 rte_tel_data_add_array_int(txq_state, 6663 eth_dev->data->tx_queue_state[i]); 6664 6665 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 6666 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 6667 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 6668 rte_tel_data_add_dict_uint_hex(d, "dev_flags", 6669 eth_dev->data->dev_flags, 0); 6670 rte_tel_data_add_dict_uint_hex(d, "rx_offloads", 6671 eth_dev->data->dev_conf.rxmode.offloads, 0); 6672 rte_tel_data_add_dict_uint_hex(d, "tx_offloads", 6673 eth_dev->data->dev_conf.txmode.offloads, 0); 6674 rte_tel_data_add_dict_uint_hex(d, "ethdev_rss_hf", 6675 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf, 0); 6676 6677 return 0; 6678 } 6679 6680 int 6681 rte_eth_representor_info_get(uint16_t port_id, 6682 struct rte_eth_representor_info *info) 6683 { 6684 struct rte_eth_dev *dev; 6685 int ret; 6686 6687 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6688 dev = &rte_eth_devices[port_id]; 6689 6690 if (*dev->dev_ops->representor_info_get == NULL) 6691 return -ENOTSUP; 6692 ret = eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info)); 6693 6694 rte_eth_trace_representor_info_get(port_id, info, ret); 6695 6696 return ret; 6697 } 6698 6699 int 6700 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features) 6701 { 6702 struct rte_eth_dev *dev; 6703 int ret; 6704 6705 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6706 dev = &rte_eth_devices[port_id]; 6707 6708 if (dev->data->dev_configured != 0) { 6709 RTE_ETHDEV_LOG(ERR, 6710 "The port (ID=%"PRIu16") is already configured\n", 6711 port_id); 6712 return -EBUSY; 6713 } 6714 6715 if (features == NULL) { 6716 RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n"); 6717 return -EINVAL; 6718 } 6719 6720 if (*dev->dev_ops->rx_metadata_negotiate == NULL) 6721 return -ENOTSUP; 6722 ret = eth_err(port_id, 6723 (*dev->dev_ops->rx_metadata_negotiate)(dev, features)); 6724 6725 rte_eth_trace_rx_metadata_negotiate(port_id, *features, ret); 6726 6727 return ret; 6728 } 6729 6730 int 6731 rte_eth_ip_reassembly_capability_get(uint16_t port_id, 6732 struct rte_eth_ip_reassembly_params *reassembly_capa) 6733 { 6734 struct rte_eth_dev *dev; 6735 int ret; 6736 6737 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6738 dev = &rte_eth_devices[port_id]; 6739 6740 if (dev->data->dev_configured == 0) { 6741 RTE_ETHDEV_LOG(ERR, 6742 "Device with port_id=%u is not configured.\n" 6743 "Cannot get IP reassembly capability\n", 6744 port_id); 6745 return -EINVAL; 6746 } 6747 6748 if (reassembly_capa == NULL) { 6749 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL"); 6750 return -EINVAL; 6751 } 6752 6753 if (*dev->dev_ops->ip_reassembly_capability_get == NULL) 6754 return -ENOTSUP; 6755 memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params)); 6756 6757 ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get) 6758 (dev, reassembly_capa)); 6759 6760 rte_eth_trace_ip_reassembly_capability_get(port_id, reassembly_capa, 6761 ret); 6762 6763 return ret; 6764 } 6765 6766 int 6767 rte_eth_ip_reassembly_conf_get(uint16_t port_id, 6768 struct rte_eth_ip_reassembly_params *conf) 6769 { 6770 struct rte_eth_dev *dev; 6771 int ret; 6772 6773 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6774 dev = &rte_eth_devices[port_id]; 6775 6776 if (dev->data->dev_configured == 0) { 6777 RTE_ETHDEV_LOG(ERR, 6778 "Device with port_id=%u is not configured.\n" 6779 "Cannot get IP reassembly configuration\n", 6780 port_id); 6781 return -EINVAL; 6782 } 6783 6784 if (conf == NULL) { 6785 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL"); 6786 return -EINVAL; 6787 } 6788 6789 if (*dev->dev_ops->ip_reassembly_conf_get == NULL) 6790 return -ENOTSUP; 6791 memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params)); 6792 ret = eth_err(port_id, 6793 (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf)); 6794 6795 rte_eth_trace_ip_reassembly_conf_get(port_id, conf, ret); 6796 6797 return ret; 6798 } 6799 6800 int 6801 rte_eth_ip_reassembly_conf_set(uint16_t port_id, 6802 const struct rte_eth_ip_reassembly_params *conf) 6803 { 6804 struct rte_eth_dev *dev; 6805 int ret; 6806 6807 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6808 dev = &rte_eth_devices[port_id]; 6809 6810 if (dev->data->dev_configured == 0) { 6811 RTE_ETHDEV_LOG(ERR, 6812 "Device with port_id=%u is not configured.\n" 6813 "Cannot set IP reassembly configuration", 6814 port_id); 6815 return -EINVAL; 6816 } 6817 6818 if (dev->data->dev_started != 0) { 6819 RTE_ETHDEV_LOG(ERR, 6820 "Device with port_id=%u started,\n" 6821 "cannot configure IP reassembly params.\n", 6822 port_id); 6823 return -EINVAL; 6824 } 6825 6826 if (conf == NULL) { 6827 RTE_ETHDEV_LOG(ERR, 6828 "Invalid IP reassembly configuration (NULL)\n"); 6829 return -EINVAL; 6830 } 6831 6832 if (*dev->dev_ops->ip_reassembly_conf_set == NULL) 6833 return -ENOTSUP; 6834 ret = eth_err(port_id, 6835 (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf)); 6836 6837 rte_eth_trace_ip_reassembly_conf_set(port_id, conf, ret); 6838 6839 return ret; 6840 } 6841 6842 int 6843 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file) 6844 { 6845 struct rte_eth_dev *dev; 6846 6847 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6848 dev = &rte_eth_devices[port_id]; 6849 6850 if (file == NULL) { 6851 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 6852 return -EINVAL; 6853 } 6854 6855 if (*dev->dev_ops->eth_dev_priv_dump == NULL) 6856 return -ENOTSUP; 6857 return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file)); 6858 } 6859 6860 int 6861 rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, 6862 uint16_t offset, uint16_t num, FILE *file) 6863 { 6864 struct rte_eth_dev *dev; 6865 6866 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6867 dev = &rte_eth_devices[port_id]; 6868 6869 if (queue_id >= dev->data->nb_rx_queues) { 6870 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 6871 return -EINVAL; 6872 } 6873 6874 if (file == NULL) { 6875 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 6876 return -EINVAL; 6877 } 6878 6879 if (*dev->dev_ops->eth_rx_descriptor_dump == NULL) 6880 return -ENOTSUP; 6881 6882 return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev, 6883 queue_id, offset, num, file)); 6884 } 6885 6886 int 6887 rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, 6888 uint16_t offset, uint16_t num, FILE *file) 6889 { 6890 struct rte_eth_dev *dev; 6891 6892 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6893 dev = &rte_eth_devices[port_id]; 6894 6895 if (queue_id >= dev->data->nb_tx_queues) { 6896 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 6897 return -EINVAL; 6898 } 6899 6900 if (file == NULL) { 6901 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 6902 return -EINVAL; 6903 } 6904 6905 if (*dev->dev_ops->eth_tx_descriptor_dump == NULL) 6906 return -ENOTSUP; 6907 6908 return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev, 6909 queue_id, offset, num, file)); 6910 } 6911 6912 int 6913 rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num) 6914 { 6915 int i, j; 6916 struct rte_eth_dev *dev; 6917 const uint32_t *all_types; 6918 6919 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6920 dev = &rte_eth_devices[port_id]; 6921 6922 if (ptypes == NULL && num > 0) { 6923 RTE_ETHDEV_LOG(ERR, 6924 "Cannot get ethdev port %u supported header protocol types to NULL when array size is non zero\n", 6925 port_id); 6926 return -EINVAL; 6927 } 6928 6929 if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL) 6930 return -ENOTSUP; 6931 all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev); 6932 6933 if (all_types == NULL) 6934 return 0; 6935 6936 for (i = 0, j = 0; all_types[i] != RTE_PTYPE_UNKNOWN; ++i) { 6937 if (j < num) { 6938 ptypes[j] = all_types[i]; 6939 6940 rte_eth_trace_buffer_split_get_supported_hdr_ptypes( 6941 port_id, j, ptypes[j]); 6942 } 6943 j++; 6944 } 6945 6946 return j; 6947 } 6948 6949 int rte_eth_dev_count_aggr_ports(uint16_t port_id) 6950 { 6951 struct rte_eth_dev *dev; 6952 int ret; 6953 6954 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6955 dev = &rte_eth_devices[port_id]; 6956 6957 if (*dev->dev_ops->count_aggr_ports == NULL) 6958 return 0; 6959 ret = eth_err(port_id, (*dev->dev_ops->count_aggr_ports)(dev)); 6960 6961 rte_eth_trace_count_aggr_ports(port_id, ret); 6962 6963 return ret; 6964 } 6965 6966 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id, 6967 uint8_t affinity) 6968 { 6969 struct rte_eth_dev *dev; 6970 int aggr_ports; 6971 int ret; 6972 6973 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6974 dev = &rte_eth_devices[port_id]; 6975 6976 if (tx_queue_id >= dev->data->nb_tx_queues) { 6977 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 6978 return -EINVAL; 6979 } 6980 6981 if (*dev->dev_ops->map_aggr_tx_affinity == NULL) 6982 return -ENOTSUP; 6983 6984 if (dev->data->dev_configured == 0) { 6985 RTE_ETHDEV_LOG(ERR, 6986 "Port %u must be configured before Tx affinity mapping\n", 6987 port_id); 6988 return -EINVAL; 6989 } 6990 6991 if (dev->data->dev_started) { 6992 RTE_ETHDEV_LOG(ERR, 6993 "Port %u must be stopped to allow configuration\n", 6994 port_id); 6995 return -EBUSY; 6996 } 6997 6998 aggr_ports = rte_eth_dev_count_aggr_ports(port_id); 6999 if (aggr_ports == 0) { 7000 RTE_ETHDEV_LOG(ERR, 7001 "Port %u has no aggregated port\n", 7002 port_id); 7003 return -ENOTSUP; 7004 } 7005 7006 if (affinity > aggr_ports) { 7007 RTE_ETHDEV_LOG(ERR, 7008 "Port %u map invalid affinity %u exceeds the maximum number %u\n", 7009 port_id, affinity, aggr_ports); 7010 return -EINVAL; 7011 } 7012 7013 ret = eth_err(port_id, (*dev->dev_ops->map_aggr_tx_affinity)(dev, 7014 tx_queue_id, affinity)); 7015 7016 rte_eth_trace_map_aggr_tx_affinity(port_id, tx_queue_id, affinity, ret); 7017 7018 return ret; 7019 } 7020 7021 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO); 7022 7023 RTE_INIT(ethdev_init_telemetry) 7024 { 7025 rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list, 7026 "Returns list of available ethdev ports. Takes no parameters"); 7027 rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats, 7028 "Returns the common stats for a port. Parameters: int port_id"); 7029 rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats, 7030 "Returns the extended stats for a port. Parameters: int port_id,hide_zero=true|false(Optional for indicates hide zero xstats)"); 7031 #ifndef RTE_EXEC_ENV_WINDOWS 7032 rte_telemetry_register_cmd("/ethdev/dump_priv", eth_dev_handle_port_dump_priv, 7033 "Returns dump private information for a port. Parameters: int port_id"); 7034 #endif 7035 rte_telemetry_register_cmd("/ethdev/link_status", 7036 eth_dev_handle_port_link_status, 7037 "Returns the link status for a port. Parameters: int port_id"); 7038 rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info, 7039 "Returns the device info for a port. Parameters: int port_id"); 7040 rte_telemetry_register_cmd("/ethdev/module_eeprom", eth_dev_handle_port_module_eeprom, 7041 "Returns module EEPROM info with SFF specs. Parameters: int port_id"); 7042 } 7043