/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <bus_driver.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_kvargs.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev.h"
#include "rte_ethdev_trace_fp.h"
#include "ethdev_driver.h"
#include "rte_flow_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "ethdev_trace.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and its offset in stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR
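
/*
 * Usage sketch (illustrative only): the bit-to-name tables above are consumed
 * by simple linear lookups such as rte_eth_dev_rx_offload_name() further
 * below, so a call like
 *
 *	const char *name =
 *		rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_IPV4_CKSUM);
 *
 * is expected to return "IPV4_CKSUM", while an unrecognized bit falls back
 * to "UNKNOWN".
 */
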
#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

static const struct {
	enum rte_eth_hash_function algo;
	const char *name;
} rte_eth_dev_rss_algo_names[] = {
	{RTE_ETH_HASH_FUNCTION_DEFAULT, "default"},
	{RTE_ETH_HASH_FUNCTION_SIMPLE_XOR, "simple_xor"},
	{RTE_ETH_HASH_FUNCTION_TOEPLITZ, "toeplitz"},
	{RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ, "symmetric_toeplitz"},
	{RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT, "symmetric_toeplitz_sort"},
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() is not yet supporting the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
	    (strcmp(iter->bus->name, "fslmc") == 0) ||
	    (strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);

	rte_eth_trace_iterator_init(devargs_str);

	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL) {
			uint16_t id = eth_dev_to_id(iter->class_device);

			rte_eth_trace_iterator_next(iter, id);

			return id; /* match */
		}
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */

	rte_eth_trace_iterator_cleanup(iter);

	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
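
/*
 * Usage sketch for the three iterator calls above (illustrative only; in
 * applications the same loop is normally written with
 * RTE_ETH_FOREACH_MATCHING_DEV):
 *
 *	struct rte_dev_iterator it;
 *	uint16_t port;
 *
 *	if (rte_eth_iterator_init(&it, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (port = rte_eth_iterator_next(&it);
 *		     port < RTE_MAX_ETHPORTS;
 *		     port = rte_eth_iterator_next(&it))
 *			; // use the matching port_id here
 *		rte_eth_iterator_cleanup(&it); // no-op if the loop ran to the end
 *	}
 */
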
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	rte_eth_trace_find_next(port_id);

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_of(port_id, parent);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	uint16_t ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	ret = rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);

	rte_eth_trace_find_next_sibling(port_id, ref_port_id, ret);

	return ret;
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data != NULL && ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	int is_valid;

	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		is_valid = 0;
	else
		is_valid = 1;

	rte_ethdev_trace_is_valid_port(port_id, is_valid);

	return is_valid;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_owned_by(port_id, owner_id);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	int ret;

	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL) {
		*owner_id = eth_dev_shared_data->next_owner_id++;
		eth_dev_shared_data->allocated_owners++;
		ret = 0;
	} else {
		ret = -ENOMEM;
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_new(*owner_id, ret);

	return ret;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL)
		ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
	else
		ret = -ENOMEM;

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_set(port_id, owner, ret);

	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL)
		ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
	else
		ret = -ENOMEM;

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_unset(port_id, owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL) {
		ret = -ENOMEM;
	} else if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
		eth_dev_shared_data->allocated_owners--;
		eth_dev_shared_data_release();
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_delete(owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL) {
		rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
		ret = 0;
	} else {
		ret = -ENOMEM;
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_get(port_id, owner, ret);

	return ret;
}

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	int socket_id = SOCKET_ID_ANY;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_errno = EINVAL;
	} else {
		socket_id = rte_eth_devices[port_id].data->numa_node;
		if (socket_id == SOCKET_ID_ANY)
			rte_errno = 0;
	}

	rte_ethdev_trace_socket_id(port_id, socket_id);

	return socket_id;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	void *ctx;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	ctx = rte_eth_devices[port_id].security_ctx;

	rte_ethdev_trace_get_sec_ctx(port_id, ctx);

	return ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	rte_ethdev_trace_count_avail(count);

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	rte_ethdev_trace_count_total(count);

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	strcpy(name, tmp);

	rte_ethdev_trace_get_name_by_port(port_id, name);

	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	int ret = -ENODEV;
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
	RTE_ETH_FOREACH_VALID_DEV(pid) {
		if (strcmp(name, eth_dev_shared_data->data[pid].name) != 0)
			continue;

		*port_id = pid;
		rte_ethdev_trace_get_port_by_name(name, *port_id);
		ret = 0;
		break;
	}
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return ret;
}

int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	return eth_dev_validate_rx_queue(dev, queue_id);
}

int
rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	return eth_dev_validate_tx_queue(dev, queue_id);
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));

	rte_ethdev_trace_rx_queue_start(port_id, rx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

	rte_ethdev_trace_rx_queue_stop(port_id, rx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));

	rte_ethdev_trace_tx_queue_start(port_id, tx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

	rte_ethdev_trace_tx_queue_stop(port_id, tx_queue_id, ret);

	return ret;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	uint32_t ret;

	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		ret = duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
		break;
	case RTE_ETH_SPEED_NUM_100M:
		ret = duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
		break;
	case RTE_ETH_SPEED_NUM_1G:
		ret = RTE_ETH_LINK_SPEED_1G;
		break;
	case RTE_ETH_SPEED_NUM_2_5G:
		ret = RTE_ETH_LINK_SPEED_2_5G;
		break;
	case RTE_ETH_SPEED_NUM_5G:
		ret = RTE_ETH_LINK_SPEED_5G;
		break;
	case RTE_ETH_SPEED_NUM_10G:
		ret = RTE_ETH_LINK_SPEED_10G;
		break;
	case RTE_ETH_SPEED_NUM_20G:
		ret = RTE_ETH_LINK_SPEED_20G;
		break;
	case RTE_ETH_SPEED_NUM_25G:
		ret = RTE_ETH_LINK_SPEED_25G;
		break;
	case RTE_ETH_SPEED_NUM_40G:
		ret = RTE_ETH_LINK_SPEED_40G;
		break;
	case RTE_ETH_SPEED_NUM_50G:
		ret = RTE_ETH_LINK_SPEED_50G;
		break;
	case RTE_ETH_SPEED_NUM_56G:
		ret = RTE_ETH_LINK_SPEED_56G;
		break;
	case RTE_ETH_SPEED_NUM_100G:
		ret = RTE_ETH_LINK_SPEED_100G;
		break;
	case RTE_ETH_SPEED_NUM_200G:
		ret = RTE_ETH_LINK_SPEED_200G;
		break;
	case RTE_ETH_SPEED_NUM_400G:
		ret = RTE_ETH_LINK_SPEED_400G;
		break;
	default:
		ret = 0;
	}

	rte_eth_trace_speed_bitflag(speed, duplex, ret);

	return ret;
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_rx_offload_name(offload, name);

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_tx_offload_name(offload, name);

	return name;
}

static char *
eth_dev_offload_names(uint64_t bitmask, char *buf, size_t size,
		      const char *(*offload_name)(uint64_t))
{
	unsigned int pos = 0;
	int ret;

	/* There should be at least enough space to handle those cases */
	RTE_ASSERT(size >= sizeof("none") && size >= sizeof("..."));

	if (bitmask == 0) {
		ret = snprintf(&buf[pos], size - pos, "none");
		if (ret < 0 || pos + ret >= size)
			ret = 0;
		pos += ret;
		goto out;
	}

	while (bitmask != 0) {
		uint64_t offload = RTE_BIT64(rte_ctz64(bitmask));
		const char *name = offload_name(offload);

		ret = snprintf(&buf[pos], size - pos, "%s,", name);
		if (ret < 0 || pos + ret >= size) {
			if (pos + sizeof("...") >= size)
				pos = size - sizeof("...");
			ret = snprintf(&buf[pos], size - pos, "...");
			if (ret > 0 && pos + ret < size)
				pos += ret;
			goto out;
		}

		pos += ret;
		bitmask &= ~offload;
	}

	/* Eliminate trailing comma */
	pos--;
out:
	buf[pos] = '\0';
	return buf;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_capability_name(capability, name);

	return name;
}
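
/*
 * Illustrative sketch of eth_dev_offload_names() behaviour above: offload
 * bits are printed lowest-bit first as a comma-separated list, a zero bitmask
 * yields "none", and a too-small buffer is truncated with "...". For example,
 *
 *	char buf[64];
 *	eth_dev_offload_names(RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
 *			      RTE_ETH_RX_OFFLOAD_SCATTER,
 *			      buf, sizeof(buf), rte_eth_dev_rx_offload_name);
 *
 * is expected to leave "IPV4_CKSUM,SCATTER" in buf.
 */
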
static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed\n",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u\n", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u\n", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(rte_ctz64(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}
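
/*
 * Worked example for eth_dev_validate_offloads() above (illustrative only):
 * with req_offloads = RSS_HASH | SCATTER and set_offloads = SCATTER, the XOR
 * diff is RSS_HASH; since that bit is in req_offloads but not in
 * set_offloads, the function logs the failure and returns -EINVAL. A bit
 * present only in set_offloads (enabled by the PMD although not requested)
 * is merely reported at DEBUG level and does not fail validation.
 */
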
static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		     uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	enum rte_eth_hash_function algorithm;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure(), to avoid any non-anticipated behaviour.
	 * It is set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	/* fields must be zero to reserve them for future ABI changes */
	if (dev_conf->rxmode.reserved_64s[0] != 0 ||
	    dev_conf->rxmode.reserved_64s[1] != 0 ||
	    dev_conf->rxmode.reserved_ptrs[0] != NULL ||
	    dev_conf->rxmode.reserved_ptrs[1] != NULL) {
		RTE_ETHDEV_LOG(ERR, "Rxmode reserved fields not zero\n");
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->txmode.reserved_64s[0] != 0 ||
	    dev_conf->txmode.reserved_64s[1] != 0 ||
	    dev_conf->txmode.reserved_ptrs[0] != NULL ||
	    dev_conf->txmode.reserved_ptrs[1] != NULL) {
		RTE_ETHDEV_LOG(ERR, "txmode reserved fields not zero\n");
		ret = -EINVAL;
		goto rollback;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		char buffer[512];

		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u does not support Rx offloads %s\n",
			port_id, eth_dev_offload_names(
			dev_conf->rxmode.offloads & ~dev_info.rx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u was requested Rx offloads %s\n",
			port_id, eth_dev_offload_names(dev_conf->rxmode.offloads,
			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u supports Rx offloads %s\n",
			port_id, eth_dev_offload_names(dev_info.rx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));

		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		char buffer[512];

		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u does not support Tx offloads %s\n",
			port_id, eth_dev_offload_names(
			dev_conf->txmode.offloads & ~dev_info.tx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u was requested Tx offloads %s\n",
			port_id, eth_dev_offload_names(dev_conf->txmode.offloads,
			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u supports Tx offloads %s\n",
			port_id, eth_dev_offload_names(dev_info.tx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rx_adv_conf.rss_conf.rss_key != NULL &&
	    dev_conf->rx_adv_conf.rss_conf.rss_key_len != dev_info.hash_key_size) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid RSS key len: %u, valid value: %u\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_key_len,
			dev_info.hash_key_size);
		ret = -EINVAL;
		goto rollback;
	}

	algorithm = dev_conf->rx_adv_conf.rss_conf.algorithm;
	if ((size_t)algorithm >= CHAR_BIT * sizeof(dev_info.rss_algo_capa) ||
	    (dev_info.rss_algo_capa & RTE_ETH_HASH_ALGO_TO_CAPA(algorithm)) == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u configured RSS hash algorithm (%u) "
			"is not in the algorithm capability (0x%" PRIx32 ")\n",
			port_id, algorithm, dev_info.rss_algo_capa);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		    struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Lets restore MAC now if device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		if (*dev->dev_ops->link_update == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret == 0)
		dev->data->dev_started = 0;
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_up == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));

	rte_ethdev_trace_set_link_up(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_down == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));

	rte_ethdev_trace_set_link_down(port_id, ret);

	return ret;
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*
	 * Secondary process needs to close device to release process private
	 * resources. But secondary process should not be obliged to wait
	 * for device stop before closing ethdev.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
	    dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_reset == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = eth_err(port_id, dev->dev_ops->dev_reset(dev));

	rte_ethdev_trace_reset(port_id, ret);

	return ret;
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	if (*dev->dev_ops->is_removed == NULL)
		return 0;

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	rte_ethdev_trace_is_removed(port_id, ret);

	return ret;
}

static int
rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
			 uint16_t min_length)
{
	uint16_t data_room_size;

	/*
	 * Check the size of the mbuf data buffer, this value
	 * must be provided in the private data of the memory pool.
	 * First check that the memory pool(s) has a valid private data.
	 */
	if (mp->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
			mp->name, mp->private_data_size,
			(unsigned int)
			sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	data_room_size = rte_pktmbuf_data_room_size(mp);
	if (data_room_size < offset + min_length) {
		RTE_ETHDEV_LOG(ERR,
			"%s mbuf_data_room_size %u < %u (%u + %u)\n",
			mp->name, data_room_size,
			offset + min_length, offset, min_length);
		return -EINVAL;
	}
	return 0;
}

static int
eth_dev_buffer_split_get_supported_hdrs_helper(uint16_t port_id, uint32_t **ptypes)
{
	int cnt;

	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0);
	if (cnt <= 0)
		return cnt;

	*ptypes = malloc(sizeof(uint32_t) * cnt);
	if (*ptypes == NULL)
		return -ENOMEM;

	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, *ptypes, cnt);
	if (cnt <= 0) {
		free(*ptypes);
		*ptypes = NULL;
	}
	return cnt;
}

static int
rte_eth_rx_queue_check_split(uint16_t port_id,
			const struct rte_eth_rxseg_split *rx_seg,
			uint16_t n_seg, uint32_t *mbp_buf_size,
			const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;
	int ret = 0;
	int ptype_cnt;
	uint32_t *ptypes;
	uint32_t prev_proto_hdrs = RTE_PTYPE_UNKNOWN;
	int i;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			       "Requested Rx segments %u exceed supported %u\n",
			       n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;

	ptypes = NULL;
	ptype_cnt = eth_dev_buffer_split_get_supported_hdrs_helper(port_id, &ptypes);

	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;
		uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			ret = -EINVAL;
			goto out;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			ret = -ENOTSUP;
			goto out;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				ret = -ENOTSUP;
				goto out;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					       offset,
					       seg_capa->offset_align_log2);
				ret = -EINVAL;
				goto out;
			}
		}

		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		if (proto_hdr != 0) {
			/* Split based on protocol headers. */
*/ 2053 if (length != 0) { 2054 RTE_ETHDEV_LOG(ERR, 2055 "Do not set length split and protocol split within a segment\n" 2056 ); 2057 ret = -EINVAL; 2058 goto out; 2059 } 2060 if ((proto_hdr & prev_proto_hdrs) != 0) { 2061 RTE_ETHDEV_LOG(ERR, 2062 "Repeat with previous protocol headers or proto-split after length-based split\n" 2063 ); 2064 ret = -EINVAL; 2065 goto out; 2066 } 2067 if (ptype_cnt <= 0) { 2068 RTE_ETHDEV_LOG(ERR, 2069 "Port %u failed to get supported buffer split header protocols\n", 2070 port_id); 2071 ret = -ENOTSUP; 2072 goto out; 2073 } 2074 for (i = 0; i < ptype_cnt; i++) { 2075 if ((prev_proto_hdrs | proto_hdr) == ptypes[i]) 2076 break; 2077 } 2078 if (i == ptype_cnt) { 2079 RTE_ETHDEV_LOG(ERR, 2080 "Requested Rx split header protocols 0x%x is not supported.\n", 2081 proto_hdr); 2082 ret = -EINVAL; 2083 goto out; 2084 } 2085 prev_proto_hdrs |= proto_hdr; 2086 } else { 2087 /* Split at fixed length. */ 2088 length = length != 0 ? length : *mbp_buf_size; 2089 prev_proto_hdrs = RTE_PTYPE_ALL_MASK; 2090 } 2091 2092 ret = rte_eth_check_rx_mempool(mpl, offset, length); 2093 if (ret != 0) 2094 goto out; 2095 } 2096 out: 2097 free(ptypes); 2098 return ret; 2099 } 2100 2101 static int 2102 rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools, 2103 uint16_t n_mempools, uint32_t *min_buf_size, 2104 const struct rte_eth_dev_info *dev_info) 2105 { 2106 uint16_t pool_idx; 2107 int ret; 2108 2109 if (n_mempools > dev_info->max_rx_mempools) { 2110 RTE_ETHDEV_LOG(ERR, 2111 "Too many Rx mempools %u vs maximum %u\n", 2112 n_mempools, dev_info->max_rx_mempools); 2113 return -EINVAL; 2114 } 2115 2116 for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) { 2117 struct rte_mempool *mp = rx_mempools[pool_idx]; 2118 2119 if (mp == NULL) { 2120 RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n"); 2121 return -EINVAL; 2122 } 2123 2124 ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM, 2125 dev_info->min_rx_bufsize); 2126 if (ret != 0) 2127 return ret; 2128 2129 *min_buf_size = RTE_MIN(*min_buf_size, 2130 rte_pktmbuf_data_room_size(mp)); 2131 } 2132 2133 return 0; 2134 } 2135 2136 int 2137 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2138 uint16_t nb_rx_desc, unsigned int socket_id, 2139 const struct rte_eth_rxconf *rx_conf, 2140 struct rte_mempool *mp) 2141 { 2142 int ret; 2143 uint64_t rx_offloads; 2144 uint32_t mbp_buf_size = UINT32_MAX; 2145 struct rte_eth_dev *dev; 2146 struct rte_eth_dev_info dev_info; 2147 struct rte_eth_rxconf local_conf; 2148 uint32_t buf_data_size; 2149 2150 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2151 dev = &rte_eth_devices[port_id]; 2152 2153 if (rx_queue_id >= dev->data->nb_rx_queues) { 2154 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2155 return -EINVAL; 2156 } 2157 2158 if (*dev->dev_ops->rx_queue_setup == NULL) 2159 return -ENOTSUP; 2160 2161 if (rx_conf != NULL && 2162 (rx_conf->reserved_64s[0] != 0 || 2163 rx_conf->reserved_64s[1] != 0 || 2164 rx_conf->reserved_ptrs[0] != NULL || 2165 rx_conf->reserved_ptrs[1] != NULL)) { 2166 RTE_ETHDEV_LOG(ERR, "Rx conf reserved fields not zero\n"); 2167 return -EINVAL; 2168 } 2169 2170 ret = rte_eth_dev_info_get(port_id, &dev_info); 2171 if (ret != 0) 2172 return ret; 2173 2174 rx_offloads = dev->data->dev_conf.rxmode.offloads; 2175 if (rx_conf != NULL) 2176 rx_offloads |= rx_conf->offloads; 2177 2178 /* Ensure that we have one and only one source of Rx buffers */ 2179 if ((mp != NULL) + 2180 (rx_conf != NULL && rx_conf->rx_nseg > 0) + 2181 (rx_conf != NULL && 
rx_conf->rx_nmempool > 0) != 1) { 2182 RTE_ETHDEV_LOG(ERR, 2183 "Ambiguous Rx mempools configuration\n"); 2184 return -EINVAL; 2185 } 2186 2187 if (mp != NULL) { 2188 /* Single pool configuration check. */ 2189 ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM, 2190 dev_info.min_rx_bufsize); 2191 if (ret != 0) 2192 return ret; 2193 2194 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2195 buf_data_size = mbp_buf_size - RTE_PKTMBUF_HEADROOM; 2196 if (buf_data_size > dev_info.max_rx_bufsize) 2197 RTE_ETHDEV_LOG(DEBUG, 2198 "For port_id=%u, the mbuf data buffer size (%u) is bigger than " 2199 "max buffer size (%u) device can utilize, so mbuf size can be reduced.\n", 2200 port_id, buf_data_size, dev_info.max_rx_bufsize); 2201 } else if (rx_conf != NULL && rx_conf->rx_nseg > 0) { 2202 const struct rte_eth_rxseg_split *rx_seg; 2203 uint16_t n_seg; 2204 2205 /* Extended multi-segment configuration check. */ 2206 if (rx_conf->rx_seg == NULL) { 2207 RTE_ETHDEV_LOG(ERR, 2208 "Memory pool is null and no multi-segment configuration provided\n"); 2209 return -EINVAL; 2210 } 2211 2212 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2213 n_seg = rx_conf->rx_nseg; 2214 2215 if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2216 ret = rte_eth_rx_queue_check_split(port_id, rx_seg, n_seg, 2217 &mbp_buf_size, 2218 &dev_info); 2219 if (ret != 0) 2220 return ret; 2221 } else { 2222 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2223 return -EINVAL; 2224 } 2225 } else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) { 2226 /* Extended multi-pool configuration check. */ 2227 if (rx_conf->rx_mempools == NULL) { 2228 RTE_ETHDEV_LOG(ERR, "Memory pools array is null\n"); 2229 return -EINVAL; 2230 } 2231 2232 ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools, 2233 rx_conf->rx_nmempool, 2234 &mbp_buf_size, 2235 &dev_info); 2236 if (ret != 0) 2237 return ret; 2238 } else { 2239 RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n"); 2240 return -EINVAL; 2241 } 2242 2243 /* Use default specified by driver, if nb_rx_desc is zero */ 2244 if (nb_rx_desc == 0) { 2245 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2246 /* If driver default is also zero, fall back on EAL default */ 2247 if (nb_rx_desc == 0) 2248 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2249 } 2250 2251 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2252 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2253 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2254 2255 RTE_ETHDEV_LOG(ERR, 2256 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2257 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2258 dev_info.rx_desc_lim.nb_min, 2259 dev_info.rx_desc_lim.nb_align); 2260 return -EINVAL; 2261 } 2262 2263 if (dev->data->dev_started && 2264 !(dev_info.dev_capa & 2265 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2266 return -EBUSY; 2267 2268 if (dev->data->dev_started && 2269 (dev->data->rx_queue_state[rx_queue_id] != 2270 RTE_ETH_QUEUE_STATE_STOPPED)) 2271 return -EBUSY; 2272 2273 eth_dev_rxq_release(dev, rx_queue_id); 2274 2275 if (rx_conf == NULL) 2276 rx_conf = &dev_info.default_rxconf; 2277 2278 local_conf = *rx_conf; 2279 2280 /* 2281 * If an offloading has already been enabled in 2282 * rte_eth_dev_configure(), it has been enabled on all queues, 2283 * so there is no need to enable it in this queue again. 2284 * The local_conf.offloads input to underlying PMD only carries 2285 * those offloadings which are only enabled on this queue and 2286 * not enabled on all queues. 
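* Illustrative example: if VLAN_STRIP was enabled port-wide in
* rte_eth_dev_configure() and this queue requests VLAN_STRIP | SCATTER,
* only SCATTER survives the masking below and must therefore be listed
* in dev_info.rx_queue_offload_capa.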
2287 */ 2288 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2289 2290 /* 2291 * New added offloadings for this queue are those not enabled in 2292 * rte_eth_dev_configure() and they must be per-queue type. 2293 * A pure per-port offloading can't be enabled on a queue while 2294 * disabled on another queue. A pure per-port offloading can't 2295 * be enabled for any queue as new added one if it hasn't been 2296 * enabled in rte_eth_dev_configure(). 2297 */ 2298 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2299 local_conf.offloads) { 2300 RTE_ETHDEV_LOG(ERR, 2301 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2302 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2303 port_id, rx_queue_id, local_conf.offloads, 2304 dev_info.rx_queue_offload_capa, 2305 __func__); 2306 return -EINVAL; 2307 } 2308 2309 if (local_conf.share_group > 0 && 2310 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 2311 RTE_ETHDEV_LOG(ERR, 2312 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n", 2313 port_id, rx_queue_id, local_conf.share_group); 2314 return -EINVAL; 2315 } 2316 2317 /* 2318 * If LRO is enabled, check that the maximum aggregated packet 2319 * size is supported by the configured device. 2320 */ 2321 /* Get the real Ethernet overhead length */ 2322 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 2323 uint32_t overhead_len; 2324 uint32_t max_rx_pktlen; 2325 int ret; 2326 2327 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2328 dev_info.max_mtu); 2329 max_rx_pktlen = dev->data->mtu + overhead_len; 2330 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2331 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2332 ret = eth_dev_check_lro_pkt_size(port_id, 2333 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2334 max_rx_pktlen, 2335 dev_info.max_lro_pkt_size); 2336 if (ret != 0) 2337 return ret; 2338 } 2339 2340 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2341 socket_id, &local_conf, mp); 2342 if (!ret) { 2343 if (!dev->data->min_rx_buf_size || 2344 dev->data->min_rx_buf_size > mbp_buf_size) 2345 dev->data->min_rx_buf_size = mbp_buf_size; 2346 } 2347 2348 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2349 rx_conf, ret); 2350 return eth_err(port_id, ret); 2351 } 2352 2353 int 2354 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2355 uint16_t nb_rx_desc, 2356 const struct rte_eth_hairpin_conf *conf) 2357 { 2358 int ret; 2359 struct rte_eth_dev *dev; 2360 struct rte_eth_hairpin_cap cap; 2361 int i; 2362 int count; 2363 2364 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2365 dev = &rte_eth_devices[port_id]; 2366 2367 if (rx_queue_id >= dev->data->nb_rx_queues) { 2368 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2369 return -EINVAL; 2370 } 2371 2372 if (conf == NULL) { 2373 RTE_ETHDEV_LOG(ERR, 2374 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2375 port_id); 2376 return -EINVAL; 2377 } 2378 2379 if (conf->reserved != 0) { 2380 RTE_ETHDEV_LOG(ERR, 2381 "Rx hairpin reserved field not zero\n"); 2382 return -EINVAL; 2383 } 2384 2385 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2386 if (ret != 0) 2387 return ret; 2388 if (*dev->dev_ops->rx_hairpin_queue_setup == NULL) 2389 return -ENOTSUP; 2390 /* if nb_rx_desc is zero use max number of desc from the driver. 
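* (Unlike rte_eth_rx_queue_setup(), which falls back to
* default_rxportconf.ring_size and then RTE_ETH_DEV_FALLBACK_RX_RINGSIZE,
* hairpin queues default to the capability's max_nb_desc.)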
*/
2391 if (nb_rx_desc == 0)
2392 nb_rx_desc = cap.max_nb_desc;
2393 if (nb_rx_desc > cap.max_nb_desc) {
2394 RTE_ETHDEV_LOG(ERR,
2395 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
2396 nb_rx_desc, cap.max_nb_desc);
2397 return -EINVAL;
2398 }
2399 if (conf->peer_count > cap.max_rx_2_tx) {
2400 RTE_ETHDEV_LOG(ERR,
2401 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
2402 conf->peer_count, cap.max_rx_2_tx);
2403 return -EINVAL;
2404 }
2405 if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
2406 RTE_ETHDEV_LOG(ERR,
2407 "Attempt to use locked device memory for Rx queue, which is not supported");
2408 return -EINVAL;
2409 }
2410 if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
2411 RTE_ETHDEV_LOG(ERR,
2412 "Attempt to use DPDK memory for Rx queue, which is not supported");
2413 return -EINVAL;
2414 }
2415 if (conf->use_locked_device_memory && conf->use_rte_memory) {
2416 RTE_ETHDEV_LOG(ERR,
2417 "Attempt to use mutually exclusive memory settings for Rx queue");
2418 return -EINVAL;
2419 }
2420 if (conf->force_memory &&
2421 !conf->use_locked_device_memory &&
2422 !conf->use_rte_memory) {
2423 RTE_ETHDEV_LOG(ERR,
2424 "Attempt to force Rx queue memory settings, but none is set");
2425 return -EINVAL;
2426 }
2427 if (conf->peer_count == 0) {
2428 RTE_ETHDEV_LOG(ERR,
2429 "Invalid value for number of peers for Rx queue(=%u), should be: > 0",
2430 conf->peer_count);
2431 return -EINVAL;
2432 }
2433 for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2434 cap.max_nb_queues != UINT16_MAX; i++) {
2435 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2436 count++;
2437 }
2438 if (count > cap.max_nb_queues) {
2439 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d",
2440 cap.max_nb_queues);
2441 return -EINVAL;
2442 }
2443 if (dev->data->dev_started)
2444 return -EBUSY;
2445 eth_dev_rxq_release(dev, rx_queue_id);
2446 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2447 nb_rx_desc, conf);
2448 if (ret == 0)
2449 dev->data->rx_queue_state[rx_queue_id] =
2450 RTE_ETH_QUEUE_STATE_HAIRPIN;
2451 ret = eth_err(port_id, ret);
2452
2453 rte_eth_trace_rx_hairpin_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2454 conf, ret);
2455
2456 return ret;
2457 }
2458
2459 int
2460 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2461 uint16_t nb_tx_desc, unsigned int socket_id,
2462 const struct rte_eth_txconf *tx_conf)
2463 {
2464 struct rte_eth_dev *dev;
2465 struct rte_eth_dev_info dev_info;
2466 struct rte_eth_txconf local_conf;
2467 int ret;
2468
2469 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2470 dev = &rte_eth_devices[port_id];
2471
2472 if (tx_queue_id >= dev->data->nb_tx_queues) {
2473 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2474 return -EINVAL;
2475 }
2476
2477 if (*dev->dev_ops->tx_queue_setup == NULL)
2478 return -ENOTSUP;
2479
2480 if (tx_conf != NULL &&
2481 (tx_conf->reserved_64s[0] != 0 ||
2482 tx_conf->reserved_64s[1] != 0 ||
2483 tx_conf->reserved_ptrs[0] != NULL ||
2484 tx_conf->reserved_ptrs[1] != NULL)) {
2485 RTE_ETHDEV_LOG(ERR, "Tx conf reserved fields not zero\n");
2486 return -EINVAL;
2487 }
2488
2489 ret = rte_eth_dev_info_get(port_id, &dev_info);
2490 if (ret != 0)
2491 return ret;
2492
2493 /* Use default specified by driver, if nb_tx_desc is zero */
2494 if (nb_tx_desc == 0) {
2495 nb_tx_desc = dev_info.default_txportconf.ring_size;
2496 /* If driver default is zero, fall back on EAL default */
2497 if (nb_tx_desc == 0)
2498 nb_tx_desc =
RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2499 } 2500 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2501 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2502 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2503 RTE_ETHDEV_LOG(ERR, 2504 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2505 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2506 dev_info.tx_desc_lim.nb_min, 2507 dev_info.tx_desc_lim.nb_align); 2508 return -EINVAL; 2509 } 2510 2511 if (dev->data->dev_started && 2512 !(dev_info.dev_capa & 2513 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2514 return -EBUSY; 2515 2516 if (dev->data->dev_started && 2517 (dev->data->tx_queue_state[tx_queue_id] != 2518 RTE_ETH_QUEUE_STATE_STOPPED)) 2519 return -EBUSY; 2520 2521 eth_dev_txq_release(dev, tx_queue_id); 2522 2523 if (tx_conf == NULL) 2524 tx_conf = &dev_info.default_txconf; 2525 2526 local_conf = *tx_conf; 2527 2528 /* 2529 * If an offloading has already been enabled in 2530 * rte_eth_dev_configure(), it has been enabled on all queues, 2531 * so there is no need to enable it in this queue again. 2532 * The local_conf.offloads input to underlying PMD only carries 2533 * those offloadings which are only enabled on this queue and 2534 * not enabled on all queues. 2535 */ 2536 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2537 2538 /* 2539 * New added offloadings for this queue are those not enabled in 2540 * rte_eth_dev_configure() and they must be per-queue type. 2541 * A pure per-port offloading can't be enabled on a queue while 2542 * disabled on another queue. A pure per-port offloading can't 2543 * be enabled for any queue as new added one if it hasn't been 2544 * enabled in rte_eth_dev_configure(). 2545 */ 2546 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2547 local_conf.offloads) { 2548 RTE_ETHDEV_LOG(ERR, 2549 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2550 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2551 port_id, tx_queue_id, local_conf.offloads, 2552 dev_info.tx_queue_offload_capa, 2553 __func__); 2554 return -EINVAL; 2555 } 2556 2557 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf); 2558 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2559 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2560 } 2561 2562 int 2563 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2564 uint16_t nb_tx_desc, 2565 const struct rte_eth_hairpin_conf *conf) 2566 { 2567 struct rte_eth_dev *dev; 2568 struct rte_eth_hairpin_cap cap; 2569 int i; 2570 int count; 2571 int ret; 2572 2573 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2574 dev = &rte_eth_devices[port_id]; 2575 2576 if (tx_queue_id >= dev->data->nb_tx_queues) { 2577 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2578 return -EINVAL; 2579 } 2580 2581 if (conf == NULL) { 2582 RTE_ETHDEV_LOG(ERR, 2583 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n", 2584 port_id); 2585 return -EINVAL; 2586 } 2587 2588 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2589 if (ret != 0) 2590 return ret; 2591 if (*dev->dev_ops->tx_hairpin_queue_setup == NULL) 2592 return -ENOTSUP; 2593 /* if nb_rx_desc is zero use max number of desc from the driver. 
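* (For this Tx path the descriptor count in question is nb_tx_desc.)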
*/
2594 if (nb_tx_desc == 0)
2595 nb_tx_desc = cap.max_nb_desc;
2596 if (nb_tx_desc > cap.max_nb_desc) {
2597 RTE_ETHDEV_LOG(ERR,
2598 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2599 nb_tx_desc, cap.max_nb_desc);
2600 return -EINVAL;
2601 }
2602 if (conf->peer_count > cap.max_tx_2_rx) {
2603 RTE_ETHDEV_LOG(ERR,
2604 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2605 conf->peer_count, cap.max_tx_2_rx);
2606 return -EINVAL;
2607 }
2608 if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
2609 RTE_ETHDEV_LOG(ERR,
2610 "Attempt to use locked device memory for Tx queue, which is not supported");
2611 return -EINVAL;
2612 }
2613 if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
2614 RTE_ETHDEV_LOG(ERR,
2615 "Attempt to use DPDK memory for Tx queue, which is not supported");
2616 return -EINVAL;
2617 }
2618 if (conf->use_locked_device_memory && conf->use_rte_memory) {
2619 RTE_ETHDEV_LOG(ERR,
2620 "Attempt to use mutually exclusive memory settings for Tx queue");
2621 return -EINVAL;
2622 }
2623 if (conf->force_memory &&
2624 !conf->use_locked_device_memory &&
2625 !conf->use_rte_memory) {
2626 RTE_ETHDEV_LOG(ERR,
2627 "Attempt to force Tx queue memory settings, but none is set");
2628 return -EINVAL;
2629 }
2630 if (conf->peer_count == 0) {
2631 RTE_ETHDEV_LOG(ERR,
2632 "Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2633 conf->peer_count);
2634 return -EINVAL;
2635 }
2636 for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2637 cap.max_nb_queues != UINT16_MAX; i++) {
2638 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2639 count++;
2640 }
2641 if (count > cap.max_nb_queues) {
2642 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d",
2643 cap.max_nb_queues);
2644 return -EINVAL;
2645 }
2646 if (dev->data->dev_started)
2647 return -EBUSY;
2648 eth_dev_txq_release(dev, tx_queue_id);
2649 ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2650 (dev, tx_queue_id, nb_tx_desc, conf);
2651 if (ret == 0)
2652 dev->data->tx_queue_state[tx_queue_id] =
2653 RTE_ETH_QUEUE_STATE_HAIRPIN;
2654 ret = eth_err(port_id, ret);
2655
2656 rte_eth_trace_tx_hairpin_queue_setup(port_id, tx_queue_id, nb_tx_desc,
2657 conf, ret);
2658
2659 return ret;
2660 }
2661
2662 int
2663 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2664 {
2665 struct rte_eth_dev *dev;
2666 int ret;
2667
2668 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2669 dev = &rte_eth_devices[tx_port];
2670
2671 if (dev->data->dev_started == 0) {
2672 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2673 return -EBUSY;
2674 }
2675
2676 if (*dev->dev_ops->hairpin_bind == NULL)
2677 return -ENOTSUP;
2678 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2679 if (ret != 0)
2680 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2681 " to Rx %d (%d - all ports)\n",
2682 tx_port, rx_port, RTE_MAX_ETHPORTS);
2683
2684 rte_eth_trace_hairpin_bind(tx_port, rx_port, ret);
2685
2686 return ret;
2687 }
2688
2689 int
2690 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2691 {
2692 struct rte_eth_dev *dev;
2693 int ret;
2694
2695 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2696 dev = &rte_eth_devices[tx_port];
2697
2698 if (dev->data->dev_started == 0) {
2699 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2700 return -EBUSY;
2701 }
2702
2703 if (*dev->dev_ops->hairpin_unbind == NULL)
2704 return -ENOTSUP;
2705 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2706 if (ret != 0)
2707
RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2708 " from Rx %d (%d - all ports)\n", 2709 tx_port, rx_port, RTE_MAX_ETHPORTS); 2710 2711 rte_eth_trace_hairpin_unbind(tx_port, rx_port, ret); 2712 2713 return ret; 2714 } 2715 2716 int 2717 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2718 size_t len, uint32_t direction) 2719 { 2720 struct rte_eth_dev *dev; 2721 int ret; 2722 2723 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2724 dev = &rte_eth_devices[port_id]; 2725 2726 if (peer_ports == NULL) { 2727 RTE_ETHDEV_LOG(ERR, 2728 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2729 port_id); 2730 return -EINVAL; 2731 } 2732 2733 if (len == 0) { 2734 RTE_ETHDEV_LOG(ERR, 2735 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2736 port_id); 2737 return -EINVAL; 2738 } 2739 2740 if (*dev->dev_ops->hairpin_get_peer_ports == NULL) 2741 return -ENOTSUP; 2742 2743 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2744 len, direction); 2745 if (ret < 0) 2746 RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2747 port_id, direction ? "Rx" : "Tx"); 2748 2749 rte_eth_trace_hairpin_get_peer_ports(port_id, peer_ports, len, 2750 direction, ret); 2751 2752 return ret; 2753 } 2754 2755 void 2756 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2757 void *userdata __rte_unused) 2758 { 2759 rte_pktmbuf_free_bulk(pkts, unsent); 2760 2761 rte_eth_trace_tx_buffer_drop_callback((void **)pkts, unsent); 2762 } 2763 2764 void 2765 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2766 void *userdata) 2767 { 2768 uint64_t *count = userdata; 2769 2770 rte_pktmbuf_free_bulk(pkts, unsent); 2771 *count += unsent; 2772 2773 rte_eth_trace_tx_buffer_count_callback((void **)pkts, unsent, *count); 2774 } 2775 2776 int 2777 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2778 buffer_tx_error_fn cbfn, void *userdata) 2779 { 2780 if (buffer == NULL) { 2781 RTE_ETHDEV_LOG(ERR, 2782 "Cannot set Tx buffer error callback to NULL buffer\n"); 2783 return -EINVAL; 2784 } 2785 2786 buffer->error_callback = cbfn; 2787 buffer->error_userdata = userdata; 2788 2789 rte_eth_trace_tx_buffer_set_err_callback(buffer); 2790 2791 return 0; 2792 } 2793 2794 int 2795 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2796 { 2797 int ret = 0; 2798 2799 if (buffer == NULL) { 2800 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2801 return -EINVAL; 2802 } 2803 2804 buffer->size = size; 2805 if (buffer->error_callback == NULL) { 2806 ret = rte_eth_tx_buffer_set_err_callback( 2807 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2808 } 2809 2810 rte_eth_trace_tx_buffer_init(buffer, size, ret); 2811 2812 return ret; 2813 } 2814 2815 int 2816 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2817 { 2818 struct rte_eth_dev *dev; 2819 int ret; 2820 2821 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2822 dev = &rte_eth_devices[port_id]; 2823 2824 if (*dev->dev_ops->tx_done_cleanup == NULL) 2825 return -ENOTSUP; 2826 2827 /* Call driver to free pending mbufs. 
*/ 2828 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2829 free_cnt); 2830 ret = eth_err(port_id, ret); 2831 2832 rte_eth_trace_tx_done_cleanup(port_id, queue_id, free_cnt, ret); 2833 2834 return ret; 2835 } 2836 2837 int 2838 rte_eth_promiscuous_enable(uint16_t port_id) 2839 { 2840 struct rte_eth_dev *dev; 2841 int diag = 0; 2842 2843 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2844 dev = &rte_eth_devices[port_id]; 2845 2846 if (dev->data->promiscuous == 1) 2847 return 0; 2848 2849 if (*dev->dev_ops->promiscuous_enable == NULL) 2850 return -ENOTSUP; 2851 2852 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2853 dev->data->promiscuous = (diag == 0) ? 1 : 0; 2854 2855 diag = eth_err(port_id, diag); 2856 2857 rte_eth_trace_promiscuous_enable(port_id, dev->data->promiscuous, 2858 diag); 2859 2860 return diag; 2861 } 2862 2863 int 2864 rte_eth_promiscuous_disable(uint16_t port_id) 2865 { 2866 struct rte_eth_dev *dev; 2867 int diag = 0; 2868 2869 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2870 dev = &rte_eth_devices[port_id]; 2871 2872 if (dev->data->promiscuous == 0) 2873 return 0; 2874 2875 if (*dev->dev_ops->promiscuous_disable == NULL) 2876 return -ENOTSUP; 2877 2878 dev->data->promiscuous = 0; 2879 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2880 if (diag != 0) 2881 dev->data->promiscuous = 1; 2882 2883 diag = eth_err(port_id, diag); 2884 2885 rte_eth_trace_promiscuous_disable(port_id, dev->data->promiscuous, 2886 diag); 2887 2888 return diag; 2889 } 2890 2891 int 2892 rte_eth_promiscuous_get(uint16_t port_id) 2893 { 2894 struct rte_eth_dev *dev; 2895 2896 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2897 dev = &rte_eth_devices[port_id]; 2898 2899 rte_eth_trace_promiscuous_get(port_id, dev->data->promiscuous); 2900 2901 return dev->data->promiscuous; 2902 } 2903 2904 int 2905 rte_eth_allmulticast_enable(uint16_t port_id) 2906 { 2907 struct rte_eth_dev *dev; 2908 int diag; 2909 2910 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2911 dev = &rte_eth_devices[port_id]; 2912 2913 if (dev->data->all_multicast == 1) 2914 return 0; 2915 2916 if (*dev->dev_ops->allmulticast_enable == NULL) 2917 return -ENOTSUP; 2918 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2919 dev->data->all_multicast = (diag == 0) ? 
1 : 0; 2920 2921 diag = eth_err(port_id, diag); 2922 2923 rte_eth_trace_allmulticast_enable(port_id, dev->data->all_multicast, 2924 diag); 2925 2926 return diag; 2927 } 2928 2929 int 2930 rte_eth_allmulticast_disable(uint16_t port_id) 2931 { 2932 struct rte_eth_dev *dev; 2933 int diag; 2934 2935 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2936 dev = &rte_eth_devices[port_id]; 2937 2938 if (dev->data->all_multicast == 0) 2939 return 0; 2940 2941 if (*dev->dev_ops->allmulticast_disable == NULL) 2942 return -ENOTSUP; 2943 dev->data->all_multicast = 0; 2944 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2945 if (diag != 0) 2946 dev->data->all_multicast = 1; 2947 2948 diag = eth_err(port_id, diag); 2949 2950 rte_eth_trace_allmulticast_disable(port_id, dev->data->all_multicast, 2951 diag); 2952 2953 return diag; 2954 } 2955 2956 int 2957 rte_eth_allmulticast_get(uint16_t port_id) 2958 { 2959 struct rte_eth_dev *dev; 2960 2961 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2962 dev = &rte_eth_devices[port_id]; 2963 2964 rte_eth_trace_allmulticast_get(port_id, dev->data->all_multicast); 2965 2966 return dev->data->all_multicast; 2967 } 2968 2969 int 2970 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2971 { 2972 struct rte_eth_dev *dev; 2973 2974 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2975 dev = &rte_eth_devices[port_id]; 2976 2977 if (eth_link == NULL) { 2978 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2979 port_id); 2980 return -EINVAL; 2981 } 2982 2983 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2984 rte_eth_linkstatus_get(dev, eth_link); 2985 else { 2986 if (*dev->dev_ops->link_update == NULL) 2987 return -ENOTSUP; 2988 (*dev->dev_ops->link_update)(dev, 1); 2989 *eth_link = dev->data->dev_link; 2990 } 2991 2992 rte_eth_trace_link_get(port_id, eth_link); 2993 2994 return 0; 2995 } 2996 2997 int 2998 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2999 { 3000 struct rte_eth_dev *dev; 3001 3002 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3003 dev = &rte_eth_devices[port_id]; 3004 3005 if (eth_link == NULL) { 3006 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 3007 port_id); 3008 return -EINVAL; 3009 } 3010 3011 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 3012 rte_eth_linkstatus_get(dev, eth_link); 3013 else { 3014 if (*dev->dev_ops->link_update == NULL) 3015 return -ENOTSUP; 3016 (*dev->dev_ops->link_update)(dev, 0); 3017 *eth_link = dev->data->dev_link; 3018 } 3019 3020 rte_eth_trace_link_get_nowait(port_id, eth_link); 3021 3022 return 0; 3023 } 3024 3025 const char * 3026 rte_eth_link_speed_to_str(uint32_t link_speed) 3027 { 3028 const char *ret; 3029 3030 switch (link_speed) { 3031 case RTE_ETH_SPEED_NUM_NONE: 3032 ret = "None"; 3033 break; 3034 case RTE_ETH_SPEED_NUM_10M: 3035 ret = "10 Mbps"; 3036 break; 3037 case RTE_ETH_SPEED_NUM_100M: 3038 ret = "100 Mbps"; 3039 break; 3040 case RTE_ETH_SPEED_NUM_1G: 3041 ret = "1 Gbps"; 3042 break; 3043 case RTE_ETH_SPEED_NUM_2_5G: 3044 ret = "2.5 Gbps"; 3045 break; 3046 case RTE_ETH_SPEED_NUM_5G: 3047 ret = "5 Gbps"; 3048 break; 3049 case RTE_ETH_SPEED_NUM_10G: 3050 ret = "10 Gbps"; 3051 break; 3052 case RTE_ETH_SPEED_NUM_20G: 3053 ret = "20 Gbps"; 3054 break; 3055 case RTE_ETH_SPEED_NUM_25G: 3056 ret = "25 Gbps"; 3057 break; 3058 case RTE_ETH_SPEED_NUM_40G: 3059 ret = "40 Gbps"; 3060 break; 3061 case RTE_ETH_SPEED_NUM_50G: 3062 ret = "50 Gbps"; 3063 break; 3064 case RTE_ETH_SPEED_NUM_56G: 3065 ret 
= "56 Gbps"; 3066 break; 3067 case RTE_ETH_SPEED_NUM_100G: 3068 ret = "100 Gbps"; 3069 break; 3070 case RTE_ETH_SPEED_NUM_200G: 3071 ret = "200 Gbps"; 3072 break; 3073 case RTE_ETH_SPEED_NUM_400G: 3074 ret = "400 Gbps"; 3075 break; 3076 case RTE_ETH_SPEED_NUM_UNKNOWN: 3077 ret = "Unknown"; 3078 break; 3079 default: 3080 ret = "Invalid"; 3081 } 3082 3083 rte_eth_trace_link_speed_to_str(link_speed, ret); 3084 3085 return ret; 3086 } 3087 3088 int 3089 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 3090 { 3091 int ret; 3092 3093 if (str == NULL) { 3094 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 3095 return -EINVAL; 3096 } 3097 3098 if (len == 0) { 3099 RTE_ETHDEV_LOG(ERR, 3100 "Cannot convert link to string with zero size\n"); 3101 return -EINVAL; 3102 } 3103 3104 if (eth_link == NULL) { 3105 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 3106 return -EINVAL; 3107 } 3108 3109 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 3110 ret = snprintf(str, len, "Link down"); 3111 else 3112 ret = snprintf(str, len, "Link up at %s %s %s", 3113 rte_eth_link_speed_to_str(eth_link->link_speed), 3114 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 3115 "FDX" : "HDX", 3116 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 3117 "Autoneg" : "Fixed"); 3118 3119 rte_eth_trace_link_to_str(len, eth_link, str, ret); 3120 3121 return ret; 3122 } 3123 3124 int 3125 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 3126 { 3127 struct rte_eth_dev *dev; 3128 int ret; 3129 3130 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3131 dev = &rte_eth_devices[port_id]; 3132 3133 if (stats == NULL) { 3134 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 3135 port_id); 3136 return -EINVAL; 3137 } 3138 3139 memset(stats, 0, sizeof(*stats)); 3140 3141 if (*dev->dev_ops->stats_get == NULL) 3142 return -ENOTSUP; 3143 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 3144 ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 3145 3146 rte_eth_trace_stats_get(port_id, stats, ret); 3147 3148 return ret; 3149 } 3150 3151 int 3152 rte_eth_stats_reset(uint16_t port_id) 3153 { 3154 struct rte_eth_dev *dev; 3155 int ret; 3156 3157 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3158 dev = &rte_eth_devices[port_id]; 3159 3160 if (*dev->dev_ops->stats_reset == NULL) 3161 return -ENOTSUP; 3162 ret = (*dev->dev_ops->stats_reset)(dev); 3163 if (ret != 0) 3164 return eth_err(port_id, ret); 3165 3166 dev->data->rx_mbuf_alloc_failed = 0; 3167 3168 rte_eth_trace_stats_reset(port_id); 3169 3170 return 0; 3171 } 3172 3173 static inline int 3174 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 3175 { 3176 uint16_t nb_rxqs, nb_txqs; 3177 int count; 3178 3179 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3180 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3181 3182 count = RTE_NB_STATS; 3183 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 3184 count += nb_rxqs * RTE_NB_RXQ_STATS; 3185 count += nb_txqs * RTE_NB_TXQ_STATS; 3186 } 3187 3188 return count; 3189 } 3190 3191 static int 3192 eth_dev_get_xstats_count(uint16_t port_id) 3193 { 3194 struct rte_eth_dev *dev; 3195 int count; 3196 3197 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3198 dev = &rte_eth_devices[port_id]; 3199 if (dev->dev_ops->xstats_get_names != NULL) { 3200 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 3201 if (count < 0) 3202 return eth_err(port_id, count); 3203 } else 
3204 count = 0; 3205 3206 3207 count += eth_dev_get_xstats_basic_count(dev); 3208 3209 return count; 3210 } 3211 3212 int 3213 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 3214 uint64_t *id) 3215 { 3216 int cnt_xstats, idx_xstat; 3217 3218 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3219 3220 if (xstat_name == NULL) { 3221 RTE_ETHDEV_LOG(ERR, 3222 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 3223 port_id); 3224 return -ENOMEM; 3225 } 3226 3227 if (id == NULL) { 3228 RTE_ETHDEV_LOG(ERR, 3229 "Cannot get ethdev port %u xstats ID to NULL\n", 3230 port_id); 3231 return -ENOMEM; 3232 } 3233 3234 /* Get count */ 3235 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 3236 if (cnt_xstats < 0) { 3237 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 3238 return -ENODEV; 3239 } 3240 3241 /* Get id-name lookup table */ 3242 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 3243 3244 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 3245 port_id, xstats_names, cnt_xstats, NULL)) { 3246 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 3247 return -1; 3248 } 3249 3250 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 3251 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 3252 *id = idx_xstat; 3253 3254 rte_eth_trace_xstats_get_id_by_name(port_id, 3255 xstat_name, *id); 3256 3257 return 0; 3258 }; 3259 } 3260 3261 return -EINVAL; 3262 } 3263 3264 /* retrieve basic stats names */ 3265 static int 3266 eth_basic_stats_get_names(struct rte_eth_dev *dev, 3267 struct rte_eth_xstat_name *xstats_names) 3268 { 3269 int cnt_used_entries = 0; 3270 uint32_t idx, id_queue; 3271 uint16_t num_q; 3272 3273 for (idx = 0; idx < RTE_NB_STATS; idx++) { 3274 strlcpy(xstats_names[cnt_used_entries].name, 3275 eth_dev_stats_strings[idx].name, 3276 sizeof(xstats_names[0].name)); 3277 cnt_used_entries++; 3278 } 3279 3280 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3281 return cnt_used_entries; 3282 3283 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3284 for (id_queue = 0; id_queue < num_q; id_queue++) { 3285 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 3286 snprintf(xstats_names[cnt_used_entries].name, 3287 sizeof(xstats_names[0].name), 3288 "rx_q%u_%s", 3289 id_queue, eth_dev_rxq_stats_strings[idx].name); 3290 cnt_used_entries++; 3291 } 3292 3293 } 3294 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3295 for (id_queue = 0; id_queue < num_q; id_queue++) { 3296 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 3297 snprintf(xstats_names[cnt_used_entries].name, 3298 sizeof(xstats_names[0].name), 3299 "tx_q%u_%s", 3300 id_queue, eth_dev_txq_stats_strings[idx].name); 3301 cnt_used_entries++; 3302 } 3303 } 3304 return cnt_used_entries; 3305 } 3306 3307 /* retrieve ethdev extended statistics names */ 3308 int 3309 rte_eth_xstats_get_names_by_id(uint16_t port_id, 3310 struct rte_eth_xstat_name *xstats_names, unsigned int size, 3311 uint64_t *ids) 3312 { 3313 struct rte_eth_xstat_name *xstats_names_copy; 3314 unsigned int no_basic_stat_requested = 1; 3315 unsigned int no_ext_stat_requested = 1; 3316 unsigned int expected_entries; 3317 unsigned int basic_count; 3318 struct rte_eth_dev *dev; 3319 unsigned int i; 3320 int ret; 3321 3322 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3323 dev = &rte_eth_devices[port_id]; 3324 3325 basic_count = eth_dev_get_xstats_basic_count(dev); 3326 ret = eth_dev_get_xstats_count(port_id); 3327 if (ret < 0) 3328 return ret; 3329 
expected_entries = (unsigned int)ret; 3330 3331 /* Return max number of stats if no ids given */ 3332 if (!ids) { 3333 if (!xstats_names) 3334 return expected_entries; 3335 else if (xstats_names && size < expected_entries) 3336 return expected_entries; 3337 } 3338 3339 if (ids && !xstats_names) 3340 return -EINVAL; 3341 3342 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3343 uint64_t ids_copy[size]; 3344 3345 for (i = 0; i < size; i++) { 3346 if (ids[i] < basic_count) { 3347 no_basic_stat_requested = 0; 3348 break; 3349 } 3350 3351 /* 3352 * Convert ids to xstats ids that PMD knows. 3353 * ids known by user are basic + extended stats. 3354 */ 3355 ids_copy[i] = ids[i] - basic_count; 3356 } 3357 3358 if (no_basic_stat_requested) 3359 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3360 ids_copy, xstats_names, size); 3361 } 3362 3363 /* Retrieve all stats */ 3364 if (!ids) { 3365 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3366 expected_entries); 3367 if (num_stats < 0 || num_stats > (int)expected_entries) 3368 return num_stats; 3369 else 3370 return expected_entries; 3371 } 3372 3373 xstats_names_copy = calloc(expected_entries, 3374 sizeof(struct rte_eth_xstat_name)); 3375 3376 if (!xstats_names_copy) { 3377 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3378 return -ENOMEM; 3379 } 3380 3381 if (ids) { 3382 for (i = 0; i < size; i++) { 3383 if (ids[i] >= basic_count) { 3384 no_ext_stat_requested = 0; 3385 break; 3386 } 3387 } 3388 } 3389 3390 /* Fill xstats_names_copy structure */ 3391 if (ids && no_ext_stat_requested) { 3392 eth_basic_stats_get_names(dev, xstats_names_copy); 3393 } else { 3394 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3395 expected_entries); 3396 if (ret < 0) { 3397 free(xstats_names_copy); 3398 return ret; 3399 } 3400 } 3401 3402 /* Filter stats */ 3403 for (i = 0; i < size; i++) { 3404 if (ids[i] >= expected_entries) { 3405 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3406 free(xstats_names_copy); 3407 return -1; 3408 } 3409 xstats_names[i] = xstats_names_copy[ids[i]]; 3410 3411 rte_eth_trace_xstats_get_names_by_id(port_id, &xstats_names[i], 3412 ids[i]); 3413 } 3414 3415 free(xstats_names_copy); 3416 return size; 3417 } 3418 3419 int 3420 rte_eth_xstats_get_names(uint16_t port_id, 3421 struct rte_eth_xstat_name *xstats_names, 3422 unsigned int size) 3423 { 3424 struct rte_eth_dev *dev; 3425 int cnt_used_entries; 3426 int cnt_expected_entries; 3427 int cnt_driver_entries; 3428 int i; 3429 3430 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3431 if (xstats_names == NULL || cnt_expected_entries < 0 || 3432 (int)size < cnt_expected_entries) 3433 return cnt_expected_entries; 3434 3435 /* port_id checked in eth_dev_get_xstats_count() */ 3436 dev = &rte_eth_devices[port_id]; 3437 3438 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3439 3440 if (dev->dev_ops->xstats_get_names != NULL) { 3441 /* If there are any driver-specific xstats, append them 3442 * to end of list. 
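* Their positions in xstats_names simply follow the generic entries
* filled above, so an entry's index in the combined array is also the
* xstat ID reported by rte_eth_xstats_get_id_by_name().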
3443 */
3444 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3445 dev,
3446 xstats_names + cnt_used_entries,
3447 size - cnt_used_entries);
3448 if (cnt_driver_entries < 0)
3449 return eth_err(port_id, cnt_driver_entries);
3450 cnt_used_entries += cnt_driver_entries;
3451 }
3452
3453 for (i = 0; i < cnt_used_entries; i++)
3454 rte_eth_trace_xstats_get_names(port_id, i, &xstats_names[i],
3455 size, cnt_used_entries);
3456
3457 return cnt_used_entries;
3458 }
3459
3460
3461 static int
3462 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
3463 {
3464 struct rte_eth_dev *dev;
3465 struct rte_eth_stats eth_stats;
3466 unsigned int count = 0, i, q;
3467 uint64_t val, *stats_ptr;
3468 uint16_t nb_rxqs, nb_txqs;
3469 int ret;
3470
3471 ret = rte_eth_stats_get(port_id, &eth_stats);
3472 if (ret < 0)
3473 return ret;
3474
3475 dev = &rte_eth_devices[port_id];
3476
3477 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3478 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3479
3480 /* global stats */
3481 for (i = 0; i < RTE_NB_STATS; i++) {
3482 stats_ptr = RTE_PTR_ADD(&eth_stats,
3483 eth_dev_stats_strings[i].offset);
3484 val = *stats_ptr;
3485 xstats[count++].value = val;
3486 }
3487
3488 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3489 return count;
3490
3491 /* per-rxq stats */
3492 for (q = 0; q < nb_rxqs; q++) {
3493 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3494 stats_ptr = RTE_PTR_ADD(&eth_stats,
3495 eth_dev_rxq_stats_strings[i].offset +
3496 q * sizeof(uint64_t));
3497 val = *stats_ptr;
3498 xstats[count++].value = val;
3499 }
3500 }
3501
3502 /* per-txq stats */
3503 for (q = 0; q < nb_txqs; q++) {
3504 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3505 stats_ptr = RTE_PTR_ADD(&eth_stats,
3506 eth_dev_txq_stats_strings[i].offset +
3507 q * sizeof(uint64_t));
3508 val = *stats_ptr;
3509 xstats[count++].value = val;
3510 }
3511 }
3512 return count;
3513 }
3514
3515 /* retrieve ethdev extended statistics */
3516 int
3517 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3518 uint64_t *values, unsigned int size)
3519 {
3520 unsigned int no_basic_stat_requested = 1;
3521 unsigned int no_ext_stat_requested = 1;
3522 unsigned int num_xstats_filled;
3523 unsigned int basic_count;
3524 uint16_t expected_entries;
3525 struct rte_eth_dev *dev;
3526 unsigned int i;
3527 int ret;
3528
3529 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3530 dev = &rte_eth_devices[port_id];
3531
3532 ret = eth_dev_get_xstats_count(port_id);
3533 if (ret < 0)
3534 return ret;
3535 expected_entries = (uint16_t)ret;
3536 struct rte_eth_xstat xstats[expected_entries];
3537 basic_count = eth_dev_get_xstats_basic_count(dev);
3538
3539 /* Return max number of stats if no ids given */
3540 if (!ids) {
3541 if (!values)
3542 return expected_entries;
3543 else if (values && size < expected_entries)
3544 return expected_entries;
3545 }
3546
3547 if (ids && !values)
3548 return -EINVAL;
3549
3550 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3551 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
3552 uint64_t ids_copy[size];
3553
3554 for (i = 0; i < size; i++) {
3555 if (ids[i] < basic_count) {
3556 no_basic_stat_requested = 0;
3557 break;
3558 }
3559
3560 /*
3561 * Convert ids to xstats ids that PMD knows.
3562 * ids known by user are basic + extended stats.
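* For example, with basic_count == 8 a caller-visible id of 10 refers
* to the driver's xstat at index 2 after the subtraction below.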
3563 */ 3564 ids_copy[i] = ids[i] - basic_count; 3565 } 3566 3567 if (no_basic_stat_requested) 3568 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3569 values, size); 3570 } 3571 3572 if (ids) { 3573 for (i = 0; i < size; i++) { 3574 if (ids[i] >= basic_count) { 3575 no_ext_stat_requested = 0; 3576 break; 3577 } 3578 } 3579 } 3580 3581 /* Fill the xstats structure */ 3582 if (ids && no_ext_stat_requested) 3583 ret = eth_basic_stats_get(port_id, xstats); 3584 else 3585 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3586 3587 if (ret < 0) 3588 return ret; 3589 num_xstats_filled = (unsigned int)ret; 3590 3591 /* Return all stats */ 3592 if (!ids) { 3593 for (i = 0; i < num_xstats_filled; i++) 3594 values[i] = xstats[i].value; 3595 return expected_entries; 3596 } 3597 3598 /* Filter stats */ 3599 for (i = 0; i < size; i++) { 3600 if (ids[i] >= expected_entries) { 3601 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3602 return -1; 3603 } 3604 values[i] = xstats[ids[i]].value; 3605 } 3606 3607 rte_eth_trace_xstats_get_by_id(port_id, ids, values, size); 3608 3609 return size; 3610 } 3611 3612 int 3613 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3614 unsigned int n) 3615 { 3616 struct rte_eth_dev *dev; 3617 unsigned int count, i; 3618 signed int xcount = 0; 3619 int ret; 3620 3621 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3622 if (xstats == NULL && n > 0) 3623 return -EINVAL; 3624 dev = &rte_eth_devices[port_id]; 3625 3626 count = eth_dev_get_xstats_basic_count(dev); 3627 3628 /* implemented by the driver */ 3629 if (dev->dev_ops->xstats_get != NULL) { 3630 /* Retrieve the xstats from the driver at the end of the 3631 * xstats struct. 3632 */ 3633 xcount = (*dev->dev_ops->xstats_get)(dev, 3634 (n > count) ? xstats + count : NULL, 3635 (n > count) ? 
n - count : 0); 3636 3637 if (xcount < 0) 3638 return eth_err(port_id, xcount); 3639 } 3640 3641 if (n < count + xcount || xstats == NULL) 3642 return count + xcount; 3643 3644 /* now fill the xstats structure */ 3645 ret = eth_basic_stats_get(port_id, xstats); 3646 if (ret < 0) 3647 return ret; 3648 count = ret; 3649 3650 for (i = 0; i < count; i++) 3651 xstats[i].id = i; 3652 /* add an offset to driver-specific stats */ 3653 for ( ; i < count + xcount; i++) 3654 xstats[i].id += count; 3655 3656 for (i = 0; i < n; i++) 3657 rte_eth_trace_xstats_get(port_id, xstats[i]); 3658 3659 return count + xcount; 3660 } 3661 3662 /* reset ethdev extended statistics */ 3663 int 3664 rte_eth_xstats_reset(uint16_t port_id) 3665 { 3666 struct rte_eth_dev *dev; 3667 3668 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3669 dev = &rte_eth_devices[port_id]; 3670 3671 /* implemented by the driver */ 3672 if (dev->dev_ops->xstats_reset != NULL) { 3673 int ret = eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3674 3675 rte_eth_trace_xstats_reset(port_id, ret); 3676 3677 return ret; 3678 } 3679 3680 /* fallback to default */ 3681 return rte_eth_stats_reset(port_id); 3682 } 3683 3684 static int 3685 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3686 uint8_t stat_idx, uint8_t is_rx) 3687 { 3688 struct rte_eth_dev *dev; 3689 3690 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3691 dev = &rte_eth_devices[port_id]; 3692 3693 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3694 return -EINVAL; 3695 3696 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3697 return -EINVAL; 3698 3699 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3700 return -EINVAL; 3701 3702 if (*dev->dev_ops->queue_stats_mapping_set == NULL) 3703 return -ENOTSUP; 3704 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3705 } 3706 3707 int 3708 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3709 uint8_t stat_idx) 3710 { 3711 int ret; 3712 3713 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3714 tx_queue_id, 3715 stat_idx, STAT_QMAP_TX)); 3716 3717 rte_ethdev_trace_set_tx_queue_stats_mapping(port_id, tx_queue_id, 3718 stat_idx, ret); 3719 3720 return ret; 3721 } 3722 3723 int 3724 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3725 uint8_t stat_idx) 3726 { 3727 int ret; 3728 3729 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3730 rx_queue_id, 3731 stat_idx, STAT_QMAP_RX)); 3732 3733 rte_ethdev_trace_set_rx_queue_stats_mapping(port_id, rx_queue_id, 3734 stat_idx, ret); 3735 3736 return ret; 3737 } 3738 3739 int 3740 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3741 { 3742 struct rte_eth_dev *dev; 3743 int ret; 3744 3745 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3746 dev = &rte_eth_devices[port_id]; 3747 3748 if (fw_version == NULL && fw_size > 0) { 3749 RTE_ETHDEV_LOG(ERR, 3750 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3751 port_id); 3752 return -EINVAL; 3753 } 3754 3755 if (*dev->dev_ops->fw_version_get == NULL) 3756 return -ENOTSUP; 3757 ret = eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3758 fw_version, fw_size)); 3759 3760 rte_ethdev_trace_fw_version_get(port_id, fw_version, fw_size, ret); 3761 3762 return ret; 3763 } 3764 3765 int 3766 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3767 { 3768 struct rte_eth_dev *dev; 3769 const struct rte_eth_desc_lim lim = { 3770 
.nb_max = UINT16_MAX, 3771 .nb_min = 0, 3772 .nb_align = 1, 3773 .nb_seg_max = UINT16_MAX, 3774 .nb_mtu_seg_max = UINT16_MAX, 3775 }; 3776 int diag; 3777 3778 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3779 dev = &rte_eth_devices[port_id]; 3780 3781 if (dev_info == NULL) { 3782 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3783 port_id); 3784 return -EINVAL; 3785 } 3786 3787 /* 3788 * Init dev_info before port_id check since caller does not have 3789 * return status and does not know if get is successful or not. 3790 */ 3791 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3792 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3793 3794 dev_info->rx_desc_lim = lim; 3795 dev_info->tx_desc_lim = lim; 3796 dev_info->device = dev->device; 3797 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3798 RTE_ETHER_CRC_LEN; 3799 dev_info->max_mtu = UINT16_MAX; 3800 dev_info->rss_algo_capa = RTE_ETH_HASH_ALGO_CAPA_MASK(DEFAULT); 3801 dev_info->max_rx_bufsize = UINT32_MAX; 3802 3803 if (*dev->dev_ops->dev_infos_get == NULL) 3804 return -ENOTSUP; 3805 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3806 if (diag != 0) { 3807 /* Cleanup already filled in device information */ 3808 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3809 return eth_err(port_id, diag); 3810 } 3811 3812 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3813 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3814 RTE_MAX_QUEUES_PER_PORT); 3815 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3816 RTE_MAX_QUEUES_PER_PORT); 3817 3818 dev_info->driver_name = dev->device->driver->name; 3819 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3820 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3821 3822 dev_info->dev_flags = &dev->data->dev_flags; 3823 3824 rte_ethdev_trace_info_get(port_id, dev_info); 3825 3826 return 0; 3827 } 3828 3829 int 3830 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3831 { 3832 struct rte_eth_dev *dev; 3833 3834 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3835 dev = &rte_eth_devices[port_id]; 3836 3837 if (dev_conf == NULL) { 3838 RTE_ETHDEV_LOG(ERR, 3839 "Cannot get ethdev port %u configuration to NULL\n", 3840 port_id); 3841 return -EINVAL; 3842 } 3843 3844 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3845 3846 rte_ethdev_trace_conf_get(port_id, dev_conf); 3847 3848 return 0; 3849 } 3850 3851 int 3852 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3853 uint32_t *ptypes, int num) 3854 { 3855 int i, j; 3856 struct rte_eth_dev *dev; 3857 const uint32_t *all_ptypes; 3858 3859 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3860 dev = &rte_eth_devices[port_id]; 3861 3862 if (ptypes == NULL && num > 0) { 3863 RTE_ETHDEV_LOG(ERR, 3864 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3865 port_id); 3866 return -EINVAL; 3867 } 3868 3869 if (*dev->dev_ops->dev_supported_ptypes_get == NULL) 3870 return 0; 3871 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3872 3873 if (!all_ptypes) 3874 return 0; 3875 3876 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3877 if (all_ptypes[i] & ptype_mask) { 3878 if (j < num) { 3879 ptypes[j] = all_ptypes[i]; 3880 3881 rte_ethdev_trace_get_supported_ptypes(port_id, 3882 j, num, ptypes[j]); 3883 } 3884 j++; 3885 } 3886 3887 return j; 3888 } 3889 3890 int 3891 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3892 
uint32_t *set_ptypes, unsigned int num) 3893 { 3894 const uint32_t valid_ptype_masks[] = { 3895 RTE_PTYPE_L2_MASK, 3896 RTE_PTYPE_L3_MASK, 3897 RTE_PTYPE_L4_MASK, 3898 RTE_PTYPE_TUNNEL_MASK, 3899 RTE_PTYPE_INNER_L2_MASK, 3900 RTE_PTYPE_INNER_L3_MASK, 3901 RTE_PTYPE_INNER_L4_MASK, 3902 }; 3903 const uint32_t *all_ptypes; 3904 struct rte_eth_dev *dev; 3905 uint32_t unused_mask; 3906 unsigned int i, j; 3907 int ret; 3908 3909 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3910 dev = &rte_eth_devices[port_id]; 3911 3912 if (num > 0 && set_ptypes == NULL) { 3913 RTE_ETHDEV_LOG(ERR, 3914 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3915 port_id); 3916 return -EINVAL; 3917 } 3918 3919 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3920 *dev->dev_ops->dev_ptypes_set == NULL) { 3921 ret = 0; 3922 goto ptype_unknown; 3923 } 3924 3925 if (ptype_mask == 0) { 3926 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3927 ptype_mask); 3928 goto ptype_unknown; 3929 } 3930 3931 unused_mask = ptype_mask; 3932 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3933 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3934 if (mask && mask != valid_ptype_masks[i]) { 3935 ret = -EINVAL; 3936 goto ptype_unknown; 3937 } 3938 unused_mask &= ~valid_ptype_masks[i]; 3939 } 3940 3941 if (unused_mask) { 3942 ret = -EINVAL; 3943 goto ptype_unknown; 3944 } 3945 3946 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3947 if (all_ptypes == NULL) { 3948 ret = 0; 3949 goto ptype_unknown; 3950 } 3951 3952 /* 3953 * Accommodate as many set_ptypes as possible. If the supplied 3954 * set_ptypes array is insufficient fill it partially. 3955 */ 3956 for (i = 0, j = 0; set_ptypes != NULL && 3957 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3958 if (ptype_mask & all_ptypes[i]) { 3959 if (j < num - 1) { 3960 set_ptypes[j] = all_ptypes[i]; 3961 3962 rte_ethdev_trace_set_ptypes(port_id, j, num, 3963 set_ptypes[j]); 3964 3965 j++; 3966 continue; 3967 } 3968 break; 3969 } 3970 } 3971 3972 if (set_ptypes != NULL && j < num) 3973 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3974 3975 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3976 3977 ptype_unknown: 3978 if (num > 0) 3979 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3980 3981 return ret; 3982 } 3983 3984 int 3985 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3986 unsigned int num) 3987 { 3988 int32_t ret; 3989 struct rte_eth_dev *dev; 3990 struct rte_eth_dev_info dev_info; 3991 3992 if (ma == NULL) { 3993 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3994 return -EINVAL; 3995 } 3996 3997 /* will check for us that port_id is a valid one */ 3998 ret = rte_eth_dev_info_get(port_id, &dev_info); 3999 if (ret != 0) 4000 return ret; 4001 4002 dev = &rte_eth_devices[port_id]; 4003 num = RTE_MIN(dev_info.max_mac_addrs, num); 4004 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 4005 4006 rte_eth_trace_macaddrs_get(port_id, num); 4007 4008 return num; 4009 } 4010 4011 int 4012 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 4013 { 4014 struct rte_eth_dev *dev; 4015 4016 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4017 dev = &rte_eth_devices[port_id]; 4018 4019 if (mac_addr == NULL) { 4020 RTE_ETHDEV_LOG(ERR, 4021 "Cannot get ethdev port %u MAC address to NULL\n", 4022 port_id); 4023 return -EINVAL; 4024 } 4025 4026 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 4027 4028 rte_eth_trace_macaddr_get(port_id, mac_addr); 4029 4030 return 0; 4031 } 4032 4033 int 4034 
rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 4035 { 4036 struct rte_eth_dev *dev; 4037 4038 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4039 dev = &rte_eth_devices[port_id]; 4040 4041 if (mtu == NULL) { 4042 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 4043 port_id); 4044 return -EINVAL; 4045 } 4046 4047 *mtu = dev->data->mtu; 4048 4049 rte_ethdev_trace_get_mtu(port_id, *mtu); 4050 4051 return 0; 4052 } 4053 4054 int 4055 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 4056 { 4057 int ret; 4058 struct rte_eth_dev_info dev_info; 4059 struct rte_eth_dev *dev; 4060 4061 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4062 dev = &rte_eth_devices[port_id]; 4063 if (*dev->dev_ops->mtu_set == NULL) 4064 return -ENOTSUP; 4065 4066 /* 4067 * Check if the device supports dev_infos_get, if it does not 4068 * skip min_mtu/max_mtu validation here as this requires values 4069 * that are populated within the call to rte_eth_dev_info_get() 4070 * which relies on dev->dev_ops->dev_infos_get. 4071 */ 4072 if (*dev->dev_ops->dev_infos_get != NULL) { 4073 ret = rte_eth_dev_info_get(port_id, &dev_info); 4074 if (ret != 0) 4075 return ret; 4076 4077 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 4078 if (ret != 0) 4079 return ret; 4080 } 4081 4082 if (dev->data->dev_configured == 0) { 4083 RTE_ETHDEV_LOG(ERR, 4084 "Port %u must be configured before MTU set\n", 4085 port_id); 4086 return -EINVAL; 4087 } 4088 4089 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 4090 if (ret == 0) 4091 dev->data->mtu = mtu; 4092 4093 ret = eth_err(port_id, ret); 4094 4095 rte_ethdev_trace_set_mtu(port_id, mtu, ret); 4096 4097 return ret; 4098 } 4099 4100 int 4101 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 4102 { 4103 struct rte_eth_dev *dev; 4104 int ret; 4105 4106 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4107 dev = &rte_eth_devices[port_id]; 4108 4109 if (!(dev->data->dev_conf.rxmode.offloads & 4110 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 4111 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 4112 port_id); 4113 return -ENOSYS; 4114 } 4115 4116 if (vlan_id > 4095) { 4117 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 4118 port_id, vlan_id); 4119 return -EINVAL; 4120 } 4121 if (*dev->dev_ops->vlan_filter_set == NULL) 4122 return -ENOTSUP; 4123 4124 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 4125 if (ret == 0) { 4126 struct rte_vlan_filter_conf *vfc; 4127 int vidx; 4128 int vbit; 4129 4130 vfc = &dev->data->vlan_filter_conf; 4131 vidx = vlan_id / 64; 4132 vbit = vlan_id % 64; 4133 4134 if (on) 4135 vfc->ids[vidx] |= RTE_BIT64(vbit); 4136 else 4137 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 4138 } 4139 4140 ret = eth_err(port_id, ret); 4141 4142 rte_ethdev_trace_vlan_filter(port_id, vlan_id, on, ret); 4143 4144 return ret; 4145 } 4146 4147 int 4148 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 4149 int on) 4150 { 4151 struct rte_eth_dev *dev; 4152 4153 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4154 dev = &rte_eth_devices[port_id]; 4155 4156 if (rx_queue_id >= dev->data->nb_rx_queues) { 4157 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 4158 return -EINVAL; 4159 } 4160 4161 if (*dev->dev_ops->vlan_strip_queue_set == NULL) 4162 return -ENOTSUP; 4163 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 4164 4165 rte_ethdev_trace_set_vlan_strip_on_queue(port_id, rx_queue_id, on); 4166 4167 return 0; 4168 } 4169 4170 int 4171 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 
4172 enum rte_vlan_type vlan_type, 4173 uint16_t tpid) 4174 { 4175 struct rte_eth_dev *dev; 4176 int ret; 4177 4178 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4179 dev = &rte_eth_devices[port_id]; 4180 4181 if (*dev->dev_ops->vlan_tpid_set == NULL) 4182 return -ENOTSUP; 4183 ret = eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 4184 tpid)); 4185 4186 rte_ethdev_trace_set_vlan_ether_type(port_id, vlan_type, tpid, ret); 4187 4188 return ret; 4189 } 4190 4191 int 4192 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 4193 { 4194 struct rte_eth_dev_info dev_info; 4195 struct rte_eth_dev *dev; 4196 int ret = 0; 4197 int mask = 0; 4198 int cur, org = 0; 4199 uint64_t orig_offloads; 4200 uint64_t dev_offloads; 4201 uint64_t new_offloads; 4202 4203 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4204 dev = &rte_eth_devices[port_id]; 4205 4206 /* save original values in case of failure */ 4207 orig_offloads = dev->data->dev_conf.rxmode.offloads; 4208 dev_offloads = orig_offloads; 4209 4210 /* check which option changed by application */ 4211 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 4212 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 4213 if (cur != org) { 4214 if (cur) 4215 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4216 else 4217 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4218 mask |= RTE_ETH_VLAN_STRIP_MASK; 4219 } 4220 4221 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 4222 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 4223 if (cur != org) { 4224 if (cur) 4225 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4226 else 4227 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4228 mask |= RTE_ETH_VLAN_FILTER_MASK; 4229 } 4230 4231 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 4232 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 4233 if (cur != org) { 4234 if (cur) 4235 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4236 else 4237 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4238 mask |= RTE_ETH_VLAN_EXTEND_MASK; 4239 } 4240 4241 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 4242 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 4243 if (cur != org) { 4244 if (cur) 4245 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4246 else 4247 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4248 mask |= RTE_ETH_QINQ_STRIP_MASK; 4249 } 4250 4251 /*no change*/ 4252 if (mask == 0) 4253 return ret; 4254 4255 ret = rte_eth_dev_info_get(port_id, &dev_info); 4256 if (ret != 0) 4257 return ret; 4258 4259 /* Rx VLAN offloading must be within its device capabilities */ 4260 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 4261 new_offloads = dev_offloads & ~orig_offloads; 4262 RTE_ETHDEV_LOG(ERR, 4263 "Ethdev port_id=%u requested new added VLAN offloads " 4264 "0x%" PRIx64 " must be within Rx offloads capabilities " 4265 "0x%" PRIx64 " in %s()\n", 4266 port_id, new_offloads, dev_info.rx_offload_capa, 4267 __func__); 4268 return -EINVAL; 4269 } 4270 4271 if (*dev->dev_ops->vlan_offload_set == NULL) 4272 return -ENOTSUP; 4273 dev->data->dev_conf.rxmode.offloads = dev_offloads; 4274 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 4275 if (ret) { 4276 /* hit an error restore original values */ 4277 dev->data->dev_conf.rxmode.offloads = orig_offloads; 4278 } 4279 4280 ret = eth_err(port_id, ret); 4281 4282 rte_ethdev_trace_set_vlan_offload(port_id, offload_mask, ret); 4283 4284 return ret; 4285 } 4286 4287 int 4288 rte_eth_dev_get_vlan_offload(uint16_t port_id) 4289 { 4290 struct 
rte_eth_dev *dev; 4291 uint64_t *dev_offloads; 4292 int ret = 0; 4293 4294 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4295 dev = &rte_eth_devices[port_id]; 4296 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 4297 4298 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 4299 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 4300 4301 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 4302 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 4303 4304 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 4305 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 4306 4307 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 4308 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 4309 4310 rte_ethdev_trace_get_vlan_offload(port_id, ret); 4311 4312 return ret; 4313 } 4314 4315 int 4316 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 4317 { 4318 struct rte_eth_dev *dev; 4319 int ret; 4320 4321 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4322 dev = &rte_eth_devices[port_id]; 4323 4324 if (*dev->dev_ops->vlan_pvid_set == NULL) 4325 return -ENOTSUP; 4326 ret = eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 4327 4328 rte_ethdev_trace_set_vlan_pvid(port_id, pvid, on, ret); 4329 4330 return ret; 4331 } 4332 4333 int 4334 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4335 { 4336 struct rte_eth_dev *dev; 4337 int ret; 4338 4339 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4340 dev = &rte_eth_devices[port_id]; 4341 4342 if (fc_conf == NULL) { 4343 RTE_ETHDEV_LOG(ERR, 4344 "Cannot get ethdev port %u flow control config to NULL\n", 4345 port_id); 4346 return -EINVAL; 4347 } 4348 4349 if (*dev->dev_ops->flow_ctrl_get == NULL) 4350 return -ENOTSUP; 4351 memset(fc_conf, 0, sizeof(*fc_conf)); 4352 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 4353 4354 rte_ethdev_trace_flow_ctrl_get(port_id, fc_conf, ret); 4355 4356 return ret; 4357 } 4358 4359 int 4360 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4361 { 4362 struct rte_eth_dev *dev; 4363 int ret; 4364 4365 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4366 dev = &rte_eth_devices[port_id]; 4367 4368 if (fc_conf == NULL) { 4369 RTE_ETHDEV_LOG(ERR, 4370 "Cannot set ethdev port %u flow control from NULL config\n", 4371 port_id); 4372 return -EINVAL; 4373 } 4374 4375 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 4376 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 4377 return -EINVAL; 4378 } 4379 4380 if (*dev->dev_ops->flow_ctrl_set == NULL) 4381 return -ENOTSUP; 4382 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 4383 4384 rte_ethdev_trace_flow_ctrl_set(port_id, fc_conf, ret); 4385 4386 return ret; 4387 } 4388 4389 int 4390 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 4391 struct rte_eth_pfc_conf *pfc_conf) 4392 { 4393 struct rte_eth_dev *dev; 4394 int ret; 4395 4396 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4397 dev = &rte_eth_devices[port_id]; 4398 4399 if (pfc_conf == NULL) { 4400 RTE_ETHDEV_LOG(ERR, 4401 "Cannot set ethdev port %u priority flow control from NULL config\n", 4402 port_id); 4403 return -EINVAL; 4404 } 4405 4406 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 4407 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 4408 return -EINVAL; 4409 } 4410 4411 /* High water, low water validation are device specific */ 4412 if (*dev->dev_ops->priority_flow_ctrl_set == NULL) 4413 return -ENOTSUP; 4414 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 4415 (dev, 
pfc_conf)); 4416 4417 rte_ethdev_trace_priority_flow_ctrl_set(port_id, pfc_conf, ret); 4418 4419 return ret; 4420 } 4421 4422 static int 4423 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4424 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4425 { 4426 if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) || 4427 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4428 if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) { 4429 RTE_ETHDEV_LOG(ERR, 4430 "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n", 4431 pfc_queue_conf->rx_pause.tx_qid, 4432 dev_info->nb_tx_queues); 4433 return -EINVAL; 4434 } 4435 4436 if (pfc_queue_conf->rx_pause.tc >= tc_max) { 4437 RTE_ETHDEV_LOG(ERR, 4438 "PFC TC not in range for Rx pause requested:%d max:%d\n", 4439 pfc_queue_conf->rx_pause.tc, tc_max); 4440 return -EINVAL; 4441 } 4442 } 4443 4444 return 0; 4445 } 4446 4447 static int 4448 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4449 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4450 { 4451 if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) || 4452 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4453 if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) { 4454 RTE_ETHDEV_LOG(ERR, 4455 "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n", 4456 pfc_queue_conf->tx_pause.rx_qid, 4457 dev_info->nb_rx_queues); 4458 return -EINVAL; 4459 } 4460 4461 if (pfc_queue_conf->tx_pause.tc >= tc_max) { 4462 RTE_ETHDEV_LOG(ERR, 4463 "PFC TC not in range for Tx pause requested:%d max:%d\n", 4464 pfc_queue_conf->tx_pause.tc, tc_max); 4465 return -EINVAL; 4466 } 4467 } 4468 4469 return 0; 4470 } 4471 4472 int 4473 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, 4474 struct rte_eth_pfc_queue_info *pfc_queue_info) 4475 { 4476 struct rte_eth_dev *dev; 4477 int ret; 4478 4479 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4480 dev = &rte_eth_devices[port_id]; 4481 4482 if (pfc_queue_info == NULL) { 4483 RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n", 4484 port_id); 4485 return -EINVAL; 4486 } 4487 4488 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL) 4489 return -ENOTSUP; 4490 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 4491 (dev, pfc_queue_info)); 4492 4493 rte_ethdev_trace_priority_flow_ctrl_queue_info_get(port_id, 4494 pfc_queue_info, ret); 4495 4496 return ret; 4497 } 4498 4499 int 4500 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, 4501 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4502 { 4503 struct rte_eth_pfc_queue_info pfc_info; 4504 struct rte_eth_dev_info dev_info; 4505 struct rte_eth_dev *dev; 4506 int ret; 4507 4508 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4509 dev = &rte_eth_devices[port_id]; 4510 4511 if (pfc_queue_conf == NULL) { 4512 RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n", 4513 port_id); 4514 return -EINVAL; 4515 } 4516 4517 ret = rte_eth_dev_info_get(port_id, &dev_info); 4518 if (ret != 0) 4519 return ret; 4520 4521 ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info); 4522 if (ret != 0) 4523 return ret; 4524 4525 if (pfc_info.tc_max == 0) { 4526 RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n", 4527 port_id); 4528 return -ENOTSUP; 4529 } 4530 4531 /* Check requested mode supported or not */ 4532 if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE && 4533 pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) { 4534 RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for 
port (%d)\n", 4535 port_id); 4536 return -EINVAL; 4537 } 4538 4539 if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE && 4540 pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) { 4541 RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n", 4542 port_id); 4543 return -EINVAL; 4544 } 4545 4546 /* Validate Rx pause parameters */ 4547 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4548 pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) { 4549 ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max, 4550 pfc_queue_conf); 4551 if (ret != 0) 4552 return ret; 4553 } 4554 4555 /* Validate Tx pause parameters */ 4556 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4557 pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) { 4558 ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max, 4559 pfc_queue_conf); 4560 if (ret != 0) 4561 return ret; 4562 } 4563 4564 if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL) 4565 return -ENOTSUP; 4566 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_config) 4567 (dev, pfc_queue_conf)); 4568 4569 rte_ethdev_trace_priority_flow_ctrl_queue_configure(port_id, 4570 pfc_queue_conf, ret); 4571 4572 return ret; 4573 } 4574 4575 static int 4576 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 4577 uint16_t reta_size) 4578 { 4579 uint16_t i, num; 4580 4581 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 4582 for (i = 0; i < num; i++) { 4583 if (reta_conf[i].mask) 4584 return 0; 4585 } 4586 4587 return -EINVAL; 4588 } 4589 4590 static int 4591 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 4592 uint16_t reta_size, 4593 uint16_t max_rxq) 4594 { 4595 uint16_t i, idx, shift; 4596 4597 if (max_rxq == 0) { 4598 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4599 return -EINVAL; 4600 } 4601 4602 for (i = 0; i < reta_size; i++) { 4603 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4604 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4605 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 4606 (reta_conf[idx].reta[shift] >= max_rxq)) { 4607 RTE_ETHDEV_LOG(ERR, 4608 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4609 idx, shift, 4610 reta_conf[idx].reta[shift], max_rxq); 4611 return -EINVAL; 4612 } 4613 } 4614 4615 return 0; 4616 } 4617 4618 int 4619 rte_eth_dev_rss_reta_update(uint16_t port_id, 4620 struct rte_eth_rss_reta_entry64 *reta_conf, 4621 uint16_t reta_size) 4622 { 4623 enum rte_eth_rx_mq_mode mq_mode; 4624 struct rte_eth_dev *dev; 4625 int ret; 4626 4627 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4628 dev = &rte_eth_devices[port_id]; 4629 4630 if (reta_conf == NULL) { 4631 RTE_ETHDEV_LOG(ERR, 4632 "Cannot update ethdev port %u RSS RETA to NULL\n", 4633 port_id); 4634 return -EINVAL; 4635 } 4636 4637 if (reta_size == 0) { 4638 RTE_ETHDEV_LOG(ERR, 4639 "Cannot update ethdev port %u RSS RETA with zero size\n", 4640 port_id); 4641 return -EINVAL; 4642 } 4643 4644 /* Check mask bits */ 4645 ret = eth_check_reta_mask(reta_conf, reta_size); 4646 if (ret < 0) 4647 return ret; 4648 4649 /* Check entry value */ 4650 ret = eth_check_reta_entry(reta_conf, reta_size, 4651 dev->data->nb_rx_queues); 4652 if (ret < 0) 4653 return ret; 4654 4655 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4656 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4657 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4658 return -ENOTSUP; 4659 } 4660 4661 if (*dev->dev_ops->reta_update == NULL) 4662 return -ENOTSUP; 4663 ret = eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4664 reta_size)); 4665 4666 
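	/*
	 * All argument validation (non-empty mask bits, RETA entries bounded
	 * by nb_rx_queues, RSS multi-queue mode enabled) has already happened
	 * above, so ret here only reflects the driver's verdict; it is traced
	 * below together with the requested table before being returned.
	 */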
rte_ethdev_trace_rss_reta_update(port_id, reta_conf, reta_size, ret); 4667 4668 return ret; 4669 } 4670 4671 int 4672 rte_eth_dev_rss_reta_query(uint16_t port_id, 4673 struct rte_eth_rss_reta_entry64 *reta_conf, 4674 uint16_t reta_size) 4675 { 4676 struct rte_eth_dev *dev; 4677 int ret; 4678 4679 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4680 dev = &rte_eth_devices[port_id]; 4681 4682 if (reta_conf == NULL) { 4683 RTE_ETHDEV_LOG(ERR, 4684 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4685 port_id); 4686 return -EINVAL; 4687 } 4688 4689 /* Check mask bits */ 4690 ret = eth_check_reta_mask(reta_conf, reta_size); 4691 if (ret < 0) 4692 return ret; 4693 4694 if (*dev->dev_ops->reta_query == NULL) 4695 return -ENOTSUP; 4696 ret = eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4697 reta_size)); 4698 4699 rte_ethdev_trace_rss_reta_query(port_id, reta_conf, reta_size, ret); 4700 4701 return ret; 4702 } 4703 4704 int 4705 rte_eth_dev_rss_hash_update(uint16_t port_id, 4706 struct rte_eth_rss_conf *rss_conf) 4707 { 4708 struct rte_eth_dev *dev; 4709 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4710 enum rte_eth_rx_mq_mode mq_mode; 4711 int ret; 4712 4713 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4714 dev = &rte_eth_devices[port_id]; 4715 4716 if (rss_conf == NULL) { 4717 RTE_ETHDEV_LOG(ERR, 4718 "Cannot update ethdev port %u RSS hash from NULL config\n", 4719 port_id); 4720 return -EINVAL; 4721 } 4722 4723 ret = rte_eth_dev_info_get(port_id, &dev_info); 4724 if (ret != 0) 4725 return ret; 4726 4727 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4728 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4729 dev_info.flow_type_rss_offloads) { 4730 RTE_ETHDEV_LOG(ERR, 4731 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4732 port_id, rss_conf->rss_hf, 4733 dev_info.flow_type_rss_offloads); 4734 return -EINVAL; 4735 } 4736 4737 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4738 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4739 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4740 return -ENOTSUP; 4741 } 4742 4743 if (rss_conf->rss_key != NULL && 4744 rss_conf->rss_key_len != dev_info.hash_key_size) { 4745 RTE_ETHDEV_LOG(ERR, 4746 "Ethdev port_id=%u invalid RSS key len: %u, valid value: %u\n", 4747 port_id, rss_conf->rss_key_len, dev_info.hash_key_size); 4748 return -EINVAL; 4749 } 4750 4751 if ((size_t)rss_conf->algorithm >= CHAR_BIT * sizeof(dev_info.rss_algo_capa) || 4752 (dev_info.rss_algo_capa & 4753 RTE_ETH_HASH_ALGO_TO_CAPA(rss_conf->algorithm)) == 0) { 4754 RTE_ETHDEV_LOG(ERR, 4755 "Ethdev port_id=%u configured RSS hash algorithm (%u)" 4756 "is not in the algorithm capability (0x%" PRIx32 ")\n", 4757 port_id, rss_conf->algorithm, dev_info.rss_algo_capa); 4758 return -EINVAL; 4759 } 4760 4761 if (*dev->dev_ops->rss_hash_update == NULL) 4762 return -ENOTSUP; 4763 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4764 rss_conf)); 4765 4766 rte_ethdev_trace_rss_hash_update(port_id, rss_conf, ret); 4767 4768 return ret; 4769 } 4770 4771 int 4772 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4773 struct rte_eth_rss_conf *rss_conf) 4774 { 4775 struct rte_eth_dev_info dev_info = { 0 }; 4776 struct rte_eth_dev *dev; 4777 int ret; 4778 4779 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4780 dev = &rte_eth_devices[port_id]; 4781 4782 if (rss_conf == NULL) { 4783 RTE_ETHDEV_LOG(ERR, 4784 "Cannot get ethdev port %u RSS hash config to NULL\n", 4785 port_id); 4786 
return -EINVAL; 4787 } 4788 4789 ret = rte_eth_dev_info_get(port_id, &dev_info); 4790 if (ret != 0) 4791 return ret; 4792 4793 if (rss_conf->rss_key != NULL && 4794 rss_conf->rss_key_len < dev_info.hash_key_size) { 4795 RTE_ETHDEV_LOG(ERR, 4796 "Ethdev port_id=%u invalid RSS key len: %u, should not be less than: %u\n", 4797 port_id, rss_conf->rss_key_len, dev_info.hash_key_size); 4798 return -EINVAL; 4799 } 4800 4801 rss_conf->algorithm = RTE_ETH_HASH_FUNCTION_DEFAULT; 4802 4803 if (*dev->dev_ops->rss_hash_conf_get == NULL) 4804 return -ENOTSUP; 4805 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4806 rss_conf)); 4807 4808 rte_ethdev_trace_rss_hash_conf_get(port_id, rss_conf, ret); 4809 4810 return ret; 4811 } 4812 4813 const char * 4814 rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo) 4815 { 4816 const char *name = "Unknown function"; 4817 unsigned int i; 4818 4819 for (i = 0; i < RTE_DIM(rte_eth_dev_rss_algo_names); i++) { 4820 if (rss_algo == rte_eth_dev_rss_algo_names[i].algo) 4821 return rte_eth_dev_rss_algo_names[i].name; 4822 } 4823 4824 return name; 4825 } 4826 4827 int 4828 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4829 struct rte_eth_udp_tunnel *udp_tunnel) 4830 { 4831 struct rte_eth_dev *dev; 4832 int ret; 4833 4834 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4835 dev = &rte_eth_devices[port_id]; 4836 4837 if (udp_tunnel == NULL) { 4838 RTE_ETHDEV_LOG(ERR, 4839 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4840 port_id); 4841 return -EINVAL; 4842 } 4843 4844 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4845 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4846 return -EINVAL; 4847 } 4848 4849 if (*dev->dev_ops->udp_tunnel_port_add == NULL) 4850 return -ENOTSUP; 4851 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4852 udp_tunnel)); 4853 4854 rte_ethdev_trace_udp_tunnel_port_add(port_id, udp_tunnel, ret); 4855 4856 return ret; 4857 } 4858 4859 int 4860 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4861 struct rte_eth_udp_tunnel *udp_tunnel) 4862 { 4863 struct rte_eth_dev *dev; 4864 int ret; 4865 4866 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4867 dev = &rte_eth_devices[port_id]; 4868 4869 if (udp_tunnel == NULL) { 4870 RTE_ETHDEV_LOG(ERR, 4871 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4872 port_id); 4873 return -EINVAL; 4874 } 4875 4876 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4877 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4878 return -EINVAL; 4879 } 4880 4881 if (*dev->dev_ops->udp_tunnel_port_del == NULL) 4882 return -ENOTSUP; 4883 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4884 udp_tunnel)); 4885 4886 rte_ethdev_trace_udp_tunnel_port_delete(port_id, udp_tunnel, ret); 4887 4888 return ret; 4889 } 4890 4891 int 4892 rte_eth_led_on(uint16_t port_id) 4893 { 4894 struct rte_eth_dev *dev; 4895 int ret; 4896 4897 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4898 dev = &rte_eth_devices[port_id]; 4899 4900 if (*dev->dev_ops->dev_led_on == NULL) 4901 return -ENOTSUP; 4902 ret = eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4903 4904 rte_eth_trace_led_on(port_id, ret); 4905 4906 return ret; 4907 } 4908 4909 int 4910 rte_eth_led_off(uint16_t port_id) 4911 { 4912 struct rte_eth_dev *dev; 4913 int ret; 4914 4915 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4916 dev = &rte_eth_devices[port_id]; 4917 4918 if (*dev->dev_ops->dev_led_off == NULL) 4919 return -ENOTSUP; 4920 ret = eth_err(port_id, 
(*dev->dev_ops->dev_led_off)(dev)); 4921 4922 rte_eth_trace_led_off(port_id, ret); 4923 4924 return ret; 4925 } 4926 4927 int 4928 rte_eth_fec_get_capability(uint16_t port_id, 4929 struct rte_eth_fec_capa *speed_fec_capa, 4930 unsigned int num) 4931 { 4932 struct rte_eth_dev *dev; 4933 int ret; 4934 4935 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4936 dev = &rte_eth_devices[port_id]; 4937 4938 if (speed_fec_capa == NULL && num > 0) { 4939 RTE_ETHDEV_LOG(ERR, 4940 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4941 port_id); 4942 return -EINVAL; 4943 } 4944 4945 if (*dev->dev_ops->fec_get_capability == NULL) 4946 return -ENOTSUP; 4947 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4948 4949 rte_eth_trace_fec_get_capability(port_id, speed_fec_capa, num, ret); 4950 4951 return ret; 4952 } 4953 4954 int 4955 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4956 { 4957 struct rte_eth_dev *dev; 4958 int ret; 4959 4960 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4961 dev = &rte_eth_devices[port_id]; 4962 4963 if (fec_capa == NULL) { 4964 RTE_ETHDEV_LOG(ERR, 4965 "Cannot get ethdev port %u current FEC mode to NULL\n", 4966 port_id); 4967 return -EINVAL; 4968 } 4969 4970 if (*dev->dev_ops->fec_get == NULL) 4971 return -ENOTSUP; 4972 ret = eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4973 4974 rte_eth_trace_fec_get(port_id, fec_capa, ret); 4975 4976 return ret; 4977 } 4978 4979 int 4980 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4981 { 4982 struct rte_eth_dev *dev; 4983 int ret; 4984 4985 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4986 dev = &rte_eth_devices[port_id]; 4987 4988 if (fec_capa == 0) { 4989 RTE_ETHDEV_LOG(ERR, "At least one FEC mode should be specified\n"); 4990 return -EINVAL; 4991 } 4992 4993 if (*dev->dev_ops->fec_set == NULL) 4994 return -ENOTSUP; 4995 ret = eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4996 4997 rte_eth_trace_fec_set(port_id, fec_capa, ret); 4998 4999 return ret; 5000 } 5001 5002 /* 5003 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 5004 * an empty spot. 
5005 */ 5006 static int 5007 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 5008 { 5009 struct rte_eth_dev_info dev_info; 5010 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5011 unsigned i; 5012 int ret; 5013 5014 ret = rte_eth_dev_info_get(port_id, &dev_info); 5015 if (ret != 0) 5016 return -1; 5017 5018 for (i = 0; i < dev_info.max_mac_addrs; i++) 5019 if (memcmp(addr, &dev->data->mac_addrs[i], 5020 RTE_ETHER_ADDR_LEN) == 0) 5021 return i; 5022 5023 return -1; 5024 } 5025 5026 static const struct rte_ether_addr null_mac_addr; 5027 5028 int 5029 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 5030 uint32_t pool) 5031 { 5032 struct rte_eth_dev *dev; 5033 int index; 5034 uint64_t pool_mask; 5035 int ret; 5036 5037 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5038 dev = &rte_eth_devices[port_id]; 5039 5040 if (addr == NULL) { 5041 RTE_ETHDEV_LOG(ERR, 5042 "Cannot add ethdev port %u MAC address from NULL address\n", 5043 port_id); 5044 return -EINVAL; 5045 } 5046 5047 if (*dev->dev_ops->mac_addr_add == NULL) 5048 return -ENOTSUP; 5049 5050 if (rte_is_zero_ether_addr(addr)) { 5051 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 5052 port_id); 5053 return -EINVAL; 5054 } 5055 if (pool >= RTE_ETH_64_POOLS) { 5056 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 5057 return -EINVAL; 5058 } 5059 5060 index = eth_dev_get_mac_addr_index(port_id, addr); 5061 if (index < 0) { 5062 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 5063 if (index < 0) { 5064 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 5065 port_id); 5066 return -ENOSPC; 5067 } 5068 } else { 5069 pool_mask = dev->data->mac_pool_sel[index]; 5070 5071 /* Check if both MAC address and pool is already there, and do nothing */ 5072 if (pool_mask & RTE_BIT64(pool)) 5073 return 0; 5074 } 5075 5076 /* Update NIC */ 5077 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 5078 5079 if (ret == 0) { 5080 /* Update address in NIC data structure */ 5081 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 5082 5083 /* Update pool bitmap in NIC data structure */ 5084 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 5085 } 5086 5087 ret = eth_err(port_id, ret); 5088 5089 rte_ethdev_trace_mac_addr_add(port_id, addr, pool, ret); 5090 5091 return ret; 5092 } 5093 5094 int 5095 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 5096 { 5097 struct rte_eth_dev *dev; 5098 int index; 5099 5100 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5101 dev = &rte_eth_devices[port_id]; 5102 5103 if (addr == NULL) { 5104 RTE_ETHDEV_LOG(ERR, 5105 "Cannot remove ethdev port %u MAC address from NULL address\n", 5106 port_id); 5107 return -EINVAL; 5108 } 5109 5110 if (*dev->dev_ops->mac_addr_remove == NULL) 5111 return -ENOTSUP; 5112 5113 index = eth_dev_get_mac_addr_index(port_id, addr); 5114 if (index == 0) { 5115 RTE_ETHDEV_LOG(ERR, 5116 "Port %u: Cannot remove default MAC address\n", 5117 port_id); 5118 return -EADDRINUSE; 5119 } else if (index < 0) 5120 return 0; /* Do nothing if address wasn't found */ 5121 5122 /* Update NIC */ 5123 (*dev->dev_ops->mac_addr_remove)(dev, index); 5124 5125 /* Update address in NIC data structure */ 5126 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 5127 5128 /* reset pool bitmap */ 5129 dev->data->mac_pool_sel[index] = 0; 5130 5131 rte_ethdev_trace_mac_addr_remove(port_id, addr); 5132 5133 return 0; 5134 } 5135 5136 int 5137 
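/*
 * Illustrative usage sketch (not part of the original sources): add a
 * secondary, locally administered unicast address to pool 0 and drop it
 * again later.  The default (index 0) address is changed instead with
 * rte_eth_dev_default_mac_addr_set() defined just below.
 *
 *	struct rte_ether_addr extra = {{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }};
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &extra, 0) == 0) {
 *		... frames to 02:00:00:00:00:01 are now accepted ...
 *		rte_eth_dev_mac_addr_remove(port_id, &extra);
 *	}
 */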
rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 5138 { 5139 struct rte_eth_dev *dev; 5140 int index; 5141 int ret; 5142 5143 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5144 dev = &rte_eth_devices[port_id]; 5145 5146 if (addr == NULL) { 5147 RTE_ETHDEV_LOG(ERR, 5148 "Cannot set ethdev port %u default MAC address from NULL address\n", 5149 port_id); 5150 return -EINVAL; 5151 } 5152 5153 if (!rte_is_valid_assigned_ether_addr(addr)) 5154 return -EINVAL; 5155 5156 if (*dev->dev_ops->mac_addr_set == NULL) 5157 return -ENOTSUP; 5158 5159 /* Keep address unique in dev->data->mac_addrs[]. */ 5160 index = eth_dev_get_mac_addr_index(port_id, addr); 5161 if (index > 0) { 5162 RTE_ETHDEV_LOG(ERR, 5163 "New default address for port %u was already in the address list. Please remove it first.\n", 5164 port_id); 5165 return -EEXIST; 5166 } 5167 5168 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 5169 if (ret < 0) 5170 return ret; 5171 5172 /* Update default address in NIC data structure */ 5173 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 5174 5175 rte_ethdev_trace_default_mac_addr_set(port_id, addr); 5176 5177 return 0; 5178 } 5179 5180 5181 /* 5182 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 5183 * an empty spot. 5184 */ 5185 static int 5186 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 5187 const struct rte_ether_addr *addr) 5188 { 5189 struct rte_eth_dev_info dev_info; 5190 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5191 unsigned i; 5192 int ret; 5193 5194 ret = rte_eth_dev_info_get(port_id, &dev_info); 5195 if (ret != 0) 5196 return -1; 5197 5198 if (!dev->data->hash_mac_addrs) 5199 return -1; 5200 5201 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 5202 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 5203 RTE_ETHER_ADDR_LEN) == 0) 5204 return i; 5205 5206 return -1; 5207 } 5208 5209 int 5210 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 5211 uint8_t on) 5212 { 5213 int index; 5214 int ret; 5215 struct rte_eth_dev *dev; 5216 5217 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5218 dev = &rte_eth_devices[port_id]; 5219 5220 if (addr == NULL) { 5221 RTE_ETHDEV_LOG(ERR, 5222 "Cannot set ethdev port %u unicast hash table from NULL address\n", 5223 port_id); 5224 return -EINVAL; 5225 } 5226 5227 if (rte_is_zero_ether_addr(addr)) { 5228 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 5229 port_id); 5230 return -EINVAL; 5231 } 5232 5233 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 5234 /* Check if it's already there, and do nothing */ 5235 if ((index >= 0) && on) 5236 return 0; 5237 5238 if (index < 0) { 5239 if (!on) { 5240 RTE_ETHDEV_LOG(ERR, 5241 "Port %u: the MAC address was not set in UTA\n", 5242 port_id); 5243 return -EINVAL; 5244 } 5245 5246 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 5247 if (index < 0) { 5248 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 5249 port_id); 5250 return -ENOSPC; 5251 } 5252 } 5253 5254 if (*dev->dev_ops->uc_hash_table_set == NULL) 5255 return -ENOTSUP; 5256 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 5257 if (ret == 0) { 5258 /* Update address in NIC data structure */ 5259 if (on) 5260 rte_ether_addr_copy(addr, 5261 &dev->data->hash_mac_addrs[index]); 5262 else 5263 rte_ether_addr_copy(&null_mac_addr, 5264 &dev->data->hash_mac_addrs[index]); 5265 } 5266 5267 ret = eth_err(port_id, ret); 5268 5269 rte_ethdev_trace_uc_hash_table_set(port_id, on, ret); 5270 5271 return 
ret; 5272 } 5273 5274 int 5275 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 5276 { 5277 struct rte_eth_dev *dev; 5278 int ret; 5279 5280 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5281 dev = &rte_eth_devices[port_id]; 5282 5283 if (*dev->dev_ops->uc_all_hash_table_set == NULL) 5284 return -ENOTSUP; 5285 ret = eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, on)); 5286 5287 rte_ethdev_trace_uc_all_hash_table_set(port_id, on, ret); 5288 5289 return ret; 5290 } 5291 5292 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 5293 uint32_t tx_rate) 5294 { 5295 struct rte_eth_dev *dev; 5296 struct rte_eth_dev_info dev_info; 5297 struct rte_eth_link link; 5298 int ret; 5299 5300 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5301 dev = &rte_eth_devices[port_id]; 5302 5303 ret = rte_eth_dev_info_get(port_id, &dev_info); 5304 if (ret != 0) 5305 return ret; 5306 5307 link = dev->data->dev_link; 5308 5309 if (queue_idx > dev_info.max_tx_queues) { 5310 RTE_ETHDEV_LOG(ERR, 5311 "Set queue rate limit:port %u: invalid queue ID=%u\n", 5312 port_id, queue_idx); 5313 return -EINVAL; 5314 } 5315 5316 if (tx_rate > link.link_speed) { 5317 RTE_ETHDEV_LOG(ERR, 5318 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 5319 tx_rate, link.link_speed); 5320 return -EINVAL; 5321 } 5322 5323 if (*dev->dev_ops->set_queue_rate_limit == NULL) 5324 return -ENOTSUP; 5325 ret = eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 5326 queue_idx, tx_rate)); 5327 5328 rte_eth_trace_set_queue_rate_limit(port_id, queue_idx, tx_rate, ret); 5329 5330 return ret; 5331 } 5332 5333 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, 5334 uint8_t avail_thresh) 5335 { 5336 struct rte_eth_dev *dev; 5337 int ret; 5338 5339 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5340 dev = &rte_eth_devices[port_id]; 5341 5342 if (queue_id > dev->data->nb_rx_queues) { 5343 RTE_ETHDEV_LOG(ERR, 5344 "Set queue avail thresh: port %u: invalid queue ID=%u.\n", 5345 port_id, queue_id); 5346 return -EINVAL; 5347 } 5348 5349 if (avail_thresh > 99) { 5350 RTE_ETHDEV_LOG(ERR, 5351 "Set queue avail thresh: port %u: threshold should be <= 99.\n", 5352 port_id); 5353 return -EINVAL; 5354 } 5355 if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL) 5356 return -ENOTSUP; 5357 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev, 5358 queue_id, avail_thresh)); 5359 5360 rte_eth_trace_rx_avail_thresh_set(port_id, queue_id, avail_thresh, ret); 5361 5362 return ret; 5363 } 5364 5365 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, 5366 uint8_t *avail_thresh) 5367 { 5368 struct rte_eth_dev *dev; 5369 int ret; 5370 5371 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5372 dev = &rte_eth_devices[port_id]; 5373 5374 if (queue_id == NULL) 5375 return -EINVAL; 5376 if (*queue_id >= dev->data->nb_rx_queues) 5377 *queue_id = 0; 5378 5379 if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL) 5380 return -ENOTSUP; 5381 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev, 5382 queue_id, avail_thresh)); 5383 5384 rte_eth_trace_rx_avail_thresh_query(port_id, *queue_id, ret); 5385 5386 return ret; 5387 } 5388 5389 RTE_INIT(eth_dev_init_fp_ops) 5390 { 5391 uint32_t i; 5392 5393 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 5394 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 5395 } 5396 5397 RTE_INIT(eth_dev_init_cb_lists) 5398 { 5399 uint16_t i; 5400 5401 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 5402 
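		/* give every possible port an empty event-callback list before
		 * any bus gets a chance to probe a device
		 */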
TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 5403 } 5404 5405 int 5406 rte_eth_dev_callback_register(uint16_t port_id, 5407 enum rte_eth_event_type event, 5408 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5409 { 5410 struct rte_eth_dev *dev; 5411 struct rte_eth_dev_callback *user_cb; 5412 uint16_t next_port; 5413 uint16_t last_port; 5414 5415 if (cb_fn == NULL) { 5416 RTE_ETHDEV_LOG(ERR, 5417 "Cannot register ethdev port %u callback from NULL\n", 5418 port_id); 5419 return -EINVAL; 5420 } 5421 5422 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5423 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 5424 return -EINVAL; 5425 } 5426 5427 if (port_id == RTE_ETH_ALL) { 5428 next_port = 0; 5429 last_port = RTE_MAX_ETHPORTS - 1; 5430 } else { 5431 next_port = last_port = port_id; 5432 } 5433 5434 rte_spinlock_lock(ð_dev_cb_lock); 5435 5436 do { 5437 dev = &rte_eth_devices[next_port]; 5438 5439 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 5440 if (user_cb->cb_fn == cb_fn && 5441 user_cb->cb_arg == cb_arg && 5442 user_cb->event == event) { 5443 break; 5444 } 5445 } 5446 5447 /* create a new callback. */ 5448 if (user_cb == NULL) { 5449 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 5450 sizeof(struct rte_eth_dev_callback), 0); 5451 if (user_cb != NULL) { 5452 user_cb->cb_fn = cb_fn; 5453 user_cb->cb_arg = cb_arg; 5454 user_cb->event = event; 5455 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 5456 user_cb, next); 5457 } else { 5458 rte_spinlock_unlock(ð_dev_cb_lock); 5459 rte_eth_dev_callback_unregister(port_id, event, 5460 cb_fn, cb_arg); 5461 return -ENOMEM; 5462 } 5463 5464 } 5465 } while (++next_port <= last_port); 5466 5467 rte_spinlock_unlock(ð_dev_cb_lock); 5468 5469 rte_ethdev_trace_callback_register(port_id, event, cb_fn, cb_arg); 5470 5471 return 0; 5472 } 5473 5474 int 5475 rte_eth_dev_callback_unregister(uint16_t port_id, 5476 enum rte_eth_event_type event, 5477 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5478 { 5479 int ret; 5480 struct rte_eth_dev *dev; 5481 struct rte_eth_dev_callback *cb, *next; 5482 uint16_t next_port; 5483 uint16_t last_port; 5484 5485 if (cb_fn == NULL) { 5486 RTE_ETHDEV_LOG(ERR, 5487 "Cannot unregister ethdev port %u callback from NULL\n", 5488 port_id); 5489 return -EINVAL; 5490 } 5491 5492 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5493 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 5494 return -EINVAL; 5495 } 5496 5497 if (port_id == RTE_ETH_ALL) { 5498 next_port = 0; 5499 last_port = RTE_MAX_ETHPORTS - 1; 5500 } else { 5501 next_port = last_port = port_id; 5502 } 5503 5504 rte_spinlock_lock(ð_dev_cb_lock); 5505 5506 do { 5507 dev = &rte_eth_devices[next_port]; 5508 ret = 0; 5509 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 5510 cb = next) { 5511 5512 next = TAILQ_NEXT(cb, next); 5513 5514 if (cb->cb_fn != cb_fn || cb->event != event || 5515 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 5516 continue; 5517 5518 /* 5519 * if this callback is not executing right now, 5520 * then remove it. 
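			 * Otherwise keep it on the list and report -EAGAIN so
			 * the caller can retry once the callback has finished
			 * executing.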
5521 */ 5522 if (cb->active == 0) { 5523 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 5524 rte_free(cb); 5525 } else { 5526 ret = -EAGAIN; 5527 } 5528 } 5529 } while (++next_port <= last_port); 5530 5531 rte_spinlock_unlock(ð_dev_cb_lock); 5532 5533 rte_ethdev_trace_callback_unregister(port_id, event, cb_fn, cb_arg, 5534 ret); 5535 5536 return ret; 5537 } 5538 5539 int 5540 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 5541 { 5542 uint32_t vec; 5543 struct rte_eth_dev *dev; 5544 struct rte_intr_handle *intr_handle; 5545 uint16_t qid; 5546 int rc; 5547 5548 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5549 dev = &rte_eth_devices[port_id]; 5550 5551 if (!dev->intr_handle) { 5552 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5553 return -ENOTSUP; 5554 } 5555 5556 intr_handle = dev->intr_handle; 5557 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5558 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5559 return -EPERM; 5560 } 5561 5562 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 5563 vec = rte_intr_vec_list_index_get(intr_handle, qid); 5564 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5565 5566 rte_ethdev_trace_rx_intr_ctl(port_id, qid, epfd, op, data, rc); 5567 5568 if (rc && rc != -EEXIST) { 5569 RTE_ETHDEV_LOG(ERR, 5570 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5571 port_id, qid, op, epfd, vec); 5572 } 5573 } 5574 5575 return 0; 5576 } 5577 5578 int 5579 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 5580 { 5581 struct rte_intr_handle *intr_handle; 5582 struct rte_eth_dev *dev; 5583 unsigned int efd_idx; 5584 uint32_t vec; 5585 int fd; 5586 5587 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 5588 dev = &rte_eth_devices[port_id]; 5589 5590 if (queue_id >= dev->data->nb_rx_queues) { 5591 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5592 return -1; 5593 } 5594 5595 if (!dev->intr_handle) { 5596 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5597 return -1; 5598 } 5599 5600 intr_handle = dev->intr_handle; 5601 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5602 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5603 return -1; 5604 } 5605 5606 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5607 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
5608 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 5609 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 5610 5611 rte_ethdev_trace_rx_intr_ctl_q_get_fd(port_id, queue_id, fd); 5612 5613 return fd; 5614 } 5615 5616 int 5617 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 5618 int epfd, int op, void *data) 5619 { 5620 uint32_t vec; 5621 struct rte_eth_dev *dev; 5622 struct rte_intr_handle *intr_handle; 5623 int rc; 5624 5625 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5626 dev = &rte_eth_devices[port_id]; 5627 5628 if (queue_id >= dev->data->nb_rx_queues) { 5629 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5630 return -EINVAL; 5631 } 5632 5633 if (!dev->intr_handle) { 5634 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5635 return -ENOTSUP; 5636 } 5637 5638 intr_handle = dev->intr_handle; 5639 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5640 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5641 return -EPERM; 5642 } 5643 5644 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5645 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5646 5647 rte_ethdev_trace_rx_intr_ctl_q(port_id, queue_id, epfd, op, data, rc); 5648 5649 if (rc && rc != -EEXIST) { 5650 RTE_ETHDEV_LOG(ERR, 5651 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5652 port_id, queue_id, op, epfd, vec); 5653 return rc; 5654 } 5655 5656 return 0; 5657 } 5658 5659 int 5660 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5661 uint16_t queue_id) 5662 { 5663 struct rte_eth_dev *dev; 5664 int ret; 5665 5666 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5667 dev = &rte_eth_devices[port_id]; 5668 5669 ret = eth_dev_validate_rx_queue(dev, queue_id); 5670 if (ret != 0) 5671 return ret; 5672 5673 if (*dev->dev_ops->rx_queue_intr_enable == NULL) 5674 return -ENOTSUP; 5675 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5676 5677 rte_ethdev_trace_rx_intr_enable(port_id, queue_id, ret); 5678 5679 return ret; 5680 } 5681 5682 int 5683 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5684 uint16_t queue_id) 5685 { 5686 struct rte_eth_dev *dev; 5687 int ret; 5688 5689 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5690 dev = &rte_eth_devices[port_id]; 5691 5692 ret = eth_dev_validate_rx_queue(dev, queue_id); 5693 if (ret != 0) 5694 return ret; 5695 5696 if (*dev->dev_ops->rx_queue_intr_disable == NULL) 5697 return -ENOTSUP; 5698 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5699 5700 rte_ethdev_trace_rx_intr_disable(port_id, queue_id, ret); 5701 5702 return ret; 5703 } 5704 5705 5706 const struct rte_eth_rxtx_callback * 5707 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5708 rte_rx_callback_fn fn, void *user_param) 5709 { 5710 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5711 rte_errno = ENOTSUP; 5712 return NULL; 5713 #endif 5714 struct rte_eth_dev *dev; 5715 5716 /* check input parameters */ 5717 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5718 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5719 rte_errno = EINVAL; 5720 return NULL; 5721 } 5722 dev = &rte_eth_devices[port_id]; 5723 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5724 rte_errno = EINVAL; 5725 return NULL; 5726 } 5727 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5728 5729 if (cb == NULL) { 5730 rte_errno = ENOMEM; 5731 return NULL; 5732 } 5733 5734 cb->fn.rx = fn; 5735 cb->param = user_param; 5736 5737 rte_spinlock_lock(ð_dev_rx_cb_lock); 5738 /* Add the callbacks in fifo order. 
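	 * New callbacks are appended at the tail so they run after the ones
	 * registered earlier.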
*/ 5739 struct rte_eth_rxtx_callback *tail = 5740 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5741 5742 if (!tail) { 5743 /* Stores to cb->fn and cb->param should complete before 5744 * cb is visible to data plane. 5745 */ 5746 rte_atomic_store_explicit( 5747 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5748 cb, rte_memory_order_release); 5749 5750 } else { 5751 while (tail->next) 5752 tail = tail->next; 5753 /* Stores to cb->fn and cb->param should complete before 5754 * cb is visible to data plane. 5755 */ 5756 rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release); 5757 } 5758 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5759 5760 rte_eth_trace_add_rx_callback(port_id, queue_id, fn, user_param, cb); 5761 5762 return cb; 5763 } 5764 5765 const struct rte_eth_rxtx_callback * 5766 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5767 rte_rx_callback_fn fn, void *user_param) 5768 { 5769 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5770 rte_errno = ENOTSUP; 5771 return NULL; 5772 #endif 5773 /* check input parameters */ 5774 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5775 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5776 rte_errno = EINVAL; 5777 return NULL; 5778 } 5779 5780 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5781 5782 if (cb == NULL) { 5783 rte_errno = ENOMEM; 5784 return NULL; 5785 } 5786 5787 cb->fn.rx = fn; 5788 cb->param = user_param; 5789 5790 rte_spinlock_lock(ð_dev_rx_cb_lock); 5791 /* Add the callbacks at first position */ 5792 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5793 /* Stores to cb->fn, cb->param and cb->next should complete before 5794 * cb is visible to data plane threads. 5795 */ 5796 rte_atomic_store_explicit( 5797 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5798 cb, rte_memory_order_release); 5799 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5800 5801 rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param, 5802 cb); 5803 5804 return cb; 5805 } 5806 5807 const struct rte_eth_rxtx_callback * 5808 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5809 rte_tx_callback_fn fn, void *user_param) 5810 { 5811 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5812 rte_errno = ENOTSUP; 5813 return NULL; 5814 #endif 5815 struct rte_eth_dev *dev; 5816 5817 /* check input parameters */ 5818 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5819 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5820 rte_errno = EINVAL; 5821 return NULL; 5822 } 5823 5824 dev = &rte_eth_devices[port_id]; 5825 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5826 rte_errno = EINVAL; 5827 return NULL; 5828 } 5829 5830 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5831 5832 if (cb == NULL) { 5833 rte_errno = ENOMEM; 5834 return NULL; 5835 } 5836 5837 cb->fn.tx = fn; 5838 cb->param = user_param; 5839 5840 rte_spinlock_lock(ð_dev_tx_cb_lock); 5841 /* Add the callbacks in fifo order. */ 5842 struct rte_eth_rxtx_callback *tail = 5843 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5844 5845 if (!tail) { 5846 /* Stores to cb->fn and cb->param should complete before 5847 * cb is visible to data plane. 5848 */ 5849 rte_atomic_store_explicit( 5850 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5851 cb, rte_memory_order_release); 5852 5853 } else { 5854 while (tail->next) 5855 tail = tail->next; 5856 /* Stores to cb->fn and cb->param should complete before 5857 * cb is visible to data plane. 
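		 * The release ordering on the store below provides exactly
		 * that guarantee.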
5858 */ 5859 rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release); 5860 } 5861 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5862 5863 rte_eth_trace_add_tx_callback(port_id, queue_id, fn, user_param, cb); 5864 5865 return cb; 5866 } 5867 5868 int 5869 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5870 const struct rte_eth_rxtx_callback *user_cb) 5871 { 5872 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5873 return -ENOTSUP; 5874 #endif 5875 /* Check input parameters. */ 5876 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5877 if (user_cb == NULL || 5878 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5879 return -EINVAL; 5880 5881 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5882 struct rte_eth_rxtx_callback *cb; 5883 RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb; 5884 int ret = -EINVAL; 5885 5886 rte_spinlock_lock(ð_dev_rx_cb_lock); 5887 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5888 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5889 cb = *prev_cb; 5890 if (cb == user_cb) { 5891 /* Remove the user cb from the callback list. */ 5892 rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed); 5893 ret = 0; 5894 break; 5895 } 5896 } 5897 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5898 5899 rte_eth_trace_remove_rx_callback(port_id, queue_id, user_cb, ret); 5900 5901 return ret; 5902 } 5903 5904 int 5905 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5906 const struct rte_eth_rxtx_callback *user_cb) 5907 { 5908 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5909 return -ENOTSUP; 5910 #endif 5911 /* Check input parameters. */ 5912 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5913 if (user_cb == NULL || 5914 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5915 return -EINVAL; 5916 5917 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5918 int ret = -EINVAL; 5919 struct rte_eth_rxtx_callback *cb; 5920 RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb; 5921 5922 rte_spinlock_lock(ð_dev_tx_cb_lock); 5923 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5924 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5925 cb = *prev_cb; 5926 if (cb == user_cb) { 5927 /* Remove the user cb from the callback list. 
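			 * The callback memory itself is not freed here; the
			 * caller must release it only once no data-plane
			 * thread can still be executing it.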
*/ 5928 rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed); 5929 ret = 0; 5930 break; 5931 } 5932 } 5933 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5934 5935 rte_eth_trace_remove_tx_callback(port_id, queue_id, user_cb, ret); 5936 5937 return ret; 5938 } 5939 5940 int 5941 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5942 struct rte_eth_rxq_info *qinfo) 5943 { 5944 struct rte_eth_dev *dev; 5945 5946 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5947 dev = &rte_eth_devices[port_id]; 5948 5949 if (queue_id >= dev->data->nb_rx_queues) { 5950 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5951 return -EINVAL; 5952 } 5953 5954 if (qinfo == NULL) { 5955 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5956 port_id, queue_id); 5957 return -EINVAL; 5958 } 5959 5960 if (dev->data->rx_queues == NULL || 5961 dev->data->rx_queues[queue_id] == NULL) { 5962 RTE_ETHDEV_LOG(ERR, 5963 "Rx queue %"PRIu16" of device with port_id=%" 5964 PRIu16" has not been setup\n", 5965 queue_id, port_id); 5966 return -EINVAL; 5967 } 5968 5969 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5970 RTE_ETHDEV_LOG(INFO, 5971 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5972 queue_id, port_id); 5973 return -EINVAL; 5974 } 5975 5976 if (*dev->dev_ops->rxq_info_get == NULL) 5977 return -ENOTSUP; 5978 5979 memset(qinfo, 0, sizeof(*qinfo)); 5980 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5981 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5982 5983 rte_eth_trace_rx_queue_info_get(port_id, queue_id, qinfo); 5984 5985 return 0; 5986 } 5987 5988 int 5989 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5990 struct rte_eth_txq_info *qinfo) 5991 { 5992 struct rte_eth_dev *dev; 5993 5994 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5995 dev = &rte_eth_devices[port_id]; 5996 5997 if (queue_id >= dev->data->nb_tx_queues) { 5998 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5999 return -EINVAL; 6000 } 6001 6002 if (qinfo == NULL) { 6003 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 6004 port_id, queue_id); 6005 return -EINVAL; 6006 } 6007 6008 if (dev->data->tx_queues == NULL || 6009 dev->data->tx_queues[queue_id] == NULL) { 6010 RTE_ETHDEV_LOG(ERR, 6011 "Tx queue %"PRIu16" of device with port_id=%" 6012 PRIu16" has not been setup\n", 6013 queue_id, port_id); 6014 return -EINVAL; 6015 } 6016 6017 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 6018 RTE_ETHDEV_LOG(INFO, 6019 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 6020 queue_id, port_id); 6021 return -EINVAL; 6022 } 6023 6024 if (*dev->dev_ops->txq_info_get == NULL) 6025 return -ENOTSUP; 6026 6027 memset(qinfo, 0, sizeof(*qinfo)); 6028 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 6029 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 6030 6031 rte_eth_trace_tx_queue_info_get(port_id, queue_id, qinfo); 6032 6033 return 0; 6034 } 6035 6036 int 6037 rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 6038 struct rte_eth_recycle_rxq_info *recycle_rxq_info) 6039 { 6040 struct rte_eth_dev *dev; 6041 int ret; 6042 6043 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6044 dev = &rte_eth_devices[port_id]; 6045 6046 ret = eth_dev_validate_rx_queue(dev, queue_id); 6047 if (unlikely(ret != 0)) 6048 return ret; 6049 6050 if (*dev->dev_ops->recycle_rxq_info_get == NULL) 6051 return -ENOTSUP; 6052 6053 
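	/*
	 * The driver describes the Rx descriptor ring (mbuf ring, mempool,
	 * refill requirement) so that rte_eth_recycle_mbufs() can refill this
	 * queue directly from a Tx queue's just-freed mbufs.
	 */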
dev->dev_ops->recycle_rxq_info_get(dev, queue_id, recycle_rxq_info); 6054 6055 return 0; 6056 } 6057 6058 int 6059 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 6060 struct rte_eth_burst_mode *mode) 6061 { 6062 struct rte_eth_dev *dev; 6063 int ret; 6064 6065 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6066 dev = &rte_eth_devices[port_id]; 6067 6068 if (queue_id >= dev->data->nb_rx_queues) { 6069 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 6070 return -EINVAL; 6071 } 6072 6073 if (mode == NULL) { 6074 RTE_ETHDEV_LOG(ERR, 6075 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 6076 port_id, queue_id); 6077 return -EINVAL; 6078 } 6079 6080 if (*dev->dev_ops->rx_burst_mode_get == NULL) 6081 return -ENOTSUP; 6082 memset(mode, 0, sizeof(*mode)); 6083 ret = eth_err(port_id, 6084 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 6085 6086 rte_eth_trace_rx_burst_mode_get(port_id, queue_id, mode, ret); 6087 6088 return ret; 6089 } 6090 6091 int 6092 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 6093 struct rte_eth_burst_mode *mode) 6094 { 6095 struct rte_eth_dev *dev; 6096 int ret; 6097 6098 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6099 dev = &rte_eth_devices[port_id]; 6100 6101 if (queue_id >= dev->data->nb_tx_queues) { 6102 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 6103 return -EINVAL; 6104 } 6105 6106 if (mode == NULL) { 6107 RTE_ETHDEV_LOG(ERR, 6108 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 6109 port_id, queue_id); 6110 return -EINVAL; 6111 } 6112 6113 if (*dev->dev_ops->tx_burst_mode_get == NULL) 6114 return -ENOTSUP; 6115 memset(mode, 0, sizeof(*mode)); 6116 ret = eth_err(port_id, 6117 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 6118 6119 rte_eth_trace_tx_burst_mode_get(port_id, queue_id, mode, ret); 6120 6121 return ret; 6122 } 6123 6124 int 6125 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 6126 struct rte_power_monitor_cond *pmc) 6127 { 6128 struct rte_eth_dev *dev; 6129 int ret; 6130 6131 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6132 dev = &rte_eth_devices[port_id]; 6133 6134 if (queue_id >= dev->data->nb_rx_queues) { 6135 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 6136 return -EINVAL; 6137 } 6138 6139 if (pmc == NULL) { 6140 RTE_ETHDEV_LOG(ERR, 6141 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 6142 port_id, queue_id); 6143 return -EINVAL; 6144 } 6145 6146 if (*dev->dev_ops->get_monitor_addr == NULL) 6147 return -ENOTSUP; 6148 ret = eth_err(port_id, 6149 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 6150 6151 rte_eth_trace_get_monitor_addr(port_id, queue_id, pmc, ret); 6152 6153 return ret; 6154 } 6155 6156 int 6157 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 6158 struct rte_ether_addr *mc_addr_set, 6159 uint32_t nb_mc_addr) 6160 { 6161 struct rte_eth_dev *dev; 6162 int ret; 6163 6164 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6165 dev = &rte_eth_devices[port_id]; 6166 6167 if (*dev->dev_ops->set_mc_addr_list == NULL) 6168 return -ENOTSUP; 6169 ret = eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 6170 mc_addr_set, nb_mc_addr)); 6171 6172 rte_ethdev_trace_set_mc_addr_list(port_id, mc_addr_set, nb_mc_addr, 6173 ret); 6174 6175 return ret; 6176 } 6177 6178 int 6179 rte_eth_timesync_enable(uint16_t port_id) 6180 { 6181 struct rte_eth_dev *dev; 6182 int ret; 6183 6184 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6185 dev = &rte_eth_devices[port_id]; 
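	/*
	 * Illustrative IEEE 1588 flow (sketch only, not part of the original
	 * sources); queue and flag details are device dependent:
	 *
	 *	struct timespec ts;
	 *
	 *	rte_eth_timesync_enable(port_id);
	 *	... receive a PTP frame (mbuf flagged RTE_MBUF_F_RX_IEEE1588_PTP) ...
	 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
	 *		... use ts ...
	 *	rte_eth_timesync_disable(port_id);
	 */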
6186 6187 if (*dev->dev_ops->timesync_enable == NULL) 6188 return -ENOTSUP; 6189 ret = eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 6190 6191 rte_eth_trace_timesync_enable(port_id, ret); 6192 6193 return ret; 6194 } 6195 6196 int 6197 rte_eth_timesync_disable(uint16_t port_id) 6198 { 6199 struct rte_eth_dev *dev; 6200 int ret; 6201 6202 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6203 dev = &rte_eth_devices[port_id]; 6204 6205 if (*dev->dev_ops->timesync_disable == NULL) 6206 return -ENOTSUP; 6207 ret = eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 6208 6209 rte_eth_trace_timesync_disable(port_id, ret); 6210 6211 return ret; 6212 } 6213 6214 int 6215 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 6216 uint32_t flags) 6217 { 6218 struct rte_eth_dev *dev; 6219 int ret; 6220 6221 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6222 dev = &rte_eth_devices[port_id]; 6223 6224 if (timestamp == NULL) { 6225 RTE_ETHDEV_LOG(ERR, 6226 "Cannot read ethdev port %u Rx timestamp to NULL\n", 6227 port_id); 6228 return -EINVAL; 6229 } 6230 6231 if (*dev->dev_ops->timesync_read_rx_timestamp == NULL) 6232 return -ENOTSUP; 6233 6234 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 6235 (dev, timestamp, flags)); 6236 6237 rte_eth_trace_timesync_read_rx_timestamp(port_id, timestamp, flags, 6238 ret); 6239 6240 return ret; 6241 } 6242 6243 int 6244 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 6245 struct timespec *timestamp) 6246 { 6247 struct rte_eth_dev *dev; 6248 int ret; 6249 6250 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6251 dev = &rte_eth_devices[port_id]; 6252 6253 if (timestamp == NULL) { 6254 RTE_ETHDEV_LOG(ERR, 6255 "Cannot read ethdev port %u Tx timestamp to NULL\n", 6256 port_id); 6257 return -EINVAL; 6258 } 6259 6260 if (*dev->dev_ops->timesync_read_tx_timestamp == NULL) 6261 return -ENOTSUP; 6262 6263 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 6264 (dev, timestamp)); 6265 6266 rte_eth_trace_timesync_read_tx_timestamp(port_id, timestamp, ret); 6267 6268 return ret; 6269 6270 } 6271 6272 int 6273 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 6274 { 6275 struct rte_eth_dev *dev; 6276 int ret; 6277 6278 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6279 dev = &rte_eth_devices[port_id]; 6280 6281 if (*dev->dev_ops->timesync_adjust_time == NULL) 6282 return -ENOTSUP; 6283 ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 6284 6285 rte_eth_trace_timesync_adjust_time(port_id, delta, ret); 6286 6287 return ret; 6288 } 6289 6290 int 6291 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 6292 { 6293 struct rte_eth_dev *dev; 6294 int ret; 6295 6296 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6297 dev = &rte_eth_devices[port_id]; 6298 6299 if (timestamp == NULL) { 6300 RTE_ETHDEV_LOG(ERR, 6301 "Cannot read ethdev port %u timesync time to NULL\n", 6302 port_id); 6303 return -EINVAL; 6304 } 6305 6306 if (*dev->dev_ops->timesync_read_time == NULL) 6307 return -ENOTSUP; 6308 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 6309 timestamp)); 6310 6311 rte_eth_trace_timesync_read_time(port_id, timestamp, ret); 6312 6313 return ret; 6314 } 6315 6316 int 6317 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 6318 { 6319 struct rte_eth_dev *dev; 6320 int ret; 6321 6322 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6323 dev = &rte_eth_devices[port_id]; 6324 6325 if 
int
rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (clock == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->read_clock == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));

	rte_eth_trace_read_clock(port_id, clock, ret);

	return ret;
}

int
rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u register info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_reg == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));

	rte_ethdev_trace_get_reg_info(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->get_eeprom_length == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));

	rte_ethdev_trace_get_eeprom_length(port_id, ret);

	return ret;
}

int
rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u EEPROM info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_eeprom == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));

	rte_ethdev_trace_get_eeprom(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u EEPROM from NULL info\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->set_eeprom == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));

	rte_ethdev_trace_set_eeprom(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_get_module_info(uint16_t port_id,
			    struct rte_eth_dev_module_info *modinfo)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (modinfo == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u EEPROM module info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_module_info == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->get_module_info)(dev, modinfo);

	rte_ethdev_trace_get_module_info(port_id, modinfo, ret);

	return ret;
}
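/*
 * Illustrative usage sketch (assumes port 0 carries a pluggable module and
 * the PMD implements the module callbacks): query the module type and
 * EEPROM size, then read the raw module EEPROM into a caller buffer. The
 * 1024-byte buffer size is an arbitrary assumption.
 *
 *	struct rte_eth_dev_module_info modinfo;
 *	struct rte_dev_eeprom_info eeinfo;
 *	uint8_t buf[1024];
 *
 *	if (rte_eth_dev_get_module_info(0, &modinfo) == 0) {
 *		memset(&eeinfo, 0, sizeof(eeinfo));
 *		eeinfo.offset = 0;
 *		eeinfo.length = RTE_MIN(modinfo.eeprom_len,
 *					(uint32_t)sizeof(buf));
 *		eeinfo.data = buf;
 *		(void)rte_eth_dev_get_module_eeprom(0, &eeinfo);
 *	}
 */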
int
rte_eth_dev_get_module_eeprom(uint16_t port_id,
			      struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (info->data == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM data to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (info->length == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM to data with zero size\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_module_eeprom == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->get_module_eeprom)(dev, info);

	rte_ethdev_trace_get_module_eeprom(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dcb_info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u DCB info to NULL\n",
			port_id);
		return -EINVAL;
	}

	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	if (*dev->dev_ops->get_dcb_info == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));

	rte_ethdev_trace_get_dcb_info(port_id, dcb_info, ret);

	return ret;
}

static void
eth_dev_adjust_nb_desc(uint16_t *nb_desc,
		const struct rte_eth_desc_lim *desc_lim)
{
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}

int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (nb_rx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	rte_ethdev_trace_adjust_nb_rx_tx_desc(port_id);

	return 0;
}

int
rte_eth_dev_hairpin_capability_get(uint16_t port_id,
				   struct rte_eth_hairpin_cap *cap)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (cap == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u hairpin capability to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->hairpin_cap_get == NULL)
		return -ENOTSUP;
	memset(cap, 0, sizeof(*cap));
	ret = eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));

	rte_ethdev_trace_hairpin_capability_get(port_id, cap, ret);

	return ret;
}
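/*
 * Illustrative usage sketch: clamping requested descriptor counts to the
 * device limits before queue setup. Port 0 and the requested value of 1024
 * descriptors are arbitrary assumptions; on success the values are aligned
 * to nb_align and clamped to [nb_min, nb_max] as implemented above.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	if (rte_eth_dev_adjust_nb_rx_tx_desc(0, &nb_rxd, &nb_txd) == 0) {
 *		// pass the adjusted nb_rxd/nb_txd to
 *		// rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup()
 *	}
 */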
int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (pool == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot test ethdev port %u mempool operation from NULL pool\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->pool_ops_supported == NULL)
		return 1; /* all pools are supported */

	ret = (*dev->dev_ops->pool_ops_supported)(dev, pool);

	rte_ethdev_trace_pool_ops_supported(port_id, pool, ret);

	return ret;
}

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->representor_info_get == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));

	rte_eth_trace_representor_info_get(port_id, info, ret);

	return ret;
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (ID=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0 &&
			rte_flow_restore_info_dynflag_register() < 0)
		*features &= ~RTE_ETH_RX_METADATA_TUNNEL_ID;

	if (*dev->dev_ops->rx_metadata_negotiate == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		      (*dev->dev_ops->rx_metadata_negotiate)(dev, features));

	rte_eth_trace_rx_metadata_negotiate(port_id, *features, ret);

	return ret;
}

int
rte_eth_ip_reassembly_capability_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *reassembly_capa)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot get IP reassembly capability\n",
			port_id);
		return -EINVAL;
	}

	if (reassembly_capa == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
		return -ENOTSUP;
	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));

	ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
			(dev, reassembly_capa));

	rte_eth_trace_ip_reassembly_capability_get(port_id, reassembly_capa,
						   ret);

	return ret;
}
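/*
 * Illustrative usage sketch: negotiating Rx metadata delivery before
 * rte_eth_dev_configure() is called (the function above rejects already
 * configured ports with -EBUSY). The requested feature mask below is an
 * example; the PMD clears any bits it cannot honour.
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *			    RTE_ETH_RX_METADATA_USER_MARK;
 *
 *	if (rte_eth_rx_metadata_negotiate(0, &features) == 0)
 *		printf("negotiated Rx metadata: 0x%" PRIx64 "\n", features);
 */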
int
rte_eth_ip_reassembly_conf_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot get IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
		return -ENOTSUP;
	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
	ret = eth_err(port_id,
		      (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_get(port_id, conf, ret);

	return ret;
}

int
rte_eth_ip_reassembly_conf_set(uint16_t port_id,
		const struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot set IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u started,\n"
			"cannot configure IP reassembly params.\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid IP reassembly configuration (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		      (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_set(port_id, conf, ret);

	return ret;
}

int
rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_dev_priv_dump == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
}

int
rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset, uint16_t num, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_rx_descriptor_dump == NULL)
		return -ENOTSUP;

	return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev,
			queue_id, offset, num, file));
}
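/*
 * Illustrative usage sketch: enabling hardware IP reassembly on a port that
 * is configured but not yet started (the checks above enforce this), staying
 * within the advertised capability. Port 0 and reusing the capability values
 * verbatim as the configuration are assumptions; an application would
 * normally lower timeout_ms/max_frags to its own needs.
 *
 *	struct rte_eth_ip_reassembly_params capa, conf;
 *
 *	if (rte_eth_ip_reassembly_capability_get(0, &capa) == 0) {
 *		conf = capa;
 *		(void)rte_eth_ip_reassembly_conf_set(0, &conf);
 *	}
 */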
int
rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset, uint16_t num, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_tx_descriptor_dump == NULL)
		return -ENOTSUP;

	return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev,
			queue_id, offset, num, file));
}

int
rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_types;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (ptypes == NULL && num > 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u supported header protocol types to NULL when array size is non zero\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL)
		return -ENOTSUP;
	all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev);

	if (all_types == NULL)
		return 0;

	for (i = 0, j = 0; all_types[i] != RTE_PTYPE_UNKNOWN; ++i) {
		if (j < num) {
			ptypes[j] = all_types[i];

			rte_eth_trace_buffer_split_get_supported_hdr_ptypes(
							port_id, j, ptypes[j]);
		}
		j++;
	}

	return j;
}

int rte_eth_dev_count_aggr_ports(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->count_aggr_ports == NULL)
		return 0;
	ret = eth_err(port_id, (*dev->dev_ops->count_aggr_ports)(dev));

	rte_eth_trace_count_aggr_ports(port_id, ret);

	return ret;
}

int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
				     uint8_t affinity)
{
	struct rte_eth_dev *dev;
	int aggr_ports;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->map_aggr_tx_affinity == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be configured before Tx affinity mapping\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	aggr_ports = rte_eth_dev_count_aggr_ports(port_id);
	if (aggr_ports == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u has no aggregated port\n",
			port_id);
		return -ENOTSUP;
	}

	if (affinity > aggr_ports) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u map invalid affinity %u exceeds the maximum number %u\n",
			port_id, affinity, aggr_ports);
		return -EINVAL;
	}

	ret = eth_err(port_id, (*dev->dev_ops->map_aggr_tx_affinity)(dev,
			tx_queue_id, affinity));

	rte_eth_trace_map_aggr_tx_affinity(port_id, tx_queue_id, affinity, ret);

	return ret;
}

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
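/*
 * Illustrative usage sketch: spreading the Tx queues of an aggregated
 * (e.g. bonded) port across its member ports, after configuration and
 * before start, which is what the checks above require. Port 0, the
 * application-side queue count nb_txq and the round-robin mapping are all
 * assumptions; per the range check above, affinity values run from 1 to
 * the number of aggregated ports.
 *
 *	uint16_t nb_txq = 4; // application's configured Tx queue count
 *	int aggr = rte_eth_dev_count_aggr_ports(0);
 *
 *	for (uint16_t q = 0; aggr > 0 && q < nb_txq; q++)
 *		(void)rte_eth_dev_map_aggr_tx_affinity(0, q,
 *						       (q % aggr) + 1);
 */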