/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <bus_driver.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_kvargs.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev.h"
#include "rte_ethdev_trace_fp.h"
#include "ethdev_driver.h"
#include "rte_flow_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "ethdev_trace.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
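/*
 * Illustrative sketch (not part of the library): the name/offset tables
 * above let generic code read a basic statistic by name without knowing
 * the rte_eth_stats layout. Assuming a valid port_id:
 *
 *	struct rte_eth_stats stats;
 *	unsigned int i;
 *
 *	rte_eth_stats_get(port_id, &stats);
 *	for (i = 0; i < RTE_NB_STATS; i++)
 *		printf("%s: %"PRIu64"\n", eth_dev_stats_strings[i].name,
 *			*(const uint64_t *)((const char *)&stats +
 *				eth_dev_stats_strings[i].offset));
 */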

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

static const struct {
	enum rte_eth_hash_function algo;
	const char *name;
} rte_eth_dev_rss_algo_names[] = {
	{RTE_ETH_HASH_FUNCTION_DEFAULT, "default"},
	{RTE_ETH_HASH_FUNCTION_SIMPLE_XOR, "simple_xor"},
	{RTE_ETH_HASH_FUNCTION_TOEPLITZ, "toeplitz"},
	{RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ, "symmetric_toeplitz"},
	{RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT, "symmetric_toeplitz_sort"},
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle a pure class filter (i.e. without any bus-level argument),
	 * using the future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax;
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);

	rte_eth_trace_iterator_init(devargs_str);

	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not purely an ethdev filter and */
		if (iter->bus != NULL &&
				/* not in the middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL) {
			uint16_t id = eth_dev_to_id(iter->class_device);

			rte_eth_trace_iterator_next(iter, id);

			return id; /* match */
		}
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */

	rte_eth_trace_iterator_cleanup(iter);

	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	rte_eth_trace_find_next(port_id);

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_of(port_id, parent);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	uint16_t ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	ret = rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);

	rte_eth_trace_find_next_sibling(port_id, ref_port_id, ret);

	return ret;
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data != NULL && ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	int is_valid;

	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		is_valid = 0;
	else
		is_valid = 1;

	rte_ethdev_trace_is_valid_port(port_id, is_valid);

	return is_valid;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_owned_by(port_id, owner_id);

	return port_id;
}
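/*
 * Usage sketch for the ownership API defined below (illustrative only,
 * not part of this file). An application component takes ownership of a
 * port so that other components skip it; "my-app" is just an example name
 * and error handling is omitted:
 *
 *	struct rte_eth_dev_owner owner = { .name = "my-app" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		... use the port exclusively ...
 *		rte_eth_dev_owner_unset(port_id, owner.id);
 *		rte_eth_dev_owner_delete(owner.id);
 *	}
 */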
int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	int ret;

	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL) {
		*owner_id = eth_dev_shared_data->next_owner_id++;
		eth_dev_shared_data->allocated_owners++;
		ret = 0;
	} else {
		ret = -ENOMEM;
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_new(*owner_id, ret);

	return ret;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		const struct rte_eth_dev_owner *new_owner)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL)
		ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
	else
		ret = -ENOMEM;

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_set(port_id, owner, ret);

	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL)
		ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
	else
		ret = -ENOMEM;

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_unset(port_id, owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL) {
		ret = -ENOMEM;
	} else if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
					sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
		eth_dev_shared_data->allocated_owners--;
		eth_dev_shared_data_release();
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_delete(owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL) {
		rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
		ret = 0;
	} else {
		ret = -ENOMEM;
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_get(port_id, owner, ret);

	return ret;
}

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	int socket_id = SOCKET_ID_ANY;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_errno = EINVAL;
	} else {
		socket_id = rte_eth_devices[port_id].data->numa_node;
		if (socket_id == SOCKET_ID_ANY)
			rte_errno = 0;
	}

	rte_ethdev_trace_socket_id(port_id, socket_id);

	return socket_id;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	void *ctx;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	ctx = rte_eth_devices[port_id].security_ctx;

	rte_ethdev_trace_get_sec_ctx(port_id, ctx);

	return ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	rte_ethdev_trace_count_avail(count);

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	rte_ethdev_trace_count_total(count);

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	strcpy(name, tmp);

	rte_ethdev_trace_get_name_by_port(port_id, name);

	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	int ret = -ENODEV;
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
	RTE_ETH_FOREACH_VALID_DEV(pid) {
		if (strcmp(name, eth_dev_shared_data->data[pid].name) != 0)
			continue;

		*port_id = pid;
		rte_ethdev_trace_get_port_by_name(name, *port_id);
		ret = 0;
		break;
	}
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return ret;
}
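/*
 * Illustrative sketch (not part of this file): mapping between a port ID
 * and its device name with the two lookup helpers above.
 *
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *	uint16_t port_id;
 *
 *	if (rte_eth_dev_get_name_by_port(0, name) == 0 &&
 *	    rte_eth_dev_get_port_by_name(name, &port_id) == 0)
 *		RTE_ASSERT(port_id == 0);
 */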
int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Rx queue_id=%u of device with port_id=%u\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Tx queue_id=%u of device with port_id=%u\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	return eth_dev_validate_rx_queue(dev, queue_id);
}

int
rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	return eth_dev_validate_tx_queue(dev, queue_id);
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));

	rte_ethdev_trace_rx_queue_start(port_id, rx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

	rte_ethdev_trace_rx_queue_stop(port_id, rx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));

	rte_ethdev_trace_tx_queue_start(port_id, tx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

	rte_ethdev_trace_tx_queue_stop(port_id, tx_queue_id, ret);

	return ret;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	uint32_t ret;

	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		ret = duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
		break;
	case RTE_ETH_SPEED_NUM_100M:
		ret = duplex ?
			RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
		break;
	case RTE_ETH_SPEED_NUM_1G:
		ret = RTE_ETH_LINK_SPEED_1G;
		break;
	case RTE_ETH_SPEED_NUM_2_5G:
		ret = RTE_ETH_LINK_SPEED_2_5G;
		break;
	case RTE_ETH_SPEED_NUM_5G:
		ret = RTE_ETH_LINK_SPEED_5G;
		break;
	case RTE_ETH_SPEED_NUM_10G:
		ret = RTE_ETH_LINK_SPEED_10G;
		break;
	case RTE_ETH_SPEED_NUM_20G:
		ret = RTE_ETH_LINK_SPEED_20G;
		break;
	case RTE_ETH_SPEED_NUM_25G:
		ret = RTE_ETH_LINK_SPEED_25G;
		break;
	case RTE_ETH_SPEED_NUM_40G:
		ret = RTE_ETH_LINK_SPEED_40G;
		break;
	case RTE_ETH_SPEED_NUM_50G:
		ret = RTE_ETH_LINK_SPEED_50G;
		break;
	case RTE_ETH_SPEED_NUM_56G:
		ret = RTE_ETH_LINK_SPEED_56G;
		break;
	case RTE_ETH_SPEED_NUM_100G:
		ret = RTE_ETH_LINK_SPEED_100G;
		break;
	case RTE_ETH_SPEED_NUM_200G:
		ret = RTE_ETH_LINK_SPEED_200G;
		break;
	case RTE_ETH_SPEED_NUM_400G:
		ret = RTE_ETH_LINK_SPEED_400G;
		break;
	default:
		ret = 0;
	}

	rte_eth_trace_speed_bitflag(speed, duplex, ret);

	return ret;
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_rx_offload_name(offload, name);

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_tx_offload_name(offload, name);

	return name;
}

static char *
eth_dev_offload_names(uint64_t bitmask, char *buf, size_t size,
		      const char *(*offload_name)(uint64_t))
{
	unsigned int pos = 0;
	int ret;

	/* There should be at least enough space to handle those cases */
	RTE_ASSERT(size >= sizeof("none") && size >= sizeof("..."));

	if (bitmask == 0) {
		ret = snprintf(&buf[pos], size - pos, "none");
		if (ret < 0 || pos + ret >= size)
			ret = 0;
		pos += ret;
		goto out;
	}

	while (bitmask != 0) {
		uint64_t offload = RTE_BIT64(rte_ctz64(bitmask));
		const char *name = offload_name(offload);

		ret = snprintf(&buf[pos], size - pos, "%s,", name);
		if (ret < 0 || pos + ret >= size) {
			if (pos + sizeof("...") >= size)
				pos = size - sizeof("...");
			ret = snprintf(&buf[pos], size - pos, "...");
			if (ret > 0 && pos + ret < size)
				pos += ret;
			goto out;
		}

		pos += ret;
		bitmask &= ~offload;
	}

	/* Eliminate trailing comma */
	pos--;
out:
	buf[pos] = '\0';
	return buf;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_capability_name(capability, name);

	return name;
}
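/*
 * Illustrative sketch (not part of this file): the helpers above convert
 * between numeric values and symbolic names. For example, an application
 * can request a fixed 10G full-duplex link in its port configuration:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *			RTE_ETH_LINK_FULL_DUPLEX);
 *
 * and print the name of a single offload bit with
 * rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_TCP_LRO).
 */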
static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				" %u != %u is not allowed\n",
				port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"> max allowed value %u\n", port_id, config_size,
			dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"< min allowed value %u\n", port_id, config_size,
			(unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(rte_ctz64(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		     uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	enum rte_eth_hash_function algorithm;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure() to avoid any non-anticipated behaviour.
	 * It is set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	/* fields must be zero to reserve them for future ABI changes */
	if (dev_conf->rxmode.reserved_64s[0] != 0 ||
	    dev_conf->rxmode.reserved_64s[1] != 0 ||
	    dev_conf->rxmode.reserved_ptrs[0] != NULL ||
	    dev_conf->rxmode.reserved_ptrs[1] != NULL) {
		RTE_ETHDEV_LOG(ERR, "Rxmode reserved fields not zero\n");
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->txmode.reserved_64s[0] != 0 ||
	    dev_conf->txmode.reserved_64s[1] != 0 ||
	    dev_conf->txmode.reserved_ptrs[0] != NULL ||
	    dev_conf->txmode.reserved_ptrs[1] != NULL) {
		RTE_ETHDEV_LOG(ERR, "Txmode reserved fields not zero\n");
		ret = -EINVAL;
		goto rollback;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		char buffer[512];

		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u does not support Rx offloads %s\n",
			port_id, eth_dev_offload_names(
			dev_conf->rxmode.offloads & ~dev_info.rx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u was requested Rx offloads %s\n",
			port_id, eth_dev_offload_names(dev_conf->rxmode.offloads,
			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u supports Rx offloads %s\n",
			port_id, eth_dev_offload_names(dev_info.rx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));

		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		char buffer[512];

		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u does not support Tx offloads %s\n",
			port_id, eth_dev_offload_names(
			dev_conf->txmode.offloads & ~dev_info.tx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u was requested Tx offloads %s\n",
			port_id, eth_dev_offload_names(dev_conf->txmode.offloads,
			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u supports Tx offloads %s\n",
			port_id, eth_dev_offload_names(dev_info.tx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rx_adv_conf.rss_conf.rss_key != NULL &&
	    dev_conf->rx_adv_conf.rss_conf.rss_key_len != dev_info.hash_key_size) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid RSS key len: %u, valid value: %u\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_key_len,
			dev_info.hash_key_size);
		ret = -EINVAL;
		goto rollback;
	}

	algorithm = dev_conf->rx_adv_conf.rss_conf.algorithm;
	if ((size_t)algorithm >= CHAR_BIT * sizeof(dev_info.rss_algo_capa) ||
	    (dev_info.rss_algo_capa & RTE_ETH_HASH_ALGO_TO_CAPA(algorithm)) == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u configured RSS hash algorithm (%u) "
			"is not in the algorithm capability (0x%" PRIx32 ")\n",
			port_id, algorithm, dev_info.rss_algo_capa);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		    struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore MAC now if device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		if (*dev->dev_ops->link_update == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret == 0)
		dev->data->dev_started = 0;
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_up == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));

	rte_ethdev_trace_set_link_up(port_id, ret);

	return ret;
}
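/*
 * Typical port bring-up sequence, for illustration only (assumes an
 * already-created mbuf pool 'mb_pool' and default configuration; error
 * handling omitted):
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 1024,
 *		rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 1024,
 *		rte_eth_dev_socket_id(port_id), NULL);
 *	rte_eth_dev_start(port_id);
 */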
int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_down == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));

	rte_ethdev_trace_set_link_down(port_id, ret);

	return ret;
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*
	 * Secondary process needs to close device to release process private
	 * resources. But secondary process should not be obliged to wait
	 * for device stop before closing ethdev.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
	    dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_reset == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = eth_err(port_id, dev->dev_ops->dev_reset(dev));

	rte_ethdev_trace_reset(port_id, ret);

	return ret;
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	if (*dev->dev_ops->is_removed == NULL)
		return 0;

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	rte_ethdev_trace_is_removed(port_id, ret);

	return ret;
}
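/*
 * Illustrative note (not part of this file): the Rx mempool checks below
 * validate that each mbuf data room can hold the configured headroom plus
 * the minimum Rx buffer length. A pool created for example with
 *
 *	rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *		RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 * provides RTE_MBUF_DEFAULT_DATAROOM (2048) bytes of data room after the
 * RTE_PKTMBUF_HEADROOM, which satisfies typical 1500-byte MTU setups.
 */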
static int
rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
			 uint16_t min_length)
{
	uint16_t data_room_size;

	/*
	 * Check the size of the mbuf data buffer, this value
	 * must be provided in the private data of the memory pool.
	 * First check that the memory pool has valid private data.
	 */
	if (mp->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
			mp->name, mp->private_data_size,
			(unsigned int)
			sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	data_room_size = rte_pktmbuf_data_room_size(mp);
	if (data_room_size < offset + min_length) {
		RTE_ETHDEV_LOG(ERR,
			"%s mbuf_data_room_size %u < %u (%u + %u)\n",
			mp->name, data_room_size,
			offset + min_length, offset, min_length);
		return -EINVAL;
	}
	return 0;
}

static int
eth_dev_buffer_split_get_supported_hdrs_helper(uint16_t port_id, uint32_t **ptypes)
{
	int cnt;

	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0);
	if (cnt <= 0)
		return cnt;

	*ptypes = malloc(sizeof(uint32_t) * cnt);
	if (*ptypes == NULL)
		return -ENOMEM;

	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, *ptypes, cnt);
	if (cnt <= 0) {
		free(*ptypes);
		*ptypes = NULL;
	}
	return cnt;
}

static int
rte_eth_rx_queue_check_split(uint16_t port_id,
			const struct rte_eth_rxseg_split *rx_seg,
			uint16_t n_seg, uint32_t *mbp_buf_size,
			const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;
	int ret = 0;
	int ptype_cnt;
	uint32_t *ptypes;
	uint32_t prev_proto_hdrs = RTE_PTYPE_UNKNOWN;
	int i;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			"Requested Rx segments %u exceed supported %u\n",
			n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;

	ptypes = NULL;
	ptype_cnt = eth_dev_buffer_split_get_supported_hdrs_helper(port_id, &ptypes);

	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;
		uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			ret = -EINVAL;
			goto out;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			ret = -ENOTSUP;
			goto out;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				ret = -ENOTSUP;
				goto out;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					offset,
					seg_capa->offset_align_log2);
				ret = -EINVAL;
				goto out;
			}
		}

		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		if (proto_hdr != 0) {
			/* Split based on protocol headers. */
*/ 2053 if (length != 0) { 2054 RTE_ETHDEV_LOG(ERR, 2055 "Do not set length split and protocol split within a segment\n" 2056 ); 2057 ret = -EINVAL; 2058 goto out; 2059 } 2060 if ((proto_hdr & prev_proto_hdrs) != 0) { 2061 RTE_ETHDEV_LOG(ERR, 2062 "Repeat with previous protocol headers or proto-split after length-based split\n" 2063 ); 2064 ret = -EINVAL; 2065 goto out; 2066 } 2067 if (ptype_cnt <= 0) { 2068 RTE_ETHDEV_LOG(ERR, 2069 "Port %u failed to get supported buffer split header protocols\n", 2070 port_id); 2071 ret = -ENOTSUP; 2072 goto out; 2073 } 2074 for (i = 0; i < ptype_cnt; i++) { 2075 if ((prev_proto_hdrs | proto_hdr) == ptypes[i]) 2076 break; 2077 } 2078 if (i == ptype_cnt) { 2079 RTE_ETHDEV_LOG(ERR, 2080 "Requested Rx split header protocols 0x%x is not supported.\n", 2081 proto_hdr); 2082 ret = -EINVAL; 2083 goto out; 2084 } 2085 prev_proto_hdrs |= proto_hdr; 2086 } else { 2087 /* Split at fixed length. */ 2088 length = length != 0 ? length : *mbp_buf_size; 2089 prev_proto_hdrs = RTE_PTYPE_ALL_MASK; 2090 } 2091 2092 ret = rte_eth_check_rx_mempool(mpl, offset, length); 2093 if (ret != 0) 2094 goto out; 2095 } 2096 out: 2097 free(ptypes); 2098 return ret; 2099 } 2100 2101 static int 2102 rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools, 2103 uint16_t n_mempools, uint32_t *min_buf_size, 2104 const struct rte_eth_dev_info *dev_info) 2105 { 2106 uint16_t pool_idx; 2107 int ret; 2108 2109 if (n_mempools > dev_info->max_rx_mempools) { 2110 RTE_ETHDEV_LOG(ERR, 2111 "Too many Rx mempools %u vs maximum %u\n", 2112 n_mempools, dev_info->max_rx_mempools); 2113 return -EINVAL; 2114 } 2115 2116 for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) { 2117 struct rte_mempool *mp = rx_mempools[pool_idx]; 2118 2119 if (mp == NULL) { 2120 RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n"); 2121 return -EINVAL; 2122 } 2123 2124 ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM, 2125 dev_info->min_rx_bufsize); 2126 if (ret != 0) 2127 return ret; 2128 2129 *min_buf_size = RTE_MIN(*min_buf_size, 2130 rte_pktmbuf_data_room_size(mp)); 2131 } 2132 2133 return 0; 2134 } 2135 2136 int 2137 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2138 uint16_t nb_rx_desc, unsigned int socket_id, 2139 const struct rte_eth_rxconf *rx_conf, 2140 struct rte_mempool *mp) 2141 { 2142 int ret; 2143 uint64_t rx_offloads; 2144 uint32_t mbp_buf_size = UINT32_MAX; 2145 struct rte_eth_dev *dev; 2146 struct rte_eth_dev_info dev_info; 2147 struct rte_eth_rxconf local_conf; 2148 2149 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2150 dev = &rte_eth_devices[port_id]; 2151 2152 if (rx_queue_id >= dev->data->nb_rx_queues) { 2153 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2154 return -EINVAL; 2155 } 2156 2157 if (*dev->dev_ops->rx_queue_setup == NULL) 2158 return -ENOTSUP; 2159 2160 if (rx_conf != NULL && 2161 (rx_conf->reserved_64s[0] != 0 || 2162 rx_conf->reserved_64s[1] != 0 || 2163 rx_conf->reserved_ptrs[0] != NULL || 2164 rx_conf->reserved_ptrs[1] != NULL)) { 2165 RTE_ETHDEV_LOG(ERR, "Rx conf reserved fields not zero\n"); 2166 return -EINVAL; 2167 } 2168 2169 ret = rte_eth_dev_info_get(port_id, &dev_info); 2170 if (ret != 0) 2171 return ret; 2172 2173 rx_offloads = dev->data->dev_conf.rxmode.offloads; 2174 if (rx_conf != NULL) 2175 rx_offloads |= rx_conf->offloads; 2176 2177 /* Ensure that we have one and only one source of Rx buffers */ 2178 if ((mp != NULL) + 2179 (rx_conf != NULL && rx_conf->rx_nseg > 0) + 2180 (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) { 
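/*
 * Illustrative usage sketch, not part of the library logic: this error path
 * is reached when the caller mixes the three mutually exclusive ways of
 * providing Rx buffers, so an application picks exactly one of them per
 * queue. The pool, hdr_pool, pay_pool and dev_info variables below are
 * assumptions of the example only.
 *
 *	// 1) Single mempool: pass mp and leave rx_conf segments/pools unset.
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     NULL, pool);
 *
 *	// 2) Buffer split: mp == NULL, segments described in
 *	//    rx_conf->rx_seg/rx_nseg, RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT enabled.
 *	struct rte_eth_rxseg_split segs[2] = {
 *		{ .mp = hdr_pool, .length = 128 },
 *		{ .mp = pay_pool, .length = 0 },
 *	};
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	rxconf.rx_seg = (union rte_eth_rxseg *)segs;
 *	rxconf.rx_nseg = 2;
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     &rxconf, NULL);
 *
 *	// 3) Multiple pools: mp == NULL, pools listed in
 *	//    rx_conf->rx_mempools with rx_conf->rx_nmempool set.
 */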
2181 RTE_ETHDEV_LOG(ERR, 2182 "Ambiguous Rx mempools configuration\n"); 2183 return -EINVAL; 2184 } 2185 2186 if (mp != NULL) { 2187 /* Single pool configuration check. */ 2188 ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM, 2189 dev_info.min_rx_bufsize); 2190 if (ret != 0) 2191 return ret; 2192 2193 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2194 } else if (rx_conf != NULL && rx_conf->rx_nseg > 0) { 2195 const struct rte_eth_rxseg_split *rx_seg; 2196 uint16_t n_seg; 2197 2198 /* Extended multi-segment configuration check. */ 2199 if (rx_conf->rx_seg == NULL) { 2200 RTE_ETHDEV_LOG(ERR, 2201 "Memory pool is null and no multi-segment configuration provided\n"); 2202 return -EINVAL; 2203 } 2204 2205 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2206 n_seg = rx_conf->rx_nseg; 2207 2208 if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2209 ret = rte_eth_rx_queue_check_split(port_id, rx_seg, n_seg, 2210 &mbp_buf_size, 2211 &dev_info); 2212 if (ret != 0) 2213 return ret; 2214 } else { 2215 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2216 return -EINVAL; 2217 } 2218 } else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) { 2219 /* Extended multi-pool configuration check. */ 2220 if (rx_conf->rx_mempools == NULL) { 2221 RTE_ETHDEV_LOG(ERR, "Memory pools array is null\n"); 2222 return -EINVAL; 2223 } 2224 2225 ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools, 2226 rx_conf->rx_nmempool, 2227 &mbp_buf_size, 2228 &dev_info); 2229 if (ret != 0) 2230 return ret; 2231 } else { 2232 RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n"); 2233 return -EINVAL; 2234 } 2235 2236 /* Use default specified by driver, if nb_rx_desc is zero */ 2237 if (nb_rx_desc == 0) { 2238 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2239 /* If driver default is also zero, fall back on EAL default */ 2240 if (nb_rx_desc == 0) 2241 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2242 } 2243 2244 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2245 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2246 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2247 2248 RTE_ETHDEV_LOG(ERR, 2249 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2250 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2251 dev_info.rx_desc_lim.nb_min, 2252 dev_info.rx_desc_lim.nb_align); 2253 return -EINVAL; 2254 } 2255 2256 if (dev->data->dev_started && 2257 !(dev_info.dev_capa & 2258 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2259 return -EBUSY; 2260 2261 if (dev->data->dev_started && 2262 (dev->data->rx_queue_state[rx_queue_id] != 2263 RTE_ETH_QUEUE_STATE_STOPPED)) 2264 return -EBUSY; 2265 2266 eth_dev_rxq_release(dev, rx_queue_id); 2267 2268 if (rx_conf == NULL) 2269 rx_conf = &dev_info.default_rxconf; 2270 2271 local_conf = *rx_conf; 2272 2273 /* 2274 * If an offloading has already been enabled in 2275 * rte_eth_dev_configure(), it has been enabled on all queues, 2276 * so there is no need to enable it in this queue again. 2277 * The local_conf.offloads input to underlying PMD only carries 2278 * those offloadings which are only enabled on this queue and 2279 * not enabled on all queues. 2280 */ 2281 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2282 2283 /* 2284 * New added offloadings for this queue are those not enabled in 2285 * rte_eth_dev_configure() and they must be per-queue type. 2286 * A pure per-port offloading can't be enabled on a queue while 2287 * disabled on another queue. 
A pure per-port offloading can't 2288 * be enabled for any queue as new added one if it hasn't been 2289 * enabled in rte_eth_dev_configure(). 2290 */ 2291 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2292 local_conf.offloads) { 2293 RTE_ETHDEV_LOG(ERR, 2294 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2295 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2296 port_id, rx_queue_id, local_conf.offloads, 2297 dev_info.rx_queue_offload_capa, 2298 __func__); 2299 return -EINVAL; 2300 } 2301 2302 if (local_conf.share_group > 0 && 2303 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 2304 RTE_ETHDEV_LOG(ERR, 2305 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n", 2306 port_id, rx_queue_id, local_conf.share_group); 2307 return -EINVAL; 2308 } 2309 2310 /* 2311 * If LRO is enabled, check that the maximum aggregated packet 2312 * size is supported by the configured device. 2313 */ 2314 /* Get the real Ethernet overhead length */ 2315 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 2316 uint32_t overhead_len; 2317 uint32_t max_rx_pktlen; 2318 int ret; 2319 2320 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2321 dev_info.max_mtu); 2322 max_rx_pktlen = dev->data->mtu + overhead_len; 2323 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2324 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2325 ret = eth_dev_check_lro_pkt_size(port_id, 2326 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2327 max_rx_pktlen, 2328 dev_info.max_lro_pkt_size); 2329 if (ret != 0) 2330 return ret; 2331 } 2332 2333 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2334 socket_id, &local_conf, mp); 2335 if (!ret) { 2336 if (!dev->data->min_rx_buf_size || 2337 dev->data->min_rx_buf_size > mbp_buf_size) 2338 dev->data->min_rx_buf_size = mbp_buf_size; 2339 } 2340 2341 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2342 rx_conf, ret); 2343 return eth_err(port_id, ret); 2344 } 2345 2346 int 2347 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2348 uint16_t nb_rx_desc, 2349 const struct rte_eth_hairpin_conf *conf) 2350 { 2351 int ret; 2352 struct rte_eth_dev *dev; 2353 struct rte_eth_hairpin_cap cap; 2354 int i; 2355 int count; 2356 2357 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2358 dev = &rte_eth_devices[port_id]; 2359 2360 if (rx_queue_id >= dev->data->nb_rx_queues) { 2361 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2362 return -EINVAL; 2363 } 2364 2365 if (conf == NULL) { 2366 RTE_ETHDEV_LOG(ERR, 2367 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2368 port_id); 2369 return -EINVAL; 2370 } 2371 2372 if (conf->reserved != 0) { 2373 RTE_ETHDEV_LOG(ERR, 2374 "Rx hairpin reserved field not zero\n"); 2375 return -EINVAL; 2376 } 2377 2378 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2379 if (ret != 0) 2380 return ret; 2381 if (*dev->dev_ops->rx_hairpin_queue_setup == NULL) 2382 return -ENOTSUP; 2383 /* if nb_rx_desc is zero use max number of desc from the driver. 
*/ 2384 if (nb_rx_desc == 0) 2385 nb_rx_desc = cap.max_nb_desc; 2386 if (nb_rx_desc > cap.max_nb_desc) { 2387 RTE_ETHDEV_LOG(ERR, 2388 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu", 2389 nb_rx_desc, cap.max_nb_desc); 2390 return -EINVAL; 2391 } 2392 if (conf->peer_count > cap.max_rx_2_tx) { 2393 RTE_ETHDEV_LOG(ERR, 2394 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu", 2395 conf->peer_count, cap.max_rx_2_tx); 2396 return -EINVAL; 2397 } 2398 if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) { 2399 RTE_ETHDEV_LOG(ERR, 2400 "Attempt to use locked device memory for Rx queue, which is not supported"); 2401 return -EINVAL; 2402 } 2403 if (conf->use_rte_memory && !cap.rx_cap.rte_memory) { 2404 RTE_ETHDEV_LOG(ERR, 2405 "Attempt to use DPDK memory for Rx queue, which is not supported"); 2406 return -EINVAL; 2407 } 2408 if (conf->use_locked_device_memory && conf->use_rte_memory) { 2409 RTE_ETHDEV_LOG(ERR, 2410 "Attempt to use mutually exclusive memory settings for Rx queue"); 2411 return -EINVAL; 2412 } 2413 if (conf->force_memory && 2414 !conf->use_locked_device_memory && 2415 !conf->use_rte_memory) { 2416 RTE_ETHDEV_LOG(ERR, 2417 "Attempt to force Rx queue memory settings, but none is set"); 2418 return -EINVAL; 2419 } 2420 if (conf->peer_count == 0) { 2421 RTE_ETHDEV_LOG(ERR, 2422 "Invalid value for number of peers for Rx queue(=%u), should be: > 0", 2423 conf->peer_count); 2424 return -EINVAL; 2425 } 2426 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 2427 cap.max_nb_queues != UINT16_MAX; i++) { 2428 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 2429 count++; 2430 } 2431 if (count > cap.max_nb_queues) { 2432 RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d", 2433 cap.max_nb_queues); 2434 return -EINVAL; 2435 } 2436 if (dev->data->dev_started) 2437 return -EBUSY; 2438 eth_dev_rxq_release(dev, rx_queue_id); 2439 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 2440 nb_rx_desc, conf); 2441 if (ret == 0) 2442 dev->data->rx_queue_state[rx_queue_id] = 2443 RTE_ETH_QUEUE_STATE_HAIRPIN; 2444 ret = eth_err(port_id, ret); 2445 2446 rte_eth_trace_rx_hairpin_queue_setup(port_id, rx_queue_id, nb_rx_desc, 2447 conf, ret); 2448 2449 return ret; 2450 } 2451 2452 int 2453 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2454 uint16_t nb_tx_desc, unsigned int socket_id, 2455 const struct rte_eth_txconf *tx_conf) 2456 { 2457 struct rte_eth_dev *dev; 2458 struct rte_eth_dev_info dev_info; 2459 struct rte_eth_txconf local_conf; 2460 int ret; 2461 2462 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2463 dev = &rte_eth_devices[port_id]; 2464 2465 if (tx_queue_id >= dev->data->nb_tx_queues) { 2466 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2467 return -EINVAL; 2468 } 2469 2470 if (*dev->dev_ops->tx_queue_setup == NULL) 2471 return -ENOTSUP; 2472 2473 if (tx_conf != NULL && 2474 (tx_conf->reserved_64s[0] != 0 || 2475 tx_conf->reserved_64s[1] != 0 || 2476 tx_conf->reserved_ptrs[0] != NULL || 2477 tx_conf->reserved_ptrs[1] != NULL)) { 2478 RTE_ETHDEV_LOG(ERR, "Tx conf reserved fields not zero\n"); 2479 return -EINVAL; 2480 } 2481 2482 ret = rte_eth_dev_info_get(port_id, &dev_info); 2483 if (ret != 0) 2484 return ret; 2485 2486 /* Use default specified by driver, if nb_tx_desc is zero */ 2487 if (nb_tx_desc == 0) { 2488 nb_tx_desc = dev_info.default_txportconf.ring_size; 2489 /* If driver default is zero, fall back on EAL default */ 2490 if (nb_tx_desc == 0) 2491 nb_tx_desc = 
RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2492 }
2493 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2494 nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2495 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2496 RTE_ETHDEV_LOG(ERR,
2497 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2498 nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2499 dev_info.tx_desc_lim.nb_min,
2500 dev_info.tx_desc_lim.nb_align);
2501 return -EINVAL;
2502 }
2503
2504 if (dev->data->dev_started &&
2505 !(dev_info.dev_capa &
2506 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2507 return -EBUSY;
2508
2509 if (dev->data->dev_started &&
2510 (dev->data->tx_queue_state[tx_queue_id] !=
2511 RTE_ETH_QUEUE_STATE_STOPPED))
2512 return -EBUSY;
2513
2514 eth_dev_txq_release(dev, tx_queue_id);
2515
2516 if (tx_conf == NULL)
2517 tx_conf = &dev_info.default_txconf;
2518
2519 local_conf = *tx_conf;
2520
2521 /*
2522 * If an offloading has already been enabled in
2523 * rte_eth_dev_configure(), it has been enabled on all queues,
2524 * so there is no need to enable it in this queue again.
2525 * The local_conf.offloads input to underlying PMD only carries
2526 * those offloadings which are only enabled on this queue and
2527 * not enabled on all queues.
2528 */
2529 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2530
2531 /*
2532 * New added offloadings for this queue are those not enabled in
2533 * rte_eth_dev_configure() and they must be per-queue type.
2534 * A pure per-port offloading can't be enabled on a queue while
2535 * disabled on another queue. A pure per-port offloading can't
2536 * be enabled for any queue as new added one if it hasn't been
2537 * enabled in rte_eth_dev_configure().
2538 */
2539 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2540 local_conf.offloads) {
2541 RTE_ETHDEV_LOG(ERR,
2542 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2543 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2544 port_id, tx_queue_id, local_conf.offloads,
2545 dev_info.tx_queue_offload_capa,
2546 __func__);
2547 return -EINVAL;
2548 }
2549
2550 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2551 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2552 tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2553 }
2554
2555 int
2556 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2557 uint16_t nb_tx_desc,
2558 const struct rte_eth_hairpin_conf *conf)
2559 {
2560 struct rte_eth_dev *dev;
2561 struct rte_eth_hairpin_cap cap;
2562 int i;
2563 int count;
2564 int ret;
2565
2566 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2567 dev = &rte_eth_devices[port_id];
2568
2569 if (tx_queue_id >= dev->data->nb_tx_queues) {
2570 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2571 return -EINVAL;
2572 }
2573
2574 if (conf == NULL) {
2575 RTE_ETHDEV_LOG(ERR,
2576 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2577 port_id);
2578 return -EINVAL;
2579 }
2580
2581 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2582 if (ret != 0)
2583 return ret;
2584 if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
2585 return -ENOTSUP;
2586 /* if nb_tx_desc is zero use max number of desc from the driver. */
2587 if (nb_tx_desc == 0)
2588 nb_tx_desc = cap.max_nb_desc;
2589 if (nb_tx_desc > cap.max_nb_desc) {
2590 RTE_ETHDEV_LOG(ERR,
2591 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2592 nb_tx_desc, cap.max_nb_desc);
2593 return -EINVAL;
2594 }
2595 if (conf->peer_count > cap.max_tx_2_rx) {
2596 RTE_ETHDEV_LOG(ERR,
2597 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2598 conf->peer_count, cap.max_tx_2_rx);
2599 return -EINVAL;
2600 }
2601 if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
2602 RTE_ETHDEV_LOG(ERR,
2603 "Attempt to use locked device memory for Tx queue, which is not supported");
2604 return -EINVAL;
2605 }
2606 if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
2607 RTE_ETHDEV_LOG(ERR,
2608 "Attempt to use DPDK memory for Tx queue, which is not supported");
2609 return -EINVAL;
2610 }
2611 if (conf->use_locked_device_memory && conf->use_rte_memory) {
2612 RTE_ETHDEV_LOG(ERR,
2613 "Attempt to use mutually exclusive memory settings for Tx queue");
2614 return -EINVAL;
2615 }
2616 if (conf->force_memory &&
2617 !conf->use_locked_device_memory &&
2618 !conf->use_rte_memory) {
2619 RTE_ETHDEV_LOG(ERR,
2620 "Attempt to force Tx queue memory settings, but none is set");
2621 return -EINVAL;
2622 }
2623 if (conf->peer_count == 0) {
2624 RTE_ETHDEV_LOG(ERR,
2625 "Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2626 conf->peer_count);
2627 return -EINVAL;
2628 }
2629 for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2630 cap.max_nb_queues != UINT16_MAX; i++) {
2631 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2632 count++;
2633 }
2634 if (count > cap.max_nb_queues) {
2635 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d",
2636 cap.max_nb_queues);
2637 return -EINVAL;
2638 }
2639 if (dev->data->dev_started)
2640 return -EBUSY;
2641 eth_dev_txq_release(dev, tx_queue_id);
2642 ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2643 (dev, tx_queue_id, nb_tx_desc, conf);
2644 if (ret == 0)
2645 dev->data->tx_queue_state[tx_queue_id] =
2646 RTE_ETH_QUEUE_STATE_HAIRPIN;
2647 ret = eth_err(port_id, ret);
2648
2649 rte_eth_trace_tx_hairpin_queue_setup(port_id, tx_queue_id, nb_tx_desc,
2650 conf, ret);
2651
2652 return ret;
2653 }
2654
2655 int
2656 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2657 {
2658 struct rte_eth_dev *dev;
2659 int ret;
2660
2661 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2662 dev = &rte_eth_devices[tx_port];
2663
2664 if (dev->data->dev_started == 0) {
2665 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2666 return -EBUSY;
2667 }
2668
2669 if (*dev->dev_ops->hairpin_bind == NULL)
2670 return -ENOTSUP;
2671 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2672 if (ret != 0)
2673 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2674 " to Rx %d (%d - all ports)\n",
2675 tx_port, rx_port, RTE_MAX_ETHPORTS);
2676
2677 rte_eth_trace_hairpin_bind(tx_port, rx_port, ret);
2678
2679 return ret;
2680 }
2681
2682 int
2683 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2684 {
2685 struct rte_eth_dev *dev;
2686 int ret;
2687
2688 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2689 dev = &rte_eth_devices[tx_port];
2690
2691 if (dev->data->dev_started == 0) {
2692 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2693 return -EBUSY;
2694 }
2695
2696 if (*dev->dev_ops->hairpin_unbind == NULL)
2697 return -ENOTSUP;
2698 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2699 if (ret != 0)
2700
RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2701 " from Rx %d (%d - all ports)\n", 2702 tx_port, rx_port, RTE_MAX_ETHPORTS); 2703 2704 rte_eth_trace_hairpin_unbind(tx_port, rx_port, ret); 2705 2706 return ret; 2707 } 2708 2709 int 2710 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2711 size_t len, uint32_t direction) 2712 { 2713 struct rte_eth_dev *dev; 2714 int ret; 2715 2716 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2717 dev = &rte_eth_devices[port_id]; 2718 2719 if (peer_ports == NULL) { 2720 RTE_ETHDEV_LOG(ERR, 2721 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2722 port_id); 2723 return -EINVAL; 2724 } 2725 2726 if (len == 0) { 2727 RTE_ETHDEV_LOG(ERR, 2728 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2729 port_id); 2730 return -EINVAL; 2731 } 2732 2733 if (*dev->dev_ops->hairpin_get_peer_ports == NULL) 2734 return -ENOTSUP; 2735 2736 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2737 len, direction); 2738 if (ret < 0) 2739 RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2740 port_id, direction ? "Rx" : "Tx"); 2741 2742 rte_eth_trace_hairpin_get_peer_ports(port_id, peer_ports, len, 2743 direction, ret); 2744 2745 return ret; 2746 } 2747 2748 void 2749 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2750 void *userdata __rte_unused) 2751 { 2752 rte_pktmbuf_free_bulk(pkts, unsent); 2753 2754 rte_eth_trace_tx_buffer_drop_callback((void **)pkts, unsent); 2755 } 2756 2757 void 2758 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2759 void *userdata) 2760 { 2761 uint64_t *count = userdata; 2762 2763 rte_pktmbuf_free_bulk(pkts, unsent); 2764 *count += unsent; 2765 2766 rte_eth_trace_tx_buffer_count_callback((void **)pkts, unsent, *count); 2767 } 2768 2769 int 2770 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2771 buffer_tx_error_fn cbfn, void *userdata) 2772 { 2773 if (buffer == NULL) { 2774 RTE_ETHDEV_LOG(ERR, 2775 "Cannot set Tx buffer error callback to NULL buffer\n"); 2776 return -EINVAL; 2777 } 2778 2779 buffer->error_callback = cbfn; 2780 buffer->error_userdata = userdata; 2781 2782 rte_eth_trace_tx_buffer_set_err_callback(buffer); 2783 2784 return 0; 2785 } 2786 2787 int 2788 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2789 { 2790 int ret = 0; 2791 2792 if (buffer == NULL) { 2793 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2794 return -EINVAL; 2795 } 2796 2797 buffer->size = size; 2798 if (buffer->error_callback == NULL) { 2799 ret = rte_eth_tx_buffer_set_err_callback( 2800 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2801 } 2802 2803 rte_eth_trace_tx_buffer_init(buffer, size, ret); 2804 2805 return ret; 2806 } 2807 2808 int 2809 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2810 { 2811 struct rte_eth_dev *dev; 2812 int ret; 2813 2814 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2815 dev = &rte_eth_devices[port_id]; 2816 2817 if (*dev->dev_ops->tx_done_cleanup == NULL) 2818 return -ENOTSUP; 2819 2820 /* Call driver to free pending mbufs. 
*/ 2821 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2822 free_cnt); 2823 ret = eth_err(port_id, ret); 2824 2825 rte_eth_trace_tx_done_cleanup(port_id, queue_id, free_cnt, ret); 2826 2827 return ret; 2828 } 2829 2830 int 2831 rte_eth_promiscuous_enable(uint16_t port_id) 2832 { 2833 struct rte_eth_dev *dev; 2834 int diag = 0; 2835 2836 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2837 dev = &rte_eth_devices[port_id]; 2838 2839 if (dev->data->promiscuous == 1) 2840 return 0; 2841 2842 if (*dev->dev_ops->promiscuous_enable == NULL) 2843 return -ENOTSUP; 2844 2845 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2846 dev->data->promiscuous = (diag == 0) ? 1 : 0; 2847 2848 diag = eth_err(port_id, diag); 2849 2850 rte_eth_trace_promiscuous_enable(port_id, dev->data->promiscuous, 2851 diag); 2852 2853 return diag; 2854 } 2855 2856 int 2857 rte_eth_promiscuous_disable(uint16_t port_id) 2858 { 2859 struct rte_eth_dev *dev; 2860 int diag = 0; 2861 2862 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2863 dev = &rte_eth_devices[port_id]; 2864 2865 if (dev->data->promiscuous == 0) 2866 return 0; 2867 2868 if (*dev->dev_ops->promiscuous_disable == NULL) 2869 return -ENOTSUP; 2870 2871 dev->data->promiscuous = 0; 2872 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2873 if (diag != 0) 2874 dev->data->promiscuous = 1; 2875 2876 diag = eth_err(port_id, diag); 2877 2878 rte_eth_trace_promiscuous_disable(port_id, dev->data->promiscuous, 2879 diag); 2880 2881 return diag; 2882 } 2883 2884 int 2885 rte_eth_promiscuous_get(uint16_t port_id) 2886 { 2887 struct rte_eth_dev *dev; 2888 2889 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2890 dev = &rte_eth_devices[port_id]; 2891 2892 rte_eth_trace_promiscuous_get(port_id, dev->data->promiscuous); 2893 2894 return dev->data->promiscuous; 2895 } 2896 2897 int 2898 rte_eth_allmulticast_enable(uint16_t port_id) 2899 { 2900 struct rte_eth_dev *dev; 2901 int diag; 2902 2903 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2904 dev = &rte_eth_devices[port_id]; 2905 2906 if (dev->data->all_multicast == 1) 2907 return 0; 2908 2909 if (*dev->dev_ops->allmulticast_enable == NULL) 2910 return -ENOTSUP; 2911 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2912 dev->data->all_multicast = (diag == 0) ? 
1 : 0; 2913 2914 diag = eth_err(port_id, diag); 2915 2916 rte_eth_trace_allmulticast_enable(port_id, dev->data->all_multicast, 2917 diag); 2918 2919 return diag; 2920 } 2921 2922 int 2923 rte_eth_allmulticast_disable(uint16_t port_id) 2924 { 2925 struct rte_eth_dev *dev; 2926 int diag; 2927 2928 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2929 dev = &rte_eth_devices[port_id]; 2930 2931 if (dev->data->all_multicast == 0) 2932 return 0; 2933 2934 if (*dev->dev_ops->allmulticast_disable == NULL) 2935 return -ENOTSUP; 2936 dev->data->all_multicast = 0; 2937 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2938 if (diag != 0) 2939 dev->data->all_multicast = 1; 2940 2941 diag = eth_err(port_id, diag); 2942 2943 rte_eth_trace_allmulticast_disable(port_id, dev->data->all_multicast, 2944 diag); 2945 2946 return diag; 2947 } 2948 2949 int 2950 rte_eth_allmulticast_get(uint16_t port_id) 2951 { 2952 struct rte_eth_dev *dev; 2953 2954 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2955 dev = &rte_eth_devices[port_id]; 2956 2957 rte_eth_trace_allmulticast_get(port_id, dev->data->all_multicast); 2958 2959 return dev->data->all_multicast; 2960 } 2961 2962 int 2963 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2964 { 2965 struct rte_eth_dev *dev; 2966 2967 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2968 dev = &rte_eth_devices[port_id]; 2969 2970 if (eth_link == NULL) { 2971 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2972 port_id); 2973 return -EINVAL; 2974 } 2975 2976 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2977 rte_eth_linkstatus_get(dev, eth_link); 2978 else { 2979 if (*dev->dev_ops->link_update == NULL) 2980 return -ENOTSUP; 2981 (*dev->dev_ops->link_update)(dev, 1); 2982 *eth_link = dev->data->dev_link; 2983 } 2984 2985 rte_eth_trace_link_get(port_id, eth_link); 2986 2987 return 0; 2988 } 2989 2990 int 2991 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2992 { 2993 struct rte_eth_dev *dev; 2994 2995 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2996 dev = &rte_eth_devices[port_id]; 2997 2998 if (eth_link == NULL) { 2999 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 3000 port_id); 3001 return -EINVAL; 3002 } 3003 3004 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 3005 rte_eth_linkstatus_get(dev, eth_link); 3006 else { 3007 if (*dev->dev_ops->link_update == NULL) 3008 return -ENOTSUP; 3009 (*dev->dev_ops->link_update)(dev, 0); 3010 *eth_link = dev->data->dev_link; 3011 } 3012 3013 rte_eth_trace_link_get_nowait(port_id, eth_link); 3014 3015 return 0; 3016 } 3017 3018 const char * 3019 rte_eth_link_speed_to_str(uint32_t link_speed) 3020 { 3021 const char *ret; 3022 3023 switch (link_speed) { 3024 case RTE_ETH_SPEED_NUM_NONE: 3025 ret = "None"; 3026 break; 3027 case RTE_ETH_SPEED_NUM_10M: 3028 ret = "10 Mbps"; 3029 break; 3030 case RTE_ETH_SPEED_NUM_100M: 3031 ret = "100 Mbps"; 3032 break; 3033 case RTE_ETH_SPEED_NUM_1G: 3034 ret = "1 Gbps"; 3035 break; 3036 case RTE_ETH_SPEED_NUM_2_5G: 3037 ret = "2.5 Gbps"; 3038 break; 3039 case RTE_ETH_SPEED_NUM_5G: 3040 ret = "5 Gbps"; 3041 break; 3042 case RTE_ETH_SPEED_NUM_10G: 3043 ret = "10 Gbps"; 3044 break; 3045 case RTE_ETH_SPEED_NUM_20G: 3046 ret = "20 Gbps"; 3047 break; 3048 case RTE_ETH_SPEED_NUM_25G: 3049 ret = "25 Gbps"; 3050 break; 3051 case RTE_ETH_SPEED_NUM_40G: 3052 ret = "40 Gbps"; 3053 break; 3054 case RTE_ETH_SPEED_NUM_50G: 3055 ret = "50 Gbps"; 3056 break; 3057 case RTE_ETH_SPEED_NUM_56G: 3058 ret 
= "56 Gbps"; 3059 break; 3060 case RTE_ETH_SPEED_NUM_100G: 3061 ret = "100 Gbps"; 3062 break; 3063 case RTE_ETH_SPEED_NUM_200G: 3064 ret = "200 Gbps"; 3065 break; 3066 case RTE_ETH_SPEED_NUM_400G: 3067 ret = "400 Gbps"; 3068 break; 3069 case RTE_ETH_SPEED_NUM_UNKNOWN: 3070 ret = "Unknown"; 3071 break; 3072 default: 3073 ret = "Invalid"; 3074 } 3075 3076 rte_eth_trace_link_speed_to_str(link_speed, ret); 3077 3078 return ret; 3079 } 3080 3081 int 3082 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 3083 { 3084 int ret; 3085 3086 if (str == NULL) { 3087 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 3088 return -EINVAL; 3089 } 3090 3091 if (len == 0) { 3092 RTE_ETHDEV_LOG(ERR, 3093 "Cannot convert link to string with zero size\n"); 3094 return -EINVAL; 3095 } 3096 3097 if (eth_link == NULL) { 3098 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 3099 return -EINVAL; 3100 } 3101 3102 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 3103 ret = snprintf(str, len, "Link down"); 3104 else 3105 ret = snprintf(str, len, "Link up at %s %s %s", 3106 rte_eth_link_speed_to_str(eth_link->link_speed), 3107 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 3108 "FDX" : "HDX", 3109 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 3110 "Autoneg" : "Fixed"); 3111 3112 rte_eth_trace_link_to_str(len, eth_link, str, ret); 3113 3114 return ret; 3115 } 3116 3117 int 3118 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 3119 { 3120 struct rte_eth_dev *dev; 3121 int ret; 3122 3123 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3124 dev = &rte_eth_devices[port_id]; 3125 3126 if (stats == NULL) { 3127 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 3128 port_id); 3129 return -EINVAL; 3130 } 3131 3132 memset(stats, 0, sizeof(*stats)); 3133 3134 if (*dev->dev_ops->stats_get == NULL) 3135 return -ENOTSUP; 3136 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 3137 ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 3138 3139 rte_eth_trace_stats_get(port_id, stats, ret); 3140 3141 return ret; 3142 } 3143 3144 int 3145 rte_eth_stats_reset(uint16_t port_id) 3146 { 3147 struct rte_eth_dev *dev; 3148 int ret; 3149 3150 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3151 dev = &rte_eth_devices[port_id]; 3152 3153 if (*dev->dev_ops->stats_reset == NULL) 3154 return -ENOTSUP; 3155 ret = (*dev->dev_ops->stats_reset)(dev); 3156 if (ret != 0) 3157 return eth_err(port_id, ret); 3158 3159 dev->data->rx_mbuf_alloc_failed = 0; 3160 3161 rte_eth_trace_stats_reset(port_id); 3162 3163 return 0; 3164 } 3165 3166 static inline int 3167 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 3168 { 3169 uint16_t nb_rxqs, nb_txqs; 3170 int count; 3171 3172 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3173 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3174 3175 count = RTE_NB_STATS; 3176 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 3177 count += nb_rxqs * RTE_NB_RXQ_STATS; 3178 count += nb_txqs * RTE_NB_TXQ_STATS; 3179 } 3180 3181 return count; 3182 } 3183 3184 static int 3185 eth_dev_get_xstats_count(uint16_t port_id) 3186 { 3187 struct rte_eth_dev *dev; 3188 int count; 3189 3190 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3191 dev = &rte_eth_devices[port_id]; 3192 if (dev->dev_ops->xstats_get_names != NULL) { 3193 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 3194 if (count < 0) 3195 return eth_err(port_id, count); 3196 } else 
3197 count = 0; 3198 3199 3200 count += eth_dev_get_xstats_basic_count(dev); 3201 3202 return count; 3203 } 3204 3205 int 3206 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 3207 uint64_t *id) 3208 { 3209 int cnt_xstats, idx_xstat; 3210 3211 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3212 3213 if (xstat_name == NULL) { 3214 RTE_ETHDEV_LOG(ERR, 3215 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 3216 port_id); 3217 return -ENOMEM; 3218 } 3219 3220 if (id == NULL) { 3221 RTE_ETHDEV_LOG(ERR, 3222 "Cannot get ethdev port %u xstats ID to NULL\n", 3223 port_id); 3224 return -ENOMEM; 3225 } 3226 3227 /* Get count */ 3228 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 3229 if (cnt_xstats < 0) { 3230 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 3231 return -ENODEV; 3232 } 3233 3234 /* Get id-name lookup table */ 3235 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 3236 3237 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 3238 port_id, xstats_names, cnt_xstats, NULL)) { 3239 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 3240 return -1; 3241 } 3242 3243 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 3244 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 3245 *id = idx_xstat; 3246 3247 rte_eth_trace_xstats_get_id_by_name(port_id, 3248 xstat_name, *id); 3249 3250 return 0; 3251 }; 3252 } 3253 3254 return -EINVAL; 3255 } 3256 3257 /* retrieve basic stats names */ 3258 static int 3259 eth_basic_stats_get_names(struct rte_eth_dev *dev, 3260 struct rte_eth_xstat_name *xstats_names) 3261 { 3262 int cnt_used_entries = 0; 3263 uint32_t idx, id_queue; 3264 uint16_t num_q; 3265 3266 for (idx = 0; idx < RTE_NB_STATS; idx++) { 3267 strlcpy(xstats_names[cnt_used_entries].name, 3268 eth_dev_stats_strings[idx].name, 3269 sizeof(xstats_names[0].name)); 3270 cnt_used_entries++; 3271 } 3272 3273 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3274 return cnt_used_entries; 3275 3276 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3277 for (id_queue = 0; id_queue < num_q; id_queue++) { 3278 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 3279 snprintf(xstats_names[cnt_used_entries].name, 3280 sizeof(xstats_names[0].name), 3281 "rx_q%u_%s", 3282 id_queue, eth_dev_rxq_stats_strings[idx].name); 3283 cnt_used_entries++; 3284 } 3285 3286 } 3287 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3288 for (id_queue = 0; id_queue < num_q; id_queue++) { 3289 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 3290 snprintf(xstats_names[cnt_used_entries].name, 3291 sizeof(xstats_names[0].name), 3292 "tx_q%u_%s", 3293 id_queue, eth_dev_txq_stats_strings[idx].name); 3294 cnt_used_entries++; 3295 } 3296 } 3297 return cnt_used_entries; 3298 } 3299 3300 /* retrieve ethdev extended statistics names */ 3301 int 3302 rte_eth_xstats_get_names_by_id(uint16_t port_id, 3303 struct rte_eth_xstat_name *xstats_names, unsigned int size, 3304 uint64_t *ids) 3305 { 3306 struct rte_eth_xstat_name *xstats_names_copy; 3307 unsigned int no_basic_stat_requested = 1; 3308 unsigned int no_ext_stat_requested = 1; 3309 unsigned int expected_entries; 3310 unsigned int basic_count; 3311 struct rte_eth_dev *dev; 3312 unsigned int i; 3313 int ret; 3314 3315 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3316 dev = &rte_eth_devices[port_id]; 3317 3318 basic_count = eth_dev_get_xstats_basic_count(dev); 3319 ret = eth_dev_get_xstats_count(port_id); 3320 if (ret < 0) 3321 return ret; 3322 
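/*
 * Illustrative usage sketch, not part of the library logic: a typical
 * application pattern built on rte_eth_xstats_get_id_by_name() above and
 * rte_eth_xstats_get_by_id() further below is to resolve a statistic name
 * once and then poll it cheaply by ID. The statistic name used here is an
 * assumption and must exist on the port; error handling is abbreviated.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets: %" PRIu64 "\n", value);
 */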
expected_entries = (unsigned int)ret; 3323 3324 /* Return max number of stats if no ids given */ 3325 if (!ids) { 3326 if (!xstats_names) 3327 return expected_entries; 3328 else if (xstats_names && size < expected_entries) 3329 return expected_entries; 3330 } 3331 3332 if (ids && !xstats_names) 3333 return -EINVAL; 3334 3335 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3336 uint64_t ids_copy[size]; 3337 3338 for (i = 0; i < size; i++) { 3339 if (ids[i] < basic_count) { 3340 no_basic_stat_requested = 0; 3341 break; 3342 } 3343 3344 /* 3345 * Convert ids to xstats ids that PMD knows. 3346 * ids known by user are basic + extended stats. 3347 */ 3348 ids_copy[i] = ids[i] - basic_count; 3349 } 3350 3351 if (no_basic_stat_requested) 3352 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3353 ids_copy, xstats_names, size); 3354 } 3355 3356 /* Retrieve all stats */ 3357 if (!ids) { 3358 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3359 expected_entries); 3360 if (num_stats < 0 || num_stats > (int)expected_entries) 3361 return num_stats; 3362 else 3363 return expected_entries; 3364 } 3365 3366 xstats_names_copy = calloc(expected_entries, 3367 sizeof(struct rte_eth_xstat_name)); 3368 3369 if (!xstats_names_copy) { 3370 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3371 return -ENOMEM; 3372 } 3373 3374 if (ids) { 3375 for (i = 0; i < size; i++) { 3376 if (ids[i] >= basic_count) { 3377 no_ext_stat_requested = 0; 3378 break; 3379 } 3380 } 3381 } 3382 3383 /* Fill xstats_names_copy structure */ 3384 if (ids && no_ext_stat_requested) { 3385 eth_basic_stats_get_names(dev, xstats_names_copy); 3386 } else { 3387 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3388 expected_entries); 3389 if (ret < 0) { 3390 free(xstats_names_copy); 3391 return ret; 3392 } 3393 } 3394 3395 /* Filter stats */ 3396 for (i = 0; i < size; i++) { 3397 if (ids[i] >= expected_entries) { 3398 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3399 free(xstats_names_copy); 3400 return -1; 3401 } 3402 xstats_names[i] = xstats_names_copy[ids[i]]; 3403 3404 rte_eth_trace_xstats_get_names_by_id(port_id, &xstats_names[i], 3405 ids[i]); 3406 } 3407 3408 free(xstats_names_copy); 3409 return size; 3410 } 3411 3412 int 3413 rte_eth_xstats_get_names(uint16_t port_id, 3414 struct rte_eth_xstat_name *xstats_names, 3415 unsigned int size) 3416 { 3417 struct rte_eth_dev *dev; 3418 int cnt_used_entries; 3419 int cnt_expected_entries; 3420 int cnt_driver_entries; 3421 int i; 3422 3423 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3424 if (xstats_names == NULL || cnt_expected_entries < 0 || 3425 (int)size < cnt_expected_entries) 3426 return cnt_expected_entries; 3427 3428 /* port_id checked in eth_dev_get_xstats_count() */ 3429 dev = &rte_eth_devices[port_id]; 3430 3431 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3432 3433 if (dev->dev_ops->xstats_get_names != NULL) { 3434 /* If there are any driver-specific xstats, append them 3435 * to end of list. 
3436 */ 3437 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 3438 dev, 3439 xstats_names + cnt_used_entries, 3440 size - cnt_used_entries); 3441 if (cnt_driver_entries < 0) 3442 return eth_err(port_id, cnt_driver_entries); 3443 cnt_used_entries += cnt_driver_entries; 3444 } 3445 3446 for (i = 0; i < cnt_used_entries; i++) 3447 rte_eth_trace_xstats_get_names(port_id, i, &xstats_names[i], 3448 size, cnt_used_entries); 3449 3450 return cnt_used_entries; 3451 } 3452 3453 3454 static int 3455 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 3456 { 3457 struct rte_eth_dev *dev; 3458 struct rte_eth_stats eth_stats; 3459 unsigned int count = 0, i, q; 3460 uint64_t val, *stats_ptr; 3461 uint16_t nb_rxqs, nb_txqs; 3462 int ret; 3463 3464 ret = rte_eth_stats_get(port_id, ð_stats); 3465 if (ret < 0) 3466 return ret; 3467 3468 dev = &rte_eth_devices[port_id]; 3469 3470 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3471 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3472 3473 /* global stats */ 3474 for (i = 0; i < RTE_NB_STATS; i++) { 3475 stats_ptr = RTE_PTR_ADD(ð_stats, 3476 eth_dev_stats_strings[i].offset); 3477 val = *stats_ptr; 3478 xstats[count++].value = val; 3479 } 3480 3481 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3482 return count; 3483 3484 /* per-rxq stats */ 3485 for (q = 0; q < nb_rxqs; q++) { 3486 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 3487 stats_ptr = RTE_PTR_ADD(ð_stats, 3488 eth_dev_rxq_stats_strings[i].offset + 3489 q * sizeof(uint64_t)); 3490 val = *stats_ptr; 3491 xstats[count++].value = val; 3492 } 3493 } 3494 3495 /* per-txq stats */ 3496 for (q = 0; q < nb_txqs; q++) { 3497 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 3498 stats_ptr = RTE_PTR_ADD(ð_stats, 3499 eth_dev_txq_stats_strings[i].offset + 3500 q * sizeof(uint64_t)); 3501 val = *stats_ptr; 3502 xstats[count++].value = val; 3503 } 3504 } 3505 return count; 3506 } 3507 3508 /* retrieve ethdev extended statistics */ 3509 int 3510 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 3511 uint64_t *values, unsigned int size) 3512 { 3513 unsigned int no_basic_stat_requested = 1; 3514 unsigned int no_ext_stat_requested = 1; 3515 unsigned int num_xstats_filled; 3516 unsigned int basic_count; 3517 uint16_t expected_entries; 3518 struct rte_eth_dev *dev; 3519 unsigned int i; 3520 int ret; 3521 3522 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3523 dev = &rte_eth_devices[port_id]; 3524 3525 ret = eth_dev_get_xstats_count(port_id); 3526 if (ret < 0) 3527 return ret; 3528 expected_entries = (uint16_t)ret; 3529 struct rte_eth_xstat xstats[expected_entries]; 3530 basic_count = eth_dev_get_xstats_basic_count(dev); 3531 3532 /* Return max number of stats if no ids given */ 3533 if (!ids) { 3534 if (!values) 3535 return expected_entries; 3536 else if (values && size < expected_entries) 3537 return expected_entries; 3538 } 3539 3540 if (ids && !values) 3541 return -EINVAL; 3542 3543 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 3544 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); 3545 uint64_t ids_copy[size]; 3546 3547 for (i = 0; i < size; i++) { 3548 if (ids[i] < basic_count) { 3549 no_basic_stat_requested = 0; 3550 break; 3551 } 3552 3553 /* 3554 * Convert ids to xstats ids that PMD knows. 3555 * ids known by user are basic + extended stats. 
3556 */ 3557 ids_copy[i] = ids[i] - basic_count; 3558 } 3559 3560 if (no_basic_stat_requested) 3561 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3562 values, size); 3563 } 3564 3565 if (ids) { 3566 for (i = 0; i < size; i++) { 3567 if (ids[i] >= basic_count) { 3568 no_ext_stat_requested = 0; 3569 break; 3570 } 3571 } 3572 } 3573 3574 /* Fill the xstats structure */ 3575 if (ids && no_ext_stat_requested) 3576 ret = eth_basic_stats_get(port_id, xstats); 3577 else 3578 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3579 3580 if (ret < 0) 3581 return ret; 3582 num_xstats_filled = (unsigned int)ret; 3583 3584 /* Return all stats */ 3585 if (!ids) { 3586 for (i = 0; i < num_xstats_filled; i++) 3587 values[i] = xstats[i].value; 3588 return expected_entries; 3589 } 3590 3591 /* Filter stats */ 3592 for (i = 0; i < size; i++) { 3593 if (ids[i] >= expected_entries) { 3594 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3595 return -1; 3596 } 3597 values[i] = xstats[ids[i]].value; 3598 } 3599 3600 rte_eth_trace_xstats_get_by_id(port_id, ids, values, size); 3601 3602 return size; 3603 } 3604 3605 int 3606 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3607 unsigned int n) 3608 { 3609 struct rte_eth_dev *dev; 3610 unsigned int count, i; 3611 signed int xcount = 0; 3612 int ret; 3613 3614 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3615 if (xstats == NULL && n > 0) 3616 return -EINVAL; 3617 dev = &rte_eth_devices[port_id]; 3618 3619 count = eth_dev_get_xstats_basic_count(dev); 3620 3621 /* implemented by the driver */ 3622 if (dev->dev_ops->xstats_get != NULL) { 3623 /* Retrieve the xstats from the driver at the end of the 3624 * xstats struct. 3625 */ 3626 xcount = (*dev->dev_ops->xstats_get)(dev, 3627 (n > count) ? xstats + count : NULL, 3628 (n > count) ? 
n - count : 0); 3629 3630 if (xcount < 0) 3631 return eth_err(port_id, xcount); 3632 } 3633 3634 if (n < count + xcount || xstats == NULL) 3635 return count + xcount; 3636 3637 /* now fill the xstats structure */ 3638 ret = eth_basic_stats_get(port_id, xstats); 3639 if (ret < 0) 3640 return ret; 3641 count = ret; 3642 3643 for (i = 0; i < count; i++) 3644 xstats[i].id = i; 3645 /* add an offset to driver-specific stats */ 3646 for ( ; i < count + xcount; i++) 3647 xstats[i].id += count; 3648 3649 for (i = 0; i < n; i++) 3650 rte_eth_trace_xstats_get(port_id, xstats[i]); 3651 3652 return count + xcount; 3653 } 3654 3655 /* reset ethdev extended statistics */ 3656 int 3657 rte_eth_xstats_reset(uint16_t port_id) 3658 { 3659 struct rte_eth_dev *dev; 3660 3661 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3662 dev = &rte_eth_devices[port_id]; 3663 3664 /* implemented by the driver */ 3665 if (dev->dev_ops->xstats_reset != NULL) { 3666 int ret = eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3667 3668 rte_eth_trace_xstats_reset(port_id, ret); 3669 3670 return ret; 3671 } 3672 3673 /* fallback to default */ 3674 return rte_eth_stats_reset(port_id); 3675 } 3676 3677 static int 3678 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3679 uint8_t stat_idx, uint8_t is_rx) 3680 { 3681 struct rte_eth_dev *dev; 3682 3683 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3684 dev = &rte_eth_devices[port_id]; 3685 3686 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3687 return -EINVAL; 3688 3689 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3690 return -EINVAL; 3691 3692 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3693 return -EINVAL; 3694 3695 if (*dev->dev_ops->queue_stats_mapping_set == NULL) 3696 return -ENOTSUP; 3697 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3698 } 3699 3700 int 3701 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3702 uint8_t stat_idx) 3703 { 3704 int ret; 3705 3706 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3707 tx_queue_id, 3708 stat_idx, STAT_QMAP_TX)); 3709 3710 rte_ethdev_trace_set_tx_queue_stats_mapping(port_id, tx_queue_id, 3711 stat_idx, ret); 3712 3713 return ret; 3714 } 3715 3716 int 3717 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3718 uint8_t stat_idx) 3719 { 3720 int ret; 3721 3722 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3723 rx_queue_id, 3724 stat_idx, STAT_QMAP_RX)); 3725 3726 rte_ethdev_trace_set_rx_queue_stats_mapping(port_id, rx_queue_id, 3727 stat_idx, ret); 3728 3729 return ret; 3730 } 3731 3732 int 3733 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3734 { 3735 struct rte_eth_dev *dev; 3736 int ret; 3737 3738 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3739 dev = &rte_eth_devices[port_id]; 3740 3741 if (fw_version == NULL && fw_size > 0) { 3742 RTE_ETHDEV_LOG(ERR, 3743 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3744 port_id); 3745 return -EINVAL; 3746 } 3747 3748 if (*dev->dev_ops->fw_version_get == NULL) 3749 return -ENOTSUP; 3750 ret = eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3751 fw_version, fw_size)); 3752 3753 rte_ethdev_trace_fw_version_get(port_id, fw_version, fw_size, ret); 3754 3755 return ret; 3756 } 3757 3758 int 3759 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3760 { 3761 struct rte_eth_dev *dev; 3762 const struct rte_eth_desc_lim lim = { 3763 
.nb_max = UINT16_MAX, 3764 .nb_min = 0, 3765 .nb_align = 1, 3766 .nb_seg_max = UINT16_MAX, 3767 .nb_mtu_seg_max = UINT16_MAX, 3768 }; 3769 int diag; 3770 3771 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3772 dev = &rte_eth_devices[port_id]; 3773 3774 if (dev_info == NULL) { 3775 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3776 port_id); 3777 return -EINVAL; 3778 } 3779 3780 /* 3781 * Init dev_info before port_id check since caller does not have 3782 * return status and does not know if get is successful or not. 3783 */ 3784 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3785 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3786 3787 dev_info->rx_desc_lim = lim; 3788 dev_info->tx_desc_lim = lim; 3789 dev_info->device = dev->device; 3790 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3791 RTE_ETHER_CRC_LEN; 3792 dev_info->max_mtu = UINT16_MAX; 3793 dev_info->rss_algo_capa = RTE_ETH_HASH_ALGO_CAPA_MASK(DEFAULT); 3794 3795 if (*dev->dev_ops->dev_infos_get == NULL) 3796 return -ENOTSUP; 3797 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3798 if (diag != 0) { 3799 /* Cleanup already filled in device information */ 3800 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3801 return eth_err(port_id, diag); 3802 } 3803 3804 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3805 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3806 RTE_MAX_QUEUES_PER_PORT); 3807 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3808 RTE_MAX_QUEUES_PER_PORT); 3809 3810 dev_info->driver_name = dev->device->driver->name; 3811 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3812 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3813 3814 dev_info->dev_flags = &dev->data->dev_flags; 3815 3816 rte_ethdev_trace_info_get(port_id, dev_info); 3817 3818 return 0; 3819 } 3820 3821 int 3822 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3823 { 3824 struct rte_eth_dev *dev; 3825 3826 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3827 dev = &rte_eth_devices[port_id]; 3828 3829 if (dev_conf == NULL) { 3830 RTE_ETHDEV_LOG(ERR, 3831 "Cannot get ethdev port %u configuration to NULL\n", 3832 port_id); 3833 return -EINVAL; 3834 } 3835 3836 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3837 3838 rte_ethdev_trace_conf_get(port_id, dev_conf); 3839 3840 return 0; 3841 } 3842 3843 int 3844 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3845 uint32_t *ptypes, int num) 3846 { 3847 int i, j; 3848 struct rte_eth_dev *dev; 3849 const uint32_t *all_ptypes; 3850 3851 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3852 dev = &rte_eth_devices[port_id]; 3853 3854 if (ptypes == NULL && num > 0) { 3855 RTE_ETHDEV_LOG(ERR, 3856 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3857 port_id); 3858 return -EINVAL; 3859 } 3860 3861 if (*dev->dev_ops->dev_supported_ptypes_get == NULL) 3862 return 0; 3863 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3864 3865 if (!all_ptypes) 3866 return 0; 3867 3868 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3869 if (all_ptypes[i] & ptype_mask) { 3870 if (j < num) { 3871 ptypes[j] = all_ptypes[i]; 3872 3873 rte_ethdev_trace_get_supported_ptypes(port_id, 3874 j, num, ptypes[j]); 3875 } 3876 j++; 3877 } 3878 3879 return j; 3880 } 3881 3882 int 3883 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3884 uint32_t *set_ptypes, unsigned int num) 3885 
{ 3886 const uint32_t valid_ptype_masks[] = { 3887 RTE_PTYPE_L2_MASK, 3888 RTE_PTYPE_L3_MASK, 3889 RTE_PTYPE_L4_MASK, 3890 RTE_PTYPE_TUNNEL_MASK, 3891 RTE_PTYPE_INNER_L2_MASK, 3892 RTE_PTYPE_INNER_L3_MASK, 3893 RTE_PTYPE_INNER_L4_MASK, 3894 }; 3895 const uint32_t *all_ptypes; 3896 struct rte_eth_dev *dev; 3897 uint32_t unused_mask; 3898 unsigned int i, j; 3899 int ret; 3900 3901 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3902 dev = &rte_eth_devices[port_id]; 3903 3904 if (num > 0 && set_ptypes == NULL) { 3905 RTE_ETHDEV_LOG(ERR, 3906 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3907 port_id); 3908 return -EINVAL; 3909 } 3910 3911 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3912 *dev->dev_ops->dev_ptypes_set == NULL) { 3913 ret = 0; 3914 goto ptype_unknown; 3915 } 3916 3917 if (ptype_mask == 0) { 3918 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3919 ptype_mask); 3920 goto ptype_unknown; 3921 } 3922 3923 unused_mask = ptype_mask; 3924 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3925 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3926 if (mask && mask != valid_ptype_masks[i]) { 3927 ret = -EINVAL; 3928 goto ptype_unknown; 3929 } 3930 unused_mask &= ~valid_ptype_masks[i]; 3931 } 3932 3933 if (unused_mask) { 3934 ret = -EINVAL; 3935 goto ptype_unknown; 3936 } 3937 3938 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3939 if (all_ptypes == NULL) { 3940 ret = 0; 3941 goto ptype_unknown; 3942 } 3943 3944 /* 3945 * Accommodate as many set_ptypes as possible. If the supplied 3946 * set_ptypes array is insufficient fill it partially. 3947 */ 3948 for (i = 0, j = 0; set_ptypes != NULL && 3949 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3950 if (ptype_mask & all_ptypes[i]) { 3951 if (j < num - 1) { 3952 set_ptypes[j] = all_ptypes[i]; 3953 3954 rte_ethdev_trace_set_ptypes(port_id, j, num, 3955 set_ptypes[j]); 3956 3957 j++; 3958 continue; 3959 } 3960 break; 3961 } 3962 } 3963 3964 if (set_ptypes != NULL && j < num) 3965 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3966 3967 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3968 3969 ptype_unknown: 3970 if (num > 0) 3971 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3972 3973 return ret; 3974 } 3975 3976 int 3977 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3978 unsigned int num) 3979 { 3980 int32_t ret; 3981 struct rte_eth_dev *dev; 3982 struct rte_eth_dev_info dev_info; 3983 3984 if (ma == NULL) { 3985 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3986 return -EINVAL; 3987 } 3988 3989 /* will check for us that port_id is a valid one */ 3990 ret = rte_eth_dev_info_get(port_id, &dev_info); 3991 if (ret != 0) 3992 return ret; 3993 3994 dev = &rte_eth_devices[port_id]; 3995 num = RTE_MIN(dev_info.max_mac_addrs, num); 3996 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3997 3998 rte_eth_trace_macaddrs_get(port_id, num); 3999 4000 return num; 4001 } 4002 4003 int 4004 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 4005 { 4006 struct rte_eth_dev *dev; 4007 4008 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4009 dev = &rte_eth_devices[port_id]; 4010 4011 if (mac_addr == NULL) { 4012 RTE_ETHDEV_LOG(ERR, 4013 "Cannot get ethdev port %u MAC address to NULL\n", 4014 port_id); 4015 return -EINVAL; 4016 } 4017 4018 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 4019 4020 rte_eth_trace_macaddr_get(port_id, mac_addr); 4021 4022 return 0; 4023 } 4024 4025 int 4026 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 4027 { 
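/*
 * Illustrative usage sketch, not part of the library logic: a typical
 * application sequence reads the current MTU with this function and, if
 * needed, raises it with rte_eth_dev_set_mtu() below. The port must already
 * be configured and the new value must respect the driver's min_mtu/max_mtu
 * limits; 9000 is just an example value.
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_get_mtu(port_id, &mtu) == 0 && mtu < 9000) {
 *		int rc = rte_eth_dev_set_mtu(port_id, 9000);
 *		if (rc != 0)
 *			printf("MTU update failed: %s\n", rte_strerror(-rc));
 *	}
 */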
4028 struct rte_eth_dev *dev; 4029 4030 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4031 dev = &rte_eth_devices[port_id]; 4032 4033 if (mtu == NULL) { 4034 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 4035 port_id); 4036 return -EINVAL; 4037 } 4038 4039 *mtu = dev->data->mtu; 4040 4041 rte_ethdev_trace_get_mtu(port_id, *mtu); 4042 4043 return 0; 4044 } 4045 4046 int 4047 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 4048 { 4049 int ret; 4050 struct rte_eth_dev_info dev_info; 4051 struct rte_eth_dev *dev; 4052 4053 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4054 dev = &rte_eth_devices[port_id]; 4055 if (*dev->dev_ops->mtu_set == NULL) 4056 return -ENOTSUP; 4057 4058 /* 4059 * Check if the device supports dev_infos_get, if it does not 4060 * skip min_mtu/max_mtu validation here as this requires values 4061 * that are populated within the call to rte_eth_dev_info_get() 4062 * which relies on dev->dev_ops->dev_infos_get. 4063 */ 4064 if (*dev->dev_ops->dev_infos_get != NULL) { 4065 ret = rte_eth_dev_info_get(port_id, &dev_info); 4066 if (ret != 0) 4067 return ret; 4068 4069 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 4070 if (ret != 0) 4071 return ret; 4072 } 4073 4074 if (dev->data->dev_configured == 0) { 4075 RTE_ETHDEV_LOG(ERR, 4076 "Port %u must be configured before MTU set\n", 4077 port_id); 4078 return -EINVAL; 4079 } 4080 4081 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 4082 if (ret == 0) 4083 dev->data->mtu = mtu; 4084 4085 ret = eth_err(port_id, ret); 4086 4087 rte_ethdev_trace_set_mtu(port_id, mtu, ret); 4088 4089 return ret; 4090 } 4091 4092 int 4093 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 4094 { 4095 struct rte_eth_dev *dev; 4096 int ret; 4097 4098 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4099 dev = &rte_eth_devices[port_id]; 4100 4101 if (!(dev->data->dev_conf.rxmode.offloads & 4102 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 4103 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 4104 port_id); 4105 return -ENOSYS; 4106 } 4107 4108 if (vlan_id > 4095) { 4109 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 4110 port_id, vlan_id); 4111 return -EINVAL; 4112 } 4113 if (*dev->dev_ops->vlan_filter_set == NULL) 4114 return -ENOTSUP; 4115 4116 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 4117 if (ret == 0) { 4118 struct rte_vlan_filter_conf *vfc; 4119 int vidx; 4120 int vbit; 4121 4122 vfc = &dev->data->vlan_filter_conf; 4123 vidx = vlan_id / 64; 4124 vbit = vlan_id % 64; 4125 4126 if (on) 4127 vfc->ids[vidx] |= RTE_BIT64(vbit); 4128 else 4129 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 4130 } 4131 4132 ret = eth_err(port_id, ret); 4133 4134 rte_ethdev_trace_vlan_filter(port_id, vlan_id, on, ret); 4135 4136 return ret; 4137 } 4138 4139 int 4140 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 4141 int on) 4142 { 4143 struct rte_eth_dev *dev; 4144 4145 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4146 dev = &rte_eth_devices[port_id]; 4147 4148 if (rx_queue_id >= dev->data->nb_rx_queues) { 4149 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 4150 return -EINVAL; 4151 } 4152 4153 if (*dev->dev_ops->vlan_strip_queue_set == NULL) 4154 return -ENOTSUP; 4155 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 4156 4157 rte_ethdev_trace_set_vlan_strip_on_queue(port_id, rx_queue_id, on); 4158 4159 return 0; 4160 } 4161 4162 int 4163 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 4164 enum rte_vlan_type vlan_type, 4165 uint16_t tpid) 4166 { 
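/*
 * Illustrative usage sketch, not part of the library logic: accepting a
 * single VLAN with rte_eth_dev_vlan_filter() above. This only works when
 * RTE_ETH_RX_OFFLOAD_VLAN_FILTER was enabled in the port's Rx offloads,
 * otherwise the call returns -ENOSYS; VLAN ID 100 is an arbitrary example.
 *
 *	int rc = rte_eth_dev_vlan_filter(port_id, 100, 1);
 *	if (rc != 0)
 *		printf("cannot add VLAN 100 to the filter: %s\n",
 *		       rte_strerror(-rc));
 */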
4167 struct rte_eth_dev *dev; 4168 int ret; 4169 4170 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4171 dev = &rte_eth_devices[port_id]; 4172 4173 if (*dev->dev_ops->vlan_tpid_set == NULL) 4174 return -ENOTSUP; 4175 ret = eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 4176 tpid)); 4177 4178 rte_ethdev_trace_set_vlan_ether_type(port_id, vlan_type, tpid, ret); 4179 4180 return ret; 4181 } 4182 4183 int 4184 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 4185 { 4186 struct rte_eth_dev_info dev_info; 4187 struct rte_eth_dev *dev; 4188 int ret = 0; 4189 int mask = 0; 4190 int cur, org = 0; 4191 uint64_t orig_offloads; 4192 uint64_t dev_offloads; 4193 uint64_t new_offloads; 4194 4195 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4196 dev = &rte_eth_devices[port_id]; 4197 4198 /* save original values in case of failure */ 4199 orig_offloads = dev->data->dev_conf.rxmode.offloads; 4200 dev_offloads = orig_offloads; 4201 4202 /* check which option changed by application */ 4203 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 4204 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 4205 if (cur != org) { 4206 if (cur) 4207 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4208 else 4209 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4210 mask |= RTE_ETH_VLAN_STRIP_MASK; 4211 } 4212 4213 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 4214 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 4215 if (cur != org) { 4216 if (cur) 4217 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4218 else 4219 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4220 mask |= RTE_ETH_VLAN_FILTER_MASK; 4221 } 4222 4223 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 4224 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 4225 if (cur != org) { 4226 if (cur) 4227 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4228 else 4229 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4230 mask |= RTE_ETH_VLAN_EXTEND_MASK; 4231 } 4232 4233 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 4234 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 4235 if (cur != org) { 4236 if (cur) 4237 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4238 else 4239 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4240 mask |= RTE_ETH_QINQ_STRIP_MASK; 4241 } 4242 4243 /*no change*/ 4244 if (mask == 0) 4245 return ret; 4246 4247 ret = rte_eth_dev_info_get(port_id, &dev_info); 4248 if (ret != 0) 4249 return ret; 4250 4251 /* Rx VLAN offloading must be within its device capabilities */ 4252 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 4253 new_offloads = dev_offloads & ~orig_offloads; 4254 RTE_ETHDEV_LOG(ERR, 4255 "Ethdev port_id=%u requested new added VLAN offloads " 4256 "0x%" PRIx64 " must be within Rx offloads capabilities " 4257 "0x%" PRIx64 " in %s()\n", 4258 port_id, new_offloads, dev_info.rx_offload_capa, 4259 __func__); 4260 return -EINVAL; 4261 } 4262 4263 if (*dev->dev_ops->vlan_offload_set == NULL) 4264 return -ENOTSUP; 4265 dev->data->dev_conf.rxmode.offloads = dev_offloads; 4266 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 4267 if (ret) { 4268 /* hit an error restore original values */ 4269 dev->data->dev_conf.rxmode.offloads = orig_offloads; 4270 } 4271 4272 ret = eth_err(port_id, ret); 4273 4274 rte_ethdev_trace_set_vlan_offload(port_id, offload_mask, ret); 4275 4276 return ret; 4277 } 4278 4279 int 4280 rte_eth_dev_get_vlan_offload(uint16_t port_id) 4281 { 4282 struct rte_eth_dev *dev; 4283 uint64_t *dev_offloads; 4284 int ret = 0; 
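	/*
	 * Illustrative note (not part of the original source): the value
	 * returned below is a bitmask of RTE_ETH_VLAN_*_OFFLOAD and
	 * RTE_ETH_QINQ_STRIP_OFFLOAD flags derived from the current
	 * rxmode.offloads. A usage sketch pairing it with the setter above:
	 *
	 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
	 *
	 *	if (mask >= 0)
	 *		rte_eth_dev_set_vlan_offload(port_id,
	 *				mask | RTE_ETH_VLAN_STRIP_OFFLOAD);
	 */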
4285 4286 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4287 dev = &rte_eth_devices[port_id]; 4288 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 4289 4290 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 4291 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 4292 4293 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 4294 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 4295 4296 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 4297 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 4298 4299 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 4300 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 4301 4302 rte_ethdev_trace_get_vlan_offload(port_id, ret); 4303 4304 return ret; 4305 } 4306 4307 int 4308 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 4309 { 4310 struct rte_eth_dev *dev; 4311 int ret; 4312 4313 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4314 dev = &rte_eth_devices[port_id]; 4315 4316 if (*dev->dev_ops->vlan_pvid_set == NULL) 4317 return -ENOTSUP; 4318 ret = eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 4319 4320 rte_ethdev_trace_set_vlan_pvid(port_id, pvid, on, ret); 4321 4322 return ret; 4323 } 4324 4325 int 4326 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4327 { 4328 struct rte_eth_dev *dev; 4329 int ret; 4330 4331 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4332 dev = &rte_eth_devices[port_id]; 4333 4334 if (fc_conf == NULL) { 4335 RTE_ETHDEV_LOG(ERR, 4336 "Cannot get ethdev port %u flow control config to NULL\n", 4337 port_id); 4338 return -EINVAL; 4339 } 4340 4341 if (*dev->dev_ops->flow_ctrl_get == NULL) 4342 return -ENOTSUP; 4343 memset(fc_conf, 0, sizeof(*fc_conf)); 4344 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 4345 4346 rte_ethdev_trace_flow_ctrl_get(port_id, fc_conf, ret); 4347 4348 return ret; 4349 } 4350 4351 int 4352 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4353 { 4354 struct rte_eth_dev *dev; 4355 int ret; 4356 4357 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4358 dev = &rte_eth_devices[port_id]; 4359 4360 if (fc_conf == NULL) { 4361 RTE_ETHDEV_LOG(ERR, 4362 "Cannot set ethdev port %u flow control from NULL config\n", 4363 port_id); 4364 return -EINVAL; 4365 } 4366 4367 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 4368 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 4369 return -EINVAL; 4370 } 4371 4372 if (*dev->dev_ops->flow_ctrl_set == NULL) 4373 return -ENOTSUP; 4374 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 4375 4376 rte_ethdev_trace_flow_ctrl_set(port_id, fc_conf, ret); 4377 4378 return ret; 4379 } 4380 4381 int 4382 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 4383 struct rte_eth_pfc_conf *pfc_conf) 4384 { 4385 struct rte_eth_dev *dev; 4386 int ret; 4387 4388 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4389 dev = &rte_eth_devices[port_id]; 4390 4391 if (pfc_conf == NULL) { 4392 RTE_ETHDEV_LOG(ERR, 4393 "Cannot set ethdev port %u priority flow control from NULL config\n", 4394 port_id); 4395 return -EINVAL; 4396 } 4397 4398 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 4399 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 4400 return -EINVAL; 4401 } 4402 4403 /* High water, low water validation are device specific */ 4404 if (*dev->dev_ops->priority_flow_ctrl_set == NULL) 4405 return -ENOTSUP; 4406 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 4407 (dev, pfc_conf)); 4408 4409 
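	/*
	 * Illustrative note (not part of the original source): a caller
	 * typically fills struct rte_eth_pfc_conf with the usual flow
	 * control parameters plus the priority to pause, e.g. (sketch only,
	 * values are arbitrary):
	 *
	 *	struct rte_eth_pfc_conf pfc = {
	 *		.fc = { .mode = RTE_ETH_FC_FULL, .pause_time = 0x680 },
	 *		.priority = 3,
	 *	};
	 *
	 *	rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc);
	 */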
rte_ethdev_trace_priority_flow_ctrl_set(port_id, pfc_conf, ret); 4410 4411 return ret; 4412 } 4413 4414 static int 4415 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4416 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4417 { 4418 if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) || 4419 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4420 if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) { 4421 RTE_ETHDEV_LOG(ERR, 4422 "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n", 4423 pfc_queue_conf->rx_pause.tx_qid, 4424 dev_info->nb_tx_queues); 4425 return -EINVAL; 4426 } 4427 4428 if (pfc_queue_conf->rx_pause.tc >= tc_max) { 4429 RTE_ETHDEV_LOG(ERR, 4430 "PFC TC not in range for Rx pause requested:%d max:%d\n", 4431 pfc_queue_conf->rx_pause.tc, tc_max); 4432 return -EINVAL; 4433 } 4434 } 4435 4436 return 0; 4437 } 4438 4439 static int 4440 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4441 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4442 { 4443 if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) || 4444 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4445 if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) { 4446 RTE_ETHDEV_LOG(ERR, 4447 "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n", 4448 pfc_queue_conf->tx_pause.rx_qid, 4449 dev_info->nb_rx_queues); 4450 return -EINVAL; 4451 } 4452 4453 if (pfc_queue_conf->tx_pause.tc >= tc_max) { 4454 RTE_ETHDEV_LOG(ERR, 4455 "PFC TC not in range for Tx pause requested:%d max:%d\n", 4456 pfc_queue_conf->tx_pause.tc, tc_max); 4457 return -EINVAL; 4458 } 4459 } 4460 4461 return 0; 4462 } 4463 4464 int 4465 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, 4466 struct rte_eth_pfc_queue_info *pfc_queue_info) 4467 { 4468 struct rte_eth_dev *dev; 4469 int ret; 4470 4471 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4472 dev = &rte_eth_devices[port_id]; 4473 4474 if (pfc_queue_info == NULL) { 4475 RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n", 4476 port_id); 4477 return -EINVAL; 4478 } 4479 4480 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL) 4481 return -ENOTSUP; 4482 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 4483 (dev, pfc_queue_info)); 4484 4485 rte_ethdev_trace_priority_flow_ctrl_queue_info_get(port_id, 4486 pfc_queue_info, ret); 4487 4488 return ret; 4489 } 4490 4491 int 4492 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, 4493 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4494 { 4495 struct rte_eth_pfc_queue_info pfc_info; 4496 struct rte_eth_dev_info dev_info; 4497 struct rte_eth_dev *dev; 4498 int ret; 4499 4500 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4501 dev = &rte_eth_devices[port_id]; 4502 4503 if (pfc_queue_conf == NULL) { 4504 RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n", 4505 port_id); 4506 return -EINVAL; 4507 } 4508 4509 ret = rte_eth_dev_info_get(port_id, &dev_info); 4510 if (ret != 0) 4511 return ret; 4512 4513 ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info); 4514 if (ret != 0) 4515 return ret; 4516 4517 if (pfc_info.tc_max == 0) { 4518 RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n", 4519 port_id); 4520 return -ENOTSUP; 4521 } 4522 4523 /* Check requested mode supported or not */ 4524 if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE && 4525 pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) { 4526 RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n", 4527 
port_id); 4528 return -EINVAL; 4529 } 4530 4531 if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE && 4532 pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) { 4533 RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n", 4534 port_id); 4535 return -EINVAL; 4536 } 4537 4538 /* Validate Rx pause parameters */ 4539 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4540 pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) { 4541 ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max, 4542 pfc_queue_conf); 4543 if (ret != 0) 4544 return ret; 4545 } 4546 4547 /* Validate Tx pause parameters */ 4548 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4549 pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) { 4550 ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max, 4551 pfc_queue_conf); 4552 if (ret != 0) 4553 return ret; 4554 } 4555 4556 if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL) 4557 return -ENOTSUP; 4558 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_config) 4559 (dev, pfc_queue_conf)); 4560 4561 rte_ethdev_trace_priority_flow_ctrl_queue_configure(port_id, 4562 pfc_queue_conf, ret); 4563 4564 return ret; 4565 } 4566 4567 static int 4568 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 4569 uint16_t reta_size) 4570 { 4571 uint16_t i, num; 4572 4573 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 4574 for (i = 0; i < num; i++) { 4575 if (reta_conf[i].mask) 4576 return 0; 4577 } 4578 4579 return -EINVAL; 4580 } 4581 4582 static int 4583 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 4584 uint16_t reta_size, 4585 uint16_t max_rxq) 4586 { 4587 uint16_t i, idx, shift; 4588 4589 if (max_rxq == 0) { 4590 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4591 return -EINVAL; 4592 } 4593 4594 for (i = 0; i < reta_size; i++) { 4595 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4596 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4597 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 4598 (reta_conf[idx].reta[shift] >= max_rxq)) { 4599 RTE_ETHDEV_LOG(ERR, 4600 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4601 idx, shift, 4602 reta_conf[idx].reta[shift], max_rxq); 4603 return -EINVAL; 4604 } 4605 } 4606 4607 return 0; 4608 } 4609 4610 int 4611 rte_eth_dev_rss_reta_update(uint16_t port_id, 4612 struct rte_eth_rss_reta_entry64 *reta_conf, 4613 uint16_t reta_size) 4614 { 4615 enum rte_eth_rx_mq_mode mq_mode; 4616 struct rte_eth_dev *dev; 4617 int ret; 4618 4619 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4620 dev = &rte_eth_devices[port_id]; 4621 4622 if (reta_conf == NULL) { 4623 RTE_ETHDEV_LOG(ERR, 4624 "Cannot update ethdev port %u RSS RETA to NULL\n", 4625 port_id); 4626 return -EINVAL; 4627 } 4628 4629 if (reta_size == 0) { 4630 RTE_ETHDEV_LOG(ERR, 4631 "Cannot update ethdev port %u RSS RETA with zero size\n", 4632 port_id); 4633 return -EINVAL; 4634 } 4635 4636 /* Check mask bits */ 4637 ret = eth_check_reta_mask(reta_conf, reta_size); 4638 if (ret < 0) 4639 return ret; 4640 4641 /* Check entry value */ 4642 ret = eth_check_reta_entry(reta_conf, reta_size, 4643 dev->data->nb_rx_queues); 4644 if (ret < 0) 4645 return ret; 4646 4647 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4648 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4649 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4650 return -ENOTSUP; 4651 } 4652 4653 if (*dev->dev_ops->reta_update == NULL) 4654 return -ENOTSUP; 4655 ret = eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4656 reta_size)); 4657 4658 
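	/*
	 * Illustrative note (not part of the original source): callers build
	 * reta_conf as an array of 64-entry groups and set the mask bit of
	 * every entry they want to change. A sketch spreading a 128-entry
	 * table over 4 Rx queues:
	 *
	 *	struct rte_eth_rss_reta_entry64 reta[2] = { 0 };
	 *	unsigned int j;
	 *
	 *	for (j = 0; j < 128; j++) {
	 *		reta[j / RTE_ETH_RETA_GROUP_SIZE].mask |=
	 *				RTE_BIT64(j % RTE_ETH_RETA_GROUP_SIZE);
	 *		reta[j / RTE_ETH_RETA_GROUP_SIZE].reta[j % RTE_ETH_RETA_GROUP_SIZE] = j % 4;
	 *	}
	 *	rte_eth_dev_rss_reta_update(port_id, reta, 128);
	 */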
rte_ethdev_trace_rss_reta_update(port_id, reta_conf, reta_size, ret); 4659 4660 return ret; 4661 } 4662 4663 int 4664 rte_eth_dev_rss_reta_query(uint16_t port_id, 4665 struct rte_eth_rss_reta_entry64 *reta_conf, 4666 uint16_t reta_size) 4667 { 4668 struct rte_eth_dev *dev; 4669 int ret; 4670 4671 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4672 dev = &rte_eth_devices[port_id]; 4673 4674 if (reta_conf == NULL) { 4675 RTE_ETHDEV_LOG(ERR, 4676 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4677 port_id); 4678 return -EINVAL; 4679 } 4680 4681 /* Check mask bits */ 4682 ret = eth_check_reta_mask(reta_conf, reta_size); 4683 if (ret < 0) 4684 return ret; 4685 4686 if (*dev->dev_ops->reta_query == NULL) 4687 return -ENOTSUP; 4688 ret = eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4689 reta_size)); 4690 4691 rte_ethdev_trace_rss_reta_query(port_id, reta_conf, reta_size, ret); 4692 4693 return ret; 4694 } 4695 4696 int 4697 rte_eth_dev_rss_hash_update(uint16_t port_id, 4698 struct rte_eth_rss_conf *rss_conf) 4699 { 4700 struct rte_eth_dev *dev; 4701 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4702 enum rte_eth_rx_mq_mode mq_mode; 4703 int ret; 4704 4705 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4706 dev = &rte_eth_devices[port_id]; 4707 4708 if (rss_conf == NULL) { 4709 RTE_ETHDEV_LOG(ERR, 4710 "Cannot update ethdev port %u RSS hash from NULL config\n", 4711 port_id); 4712 return -EINVAL; 4713 } 4714 4715 ret = rte_eth_dev_info_get(port_id, &dev_info); 4716 if (ret != 0) 4717 return ret; 4718 4719 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4720 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4721 dev_info.flow_type_rss_offloads) { 4722 RTE_ETHDEV_LOG(ERR, 4723 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4724 port_id, rss_conf->rss_hf, 4725 dev_info.flow_type_rss_offloads); 4726 return -EINVAL; 4727 } 4728 4729 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4730 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4731 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4732 return -ENOTSUP; 4733 } 4734 4735 if (rss_conf->rss_key != NULL && 4736 rss_conf->rss_key_len != dev_info.hash_key_size) { 4737 RTE_ETHDEV_LOG(ERR, 4738 "Ethdev port_id=%u invalid RSS key len: %u, valid value: %u\n", 4739 port_id, rss_conf->rss_key_len, dev_info.hash_key_size); 4740 return -EINVAL; 4741 } 4742 4743 if ((size_t)rss_conf->algorithm >= CHAR_BIT * sizeof(dev_info.rss_algo_capa) || 4744 (dev_info.rss_algo_capa & 4745 RTE_ETH_HASH_ALGO_TO_CAPA(rss_conf->algorithm)) == 0) { 4746 RTE_ETHDEV_LOG(ERR, 4747 "Ethdev port_id=%u configured RSS hash algorithm (%u)" 4748 "is not in the algorithm capability (0x%" PRIx32 ")\n", 4749 port_id, rss_conf->algorithm, dev_info.rss_algo_capa); 4750 return -EINVAL; 4751 } 4752 4753 if (*dev->dev_ops->rss_hash_update == NULL) 4754 return -ENOTSUP; 4755 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4756 rss_conf)); 4757 4758 rte_ethdev_trace_rss_hash_update(port_id, rss_conf, ret); 4759 4760 return ret; 4761 } 4762 4763 int 4764 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4765 struct rte_eth_rss_conf *rss_conf) 4766 { 4767 struct rte_eth_dev_info dev_info = { 0 }; 4768 struct rte_eth_dev *dev; 4769 int ret; 4770 4771 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4772 dev = &rte_eth_devices[port_id]; 4773 4774 if (rss_conf == NULL) { 4775 RTE_ETHDEV_LOG(ERR, 4776 "Cannot get ethdev port %u RSS hash config to NULL\n", 4777 port_id); 4778 
return -EINVAL; 4779 } 4780 4781 ret = rte_eth_dev_info_get(port_id, &dev_info); 4782 if (ret != 0) 4783 return ret; 4784 4785 if (rss_conf->rss_key != NULL && 4786 rss_conf->rss_key_len < dev_info.hash_key_size) { 4787 RTE_ETHDEV_LOG(ERR, 4788 "Ethdev port_id=%u invalid RSS key len: %u, should not be less than: %u\n", 4789 port_id, rss_conf->rss_key_len, dev_info.hash_key_size); 4790 return -EINVAL; 4791 } 4792 4793 rss_conf->algorithm = RTE_ETH_HASH_FUNCTION_DEFAULT; 4794 4795 if (*dev->dev_ops->rss_hash_conf_get == NULL) 4796 return -ENOTSUP; 4797 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4798 rss_conf)); 4799 4800 rte_ethdev_trace_rss_hash_conf_get(port_id, rss_conf, ret); 4801 4802 return ret; 4803 } 4804 4805 const char * 4806 rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo) 4807 { 4808 const char *name = "Unknown function"; 4809 unsigned int i; 4810 4811 for (i = 0; i < RTE_DIM(rte_eth_dev_rss_algo_names); i++) { 4812 if (rss_algo == rte_eth_dev_rss_algo_names[i].algo) 4813 return rte_eth_dev_rss_algo_names[i].name; 4814 } 4815 4816 return name; 4817 } 4818 4819 int 4820 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4821 struct rte_eth_udp_tunnel *udp_tunnel) 4822 { 4823 struct rte_eth_dev *dev; 4824 int ret; 4825 4826 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4827 dev = &rte_eth_devices[port_id]; 4828 4829 if (udp_tunnel == NULL) { 4830 RTE_ETHDEV_LOG(ERR, 4831 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4832 port_id); 4833 return -EINVAL; 4834 } 4835 4836 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4837 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4838 return -EINVAL; 4839 } 4840 4841 if (*dev->dev_ops->udp_tunnel_port_add == NULL) 4842 return -ENOTSUP; 4843 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4844 udp_tunnel)); 4845 4846 rte_ethdev_trace_udp_tunnel_port_add(port_id, udp_tunnel, ret); 4847 4848 return ret; 4849 } 4850 4851 int 4852 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4853 struct rte_eth_udp_tunnel *udp_tunnel) 4854 { 4855 struct rte_eth_dev *dev; 4856 int ret; 4857 4858 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4859 dev = &rte_eth_devices[port_id]; 4860 4861 if (udp_tunnel == NULL) { 4862 RTE_ETHDEV_LOG(ERR, 4863 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4864 port_id); 4865 return -EINVAL; 4866 } 4867 4868 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4869 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4870 return -EINVAL; 4871 } 4872 4873 if (*dev->dev_ops->udp_tunnel_port_del == NULL) 4874 return -ENOTSUP; 4875 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4876 udp_tunnel)); 4877 4878 rte_ethdev_trace_udp_tunnel_port_delete(port_id, udp_tunnel, ret); 4879 4880 return ret; 4881 } 4882 4883 int 4884 rte_eth_led_on(uint16_t port_id) 4885 { 4886 struct rte_eth_dev *dev; 4887 int ret; 4888 4889 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4890 dev = &rte_eth_devices[port_id]; 4891 4892 if (*dev->dev_ops->dev_led_on == NULL) 4893 return -ENOTSUP; 4894 ret = eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4895 4896 rte_eth_trace_led_on(port_id, ret); 4897 4898 return ret; 4899 } 4900 4901 int 4902 rte_eth_led_off(uint16_t port_id) 4903 { 4904 struct rte_eth_dev *dev; 4905 int ret; 4906 4907 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4908 dev = &rte_eth_devices[port_id]; 4909 4910 if (*dev->dev_ops->dev_led_off == NULL) 4911 return -ENOTSUP; 4912 ret = eth_err(port_id, 
(*dev->dev_ops->dev_led_off)(dev)); 4913 4914 rte_eth_trace_led_off(port_id, ret); 4915 4916 return ret; 4917 } 4918 4919 int 4920 rte_eth_fec_get_capability(uint16_t port_id, 4921 struct rte_eth_fec_capa *speed_fec_capa, 4922 unsigned int num) 4923 { 4924 struct rte_eth_dev *dev; 4925 int ret; 4926 4927 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4928 dev = &rte_eth_devices[port_id]; 4929 4930 if (speed_fec_capa == NULL && num > 0) { 4931 RTE_ETHDEV_LOG(ERR, 4932 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4933 port_id); 4934 return -EINVAL; 4935 } 4936 4937 if (*dev->dev_ops->fec_get_capability == NULL) 4938 return -ENOTSUP; 4939 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4940 4941 rte_eth_trace_fec_get_capability(port_id, speed_fec_capa, num, ret); 4942 4943 return ret; 4944 } 4945 4946 int 4947 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4948 { 4949 struct rte_eth_dev *dev; 4950 int ret; 4951 4952 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4953 dev = &rte_eth_devices[port_id]; 4954 4955 if (fec_capa == NULL) { 4956 RTE_ETHDEV_LOG(ERR, 4957 "Cannot get ethdev port %u current FEC mode to NULL\n", 4958 port_id); 4959 return -EINVAL; 4960 } 4961 4962 if (*dev->dev_ops->fec_get == NULL) 4963 return -ENOTSUP; 4964 ret = eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4965 4966 rte_eth_trace_fec_get(port_id, fec_capa, ret); 4967 4968 return ret; 4969 } 4970 4971 int 4972 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4973 { 4974 struct rte_eth_dev *dev; 4975 int ret; 4976 4977 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4978 dev = &rte_eth_devices[port_id]; 4979 4980 if (fec_capa == 0) { 4981 RTE_ETHDEV_LOG(ERR, "At least one FEC mode should be specified\n"); 4982 return -EINVAL; 4983 } 4984 4985 if (*dev->dev_ops->fec_set == NULL) 4986 return -ENOTSUP; 4987 ret = eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4988 4989 rte_eth_trace_fec_set(port_id, fec_capa, ret); 4990 4991 return ret; 4992 } 4993 4994 /* 4995 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4996 * an empty spot. 
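 * The lookup is a linear scan over dev_info.max_mac_addrs entries and
 * returns -1 when the address is not present or when device info cannot
 * be retrieved.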
4997 */ 4998 static int 4999 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 5000 { 5001 struct rte_eth_dev_info dev_info; 5002 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5003 unsigned i; 5004 int ret; 5005 5006 ret = rte_eth_dev_info_get(port_id, &dev_info); 5007 if (ret != 0) 5008 return -1; 5009 5010 for (i = 0; i < dev_info.max_mac_addrs; i++) 5011 if (memcmp(addr, &dev->data->mac_addrs[i], 5012 RTE_ETHER_ADDR_LEN) == 0) 5013 return i; 5014 5015 return -1; 5016 } 5017 5018 static const struct rte_ether_addr null_mac_addr; 5019 5020 int 5021 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 5022 uint32_t pool) 5023 { 5024 struct rte_eth_dev *dev; 5025 int index; 5026 uint64_t pool_mask; 5027 int ret; 5028 5029 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5030 dev = &rte_eth_devices[port_id]; 5031 5032 if (addr == NULL) { 5033 RTE_ETHDEV_LOG(ERR, 5034 "Cannot add ethdev port %u MAC address from NULL address\n", 5035 port_id); 5036 return -EINVAL; 5037 } 5038 5039 if (*dev->dev_ops->mac_addr_add == NULL) 5040 return -ENOTSUP; 5041 5042 if (rte_is_zero_ether_addr(addr)) { 5043 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 5044 port_id); 5045 return -EINVAL; 5046 } 5047 if (pool >= RTE_ETH_64_POOLS) { 5048 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 5049 return -EINVAL; 5050 } 5051 5052 index = eth_dev_get_mac_addr_index(port_id, addr); 5053 if (index < 0) { 5054 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 5055 if (index < 0) { 5056 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 5057 port_id); 5058 return -ENOSPC; 5059 } 5060 } else { 5061 pool_mask = dev->data->mac_pool_sel[index]; 5062 5063 /* Check if both MAC address and pool is already there, and do nothing */ 5064 if (pool_mask & RTE_BIT64(pool)) 5065 return 0; 5066 } 5067 5068 /* Update NIC */ 5069 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 5070 5071 if (ret == 0) { 5072 /* Update address in NIC data structure */ 5073 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 5074 5075 /* Update pool bitmap in NIC data structure */ 5076 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 5077 } 5078 5079 ret = eth_err(port_id, ret); 5080 5081 rte_ethdev_trace_mac_addr_add(port_id, addr, pool, ret); 5082 5083 return ret; 5084 } 5085 5086 int 5087 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 5088 { 5089 struct rte_eth_dev *dev; 5090 int index; 5091 5092 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5093 dev = &rte_eth_devices[port_id]; 5094 5095 if (addr == NULL) { 5096 RTE_ETHDEV_LOG(ERR, 5097 "Cannot remove ethdev port %u MAC address from NULL address\n", 5098 port_id); 5099 return -EINVAL; 5100 } 5101 5102 if (*dev->dev_ops->mac_addr_remove == NULL) 5103 return -ENOTSUP; 5104 5105 index = eth_dev_get_mac_addr_index(port_id, addr); 5106 if (index == 0) { 5107 RTE_ETHDEV_LOG(ERR, 5108 "Port %u: Cannot remove default MAC address\n", 5109 port_id); 5110 return -EADDRINUSE; 5111 } else if (index < 0) 5112 return 0; /* Do nothing if address wasn't found */ 5113 5114 /* Update NIC */ 5115 (*dev->dev_ops->mac_addr_remove)(dev, index); 5116 5117 /* Update address in NIC data structure */ 5118 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 5119 5120 /* reset pool bitmap */ 5121 dev->data->mac_pool_sel[index] = 0; 5122 5123 rte_ethdev_trace_mac_addr_remove(port_id, addr); 5124 5125 return 0; 5126 } 5127 5128 int 5129 
rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 5130 { 5131 struct rte_eth_dev *dev; 5132 int index; 5133 int ret; 5134 5135 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5136 dev = &rte_eth_devices[port_id]; 5137 5138 if (addr == NULL) { 5139 RTE_ETHDEV_LOG(ERR, 5140 "Cannot set ethdev port %u default MAC address from NULL address\n", 5141 port_id); 5142 return -EINVAL; 5143 } 5144 5145 if (!rte_is_valid_assigned_ether_addr(addr)) 5146 return -EINVAL; 5147 5148 if (*dev->dev_ops->mac_addr_set == NULL) 5149 return -ENOTSUP; 5150 5151 /* Keep address unique in dev->data->mac_addrs[]. */ 5152 index = eth_dev_get_mac_addr_index(port_id, addr); 5153 if (index > 0) { 5154 RTE_ETHDEV_LOG(ERR, 5155 "New default address for port %u was already in the address list. Please remove it first.\n", 5156 port_id); 5157 return -EEXIST; 5158 } 5159 5160 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 5161 if (ret < 0) 5162 return ret; 5163 5164 /* Update default address in NIC data structure */ 5165 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 5166 5167 rte_ethdev_trace_default_mac_addr_set(port_id, addr); 5168 5169 return 0; 5170 } 5171 5172 5173 /* 5174 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 5175 * an empty spot. 5176 */ 5177 static int 5178 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 5179 const struct rte_ether_addr *addr) 5180 { 5181 struct rte_eth_dev_info dev_info; 5182 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5183 unsigned i; 5184 int ret; 5185 5186 ret = rte_eth_dev_info_get(port_id, &dev_info); 5187 if (ret != 0) 5188 return -1; 5189 5190 if (!dev->data->hash_mac_addrs) 5191 return -1; 5192 5193 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 5194 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 5195 RTE_ETHER_ADDR_LEN) == 0) 5196 return i; 5197 5198 return -1; 5199 } 5200 5201 int 5202 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 5203 uint8_t on) 5204 { 5205 int index; 5206 int ret; 5207 struct rte_eth_dev *dev; 5208 5209 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5210 dev = &rte_eth_devices[port_id]; 5211 5212 if (addr == NULL) { 5213 RTE_ETHDEV_LOG(ERR, 5214 "Cannot set ethdev port %u unicast hash table from NULL address\n", 5215 port_id); 5216 return -EINVAL; 5217 } 5218 5219 if (rte_is_zero_ether_addr(addr)) { 5220 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 5221 port_id); 5222 return -EINVAL; 5223 } 5224 5225 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 5226 /* Check if it's already there, and do nothing */ 5227 if ((index >= 0) && on) 5228 return 0; 5229 5230 if (index < 0) { 5231 if (!on) { 5232 RTE_ETHDEV_LOG(ERR, 5233 "Port %u: the MAC address was not set in UTA\n", 5234 port_id); 5235 return -EINVAL; 5236 } 5237 5238 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 5239 if (index < 0) { 5240 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 5241 port_id); 5242 return -ENOSPC; 5243 } 5244 } 5245 5246 if (*dev->dev_ops->uc_hash_table_set == NULL) 5247 return -ENOTSUP; 5248 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 5249 if (ret == 0) { 5250 /* Update address in NIC data structure */ 5251 if (on) 5252 rte_ether_addr_copy(addr, 5253 &dev->data->hash_mac_addrs[index]); 5254 else 5255 rte_ether_addr_copy(&null_mac_addr, 5256 &dev->data->hash_mac_addrs[index]); 5257 } 5258 5259 ret = eth_err(port_id, ret); 5260 5261 rte_ethdev_trace_uc_hash_table_set(port_id, on, ret); 5262 5263 return 
ret; 5264 } 5265 5266 int 5267 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 5268 { 5269 struct rte_eth_dev *dev; 5270 int ret; 5271 5272 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5273 dev = &rte_eth_devices[port_id]; 5274 5275 if (*dev->dev_ops->uc_all_hash_table_set == NULL) 5276 return -ENOTSUP; 5277 ret = eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, on)); 5278 5279 rte_ethdev_trace_uc_all_hash_table_set(port_id, on, ret); 5280 5281 return ret; 5282 } 5283 5284 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 5285 uint32_t tx_rate) 5286 { 5287 struct rte_eth_dev *dev; 5288 struct rte_eth_dev_info dev_info; 5289 struct rte_eth_link link; 5290 int ret; 5291 5292 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5293 dev = &rte_eth_devices[port_id]; 5294 5295 ret = rte_eth_dev_info_get(port_id, &dev_info); 5296 if (ret != 0) 5297 return ret; 5298 5299 link = dev->data->dev_link; 5300 5301 if (queue_idx > dev_info.max_tx_queues) { 5302 RTE_ETHDEV_LOG(ERR, 5303 "Set queue rate limit:port %u: invalid queue ID=%u\n", 5304 port_id, queue_idx); 5305 return -EINVAL; 5306 } 5307 5308 if (tx_rate > link.link_speed) { 5309 RTE_ETHDEV_LOG(ERR, 5310 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 5311 tx_rate, link.link_speed); 5312 return -EINVAL; 5313 } 5314 5315 if (*dev->dev_ops->set_queue_rate_limit == NULL) 5316 return -ENOTSUP; 5317 ret = eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 5318 queue_idx, tx_rate)); 5319 5320 rte_eth_trace_set_queue_rate_limit(port_id, queue_idx, tx_rate, ret); 5321 5322 return ret; 5323 } 5324 5325 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, 5326 uint8_t avail_thresh) 5327 { 5328 struct rte_eth_dev *dev; 5329 int ret; 5330 5331 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5332 dev = &rte_eth_devices[port_id]; 5333 5334 if (queue_id > dev->data->nb_rx_queues) { 5335 RTE_ETHDEV_LOG(ERR, 5336 "Set queue avail thresh: port %u: invalid queue ID=%u.\n", 5337 port_id, queue_id); 5338 return -EINVAL; 5339 } 5340 5341 if (avail_thresh > 99) { 5342 RTE_ETHDEV_LOG(ERR, 5343 "Set queue avail thresh: port %u: threshold should be <= 99.\n", 5344 port_id); 5345 return -EINVAL; 5346 } 5347 if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL) 5348 return -ENOTSUP; 5349 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev, 5350 queue_id, avail_thresh)); 5351 5352 rte_eth_trace_rx_avail_thresh_set(port_id, queue_id, avail_thresh, ret); 5353 5354 return ret; 5355 } 5356 5357 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, 5358 uint8_t *avail_thresh) 5359 { 5360 struct rte_eth_dev *dev; 5361 int ret; 5362 5363 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5364 dev = &rte_eth_devices[port_id]; 5365 5366 if (queue_id == NULL) 5367 return -EINVAL; 5368 if (*queue_id >= dev->data->nb_rx_queues) 5369 *queue_id = 0; 5370 5371 if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL) 5372 return -ENOTSUP; 5373 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev, 5374 queue_id, avail_thresh)); 5375 5376 rte_eth_trace_rx_avail_thresh_query(port_id, *queue_id, ret); 5377 5378 return ret; 5379 } 5380 5381 RTE_INIT(eth_dev_init_fp_ops) 5382 { 5383 uint32_t i; 5384 5385 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 5386 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 5387 } 5388 5389 RTE_INIT(eth_dev_init_cb_lists) 5390 { 5391 uint16_t i; 5392 5393 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 5394 
TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 5395 } 5396 5397 int 5398 rte_eth_dev_callback_register(uint16_t port_id, 5399 enum rte_eth_event_type event, 5400 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5401 { 5402 struct rte_eth_dev *dev; 5403 struct rte_eth_dev_callback *user_cb; 5404 uint16_t next_port; 5405 uint16_t last_port; 5406 5407 if (cb_fn == NULL) { 5408 RTE_ETHDEV_LOG(ERR, 5409 "Cannot register ethdev port %u callback from NULL\n", 5410 port_id); 5411 return -EINVAL; 5412 } 5413 5414 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5415 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 5416 return -EINVAL; 5417 } 5418 5419 if (port_id == RTE_ETH_ALL) { 5420 next_port = 0; 5421 last_port = RTE_MAX_ETHPORTS - 1; 5422 } else { 5423 next_port = last_port = port_id; 5424 } 5425 5426 rte_spinlock_lock(ð_dev_cb_lock); 5427 5428 do { 5429 dev = &rte_eth_devices[next_port]; 5430 5431 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 5432 if (user_cb->cb_fn == cb_fn && 5433 user_cb->cb_arg == cb_arg && 5434 user_cb->event == event) { 5435 break; 5436 } 5437 } 5438 5439 /* create a new callback. */ 5440 if (user_cb == NULL) { 5441 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 5442 sizeof(struct rte_eth_dev_callback), 0); 5443 if (user_cb != NULL) { 5444 user_cb->cb_fn = cb_fn; 5445 user_cb->cb_arg = cb_arg; 5446 user_cb->event = event; 5447 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 5448 user_cb, next); 5449 } else { 5450 rte_spinlock_unlock(ð_dev_cb_lock); 5451 rte_eth_dev_callback_unregister(port_id, event, 5452 cb_fn, cb_arg); 5453 return -ENOMEM; 5454 } 5455 5456 } 5457 } while (++next_port <= last_port); 5458 5459 rte_spinlock_unlock(ð_dev_cb_lock); 5460 5461 rte_ethdev_trace_callback_register(port_id, event, cb_fn, cb_arg); 5462 5463 return 0; 5464 } 5465 5466 int 5467 rte_eth_dev_callback_unregister(uint16_t port_id, 5468 enum rte_eth_event_type event, 5469 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5470 { 5471 int ret; 5472 struct rte_eth_dev *dev; 5473 struct rte_eth_dev_callback *cb, *next; 5474 uint16_t next_port; 5475 uint16_t last_port; 5476 5477 if (cb_fn == NULL) { 5478 RTE_ETHDEV_LOG(ERR, 5479 "Cannot unregister ethdev port %u callback from NULL\n", 5480 port_id); 5481 return -EINVAL; 5482 } 5483 5484 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5485 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 5486 return -EINVAL; 5487 } 5488 5489 if (port_id == RTE_ETH_ALL) { 5490 next_port = 0; 5491 last_port = RTE_MAX_ETHPORTS - 1; 5492 } else { 5493 next_port = last_port = port_id; 5494 } 5495 5496 rte_spinlock_lock(ð_dev_cb_lock); 5497 5498 do { 5499 dev = &rte_eth_devices[next_port]; 5500 ret = 0; 5501 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 5502 cb = next) { 5503 5504 next = TAILQ_NEXT(cb, next); 5505 5506 if (cb->cb_fn != cb_fn || cb->event != event || 5507 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 5508 continue; 5509 5510 /* 5511 * if this callback is not executing right now, 5512 * then remove it. 
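			 * Otherwise it is left in place and -EAGAIN is
			 * reported so that the caller can retry once the
			 * callback has finished executing.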
5513 */ 5514 if (cb->active == 0) { 5515 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 5516 rte_free(cb); 5517 } else { 5518 ret = -EAGAIN; 5519 } 5520 } 5521 } while (++next_port <= last_port); 5522 5523 rte_spinlock_unlock(ð_dev_cb_lock); 5524 5525 rte_ethdev_trace_callback_unregister(port_id, event, cb_fn, cb_arg, 5526 ret); 5527 5528 return ret; 5529 } 5530 5531 int 5532 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 5533 { 5534 uint32_t vec; 5535 struct rte_eth_dev *dev; 5536 struct rte_intr_handle *intr_handle; 5537 uint16_t qid; 5538 int rc; 5539 5540 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5541 dev = &rte_eth_devices[port_id]; 5542 5543 if (!dev->intr_handle) { 5544 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5545 return -ENOTSUP; 5546 } 5547 5548 intr_handle = dev->intr_handle; 5549 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5550 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5551 return -EPERM; 5552 } 5553 5554 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 5555 vec = rte_intr_vec_list_index_get(intr_handle, qid); 5556 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5557 5558 rte_ethdev_trace_rx_intr_ctl(port_id, qid, epfd, op, data, rc); 5559 5560 if (rc && rc != -EEXIST) { 5561 RTE_ETHDEV_LOG(ERR, 5562 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5563 port_id, qid, op, epfd, vec); 5564 } 5565 } 5566 5567 return 0; 5568 } 5569 5570 int 5571 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 5572 { 5573 struct rte_intr_handle *intr_handle; 5574 struct rte_eth_dev *dev; 5575 unsigned int efd_idx; 5576 uint32_t vec; 5577 int fd; 5578 5579 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 5580 dev = &rte_eth_devices[port_id]; 5581 5582 if (queue_id >= dev->data->nb_rx_queues) { 5583 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5584 return -1; 5585 } 5586 5587 if (!dev->intr_handle) { 5588 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5589 return -1; 5590 } 5591 5592 intr_handle = dev->intr_handle; 5593 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5594 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5595 return -1; 5596 } 5597 5598 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5599 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
5600 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 5601 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 5602 5603 rte_ethdev_trace_rx_intr_ctl_q_get_fd(port_id, queue_id, fd); 5604 5605 return fd; 5606 } 5607 5608 int 5609 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 5610 int epfd, int op, void *data) 5611 { 5612 uint32_t vec; 5613 struct rte_eth_dev *dev; 5614 struct rte_intr_handle *intr_handle; 5615 int rc; 5616 5617 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5618 dev = &rte_eth_devices[port_id]; 5619 5620 if (queue_id >= dev->data->nb_rx_queues) { 5621 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5622 return -EINVAL; 5623 } 5624 5625 if (!dev->intr_handle) { 5626 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5627 return -ENOTSUP; 5628 } 5629 5630 intr_handle = dev->intr_handle; 5631 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5632 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5633 return -EPERM; 5634 } 5635 5636 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5637 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5638 5639 rte_ethdev_trace_rx_intr_ctl_q(port_id, queue_id, epfd, op, data, rc); 5640 5641 if (rc && rc != -EEXIST) { 5642 RTE_ETHDEV_LOG(ERR, 5643 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5644 port_id, queue_id, op, epfd, vec); 5645 return rc; 5646 } 5647 5648 return 0; 5649 } 5650 5651 int 5652 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5653 uint16_t queue_id) 5654 { 5655 struct rte_eth_dev *dev; 5656 int ret; 5657 5658 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5659 dev = &rte_eth_devices[port_id]; 5660 5661 ret = eth_dev_validate_rx_queue(dev, queue_id); 5662 if (ret != 0) 5663 return ret; 5664 5665 if (*dev->dev_ops->rx_queue_intr_enable == NULL) 5666 return -ENOTSUP; 5667 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5668 5669 rte_ethdev_trace_rx_intr_enable(port_id, queue_id, ret); 5670 5671 return ret; 5672 } 5673 5674 int 5675 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5676 uint16_t queue_id) 5677 { 5678 struct rte_eth_dev *dev; 5679 int ret; 5680 5681 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5682 dev = &rte_eth_devices[port_id]; 5683 5684 ret = eth_dev_validate_rx_queue(dev, queue_id); 5685 if (ret != 0) 5686 return ret; 5687 5688 if (*dev->dev_ops->rx_queue_intr_disable == NULL) 5689 return -ENOTSUP; 5690 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5691 5692 rte_ethdev_trace_rx_intr_disable(port_id, queue_id, ret); 5693 5694 return ret; 5695 } 5696 5697 5698 const struct rte_eth_rxtx_callback * 5699 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5700 rte_rx_callback_fn fn, void *user_param) 5701 { 5702 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5703 rte_errno = ENOTSUP; 5704 return NULL; 5705 #endif 5706 struct rte_eth_dev *dev; 5707 5708 /* check input parameters */ 5709 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5710 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5711 rte_errno = EINVAL; 5712 return NULL; 5713 } 5714 dev = &rte_eth_devices[port_id]; 5715 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5716 rte_errno = EINVAL; 5717 return NULL; 5718 } 5719 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5720 5721 if (cb == NULL) { 5722 rte_errno = ENOMEM; 5723 return NULL; 5724 } 5725 5726 cb->fn.rx = fn; 5727 cb->param = user_param; 5728 5729 rte_spinlock_lock(ð_dev_rx_cb_lock); 5730 /* Add the callbacks in fifo order. 
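	 * The list is only appended to while holding eth_dev_rx_cb_lock;
	 * data-plane threads traverse it without locking, so the release
	 * stores below publish the fully initialised callback to them.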
*/ 5731 struct rte_eth_rxtx_callback *tail = 5732 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5733 5734 if (!tail) { 5735 /* Stores to cb->fn and cb->param should complete before 5736 * cb is visible to data plane. 5737 */ 5738 rte_atomic_store_explicit( 5739 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5740 cb, rte_memory_order_release); 5741 5742 } else { 5743 while (tail->next) 5744 tail = tail->next; 5745 /* Stores to cb->fn and cb->param should complete before 5746 * cb is visible to data plane. 5747 */ 5748 rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release); 5749 } 5750 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5751 5752 rte_eth_trace_add_rx_callback(port_id, queue_id, fn, user_param, cb); 5753 5754 return cb; 5755 } 5756 5757 const struct rte_eth_rxtx_callback * 5758 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5759 rte_rx_callback_fn fn, void *user_param) 5760 { 5761 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5762 rte_errno = ENOTSUP; 5763 return NULL; 5764 #endif 5765 /* check input parameters */ 5766 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5767 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5768 rte_errno = EINVAL; 5769 return NULL; 5770 } 5771 5772 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5773 5774 if (cb == NULL) { 5775 rte_errno = ENOMEM; 5776 return NULL; 5777 } 5778 5779 cb->fn.rx = fn; 5780 cb->param = user_param; 5781 5782 rte_spinlock_lock(ð_dev_rx_cb_lock); 5783 /* Add the callbacks at first position */ 5784 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5785 /* Stores to cb->fn, cb->param and cb->next should complete before 5786 * cb is visible to data plane threads. 5787 */ 5788 rte_atomic_store_explicit( 5789 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5790 cb, rte_memory_order_release); 5791 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5792 5793 rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param, 5794 cb); 5795 5796 return cb; 5797 } 5798 5799 const struct rte_eth_rxtx_callback * 5800 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5801 rte_tx_callback_fn fn, void *user_param) 5802 { 5803 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5804 rte_errno = ENOTSUP; 5805 return NULL; 5806 #endif 5807 struct rte_eth_dev *dev; 5808 5809 /* check input parameters */ 5810 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5811 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5812 rte_errno = EINVAL; 5813 return NULL; 5814 } 5815 5816 dev = &rte_eth_devices[port_id]; 5817 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5818 rte_errno = EINVAL; 5819 return NULL; 5820 } 5821 5822 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5823 5824 if (cb == NULL) { 5825 rte_errno = ENOMEM; 5826 return NULL; 5827 } 5828 5829 cb->fn.tx = fn; 5830 cb->param = user_param; 5831 5832 rte_spinlock_lock(ð_dev_tx_cb_lock); 5833 /* Add the callbacks in fifo order. */ 5834 struct rte_eth_rxtx_callback *tail = 5835 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5836 5837 if (!tail) { 5838 /* Stores to cb->fn and cb->param should complete before 5839 * cb is visible to data plane. 5840 */ 5841 rte_atomic_store_explicit( 5842 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5843 cb, rte_memory_order_release); 5844 5845 } else { 5846 while (tail->next) 5847 tail = tail->next; 5848 /* Stores to cb->fn and cb->param should complete before 5849 * cb is visible to data plane. 
5850 */ 5851 rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release); 5852 } 5853 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5854 5855 rte_eth_trace_add_tx_callback(port_id, queue_id, fn, user_param, cb); 5856 5857 return cb; 5858 } 5859 5860 int 5861 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5862 const struct rte_eth_rxtx_callback *user_cb) 5863 { 5864 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5865 return -ENOTSUP; 5866 #endif 5867 /* Check input parameters. */ 5868 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5869 if (user_cb == NULL || 5870 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5871 return -EINVAL; 5872 5873 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5874 struct rte_eth_rxtx_callback *cb; 5875 RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb; 5876 int ret = -EINVAL; 5877 5878 rte_spinlock_lock(ð_dev_rx_cb_lock); 5879 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5880 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5881 cb = *prev_cb; 5882 if (cb == user_cb) { 5883 /* Remove the user cb from the callback list. */ 5884 rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed); 5885 ret = 0; 5886 break; 5887 } 5888 } 5889 rte_spinlock_unlock(ð_dev_rx_cb_lock); 5890 5891 rte_eth_trace_remove_rx_callback(port_id, queue_id, user_cb, ret); 5892 5893 return ret; 5894 } 5895 5896 int 5897 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5898 const struct rte_eth_rxtx_callback *user_cb) 5899 { 5900 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5901 return -ENOTSUP; 5902 #endif 5903 /* Check input parameters. */ 5904 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5905 if (user_cb == NULL || 5906 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5907 return -EINVAL; 5908 5909 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5910 int ret = -EINVAL; 5911 struct rte_eth_rxtx_callback *cb; 5912 RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb; 5913 5914 rte_spinlock_lock(ð_dev_tx_cb_lock); 5915 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5916 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5917 cb = *prev_cb; 5918 if (cb == user_cb) { 5919 /* Remove the user cb from the callback list. 
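			 * The callback memory itself is not freed here; the
			 * application must release it only after making sure
			 * no data-plane thread is still executing it.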
*/ 5920 rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed); 5921 ret = 0; 5922 break; 5923 } 5924 } 5925 rte_spinlock_unlock(ð_dev_tx_cb_lock); 5926 5927 rte_eth_trace_remove_tx_callback(port_id, queue_id, user_cb, ret); 5928 5929 return ret; 5930 } 5931 5932 int 5933 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5934 struct rte_eth_rxq_info *qinfo) 5935 { 5936 struct rte_eth_dev *dev; 5937 5938 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5939 dev = &rte_eth_devices[port_id]; 5940 5941 if (queue_id >= dev->data->nb_rx_queues) { 5942 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5943 return -EINVAL; 5944 } 5945 5946 if (qinfo == NULL) { 5947 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5948 port_id, queue_id); 5949 return -EINVAL; 5950 } 5951 5952 if (dev->data->rx_queues == NULL || 5953 dev->data->rx_queues[queue_id] == NULL) { 5954 RTE_ETHDEV_LOG(ERR, 5955 "Rx queue %"PRIu16" of device with port_id=%" 5956 PRIu16" has not been setup\n", 5957 queue_id, port_id); 5958 return -EINVAL; 5959 } 5960 5961 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5962 RTE_ETHDEV_LOG(INFO, 5963 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5964 queue_id, port_id); 5965 return -EINVAL; 5966 } 5967 5968 if (*dev->dev_ops->rxq_info_get == NULL) 5969 return -ENOTSUP; 5970 5971 memset(qinfo, 0, sizeof(*qinfo)); 5972 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5973 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5974 5975 rte_eth_trace_rx_queue_info_get(port_id, queue_id, qinfo); 5976 5977 return 0; 5978 } 5979 5980 int 5981 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5982 struct rte_eth_txq_info *qinfo) 5983 { 5984 struct rte_eth_dev *dev; 5985 5986 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5987 dev = &rte_eth_devices[port_id]; 5988 5989 if (queue_id >= dev->data->nb_tx_queues) { 5990 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5991 return -EINVAL; 5992 } 5993 5994 if (qinfo == NULL) { 5995 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5996 port_id, queue_id); 5997 return -EINVAL; 5998 } 5999 6000 if (dev->data->tx_queues == NULL || 6001 dev->data->tx_queues[queue_id] == NULL) { 6002 RTE_ETHDEV_LOG(ERR, 6003 "Tx queue %"PRIu16" of device with port_id=%" 6004 PRIu16" has not been setup\n", 6005 queue_id, port_id); 6006 return -EINVAL; 6007 } 6008 6009 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 6010 RTE_ETHDEV_LOG(INFO, 6011 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 6012 queue_id, port_id); 6013 return -EINVAL; 6014 } 6015 6016 if (*dev->dev_ops->txq_info_get == NULL) 6017 return -ENOTSUP; 6018 6019 memset(qinfo, 0, sizeof(*qinfo)); 6020 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 6021 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 6022 6023 rte_eth_trace_tx_queue_info_get(port_id, queue_id, qinfo); 6024 6025 return 0; 6026 } 6027 6028 int 6029 rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 6030 struct rte_eth_recycle_rxq_info *recycle_rxq_info) 6031 { 6032 struct rte_eth_dev *dev; 6033 int ret; 6034 6035 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6036 dev = &rte_eth_devices[port_id]; 6037 6038 ret = eth_dev_validate_rx_queue(dev, queue_id); 6039 if (unlikely(ret != 0)) 6040 return ret; 6041 6042 if (*dev->dev_ops->recycle_rxq_info_get == NULL) 6043 return -ENOTSUP; 6044 6045 
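	/*
	 * Illustrative note (not part of the original source): the queue
	 * description filled in below is what the mbuf recycle path (see
	 * rte_eth_recycle_mbufs()) uses to refill this Rx queue directly
	 * from mbufs freed by a peer Tx queue.
	 */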
dev->dev_ops->recycle_rxq_info_get(dev, queue_id, recycle_rxq_info); 6046 6047 return 0; 6048 } 6049 6050 int 6051 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 6052 struct rte_eth_burst_mode *mode) 6053 { 6054 struct rte_eth_dev *dev; 6055 int ret; 6056 6057 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6058 dev = &rte_eth_devices[port_id]; 6059 6060 if (queue_id >= dev->data->nb_rx_queues) { 6061 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 6062 return -EINVAL; 6063 } 6064 6065 if (mode == NULL) { 6066 RTE_ETHDEV_LOG(ERR, 6067 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 6068 port_id, queue_id); 6069 return -EINVAL; 6070 } 6071 6072 if (*dev->dev_ops->rx_burst_mode_get == NULL) 6073 return -ENOTSUP; 6074 memset(mode, 0, sizeof(*mode)); 6075 ret = eth_err(port_id, 6076 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 6077 6078 rte_eth_trace_rx_burst_mode_get(port_id, queue_id, mode, ret); 6079 6080 return ret; 6081 } 6082 6083 int 6084 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 6085 struct rte_eth_burst_mode *mode) 6086 { 6087 struct rte_eth_dev *dev; 6088 int ret; 6089 6090 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6091 dev = &rte_eth_devices[port_id]; 6092 6093 if (queue_id >= dev->data->nb_tx_queues) { 6094 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 6095 return -EINVAL; 6096 } 6097 6098 if (mode == NULL) { 6099 RTE_ETHDEV_LOG(ERR, 6100 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 6101 port_id, queue_id); 6102 return -EINVAL; 6103 } 6104 6105 if (*dev->dev_ops->tx_burst_mode_get == NULL) 6106 return -ENOTSUP; 6107 memset(mode, 0, sizeof(*mode)); 6108 ret = eth_err(port_id, 6109 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 6110 6111 rte_eth_trace_tx_burst_mode_get(port_id, queue_id, mode, ret); 6112 6113 return ret; 6114 } 6115 6116 int 6117 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 6118 struct rte_power_monitor_cond *pmc) 6119 { 6120 struct rte_eth_dev *dev; 6121 int ret; 6122 6123 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6124 dev = &rte_eth_devices[port_id]; 6125 6126 if (queue_id >= dev->data->nb_rx_queues) { 6127 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 6128 return -EINVAL; 6129 } 6130 6131 if (pmc == NULL) { 6132 RTE_ETHDEV_LOG(ERR, 6133 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 6134 port_id, queue_id); 6135 return -EINVAL; 6136 } 6137 6138 if (*dev->dev_ops->get_monitor_addr == NULL) 6139 return -ENOTSUP; 6140 ret = eth_err(port_id, 6141 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 6142 6143 rte_eth_trace_get_monitor_addr(port_id, queue_id, pmc, ret); 6144 6145 return ret; 6146 } 6147 6148 int 6149 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 6150 struct rte_ether_addr *mc_addr_set, 6151 uint32_t nb_mc_addr) 6152 { 6153 struct rte_eth_dev *dev; 6154 int ret; 6155 6156 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6157 dev = &rte_eth_devices[port_id]; 6158 6159 if (*dev->dev_ops->set_mc_addr_list == NULL) 6160 return -ENOTSUP; 6161 ret = eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 6162 mc_addr_set, nb_mc_addr)); 6163 6164 rte_ethdev_trace_set_mc_addr_list(port_id, mc_addr_set, nb_mc_addr, 6165 ret); 6166 6167 return ret; 6168 } 6169 6170 int 6171 rte_eth_timesync_enable(uint16_t port_id) 6172 { 6173 struct rte_eth_dev *dev; 6174 int ret; 6175 6176 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6177 dev = &rte_eth_devices[port_id]; 
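	/*
	 * Illustrative note (not part of the original source): a minimal
	 * PTP-style sequence, assuming the PMD implements the timesync ops,
	 * might look like (sketch only):
	 *
	 *	struct timespec ts;
	 *
	 *	rte_eth_timesync_enable(port_id);
	 *	(receive a packet with RTE_MBUF_F_RX_IEEE1588_TMST set)
	 *	rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
	 */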
6178 6179 if (*dev->dev_ops->timesync_enable == NULL) 6180 return -ENOTSUP; 6181 ret = eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 6182 6183 rte_eth_trace_timesync_enable(port_id, ret); 6184 6185 return ret; 6186 } 6187 6188 int 6189 rte_eth_timesync_disable(uint16_t port_id) 6190 { 6191 struct rte_eth_dev *dev; 6192 int ret; 6193 6194 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6195 dev = &rte_eth_devices[port_id]; 6196 6197 if (*dev->dev_ops->timesync_disable == NULL) 6198 return -ENOTSUP; 6199 ret = eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 6200 6201 rte_eth_trace_timesync_disable(port_id, ret); 6202 6203 return ret; 6204 } 6205 6206 int 6207 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 6208 uint32_t flags) 6209 { 6210 struct rte_eth_dev *dev; 6211 int ret; 6212 6213 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6214 dev = &rte_eth_devices[port_id]; 6215 6216 if (timestamp == NULL) { 6217 RTE_ETHDEV_LOG(ERR, 6218 "Cannot read ethdev port %u Rx timestamp to NULL\n", 6219 port_id); 6220 return -EINVAL; 6221 } 6222 6223 if (*dev->dev_ops->timesync_read_rx_timestamp == NULL) 6224 return -ENOTSUP; 6225 6226 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 6227 (dev, timestamp, flags)); 6228 6229 rte_eth_trace_timesync_read_rx_timestamp(port_id, timestamp, flags, 6230 ret); 6231 6232 return ret; 6233 } 6234 6235 int 6236 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 6237 struct timespec *timestamp) 6238 { 6239 struct rte_eth_dev *dev; 6240 int ret; 6241 6242 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6243 dev = &rte_eth_devices[port_id]; 6244 6245 if (timestamp == NULL) { 6246 RTE_ETHDEV_LOG(ERR, 6247 "Cannot read ethdev port %u Tx timestamp to NULL\n", 6248 port_id); 6249 return -EINVAL; 6250 } 6251 6252 if (*dev->dev_ops->timesync_read_tx_timestamp == NULL) 6253 return -ENOTSUP; 6254 6255 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 6256 (dev, timestamp)); 6257 6258 rte_eth_trace_timesync_read_tx_timestamp(port_id, timestamp, ret); 6259 6260 return ret; 6261 6262 } 6263 6264 int 6265 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 6266 { 6267 struct rte_eth_dev *dev; 6268 int ret; 6269 6270 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6271 dev = &rte_eth_devices[port_id]; 6272 6273 if (*dev->dev_ops->timesync_adjust_time == NULL) 6274 return -ENOTSUP; 6275 ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 6276 6277 rte_eth_trace_timesync_adjust_time(port_id, delta, ret); 6278 6279 return ret; 6280 } 6281 6282 int 6283 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 6284 { 6285 struct rte_eth_dev *dev; 6286 int ret; 6287 6288 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6289 dev = &rte_eth_devices[port_id]; 6290 6291 if (timestamp == NULL) { 6292 RTE_ETHDEV_LOG(ERR, 6293 "Cannot read ethdev port %u timesync time to NULL\n", 6294 port_id); 6295 return -EINVAL; 6296 } 6297 6298 if (*dev->dev_ops->timesync_read_time == NULL) 6299 return -ENOTSUP; 6300 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 6301 timestamp)); 6302 6303 rte_eth_trace_timesync_read_time(port_id, timestamp, ret); 6304 6305 return ret; 6306 } 6307 6308 int 6309 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 6310 { 6311 struct rte_eth_dev *dev; 6312 int ret; 6313 6314 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6315 dev = &rte_eth_devices[port_id]; 6316 6317 if 

int
rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (clock == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->read_clock == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));

	rte_eth_trace_read_clock(port_id, clock, ret);

	return ret;
}

int
rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u register info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_reg == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));

	rte_ethdev_trace_get_reg_info(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->get_eeprom_length == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));

	rte_ethdev_trace_get_eeprom_length(port_id, ret);

	return ret;
}

int
rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u EEPROM info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_eeprom == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));

	rte_ethdev_trace_get_eeprom(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u EEPROM from NULL info\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->set_eeprom == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));

	rte_ethdev_trace_set_eeprom(port_id, info, ret);

	return ret;
}
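
/*
 * Illustrative usage sketch (not part of the library): reading the whole
 * device EEPROM by first querying its length and then filling a caller
 * allocated buffer. Error handling is abbreviated and the variable names
 * are placeholders.
 *
 *	struct rte_dev_eeprom_info eeprom = { 0 };
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *	if (len > 0) {
 *		eeprom.data = malloc(len);
 *		eeprom.offset = 0;
 *		eeprom.length = len;
 *		if (eeprom.data != NULL &&
 *		    rte_eth_dev_get_eeprom(port_id, &eeprom) == 0)
 *			;	parse eeprom.data / eeprom.magic here
 *		free(eeprom.data);
 *	}
 */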

int
rte_eth_dev_get_module_info(uint16_t port_id,
			    struct rte_eth_dev_module_info *modinfo)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (modinfo == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u EEPROM module info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_module_info == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->get_module_info)(dev, modinfo);

	rte_ethdev_trace_get_module_info(port_id, modinfo, ret);

	return ret;
}

int
rte_eth_dev_get_module_eeprom(uint16_t port_id,
			      struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (info->data == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM data to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (info->length == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM to data with zero size\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_module_eeprom == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->get_module_eeprom)(dev, info);

	rte_ethdev_trace_get_module_eeprom(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dcb_info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u DCB info to NULL\n",
			port_id);
		return -EINVAL;
	}

	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	if (*dev->dev_ops->get_dcb_info == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));

	rte_ethdev_trace_get_dcb_info(port_id, dcb_info, ret);

	return ret;
}

static void
eth_dev_adjust_nb_desc(uint16_t *nb_desc,
		       const struct rte_eth_desc_lim *desc_lim)
{
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}

int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (nb_rx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	rte_ethdev_trace_adjust_nb_rx_tx_desc(port_id);

	return 0;
}
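
/*
 * Illustrative usage sketch (not part of the library): clamping the
 * application's preferred ring sizes to the device descriptor limits
 * before queue setup. The initial sizes and the mempool are placeholders.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) == 0) {
 *		rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *				rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *		rte_eth_tx_queue_setup(port_id, 0, nb_txd,
 *				rte_eth_dev_socket_id(port_id), NULL);
 *	}
 */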
NULL\n", 6596 port_id); 6597 return -EINVAL; 6598 } 6599 6600 if (*dev->dev_ops->hairpin_cap_get == NULL) 6601 return -ENOTSUP; 6602 memset(cap, 0, sizeof(*cap)); 6603 ret = eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 6604 6605 rte_ethdev_trace_hairpin_capability_get(port_id, cap, ret); 6606 6607 return ret; 6608 } 6609 6610 int 6611 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 6612 { 6613 struct rte_eth_dev *dev; 6614 int ret; 6615 6616 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6617 dev = &rte_eth_devices[port_id]; 6618 6619 if (pool == NULL) { 6620 RTE_ETHDEV_LOG(ERR, 6621 "Cannot test ethdev port %u mempool operation from NULL pool\n", 6622 port_id); 6623 return -EINVAL; 6624 } 6625 6626 if (*dev->dev_ops->pool_ops_supported == NULL) 6627 return 1; /* all pools are supported */ 6628 6629 ret = (*dev->dev_ops->pool_ops_supported)(dev, pool); 6630 6631 rte_ethdev_trace_pool_ops_supported(port_id, pool, ret); 6632 6633 return ret; 6634 } 6635 6636 int 6637 rte_eth_representor_info_get(uint16_t port_id, 6638 struct rte_eth_representor_info *info) 6639 { 6640 struct rte_eth_dev *dev; 6641 int ret; 6642 6643 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6644 dev = &rte_eth_devices[port_id]; 6645 6646 if (*dev->dev_ops->representor_info_get == NULL) 6647 return -ENOTSUP; 6648 ret = eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info)); 6649 6650 rte_eth_trace_representor_info_get(port_id, info, ret); 6651 6652 return ret; 6653 } 6654 6655 int 6656 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features) 6657 { 6658 struct rte_eth_dev *dev; 6659 int ret; 6660 6661 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6662 dev = &rte_eth_devices[port_id]; 6663 6664 if (dev->data->dev_configured != 0) { 6665 RTE_ETHDEV_LOG(ERR, 6666 "The port (ID=%"PRIu16") is already configured\n", 6667 port_id); 6668 return -EBUSY; 6669 } 6670 6671 if (features == NULL) { 6672 RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n"); 6673 return -EINVAL; 6674 } 6675 6676 if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0 && 6677 rte_flow_restore_info_dynflag_register() < 0) 6678 *features &= ~RTE_ETH_RX_METADATA_TUNNEL_ID; 6679 6680 if (*dev->dev_ops->rx_metadata_negotiate == NULL) 6681 return -ENOTSUP; 6682 ret = eth_err(port_id, 6683 (*dev->dev_ops->rx_metadata_negotiate)(dev, features)); 6684 6685 rte_eth_trace_rx_metadata_negotiate(port_id, *features, ret); 6686 6687 return ret; 6688 } 6689 6690 int 6691 rte_eth_ip_reassembly_capability_get(uint16_t port_id, 6692 struct rte_eth_ip_reassembly_params *reassembly_capa) 6693 { 6694 struct rte_eth_dev *dev; 6695 int ret; 6696 6697 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6698 dev = &rte_eth_devices[port_id]; 6699 6700 if (dev->data->dev_configured == 0) { 6701 RTE_ETHDEV_LOG(ERR, 6702 "Device with port_id=%u is not configured.\n" 6703 "Cannot get IP reassembly capability\n", 6704 port_id); 6705 return -EINVAL; 6706 } 6707 6708 if (reassembly_capa == NULL) { 6709 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL"); 6710 return -EINVAL; 6711 } 6712 6713 if (*dev->dev_ops->ip_reassembly_capability_get == NULL) 6714 return -ENOTSUP; 6715 memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params)); 6716 6717 ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get) 6718 (dev, reassembly_capa)); 6719 6720 rte_eth_trace_ip_reassembly_capability_get(port_id, reassembly_capa, 6721 ret); 6722 6723 return ret; 6724 } 6725 6726 int 6727 

int
rte_eth_ip_reassembly_capability_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *reassembly_capa)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot get IP reassembly capability\n",
			port_id);
		return -EINVAL;
	}

	if (reassembly_capa == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
		return -ENOTSUP;
	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));

	ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
					(dev, reassembly_capa));

	rte_eth_trace_ip_reassembly_capability_get(port_id, reassembly_capa,
						   ret);

	return ret;
}

int
rte_eth_ip_reassembly_conf_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot get IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
		return -ENOTSUP;
	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
	ret = eth_err(port_id,
		      (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_get(port_id, conf, ret);

	return ret;
}

int
rte_eth_ip_reassembly_conf_set(uint16_t port_id,
		const struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot set IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u started,\n"
			"cannot configure IP reassembly params.\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid IP reassembly configuration (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		      (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_set(port_id, conf, ret);

	return ret;
}
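
/*
 * Illustrative usage sketch (not part of the library): the intended call
 * order is capability query, then reading the current configuration, then
 * applying an updated one while the port is configured but not yet started.
 *
 *	struct rte_eth_ip_reassembly_params capa, conf;
 *
 *	if (rte_eth_ip_reassembly_capability_get(port_id, &capa) == 0 &&
 *	    rte_eth_ip_reassembly_conf_get(port_id, &conf) == 0) {
 *		conf.max_frags = RTE_MIN(conf.max_frags, capa.max_frags);
 *		rte_eth_ip_reassembly_conf_set(port_id, &conf);
 *	}
 */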
RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 6857 return -EINVAL; 6858 } 6859 6860 if (file == NULL) { 6861 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 6862 return -EINVAL; 6863 } 6864 6865 if (*dev->dev_ops->eth_tx_descriptor_dump == NULL) 6866 return -ENOTSUP; 6867 6868 return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev, 6869 queue_id, offset, num, file)); 6870 } 6871 6872 int 6873 rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num) 6874 { 6875 int i, j; 6876 struct rte_eth_dev *dev; 6877 const uint32_t *all_types; 6878 6879 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6880 dev = &rte_eth_devices[port_id]; 6881 6882 if (ptypes == NULL && num > 0) { 6883 RTE_ETHDEV_LOG(ERR, 6884 "Cannot get ethdev port %u supported header protocol types to NULL when array size is non zero\n", 6885 port_id); 6886 return -EINVAL; 6887 } 6888 6889 if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL) 6890 return -ENOTSUP; 6891 all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev); 6892 6893 if (all_types == NULL) 6894 return 0; 6895 6896 for (i = 0, j = 0; all_types[i] != RTE_PTYPE_UNKNOWN; ++i) { 6897 if (j < num) { 6898 ptypes[j] = all_types[i]; 6899 6900 rte_eth_trace_buffer_split_get_supported_hdr_ptypes( 6901 port_id, j, ptypes[j]); 6902 } 6903 j++; 6904 } 6905 6906 return j; 6907 } 6908 6909 int rte_eth_dev_count_aggr_ports(uint16_t port_id) 6910 { 6911 struct rte_eth_dev *dev; 6912 int ret; 6913 6914 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6915 dev = &rte_eth_devices[port_id]; 6916 6917 if (*dev->dev_ops->count_aggr_ports == NULL) 6918 return 0; 6919 ret = eth_err(port_id, (*dev->dev_ops->count_aggr_ports)(dev)); 6920 6921 rte_eth_trace_count_aggr_ports(port_id, ret); 6922 6923 return ret; 6924 } 6925 6926 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id, 6927 uint8_t affinity) 6928 { 6929 struct rte_eth_dev *dev; 6930 int aggr_ports; 6931 int ret; 6932 6933 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6934 dev = &rte_eth_devices[port_id]; 6935 6936 if (tx_queue_id >= dev->data->nb_tx_queues) { 6937 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 6938 return -EINVAL; 6939 } 6940 6941 if (*dev->dev_ops->map_aggr_tx_affinity == NULL) 6942 return -ENOTSUP; 6943 6944 if (dev->data->dev_configured == 0) { 6945 RTE_ETHDEV_LOG(ERR, 6946 "Port %u must be configured before Tx affinity mapping\n", 6947 port_id); 6948 return -EINVAL; 6949 } 6950 6951 if (dev->data->dev_started) { 6952 RTE_ETHDEV_LOG(ERR, 6953 "Port %u must be stopped to allow configuration\n", 6954 port_id); 6955 return -EBUSY; 6956 } 6957 6958 aggr_ports = rte_eth_dev_count_aggr_ports(port_id); 6959 if (aggr_ports == 0) { 6960 RTE_ETHDEV_LOG(ERR, 6961 "Port %u has no aggregated port\n", 6962 port_id); 6963 return -ENOTSUP; 6964 } 6965 6966 if (affinity > aggr_ports) { 6967 RTE_ETHDEV_LOG(ERR, 6968 "Port %u map invalid affinity %u exceeds the maximum number %u\n", 6969 port_id, affinity, aggr_ports); 6970 return -EINVAL; 6971 } 6972 6973 ret = eth_err(port_id, (*dev->dev_ops->map_aggr_tx_affinity)(dev, 6974 tx_queue_id, affinity)); 6975 6976 rte_eth_trace_map_aggr_tx_affinity(port_id, tx_queue_id, affinity, ret); 6977 6978 return ret; 6979 } 6980 6981 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO); 6982