/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <bus_driver.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_kvargs.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev.h"
#include "rte_ethdev_trace_fp.h"
#include "ethdev_driver.h"
#include "rte_flow_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "ethdev_trace.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
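
/*
 * A minimal sketch (not part of the library) of how the name/offset tables
 * above are typically consumed: each offset indexes directly into a
 * struct rte_eth_stats snapshot, so a basic stat value can be read as shown
 * below. Variable names are illustrative only.
 *
 *	struct rte_eth_stats stats;
 *	unsigned int i;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0) {
 *		for (i = 0; i < RTE_NB_STATS; i++) {
 *			uint64_t v = *(uint64_t *)(((char *)&stats) +
 *					eth_dev_stats_strings[i].offset);
 *			printf("%s: %" PRIu64 "\n",
 *					eth_dev_stats_strings[i].name, v);
 *		}
 *	}
 */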

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

static const struct {
	enum rte_eth_hash_function algo;
	const char *name;
} rte_eth_dev_rss_algo_names[] = {
	{RTE_ETH_HASH_FUNCTION_DEFAULT, "default"},
	{RTE_ETH_HASH_FUNCTION_SIMPLE_XOR, "simple_xor"},
	{RTE_ETH_HASH_FUNCTION_TOEPLITZ, "toeplitz"},
	{RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ, "symmetric_toeplitz"},
	{RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT, "symmetric_toeplitz_sort"},
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot initialize NULL iterator");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot initialize iterator from NULL device description string");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() is not yet supporting the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);

	rte_eth_trace_iterator_init(devargs_str);

	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG_LINE(ERR, "Bus %s does not support iterating.",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get next device from NULL iterator");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL) {
			uint16_t id = eth_dev_to_id(iter->class_device);

			rte_eth_trace_iterator_next(iter, id);

			return id; /* match */
		}
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot do clean up from NULL iterator");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */

	rte_eth_trace_iterator_cleanup(iter);

	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
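
/*
 * A minimal usage sketch (not part of the library): iterate over all ethdev
 * ports matching a devargs string with the three functions above. If the
 * loop is left before rte_eth_iterator_next() has returned RTE_MAX_ETHPORTS,
 * rte_eth_iterator_cleanup() must be called explicitly; on full exhaustion
 * the cleanup happens inside rte_eth_iterator_next() itself.
 *
 *	struct rte_dev_iterator it;
 *	uint16_t pid;
 *
 *	if (rte_eth_iterator_init(&it, "class=eth") == 0) {
 *		for (pid = rte_eth_iterator_next(&it);
 *				pid != RTE_MAX_ETHPORTS;
 *				pid = rte_eth_iterator_next(&it))
 *			printf("matched port %u\n", pid);
 *	}
 */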

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	rte_eth_trace_find_next(port_id);

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV differs because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_of(port_id, parent);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	uint16_t ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	ret = rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);

	rte_eth_trace_find_next_sibling(port_id, ref_port_id, ret);

	return ret;
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data != NULL && ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	int is_valid;

	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		is_valid = 0;
	else
		is_valid = 1;

	rte_ethdev_trace_is_valid_port(port_id, is_valid);

	return is_valid;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_owned_by(port_id, owner_id);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	int ret;

	if (owner_id == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get new owner ID to NULL");
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL) {
		*owner_id = eth_dev_shared_data->next_owner_id++;
		eth_dev_shared_data->allocated_owners++;
		ret = 0;
	} else {
		ret = -ENOMEM;
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_new(*owner_id, ret);

	return ret;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG_LINE(ERR, "Port ID %"PRIu16" is not allocated",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot set ethdev port %u owner from NULL owner",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64,
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64,
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG_LINE(DEBUG, "Port %u owner is %s_%016"PRIx64,
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL)
		ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
	else
		ret = -ENOMEM;

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_set(port_id, owner, ret);

	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL)
		ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
	else
		ret = -ENOMEM;

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_unset(port_id, owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL) {
		ret = -ENOMEM;
	} else if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG_LINE(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed",
			owner_id);
		eth_dev_shared_data->allocated_owners--;
		eth_dev_shared_data_release();
	} else {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Invalid owner ID=%016"PRIx64,
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_delete(owner_id, ret);

	return ret;
}
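
/*
 * A minimal usage sketch (not part of the library) of the ownership API
 * above: a control-plane component takes ownership of a port so that other
 * components iterating with RTE_ETH_FOREACH_DEV skip it, then releases it.
 * port_id is assumed to be valid; error handling is omitted for brevity.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my-app" };
 *	uint64_t owner_id;
 *
 *	rte_eth_dev_owner_new(&owner_id);
 *	owner.id = owner_id;
 *	rte_eth_dev_owner_set(port_id, &owner);
 *	...
 *	rte_eth_dev_owner_unset(port_id, owner_id);
 *	rte_eth_dev_owner_delete(owner_id);
 */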

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG_LINE(ERR, "Port ID %"PRIu16" is not allocated",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u owner to NULL",
			port_id);
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL) {
		rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
		ret = 0;
	} else {
		ret = -ENOMEM;
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_get(port_id, owner, ret);

	return ret;
}

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	int socket_id = SOCKET_ID_ANY;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_errno = EINVAL;
	} else {
		socket_id = rte_eth_devices[port_id].data->numa_node;
		if (socket_id == SOCKET_ID_ANY)
			rte_errno = 0;
	}

	rte_ethdev_trace_socket_id(port_id, socket_id);

	return socket_id;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	void *ctx;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	ctx = rte_eth_devices[port_id].security_ctx;

	rte_ethdev_trace_get_sec_ctx(port_id, ctx);

	return ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	rte_ethdev_trace_count_avail(count);

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	rte_ethdev_trace_count_total(count);

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u name to NULL",
			port_id);
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	strcpy(name, tmp);

	rte_ethdev_trace_get_name_by_port(port_id, name);

	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	int ret = -ENODEV;
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get port ID from NULL name");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get port ID to NULL for %s", name);
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
	RTE_ETH_FOREACH_VALID_DEV(pid) {
		if (strcmp(name, eth_dev_shared_data->data[pid].name) != 0)
			continue;

		*port_id = pid;
		rte_ethdev_trace_get_port_by_name(name, *port_id);
		ret = 0;
		break;
	}
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return ret;
}

int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG_LINE(ERR,
			"Invalid Rx queue_id=%u of device with port_id=%u",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG_LINE(ERR,
			"Queue %u of device with port_id=%u has not been setup",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG_LINE(ERR,
			"Invalid Tx queue_id=%u of device with port_id=%u",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG_LINE(ERR,
			"Queue %u of device with port_id=%u has not been setup",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	return eth_dev_validate_rx_queue(dev, queue_id);
}

int
rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	return eth_dev_validate_tx_queue(dev, queue_id);
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Port %u must be started before starting any queue",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started",
			rx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));

	rte_ethdev_trace_rx_queue_start(port_id, rx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped",
			rx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

	rte_ethdev_trace_rx_queue_stop(port_id, rx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Port %u must be started before starting any queue",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started",
			tx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));

	rte_ethdev_trace_tx_queue_start(port_id, tx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped",
			tx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

	rte_ethdev_trace_tx_queue_stop(port_id, tx_queue_id, ret);

	return ret;
}
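
/*
 * A minimal usage sketch (not part of the library): deferred queue start.
 * A queue set up with the deferred_start flag in its queue configuration is
 * not started by rte_eth_dev_start() and can be started and stopped at
 * runtime with the helpers above, but only while the port itself is started.
 * Error handling is omitted.
 *
 *	rte_eth_dev_start(port_id);
 *	rte_eth_dev_rx_queue_start(port_id, queue_id);
 *	...
 *	rte_eth_dev_rx_queue_stop(port_id, queue_id);
 */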

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	uint32_t ret;

	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		ret = duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
		break;
	case RTE_ETH_SPEED_NUM_100M:
		ret = duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
		break;
	case RTE_ETH_SPEED_NUM_1G:
		ret = RTE_ETH_LINK_SPEED_1G;
		break;
	case RTE_ETH_SPEED_NUM_2_5G:
		ret = RTE_ETH_LINK_SPEED_2_5G;
		break;
	case RTE_ETH_SPEED_NUM_5G:
		ret = RTE_ETH_LINK_SPEED_5G;
		break;
	case RTE_ETH_SPEED_NUM_10G:
		ret = RTE_ETH_LINK_SPEED_10G;
		break;
	case RTE_ETH_SPEED_NUM_20G:
		ret = RTE_ETH_LINK_SPEED_20G;
		break;
	case RTE_ETH_SPEED_NUM_25G:
		ret = RTE_ETH_LINK_SPEED_25G;
		break;
	case RTE_ETH_SPEED_NUM_40G:
		ret = RTE_ETH_LINK_SPEED_40G;
		break;
	case RTE_ETH_SPEED_NUM_50G:
		ret = RTE_ETH_LINK_SPEED_50G;
		break;
	case RTE_ETH_SPEED_NUM_56G:
		ret = RTE_ETH_LINK_SPEED_56G;
		break;
	case RTE_ETH_SPEED_NUM_100G:
		ret = RTE_ETH_LINK_SPEED_100G;
		break;
	case RTE_ETH_SPEED_NUM_200G:
		ret = RTE_ETH_LINK_SPEED_200G;
		break;
	case RTE_ETH_SPEED_NUM_400G:
		ret = RTE_ETH_LINK_SPEED_400G;
		break;
	default:
		ret = 0;
	}

	rte_eth_trace_speed_bitflag(speed, duplex, ret);

	return ret;
}
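
/*
 * A minimal usage sketch (not part of the library): build a fixed-speed
 * link_speeds mask for rte_eth_conf from a numeric speed, as an application
 * might do before calling rte_eth_dev_configure(). The speed value is
 * illustrative only.
 *
 *	struct rte_eth_conf conf = {0};
 *
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *				      RTE_ETH_LINK_FULL_DUPLEX);
 */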

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_rx_offload_name(offload, name);

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_tx_offload_name(offload, name);

	return name;
}

static char *
eth_dev_offload_names(uint64_t bitmask, char *buf, size_t size,
		      const char *(*offload_name)(uint64_t))
{
	unsigned int pos = 0;
	int ret;

	/* There should be at least enough space to handle those cases */
	RTE_ASSERT(size >= sizeof("none") && size >= sizeof("..."));

	if (bitmask == 0) {
		ret = snprintf(&buf[pos], size - pos, "none");
		if (ret < 0 || pos + ret >= size)
			ret = 0;
		pos += ret;
		goto out;
	}

	while (bitmask != 0) {
		uint64_t offload = RTE_BIT64(rte_ctz64(bitmask));
		const char *name = offload_name(offload);

		ret = snprintf(&buf[pos], size - pos, "%s,", name);
		if (ret < 0 || pos + ret >= size) {
			if (pos + sizeof("...") >= size)
				pos = size - sizeof("...");
			ret = snprintf(&buf[pos], size - pos, "...");
			if (ret > 0 && pos + ret < size)
				pos += ret;
			goto out;
		}

		pos += ret;
		bitmask &= ~offload;
	}

	/* Eliminate trailing comma */
	pos--;
out:
	buf[pos] = '\0';
	return buf;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_capability_name(capability, name);

	return name;
}
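
/*
 * A minimal usage sketch (not part of the library): decode an Rx offload
 * bitmask bit by bit with rte_eth_dev_rx_offload_name(), the same way
 * eth_dev_offload_names() above walks the mask. "offloads" is assumed to
 * come from dev_info.rx_offload_capa or rxmode.offloads.
 *
 *	while (offloads != 0) {
 *		uint64_t bit = RTE_BIT64(rte_ctz64(offloads));
 *
 *		printf("%s\n", rte_eth_dev_rx_offload_name(bit));
 *		offloads &= ~bit;
 *	}
 */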

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				" %u != %u is not allowed",
				port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"> max allowed value %u", port_id, config_size,
			dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"< min allowed value %u", port_id, config_size,
			(unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(rte_ctz64(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"Port %u failed to enable %s offload %s",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG_LINE(DEBUG,
				"Port %u %s offload %s is not requested but enabled",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		     uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}
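
/*
 * A worked example of the checks above (numbers are illustrative only):
 * with the default overhead of RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
 * (14 + 4 = 18 bytes), a standard MTU of 1500 gives a frame size of
 * 1500 + 18 = 1518 bytes, which must fit within dev_info->max_rx_pktlen
 * and be at least RTE_ETHER_MIN_LEN (64 bytes).
 */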

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	enum rte_eth_hash_function algorithm;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot configure ethdev port %u from NULL config",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Port %u must be stopped to allow configuration",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time prepare to do
	 * dev_configure() to avoid any non-anticipated behaviour.
	 * And set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	/* fields must be zero to reserve them for future ABI changes */
	if (dev_conf->rxmode.reserved_64s[0] != 0 ||
	    dev_conf->rxmode.reserved_64s[1] != 0 ||
	    dev_conf->rxmode.reserved_ptrs[0] != NULL ||
	    dev_conf->rxmode.reserved_ptrs[1] != NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Rxmode reserved fields not zero");
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->txmode.reserved_64s[0] != 0 ||
	    dev_conf->txmode.reserved_64s[1] != 0 ||
	    dev_conf->txmode.reserved_ptrs[0] != NULL ||
	    dev_conf->txmode.reserved_ptrs[1] != NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "txmode reserved fields not zero");
		ret = -EINVAL;
		goto rollback;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Number of Rx queues requested (%u) is greater than max supported(%d)",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Number of Tx queues requested (%u) is greater than max supported(%d)",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG_LINE(ERR, "Driver %s does not support lsc",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG_LINE(ERR, "Driver %s does not support rmv",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu =
			(dev_info.max_mtu == 0) ? RTE_ETHER_MTU :
			RTE_MIN(dev_info.max_mtu, RTE_ETHER_MTU);

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		char buffer[512];

		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u does not support Rx offloads %s",
			port_id, eth_dev_offload_names(
			dev_conf->rxmode.offloads & ~dev_info.rx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u was requested Rx offloads %s",
			port_id, eth_dev_offload_names(dev_conf->rxmode.offloads,
			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u supports Rx offloads %s",
			port_id, eth_dev_offload_names(dev_info.rx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));

		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		char buffer[512];

		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u does not support Tx offloads %s",
			port_id, eth_dev_offload_names(
			dev_conf->txmode.offloads & ~dev_info.tx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u was requested Tx offloads %s",
			port_id, eth_dev_offload_names(dev_conf->txmode.offloads,
			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u supports Tx offloads %s",
			port_id, eth_dev_offload_names(dev_info.tx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64,
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rx_adv_conf.rss_conf.rss_key != NULL &&
	    dev_conf->rx_adv_conf.rss_conf.rss_key_len != dev_info.hash_key_size) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Ethdev port_id=%u invalid RSS key len: %u, valid value: %u",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_key_len,
			dev_info.hash_key_size);
		ret = -EINVAL;
		goto rollback;
	}

	algorithm = dev_conf->rx_adv_conf.rss_conf.algorithm;
	if ((size_t)algorithm >= CHAR_BIT * sizeof(dev_info.rss_algo_capa) ||
	    (dev_info.rss_algo_capa & RTE_ETH_HASH_ALGO_TO_CAPA(algorithm)) == 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Ethdev port_id=%u configured RSS hash algorithm (%u) "
			"is not in the algorithm capability (0x%" PRIx32 ")",
			port_id, algorithm, dev_info.rss_algo_capa);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Port%u eth_dev_rx_queue_config = %d",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Port%u eth_dev_tx_queue_config = %d",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG_LINE(ERR, "Port%u dev_configure = %d",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG_LINE(ERR, "Port%u __rte_eth_dev_profile_init = %d",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}
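
/*
 * A minimal usage sketch (not part of the library) of the configure step
 * above, as an application would typically call it before setting up
 * queues. Values are illustrative only; error handling is omitted.
 *
 *	struct rte_eth_conf conf = {0};
 *
 *	conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 *	conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP;
 *	rte_eth_dev_configure(port_id, nb_rx_queues, nb_tx_queues, &conf);
 */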

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_promiscuous_restore(struct rte_eth_dev *dev, uint16_t port_id)
{
	int ret;

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

static int
eth_dev_allmulticast_restore(struct rte_eth_dev *dev, uint16_t port_id)
{
	int ret;

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
				(*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info,
		uint64_t restore_flags,
		uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR) &&
	    (restore_flags & RTE_ETH_RESTORE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	if (restore_flags & RTE_ETH_RESTORE_PROMISC) {
		ret = eth_dev_promiscuous_restore(dev, port_id);
		if (ret != 0)
			return ret;
	}

	if (restore_flags & RTE_ETH_RESTORE_ALLMULTI) {
		ret = eth_dev_allmulticast_restore(dev, port_id);
		if (ret != 0)
			return ret;
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	uint64_t restore_flags;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Device with port_id=%"PRIu16" is not configured.",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Device with port_id=%"PRIu16" already started",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	restore_flags = rte_eth_get_restore_flags(dev, RTE_ETH_START);

	/* Let's restore MAC now if device does not support live change */
	if ((*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR) &&
	    (restore_flags & RTE_ETH_RESTORE_MAC_ADDR))
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, restore_flags, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Error during restoring configuration for device (port %u): %s",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"Failed to stop device (port %u): %s",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		if (*dev->dev_ops->link_update == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Device with port_id=%"PRIu16" already stopped",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret == 0)
		dev->data->dev_started = 0;
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}
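
/*
 * A minimal lifecycle sketch (not part of the library) tying the calls
 * above together: configure, set up one Rx and one Tx queue, start, and
 * later stop and close the port. "conf" and "mbuf_pool" are assumed to
 * have been prepared earlier; arguments are illustrative only and error
 * handling is omitted.
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *			       NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *			       NULL);
 *	rte_eth_dev_start(port_id);
 *	...
 *	rte_eth_dev_stop(port_id);
 *	rte_eth_dev_close(port_id);
 */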

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_up == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));

	rte_ethdev_trace_set_link_up(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_down == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));

	rte_ethdev_trace_set_link_down(port_id, ret);

	return ret;
}

int
rte_eth_speed_lanes_get(uint16_t port_id, uint32_t *lane)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->speed_lanes_get == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->speed_lanes_get)(dev, lane));
}

int
rte_eth_speed_lanes_get_capability(uint16_t port_id,
				   struct rte_eth_speed_lanes_capa *speed_lanes_capa,
				   unsigned int num)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->speed_lanes_get_capa == NULL)
		return -ENOTSUP;

	if (speed_lanes_capa == NULL && num > 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get ethdev port %u speed lanes capability to NULL when array size is non zero",
			port_id);
		return -EINVAL;
	}

	ret = (*dev->dev_ops->speed_lanes_get_capa)(dev, speed_lanes_capa, num);

	return ret;
}

int
rte_eth_speed_lanes_set(uint16_t port_id, uint32_t speed_lanes_capa)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->speed_lanes_set == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->speed_lanes_set)(dev, speed_lanes_capa));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*
	 * Secondary process needs to close device to release process private
	 * resources. But secondary process should not be obliged to wait
	 * for device stop before closing ethdev.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
	    dev->data->dev_started) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot close started device (port %u)",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_reset == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore",
			port_id, rte_strerror(-ret));
	}
	ret = eth_err(port_id, dev->dev_ops->dev_reset(dev));

	rte_ethdev_trace_reset(port_id, ret);

	return ret;
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	if (*dev->dev_ops->is_removed == NULL)
		return 0;

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	rte_ethdev_trace_is_removed(port_id, ret);

	return ret;
}

static int
rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
			 uint16_t min_length)
{
	uint16_t data_room_size;

	/*
	 * Check the size of the mbuf data buffer, this value
	 * must be provided in the private data of the memory pool.
	 * First check that the memory pool has valid private data.
2033 */ 2034 if (mp->private_data_size < 2035 sizeof(struct rte_pktmbuf_pool_private)) { 2036 RTE_ETHDEV_LOG_LINE(ERR, "%s private_data_size %u < %u", 2037 mp->name, mp->private_data_size, 2038 (unsigned int) 2039 sizeof(struct rte_pktmbuf_pool_private)); 2040 return -ENOSPC; 2041 } 2042 data_room_size = rte_pktmbuf_data_room_size(mp); 2043 if (data_room_size < offset + min_length) { 2044 RTE_ETHDEV_LOG_LINE(ERR, 2045 "%s mbuf_data_room_size %u < %u (%u + %u)", 2046 mp->name, data_room_size, 2047 offset + min_length, offset, min_length); 2048 return -EINVAL; 2049 } 2050 return 0; 2051 } 2052 2053 static int 2054 eth_dev_buffer_split_get_supported_hdrs_helper(uint16_t port_id, uint32_t **ptypes) 2055 { 2056 int cnt; 2057 2058 cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0); 2059 if (cnt <= 0) 2060 return cnt; 2061 2062 *ptypes = malloc(sizeof(uint32_t) * cnt); 2063 if (*ptypes == NULL) 2064 return -ENOMEM; 2065 2066 cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, *ptypes, cnt); 2067 if (cnt <= 0) { 2068 free(*ptypes); 2069 *ptypes = NULL; 2070 } 2071 return cnt; 2072 } 2073 2074 static int 2075 rte_eth_rx_queue_check_split(uint16_t port_id, 2076 const struct rte_eth_rxseg_split *rx_seg, 2077 uint16_t n_seg, uint32_t *mbp_buf_size, 2078 const struct rte_eth_dev_info *dev_info) 2079 { 2080 const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; 2081 struct rte_mempool *mp_first; 2082 uint32_t offset_mask; 2083 uint16_t seg_idx; 2084 int ret = 0; 2085 int ptype_cnt; 2086 uint32_t *ptypes; 2087 uint32_t prev_proto_hdrs = RTE_PTYPE_UNKNOWN; 2088 int i; 2089 2090 if (n_seg > seg_capa->max_nseg) { 2091 RTE_ETHDEV_LOG_LINE(ERR, 2092 "Requested Rx segments %u exceed supported %u", 2093 n_seg, seg_capa->max_nseg); 2094 return -EINVAL; 2095 } 2096 /* 2097 * Check the sizes and offsets against buffer sizes 2098 * for each segment specified in extended configuration. 2099 */ 2100 mp_first = rx_seg[0].mp; 2101 offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1; 2102 2103 ptypes = NULL; 2104 ptype_cnt = eth_dev_buffer_split_get_supported_hdrs_helper(port_id, &ptypes); 2105 2106 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 2107 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 2108 uint32_t length = rx_seg[seg_idx].length; 2109 uint32_t offset = rx_seg[seg_idx].offset; 2110 uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr; 2111 2112 if (mpl == NULL) { 2113 RTE_ETHDEV_LOG_LINE(ERR, "null mempool pointer"); 2114 ret = -EINVAL; 2115 goto out; 2116 } 2117 if (seg_idx != 0 && mp_first != mpl && 2118 seg_capa->multi_pools == 0) { 2119 RTE_ETHDEV_LOG_LINE(ERR, "Receiving to multiple pools is not supported"); 2120 ret = -ENOTSUP; 2121 goto out; 2122 } 2123 if (offset != 0) { 2124 if (seg_capa->offset_allowed == 0) { 2125 RTE_ETHDEV_LOG_LINE(ERR, "Rx segmentation with offset is not supported"); 2126 ret = -ENOTSUP; 2127 goto out; 2128 } 2129 if (offset & offset_mask) { 2130 RTE_ETHDEV_LOG_LINE(ERR, "Rx segmentation invalid offset alignment %u, %u", 2131 offset, 2132 seg_capa->offset_align_log2); 2133 ret = -EINVAL; 2134 goto out; 2135 } 2136 } 2137 2138 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 2139 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 2140 if (proto_hdr != 0) { 2141 /* Split based on protocol headers. 
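 *
 * For illustration, a protocol-based split request that this branch
 * validates could look like the following sketch (the pool names and
 * the header combination are assumptions):
 *
 *   struct rte_eth_rxseg_split segs[2] = {
 *       { .mp = hdr_pool, .proto_hdr = RTE_PTYPE_L2_ETHER |
 *                                      RTE_PTYPE_L3_IPV4 |
 *                                      RTE_PTYPE_L4_UDP },
 *       { .mp = pay_pool, .proto_hdr = 0 },
 *   };
 *
 * A trailing segment with proto_hdr == 0 takes the remaining data as a
 * length-based segment; each earlier proto_hdr must extend the previous
 * ones, and the accumulated set must match one of the combinations
 * reported by rte_eth_buffer_split_get_supported_hdr_ptypes().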
*/ 2142 if (length != 0) { 2143 RTE_ETHDEV_LOG_LINE(ERR, 2144 "Do not set length split and protocol split within a segment" 2145 ); 2146 ret = -EINVAL; 2147 goto out; 2148 } 2149 if ((proto_hdr & prev_proto_hdrs) != 0) { 2150 RTE_ETHDEV_LOG_LINE(ERR, 2151 "Repeat with previous protocol headers or proto-split after length-based split" 2152 ); 2153 ret = -EINVAL; 2154 goto out; 2155 } 2156 if (ptype_cnt <= 0) { 2157 RTE_ETHDEV_LOG_LINE(ERR, 2158 "Port %u failed to get supported buffer split header protocols", 2159 port_id); 2160 ret = -ENOTSUP; 2161 goto out; 2162 } 2163 for (i = 0; i < ptype_cnt; i++) { 2164 if ((prev_proto_hdrs | proto_hdr) == ptypes[i]) 2165 break; 2166 } 2167 if (i == ptype_cnt) { 2168 RTE_ETHDEV_LOG_LINE(ERR, 2169 "Requested Rx split header protocols 0x%x is not supported.", 2170 proto_hdr); 2171 ret = -EINVAL; 2172 goto out; 2173 } 2174 prev_proto_hdrs |= proto_hdr; 2175 } else { 2176 /* Split at fixed length. */ 2177 length = length != 0 ? length : *mbp_buf_size; 2178 prev_proto_hdrs = RTE_PTYPE_ALL_MASK; 2179 } 2180 2181 ret = rte_eth_check_rx_mempool(mpl, offset, length); 2182 if (ret != 0) 2183 goto out; 2184 } 2185 out: 2186 free(ptypes); 2187 return ret; 2188 } 2189 2190 static int 2191 rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools, 2192 uint16_t n_mempools, uint32_t *min_buf_size, 2193 const struct rte_eth_dev_info *dev_info) 2194 { 2195 uint16_t pool_idx; 2196 int ret; 2197 2198 if (n_mempools > dev_info->max_rx_mempools) { 2199 RTE_ETHDEV_LOG_LINE(ERR, 2200 "Too many Rx mempools %u vs maximum %u", 2201 n_mempools, dev_info->max_rx_mempools); 2202 return -EINVAL; 2203 } 2204 2205 for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) { 2206 struct rte_mempool *mp = rx_mempools[pool_idx]; 2207 2208 if (mp == NULL) { 2209 RTE_ETHDEV_LOG_LINE(ERR, "null Rx mempool pointer"); 2210 return -EINVAL; 2211 } 2212 2213 ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM, 2214 dev_info->min_rx_bufsize); 2215 if (ret != 0) 2216 return ret; 2217 2218 *min_buf_size = RTE_MIN(*min_buf_size, 2219 rte_pktmbuf_data_room_size(mp)); 2220 } 2221 2222 return 0; 2223 } 2224 2225 int 2226 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2227 uint16_t nb_rx_desc, unsigned int socket_id, 2228 const struct rte_eth_rxconf *rx_conf, 2229 struct rte_mempool *mp) 2230 { 2231 int ret; 2232 uint64_t rx_offloads; 2233 uint32_t mbp_buf_size = UINT32_MAX; 2234 struct rte_eth_dev *dev; 2235 struct rte_eth_dev_info dev_info; 2236 struct rte_eth_rxconf local_conf; 2237 uint32_t buf_data_size; 2238 2239 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2240 dev = &rte_eth_devices[port_id]; 2241 2242 if (rx_queue_id >= dev->data->nb_rx_queues) { 2243 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", rx_queue_id); 2244 return -EINVAL; 2245 } 2246 2247 if (*dev->dev_ops->rx_queue_setup == NULL) 2248 return -ENOTSUP; 2249 2250 if (rx_conf != NULL && 2251 (rx_conf->reserved_64s[0] != 0 || 2252 rx_conf->reserved_64s[1] != 0 || 2253 rx_conf->reserved_ptrs[0] != NULL || 2254 rx_conf->reserved_ptrs[1] != NULL)) { 2255 RTE_ETHDEV_LOG_LINE(ERR, "Rx conf reserved fields not zero"); 2256 return -EINVAL; 2257 } 2258 2259 ret = rte_eth_dev_info_get(port_id, &dev_info); 2260 if (ret != 0) 2261 return ret; 2262 2263 rx_offloads = dev->data->dev_conf.rxmode.offloads; 2264 if (rx_conf != NULL) 2265 rx_offloads |= rx_conf->offloads; 2266 2267 /* Ensure that we have one and only one source of Rx buffers */ 2268 if ((mp != NULL) + 2269 (rx_conf != NULL && rx_conf->rx_nseg > 0) + 2270 
(rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) { 2271 RTE_ETHDEV_LOG_LINE(ERR, 2272 "Ambiguous Rx mempools configuration"); 2273 return -EINVAL; 2274 } 2275 2276 if (mp != NULL) { 2277 /* Single pool configuration check. */ 2278 ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM, 2279 dev_info.min_rx_bufsize); 2280 if (ret != 0) 2281 return ret; 2282 2283 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2284 buf_data_size = mbp_buf_size - RTE_PKTMBUF_HEADROOM; 2285 if (buf_data_size > dev_info.max_rx_bufsize) 2286 RTE_ETHDEV_LOG_LINE(DEBUG, 2287 "For port_id=%u, the mbuf data buffer size (%u) is bigger than " 2288 "max buffer size (%u) device can utilize, so mbuf size can be reduced.", 2289 port_id, buf_data_size, dev_info.max_rx_bufsize); 2290 } else if (rx_conf != NULL && rx_conf->rx_nseg > 0) { 2291 const struct rte_eth_rxseg_split *rx_seg; 2292 uint16_t n_seg; 2293 2294 /* Extended multi-segment configuration check. */ 2295 if (rx_conf->rx_seg == NULL) { 2296 RTE_ETHDEV_LOG_LINE(ERR, 2297 "Memory pool is null and no multi-segment configuration provided"); 2298 return -EINVAL; 2299 } 2300 2301 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2302 n_seg = rx_conf->rx_nseg; 2303 2304 if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2305 ret = rte_eth_rx_queue_check_split(port_id, rx_seg, n_seg, 2306 &mbp_buf_size, 2307 &dev_info); 2308 if (ret != 0) 2309 return ret; 2310 } else { 2311 RTE_ETHDEV_LOG_LINE(ERR, "No Rx segmentation offload configured"); 2312 return -EINVAL; 2313 } 2314 } else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) { 2315 /* Extended multi-pool configuration check. */ 2316 if (rx_conf->rx_mempools == NULL) { 2317 RTE_ETHDEV_LOG_LINE(ERR, "Memory pools array is null"); 2318 return -EINVAL; 2319 } 2320 2321 ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools, 2322 rx_conf->rx_nmempool, 2323 &mbp_buf_size, 2324 &dev_info); 2325 if (ret != 0) 2326 return ret; 2327 } else { 2328 RTE_ETHDEV_LOG_LINE(ERR, "Missing Rx mempool configuration"); 2329 return -EINVAL; 2330 } 2331 2332 /* Use default specified by driver, if nb_rx_desc is zero */ 2333 if (nb_rx_desc == 0) { 2334 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2335 /* If driver default is also zero, fall back on EAL default */ 2336 if (nb_rx_desc == 0) 2337 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2338 } 2339 2340 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2341 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2342 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2343 2344 RTE_ETHDEV_LOG_LINE(ERR, 2345 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu", 2346 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2347 dev_info.rx_desc_lim.nb_min, 2348 dev_info.rx_desc_lim.nb_align); 2349 return -EINVAL; 2350 } 2351 2352 if (dev->data->dev_started && 2353 !(dev_info.dev_capa & 2354 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2355 return -EBUSY; 2356 2357 if (dev->data->dev_started && 2358 (dev->data->rx_queue_state[rx_queue_id] != 2359 RTE_ETH_QUEUE_STATE_STOPPED)) 2360 return -EBUSY; 2361 2362 eth_dev_rxq_release(dev, rx_queue_id); 2363 2364 if (rx_conf == NULL) 2365 rx_conf = &dev_info.default_rxconf; 2366 2367 local_conf = *rx_conf; 2368 2369 /* 2370 * If an offloading has already been enabled in 2371 * rte_eth_dev_configure(), it has been enabled on all queues, 2372 * so there is no need to enable it in this queue again. 
2373 * The local_conf.offloads input to underlying PMD only carries 2374 * those offloadings which are only enabled on this queue and 2375 * not enabled on all queues. 2376 */ 2377 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2378 2379 /* 2380 * New added offloadings for this queue are those not enabled in 2381 * rte_eth_dev_configure() and they must be per-queue type. 2382 * A pure per-port offloading can't be enabled on a queue while 2383 * disabled on another queue. A pure per-port offloading can't 2384 * be enabled for any queue as new added one if it hasn't been 2385 * enabled in rte_eth_dev_configure(). 2386 */ 2387 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2388 local_conf.offloads) { 2389 RTE_ETHDEV_LOG_LINE(ERR, 2390 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2391 "within per-queue offload capabilities 0x%"PRIx64" in %s()", 2392 port_id, rx_queue_id, local_conf.offloads, 2393 dev_info.rx_queue_offload_capa, 2394 __func__); 2395 return -EINVAL; 2396 } 2397 2398 if (local_conf.share_group > 0 && 2399 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 2400 RTE_ETHDEV_LOG_LINE(ERR, 2401 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share", 2402 port_id, rx_queue_id, local_conf.share_group); 2403 return -EINVAL; 2404 } 2405 2406 /* 2407 * If LRO is enabled, check that the maximum aggregated packet 2408 * size is supported by the configured device. 2409 */ 2410 /* Get the real Ethernet overhead length */ 2411 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 2412 uint32_t overhead_len; 2413 uint32_t max_rx_pktlen; 2414 int ret; 2415 2416 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2417 dev_info.max_mtu); 2418 max_rx_pktlen = dev->data->mtu + overhead_len; 2419 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2420 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2421 ret = eth_dev_check_lro_pkt_size(port_id, 2422 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2423 max_rx_pktlen, 2424 dev_info.max_lro_pkt_size); 2425 if (ret != 0) 2426 return ret; 2427 } 2428 2429 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2430 socket_id, &local_conf, mp); 2431 if (!ret) { 2432 if (!dev->data->min_rx_buf_size || 2433 dev->data->min_rx_buf_size > mbp_buf_size) 2434 dev->data->min_rx_buf_size = mbp_buf_size; 2435 } 2436 2437 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2438 rx_conf, ret); 2439 return eth_err(port_id, ret); 2440 } 2441 2442 int 2443 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2444 uint16_t nb_rx_desc, 2445 const struct rte_eth_hairpin_conf *conf) 2446 { 2447 int ret; 2448 struct rte_eth_dev *dev; 2449 struct rte_eth_hairpin_cap cap; 2450 int i; 2451 int count; 2452 2453 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2454 dev = &rte_eth_devices[port_id]; 2455 2456 if (rx_queue_id >= dev->data->nb_rx_queues) { 2457 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", rx_queue_id); 2458 return -EINVAL; 2459 } 2460 2461 if (conf == NULL) { 2462 RTE_ETHDEV_LOG_LINE(ERR, 2463 "Cannot setup ethdev port %u Rx hairpin queue from NULL config", 2464 port_id); 2465 return -EINVAL; 2466 } 2467 2468 if (conf->reserved != 0) { 2469 RTE_ETHDEV_LOG_LINE(ERR, 2470 "Rx hairpin reserved field not zero"); 2471 return -EINVAL; 2472 } 2473 2474 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2475 if (ret != 0) 2476 return ret; 2477 if 
(*dev->dev_ops->rx_hairpin_queue_setup == NULL)
2478 return -ENOTSUP;
2479 /* if nb_rx_desc is zero use max number of desc from the driver. */
2480 if (nb_rx_desc == 0)
2481 nb_rx_desc = cap.max_nb_desc;
2482 if (nb_rx_desc > cap.max_nb_desc) {
2483 RTE_ETHDEV_LOG_LINE(ERR,
2484 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
2485 nb_rx_desc, cap.max_nb_desc);
2486 return -EINVAL;
2487 }
2488 if (conf->peer_count > cap.max_rx_2_tx) {
2489 RTE_ETHDEV_LOG_LINE(ERR,
2490 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
2491 conf->peer_count, cap.max_rx_2_tx);
2492 return -EINVAL;
2493 }
2494 if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
2495 RTE_ETHDEV_LOG_LINE(ERR,
2496 "Attempt to use locked device memory for Rx queue, which is not supported");
2497 return -EINVAL;
2498 }
2499 if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
2500 RTE_ETHDEV_LOG_LINE(ERR,
2501 "Attempt to use DPDK memory for Rx queue, which is not supported");
2502 return -EINVAL;
2503 }
2504 if (conf->use_locked_device_memory && conf->use_rte_memory) {
2505 RTE_ETHDEV_LOG_LINE(ERR,
2506 "Attempt to use mutually exclusive memory settings for Rx queue");
2507 return -EINVAL;
2508 }
2509 if (conf->force_memory &&
2510 !conf->use_locked_device_memory &&
2511 !conf->use_rte_memory) {
2512 RTE_ETHDEV_LOG_LINE(ERR,
2513 "Attempt to force Rx queue memory settings, but none is set");
2514 return -EINVAL;
2515 }
2516 if (conf->peer_count == 0) {
2517 RTE_ETHDEV_LOG_LINE(ERR,
2518 "Invalid value for number of peers for Rx queue(=%u), should be: > 0",
2519 conf->peer_count);
2520 return -EINVAL;
2521 }
2522 for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2523 cap.max_nb_queues != UINT16_MAX; i++) {
2524 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2525 count++;
2526 }
2527 if (count > cap.max_nb_queues) {
2528 RTE_ETHDEV_LOG_LINE(ERR, "Too many Rx hairpin queues, max is %d",
2529 cap.max_nb_queues);
2530 return -EINVAL;
2531 }
2532 if (dev->data->dev_started)
2533 return -EBUSY;
2534 eth_dev_rxq_release(dev, rx_queue_id);
2535 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2536 nb_rx_desc, conf);
2537 if (ret == 0)
2538 dev->data->rx_queue_state[rx_queue_id] =
2539 RTE_ETH_QUEUE_STATE_HAIRPIN;
2540 ret = eth_err(port_id, ret);
2541
2542 rte_eth_trace_rx_hairpin_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2543 conf, ret);
2544
2545 return ret;
2546 }
2547
2548 int
2549 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2550 uint16_t nb_tx_desc, unsigned int socket_id,
2551 const struct rte_eth_txconf *tx_conf)
2552 {
2553 struct rte_eth_dev *dev;
2554 struct rte_eth_dev_info dev_info;
2555 struct rte_eth_txconf local_conf;
2556 int ret;
2557
2558 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2559 dev = &rte_eth_devices[port_id];
2560
2561 if (tx_queue_id >= dev->data->nb_tx_queues) {
2562 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", tx_queue_id);
2563 return -EINVAL;
2564 }
2565
2566 if (*dev->dev_ops->tx_queue_setup == NULL)
2567 return -ENOTSUP;
2568
2569 if (tx_conf != NULL &&
2570 (tx_conf->reserved_64s[0] != 0 ||
2571 tx_conf->reserved_64s[1] != 0 ||
2572 tx_conf->reserved_ptrs[0] != NULL ||
2573 tx_conf->reserved_ptrs[1] != NULL)) {
2574 RTE_ETHDEV_LOG_LINE(ERR, "Tx conf reserved fields not zero");
2575 return -EINVAL;
2576 }
2577
2578 ret = rte_eth_dev_info_get(port_id, &dev_info);
2579 if (ret != 0)
2580 return ret;
2581
2582 /* Use default specified by driver, if nb_tx_desc is zero */
2583 if
(nb_tx_desc == 0) { 2584 nb_tx_desc = dev_info.default_txportconf.ring_size; 2585 /* If driver default is zero, fall back on EAL default */ 2586 if (nb_tx_desc == 0) 2587 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2588 } 2589 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2590 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2591 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2592 RTE_ETHDEV_LOG_LINE(ERR, 2593 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu", 2594 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2595 dev_info.tx_desc_lim.nb_min, 2596 dev_info.tx_desc_lim.nb_align); 2597 return -EINVAL; 2598 } 2599 2600 if (dev->data->dev_started && 2601 !(dev_info.dev_capa & 2602 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2603 return -EBUSY; 2604 2605 if (dev->data->dev_started && 2606 (dev->data->tx_queue_state[tx_queue_id] != 2607 RTE_ETH_QUEUE_STATE_STOPPED)) 2608 return -EBUSY; 2609 2610 eth_dev_txq_release(dev, tx_queue_id); 2611 2612 if (tx_conf == NULL) 2613 tx_conf = &dev_info.default_txconf; 2614 2615 local_conf = *tx_conf; 2616 2617 /* 2618 * If an offloading has already been enabled in 2619 * rte_eth_dev_configure(), it has been enabled on all queues, 2620 * so there is no need to enable it in this queue again. 2621 * The local_conf.offloads input to underlying PMD only carries 2622 * those offloadings which are only enabled on this queue and 2623 * not enabled on all queues. 2624 */ 2625 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2626 2627 /* 2628 * New added offloadings for this queue are those not enabled in 2629 * rte_eth_dev_configure() and they must be per-queue type. 2630 * A pure per-port offloading can't be enabled on a queue while 2631 * disabled on another queue. A pure per-port offloading can't 2632 * be enabled for any queue as new added one if it hasn't been 2633 * enabled in rte_eth_dev_configure(). 2634 */ 2635 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2636 local_conf.offloads) { 2637 RTE_ETHDEV_LOG_LINE(ERR, 2638 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2639 "within per-queue offload capabilities 0x%"PRIx64" in %s()", 2640 port_id, tx_queue_id, local_conf.offloads, 2641 dev_info.tx_queue_offload_capa, 2642 __func__); 2643 return -EINVAL; 2644 } 2645 2646 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf); 2647 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2648 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2649 } 2650 2651 int 2652 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2653 uint16_t nb_tx_desc, 2654 const struct rte_eth_hairpin_conf *conf) 2655 { 2656 struct rte_eth_dev *dev; 2657 struct rte_eth_hairpin_cap cap; 2658 int i; 2659 int count; 2660 int ret; 2661 2662 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2663 dev = &rte_eth_devices[port_id]; 2664 2665 if (tx_queue_id >= dev->data->nb_tx_queues) { 2666 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", tx_queue_id); 2667 return -EINVAL; 2668 } 2669 2670 if (conf == NULL) { 2671 RTE_ETHDEV_LOG_LINE(ERR, 2672 "Cannot setup ethdev port %u Tx hairpin queue from NULL config", 2673 port_id); 2674 return -EINVAL; 2675 } 2676 2677 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2678 if (ret != 0) 2679 return ret; 2680 if (*dev->dev_ops->tx_hairpin_queue_setup == NULL) 2681 return -ENOTSUP; 2682 /* if nb_rx_desc is zero use max number of desc from the driver. 
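 *
 * For illustration, a single-peer hairpin configuration passed in by an
 * application might look like this sketch (the peer port and queue
 * values are assumptions):
 *
 *   struct rte_eth_hairpin_conf hp_conf = {
 *       .peer_count = 1,
 *       .peers[0] = { .port = peer_port_id, .queue = peer_rxq },
 *   };
 *   ret = rte_eth_tx_hairpin_queue_setup(port_id, txq, 0, &hp_conf);
 *
 * A zero nb_tx_desc picks cap.max_nb_desc, as the code below shows.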
*/
2683 if (nb_tx_desc == 0)
2684 nb_tx_desc = cap.max_nb_desc;
2685 if (nb_tx_desc > cap.max_nb_desc) {
2686 RTE_ETHDEV_LOG_LINE(ERR,
2687 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2688 nb_tx_desc, cap.max_nb_desc);
2689 return -EINVAL;
2690 }
2691 if (conf->peer_count > cap.max_tx_2_rx) {
2692 RTE_ETHDEV_LOG_LINE(ERR,
2693 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2694 conf->peer_count, cap.max_tx_2_rx);
2695 return -EINVAL;
2696 }
2697 if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
2698 RTE_ETHDEV_LOG_LINE(ERR,
2699 "Attempt to use locked device memory for Tx queue, which is not supported");
2700 return -EINVAL;
2701 }
2702 if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
2703 RTE_ETHDEV_LOG_LINE(ERR,
2704 "Attempt to use DPDK memory for Tx queue, which is not supported");
2705 return -EINVAL;
2706 }
2707 if (conf->use_locked_device_memory && conf->use_rte_memory) {
2708 RTE_ETHDEV_LOG_LINE(ERR,
2709 "Attempt to use mutually exclusive memory settings for Tx queue");
2710 return -EINVAL;
2711 }
2712 if (conf->force_memory &&
2713 !conf->use_locked_device_memory &&
2714 !conf->use_rte_memory) {
2715 RTE_ETHDEV_LOG_LINE(ERR,
2716 "Attempt to force Tx queue memory settings, but none is set");
2717 return -EINVAL;
2718 }
2719 if (conf->peer_count == 0) {
2720 RTE_ETHDEV_LOG_LINE(ERR,
2721 "Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2722 conf->peer_count);
2723 return -EINVAL;
2724 }
2725 for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2726 cap.max_nb_queues != UINT16_MAX; i++) {
2727 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2728 count++;
2729 }
2730 if (count > cap.max_nb_queues) {
2731 RTE_ETHDEV_LOG_LINE(ERR, "Too many Tx hairpin queues, max is %d",
2732 cap.max_nb_queues);
2733 return -EINVAL;
2734 }
2735 if (dev->data->dev_started)
2736 return -EBUSY;
2737 eth_dev_txq_release(dev, tx_queue_id);
2738 ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2739 (dev, tx_queue_id, nb_tx_desc, conf);
2740 if (ret == 0)
2741 dev->data->tx_queue_state[tx_queue_id] =
2742 RTE_ETH_QUEUE_STATE_HAIRPIN;
2743 ret = eth_err(port_id, ret);
2744
2745 rte_eth_trace_tx_hairpin_queue_setup(port_id, tx_queue_id, nb_tx_desc,
2746 conf, ret);
2747
2748 return ret;
2749 }
2750
2751 int
2752 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2753 {
2754 struct rte_eth_dev *dev;
2755 int ret;
2756
2757 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2758 dev = &rte_eth_devices[tx_port];
2759
2760 if (dev->data->dev_started == 0) {
2761 RTE_ETHDEV_LOG_LINE(ERR, "Tx port %d is not started", tx_port);
2762 return -EBUSY;
2763 }
2764
2765 if (*dev->dev_ops->hairpin_bind == NULL)
2766 return -ENOTSUP;
2767 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2768 if (ret != 0)
2769 RTE_ETHDEV_LOG_LINE(ERR, "Failed to bind hairpin Tx %d"
2770 " to Rx %d (%d - all ports)",
2771 tx_port, rx_port, RTE_MAX_ETHPORTS);
2772
2773 rte_eth_trace_hairpin_bind(tx_port, rx_port, ret);
2774
2775 return ret;
2776 }
2777
2778 int
2779 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2780 {
2781 struct rte_eth_dev *dev;
2782 int ret;
2783
2784 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2785 dev = &rte_eth_devices[tx_port];
2786
2787 if (dev->data->dev_started == 0) {
2788 RTE_ETHDEV_LOG_LINE(ERR, "Tx port %d is already stopped", tx_port);
2789 return -EBUSY;
2790 }
2791
2792 if (*dev->dev_ops->hairpin_unbind == NULL)
2793 return -ENOTSUP;
2794 ret = (*dev->dev_ops->hairpin_unbind)(dev,
rx_port); 2795 if (ret != 0) 2796 RTE_ETHDEV_LOG_LINE(ERR, "Failed to unbind hairpin Tx %d" 2797 " from Rx %d (%d - all ports)", 2798 tx_port, rx_port, RTE_MAX_ETHPORTS); 2799 2800 rte_eth_trace_hairpin_unbind(tx_port, rx_port, ret); 2801 2802 return ret; 2803 } 2804 2805 int 2806 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2807 size_t len, uint32_t direction) 2808 { 2809 struct rte_eth_dev *dev; 2810 int ret; 2811 2812 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2813 dev = &rte_eth_devices[port_id]; 2814 2815 if (peer_ports == NULL) { 2816 RTE_ETHDEV_LOG_LINE(ERR, 2817 "Cannot get ethdev port %u hairpin peer ports to NULL", 2818 port_id); 2819 return -EINVAL; 2820 } 2821 2822 if (len == 0) { 2823 RTE_ETHDEV_LOG_LINE(ERR, 2824 "Cannot get ethdev port %u hairpin peer ports to array with zero size", 2825 port_id); 2826 return -EINVAL; 2827 } 2828 2829 if (*dev->dev_ops->hairpin_get_peer_ports == NULL) 2830 return -ENOTSUP; 2831 2832 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2833 len, direction); 2834 if (ret < 0) 2835 RTE_ETHDEV_LOG_LINE(ERR, "Failed to get %d hairpin peer %s ports", 2836 port_id, direction ? "Rx" : "Tx"); 2837 2838 rte_eth_trace_hairpin_get_peer_ports(port_id, peer_ports, len, 2839 direction, ret); 2840 2841 return ret; 2842 } 2843 2844 void 2845 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2846 void *userdata __rte_unused) 2847 { 2848 rte_pktmbuf_free_bulk(pkts, unsent); 2849 2850 rte_eth_trace_tx_buffer_drop_callback((void **)pkts, unsent); 2851 } 2852 2853 void 2854 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2855 void *userdata) 2856 { 2857 uint64_t *count = userdata; 2858 2859 rte_pktmbuf_free_bulk(pkts, unsent); 2860 *count += unsent; 2861 2862 rte_eth_trace_tx_buffer_count_callback((void **)pkts, unsent, *count); 2863 } 2864 2865 int 2866 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2867 buffer_tx_error_fn cbfn, void *userdata) 2868 { 2869 if (buffer == NULL) { 2870 RTE_ETHDEV_LOG_LINE(ERR, 2871 "Cannot set Tx buffer error callback to NULL buffer"); 2872 return -EINVAL; 2873 } 2874 2875 buffer->error_callback = cbfn; 2876 buffer->error_userdata = userdata; 2877 2878 rte_eth_trace_tx_buffer_set_err_callback(buffer); 2879 2880 return 0; 2881 } 2882 2883 int 2884 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2885 { 2886 int ret = 0; 2887 2888 if (buffer == NULL) { 2889 RTE_ETHDEV_LOG_LINE(ERR, "Cannot initialize NULL buffer"); 2890 return -EINVAL; 2891 } 2892 2893 buffer->size = size; 2894 if (buffer->error_callback == NULL) { 2895 ret = rte_eth_tx_buffer_set_err_callback( 2896 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2897 } 2898 2899 rte_eth_trace_tx_buffer_init(buffer, size, ret); 2900 2901 return ret; 2902 } 2903 2904 int 2905 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2906 { 2907 struct rte_eth_dev *dev; 2908 int ret; 2909 2910 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2911 dev = &rte_eth_devices[port_id]; 2912 2913 #ifdef RTE_ETHDEV_DEBUG_TX 2914 ret = eth_dev_validate_tx_queue(dev, queue_id); 2915 if (ret != 0) 2916 return ret; 2917 #endif 2918 2919 if (*dev->dev_ops->tx_done_cleanup == NULL) 2920 return -ENOTSUP; 2921 2922 /* Call driver to free pending mbufs. 
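 *
 * For illustration, an application that is short of mbufs might call
 * (a sketch; passing 0 asks the driver to free as many transmitted
 * mbufs as it can):
 *
 *   nb_freed = rte_eth_tx_done_cleanup(port_id, queue_id, 0);
 *
 * A negative return means the cleanup failed or is not implemented by
 * the driver.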
*/ 2923 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2924 free_cnt); 2925 ret = eth_err(port_id, ret); 2926 2927 rte_eth_trace_tx_done_cleanup(port_id, queue_id, free_cnt, ret); 2928 2929 return ret; 2930 } 2931 2932 int 2933 rte_eth_promiscuous_enable(uint16_t port_id) 2934 { 2935 struct rte_eth_dev *dev; 2936 int diag = 0; 2937 2938 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2939 dev = &rte_eth_devices[port_id]; 2940 2941 if (dev->data->promiscuous == 1) 2942 return 0; 2943 2944 if (*dev->dev_ops->promiscuous_enable == NULL) 2945 return -ENOTSUP; 2946 2947 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2948 dev->data->promiscuous = (diag == 0) ? 1 : 0; 2949 2950 diag = eth_err(port_id, diag); 2951 2952 rte_eth_trace_promiscuous_enable(port_id, dev->data->promiscuous, 2953 diag); 2954 2955 return diag; 2956 } 2957 2958 int 2959 rte_eth_promiscuous_disable(uint16_t port_id) 2960 { 2961 struct rte_eth_dev *dev; 2962 int diag = 0; 2963 2964 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2965 dev = &rte_eth_devices[port_id]; 2966 2967 if (dev->data->promiscuous == 0) 2968 return 0; 2969 2970 if (*dev->dev_ops->promiscuous_disable == NULL) 2971 return -ENOTSUP; 2972 2973 dev->data->promiscuous = 0; 2974 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2975 if (diag != 0) 2976 dev->data->promiscuous = 1; 2977 2978 diag = eth_err(port_id, diag); 2979 2980 rte_eth_trace_promiscuous_disable(port_id, dev->data->promiscuous, 2981 diag); 2982 2983 return diag; 2984 } 2985 2986 int 2987 rte_eth_promiscuous_get(uint16_t port_id) 2988 { 2989 struct rte_eth_dev *dev; 2990 2991 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2992 dev = &rte_eth_devices[port_id]; 2993 2994 rte_eth_trace_promiscuous_get(port_id, dev->data->promiscuous); 2995 2996 return dev->data->promiscuous; 2997 } 2998 2999 int 3000 rte_eth_allmulticast_enable(uint16_t port_id) 3001 { 3002 struct rte_eth_dev *dev; 3003 int diag; 3004 3005 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3006 dev = &rte_eth_devices[port_id]; 3007 3008 if (dev->data->all_multicast == 1) 3009 return 0; 3010 3011 if (*dev->dev_ops->allmulticast_enable == NULL) 3012 return -ENOTSUP; 3013 diag = (*dev->dev_ops->allmulticast_enable)(dev); 3014 dev->data->all_multicast = (diag == 0) ? 
1 : 0; 3015 3016 diag = eth_err(port_id, diag); 3017 3018 rte_eth_trace_allmulticast_enable(port_id, dev->data->all_multicast, 3019 diag); 3020 3021 return diag; 3022 } 3023 3024 int 3025 rte_eth_allmulticast_disable(uint16_t port_id) 3026 { 3027 struct rte_eth_dev *dev; 3028 int diag; 3029 3030 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3031 dev = &rte_eth_devices[port_id]; 3032 3033 if (dev->data->all_multicast == 0) 3034 return 0; 3035 3036 if (*dev->dev_ops->allmulticast_disable == NULL) 3037 return -ENOTSUP; 3038 dev->data->all_multicast = 0; 3039 diag = (*dev->dev_ops->allmulticast_disable)(dev); 3040 if (diag != 0) 3041 dev->data->all_multicast = 1; 3042 3043 diag = eth_err(port_id, diag); 3044 3045 rte_eth_trace_allmulticast_disable(port_id, dev->data->all_multicast, 3046 diag); 3047 3048 return diag; 3049 } 3050 3051 int 3052 rte_eth_allmulticast_get(uint16_t port_id) 3053 { 3054 struct rte_eth_dev *dev; 3055 3056 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3057 dev = &rte_eth_devices[port_id]; 3058 3059 rte_eth_trace_allmulticast_get(port_id, dev->data->all_multicast); 3060 3061 return dev->data->all_multicast; 3062 } 3063 3064 int 3065 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 3066 { 3067 struct rte_eth_dev *dev; 3068 3069 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3070 dev = &rte_eth_devices[port_id]; 3071 3072 if (eth_link == NULL) { 3073 RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u link to NULL", 3074 port_id); 3075 return -EINVAL; 3076 } 3077 3078 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 3079 rte_eth_linkstatus_get(dev, eth_link); 3080 else { 3081 if (*dev->dev_ops->link_update == NULL) 3082 return -ENOTSUP; 3083 (*dev->dev_ops->link_update)(dev, 1); 3084 *eth_link = dev->data->dev_link; 3085 } 3086 3087 rte_eth_trace_link_get(port_id, eth_link); 3088 3089 return 0; 3090 } 3091 3092 int 3093 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 3094 { 3095 struct rte_eth_dev *dev; 3096 3097 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3098 dev = &rte_eth_devices[port_id]; 3099 3100 if (eth_link == NULL) { 3101 RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u link to NULL", 3102 port_id); 3103 return -EINVAL; 3104 } 3105 3106 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 3107 rte_eth_linkstatus_get(dev, eth_link); 3108 else { 3109 if (*dev->dev_ops->link_update == NULL) 3110 return -ENOTSUP; 3111 (*dev->dev_ops->link_update)(dev, 0); 3112 *eth_link = dev->data->dev_link; 3113 } 3114 3115 rte_eth_trace_link_get_nowait(port_id, eth_link); 3116 3117 return 0; 3118 } 3119 3120 const char * 3121 rte_eth_link_speed_to_str(uint32_t link_speed) 3122 { 3123 const char *ret; 3124 3125 switch (link_speed) { 3126 case RTE_ETH_SPEED_NUM_NONE: 3127 ret = "None"; 3128 break; 3129 case RTE_ETH_SPEED_NUM_10M: 3130 ret = "10 Mbps"; 3131 break; 3132 case RTE_ETH_SPEED_NUM_100M: 3133 ret = "100 Mbps"; 3134 break; 3135 case RTE_ETH_SPEED_NUM_1G: 3136 ret = "1 Gbps"; 3137 break; 3138 case RTE_ETH_SPEED_NUM_2_5G: 3139 ret = "2.5 Gbps"; 3140 break; 3141 case RTE_ETH_SPEED_NUM_5G: 3142 ret = "5 Gbps"; 3143 break; 3144 case RTE_ETH_SPEED_NUM_10G: 3145 ret = "10 Gbps"; 3146 break; 3147 case RTE_ETH_SPEED_NUM_20G: 3148 ret = "20 Gbps"; 3149 break; 3150 case RTE_ETH_SPEED_NUM_25G: 3151 ret = "25 Gbps"; 3152 break; 3153 case RTE_ETH_SPEED_NUM_40G: 3154 ret = "40 Gbps"; 3155 break; 3156 case RTE_ETH_SPEED_NUM_50G: 3157 ret = "50 Gbps"; 3158 break; 3159 case RTE_ETH_SPEED_NUM_56G: 
3160 ret = "56 Gbps"; 3161 break; 3162 case RTE_ETH_SPEED_NUM_100G: 3163 ret = "100 Gbps"; 3164 break; 3165 case RTE_ETH_SPEED_NUM_200G: 3166 ret = "200 Gbps"; 3167 break; 3168 case RTE_ETH_SPEED_NUM_400G: 3169 ret = "400 Gbps"; 3170 break; 3171 case RTE_ETH_SPEED_NUM_UNKNOWN: 3172 ret = "Unknown"; 3173 break; 3174 default: 3175 ret = "Invalid"; 3176 } 3177 3178 rte_eth_trace_link_speed_to_str(link_speed, ret); 3179 3180 return ret; 3181 } 3182 3183 int 3184 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 3185 { 3186 int ret; 3187 3188 if (str == NULL) { 3189 RTE_ETHDEV_LOG_LINE(ERR, "Cannot convert link to NULL string"); 3190 return -EINVAL; 3191 } 3192 3193 if (len == 0) { 3194 RTE_ETHDEV_LOG_LINE(ERR, 3195 "Cannot convert link to string with zero size"); 3196 return -EINVAL; 3197 } 3198 3199 if (eth_link == NULL) { 3200 RTE_ETHDEV_LOG_LINE(ERR, "Cannot convert to string from NULL link"); 3201 return -EINVAL; 3202 } 3203 3204 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 3205 ret = snprintf(str, len, "Link down"); 3206 else 3207 ret = snprintf(str, len, "Link up at %s %s %s", 3208 rte_eth_link_speed_to_str(eth_link->link_speed), 3209 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 3210 "FDX" : "HDX", 3211 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 3212 "Autoneg" : "Fixed"); 3213 3214 rte_eth_trace_link_to_str(len, eth_link, str, ret); 3215 3216 return ret; 3217 } 3218 3219 int 3220 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 3221 { 3222 struct rte_eth_dev *dev; 3223 int ret; 3224 3225 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3226 dev = &rte_eth_devices[port_id]; 3227 3228 if (stats == NULL) { 3229 RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u stats to NULL", 3230 port_id); 3231 return -EINVAL; 3232 } 3233 3234 memset(stats, 0, sizeof(*stats)); 3235 3236 if (*dev->dev_ops->stats_get == NULL) 3237 return -ENOTSUP; 3238 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 3239 ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 3240 3241 rte_eth_trace_stats_get(port_id, stats, ret); 3242 3243 return ret; 3244 } 3245 3246 int 3247 rte_eth_stats_reset(uint16_t port_id) 3248 { 3249 struct rte_eth_dev *dev; 3250 int ret; 3251 3252 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3253 dev = &rte_eth_devices[port_id]; 3254 3255 if (*dev->dev_ops->stats_reset == NULL) 3256 return -ENOTSUP; 3257 ret = (*dev->dev_ops->stats_reset)(dev); 3258 if (ret != 0) 3259 return eth_err(port_id, ret); 3260 3261 dev->data->rx_mbuf_alloc_failed = 0; 3262 3263 rte_eth_trace_stats_reset(port_id); 3264 3265 return 0; 3266 } 3267 3268 static inline int 3269 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 3270 { 3271 uint16_t nb_rxqs, nb_txqs; 3272 int count; 3273 3274 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3275 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3276 3277 count = RTE_NB_STATS; 3278 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 3279 count += nb_rxqs * RTE_NB_RXQ_STATS; 3280 count += nb_txqs * RTE_NB_TXQ_STATS; 3281 } 3282 3283 return count; 3284 } 3285 3286 static int 3287 eth_dev_get_xstats_count(uint16_t port_id) 3288 { 3289 struct rte_eth_dev *dev; 3290 int count; 3291 3292 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3293 dev = &rte_eth_devices[port_id]; 3294 if (dev->dev_ops->xstats_get_names != NULL) { 3295 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 3296 if (count < 0) 3297 return eth_err(port_id, 
count); 3298 } else 3299 count = 0; 3300 3301 3302 count += eth_dev_get_xstats_basic_count(dev); 3303 3304 return count; 3305 } 3306 3307 int 3308 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 3309 uint64_t *id) 3310 { 3311 int cnt_xstats, idx_xstat; 3312 3313 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3314 3315 if (xstat_name == NULL) { 3316 RTE_ETHDEV_LOG_LINE(ERR, 3317 "Cannot get ethdev port %u xstats ID from NULL xstat name", 3318 port_id); 3319 return -ENOMEM; 3320 } 3321 3322 if (id == NULL) { 3323 RTE_ETHDEV_LOG_LINE(ERR, 3324 "Cannot get ethdev port %u xstats ID to NULL", 3325 port_id); 3326 return -ENOMEM; 3327 } 3328 3329 /* Get count */ 3330 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 3331 if (cnt_xstats < 0) { 3332 RTE_ETHDEV_LOG_LINE(ERR, "Cannot get count of xstats"); 3333 return -ENODEV; 3334 } 3335 3336 /* Get id-name lookup table */ 3337 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 3338 3339 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 3340 port_id, xstats_names, cnt_xstats, NULL)) { 3341 RTE_ETHDEV_LOG_LINE(ERR, "Cannot get xstats lookup"); 3342 return -1; 3343 } 3344 3345 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 3346 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 3347 *id = idx_xstat; 3348 3349 rte_eth_trace_xstats_get_id_by_name(port_id, 3350 xstat_name, *id); 3351 3352 return 0; 3353 }; 3354 } 3355 3356 return -EINVAL; 3357 } 3358 3359 /* retrieve basic stats names */ 3360 static int 3361 eth_basic_stats_get_names(struct rte_eth_dev *dev, 3362 struct rte_eth_xstat_name *xstats_names) 3363 { 3364 int cnt_used_entries = 0; 3365 uint32_t idx, id_queue; 3366 uint16_t num_q; 3367 3368 for (idx = 0; idx < RTE_NB_STATS; idx++) { 3369 strlcpy(xstats_names[cnt_used_entries].name, 3370 eth_dev_stats_strings[idx].name, 3371 sizeof(xstats_names[0].name)); 3372 cnt_used_entries++; 3373 } 3374 3375 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3376 return cnt_used_entries; 3377 3378 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3379 for (id_queue = 0; id_queue < num_q; id_queue++) { 3380 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 3381 snprintf(xstats_names[cnt_used_entries].name, 3382 sizeof(xstats_names[0].name), 3383 "rx_q%u_%s", 3384 id_queue, eth_dev_rxq_stats_strings[idx].name); 3385 cnt_used_entries++; 3386 } 3387 3388 } 3389 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3390 for (id_queue = 0; id_queue < num_q; id_queue++) { 3391 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 3392 snprintf(xstats_names[cnt_used_entries].name, 3393 sizeof(xstats_names[0].name), 3394 "tx_q%u_%s", 3395 id_queue, eth_dev_txq_stats_strings[idx].name); 3396 cnt_used_entries++; 3397 } 3398 } 3399 return cnt_used_entries; 3400 } 3401 3402 /* retrieve ethdev extended statistics names */ 3403 int 3404 rte_eth_xstats_get_names_by_id(uint16_t port_id, 3405 struct rte_eth_xstat_name *xstats_names, unsigned int size, 3406 uint64_t *ids) 3407 { 3408 struct rte_eth_xstat_name *xstats_names_copy; 3409 unsigned int no_basic_stat_requested = 1; 3410 unsigned int no_ext_stat_requested = 1; 3411 unsigned int expected_entries; 3412 unsigned int basic_count; 3413 struct rte_eth_dev *dev; 3414 unsigned int i; 3415 int ret; 3416 3417 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3418 dev = &rte_eth_devices[port_id]; 3419 3420 basic_count = eth_dev_get_xstats_basic_count(dev); 3421 ret = eth_dev_get_xstats_count(port_id); 3422 if (ret < 0) 
3423 return ret; 3424 expected_entries = (unsigned int)ret; 3425 3426 /* Return max number of stats if no ids given */ 3427 if (!ids) { 3428 if (!xstats_names) 3429 return expected_entries; 3430 else if (xstats_names && size < expected_entries) 3431 return expected_entries; 3432 } 3433 3434 if (ids && !xstats_names) 3435 return -EINVAL; 3436 3437 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3438 uint64_t ids_copy[size]; 3439 3440 for (i = 0; i < size; i++) { 3441 if (ids[i] < basic_count) { 3442 no_basic_stat_requested = 0; 3443 break; 3444 } 3445 3446 /* 3447 * Convert ids to xstats ids that PMD knows. 3448 * ids known by user are basic + extended stats. 3449 */ 3450 ids_copy[i] = ids[i] - basic_count; 3451 } 3452 3453 if (no_basic_stat_requested) 3454 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3455 ids_copy, xstats_names, size); 3456 } 3457 3458 /* Retrieve all stats */ 3459 if (!ids) { 3460 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3461 expected_entries); 3462 if (num_stats < 0 || num_stats > (int)expected_entries) 3463 return num_stats; 3464 else 3465 return expected_entries; 3466 } 3467 3468 xstats_names_copy = calloc(expected_entries, 3469 sizeof(struct rte_eth_xstat_name)); 3470 3471 if (!xstats_names_copy) { 3472 RTE_ETHDEV_LOG_LINE(ERR, "Can't allocate memory"); 3473 return -ENOMEM; 3474 } 3475 3476 if (ids) { 3477 for (i = 0; i < size; i++) { 3478 if (ids[i] >= basic_count) { 3479 no_ext_stat_requested = 0; 3480 break; 3481 } 3482 } 3483 } 3484 3485 /* Fill xstats_names_copy structure */ 3486 if (ids && no_ext_stat_requested) { 3487 eth_basic_stats_get_names(dev, xstats_names_copy); 3488 } else { 3489 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3490 expected_entries); 3491 if (ret < 0) { 3492 free(xstats_names_copy); 3493 return ret; 3494 } 3495 } 3496 3497 /* Filter stats */ 3498 for (i = 0; i < size; i++) { 3499 if (ids[i] >= expected_entries) { 3500 RTE_ETHDEV_LOG_LINE(ERR, "Id value isn't valid"); 3501 free(xstats_names_copy); 3502 return -1; 3503 } 3504 xstats_names[i] = xstats_names_copy[ids[i]]; 3505 3506 rte_eth_trace_xstats_get_names_by_id(port_id, &xstats_names[i], 3507 ids[i]); 3508 } 3509 3510 free(xstats_names_copy); 3511 return size; 3512 } 3513 3514 int 3515 rte_eth_xstats_get_names(uint16_t port_id, 3516 struct rte_eth_xstat_name *xstats_names, 3517 unsigned int size) 3518 { 3519 struct rte_eth_dev *dev; 3520 int cnt_used_entries; 3521 int cnt_expected_entries; 3522 int cnt_driver_entries; 3523 int i; 3524 3525 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3526 if (xstats_names == NULL || cnt_expected_entries < 0 || 3527 (int)size < cnt_expected_entries) 3528 return cnt_expected_entries; 3529 3530 /* port_id checked in eth_dev_get_xstats_count() */ 3531 dev = &rte_eth_devices[port_id]; 3532 3533 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3534 3535 if (dev->dev_ops->xstats_get_names != NULL) { 3536 /* If there are any driver-specific xstats, append them 3537 * to end of list. 
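 *
 * For illustration, the usual application-side pattern is a size query
 * followed by the real call (a sketch with error checks trimmed):
 *
 *   int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *   struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *   n = rte_eth_xstats_get_names(port_id, names, n);
 *
 * The first eth_dev_get_xstats_basic_count() entries are the generic
 * names filled above; the driver-specific names follow.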
3538 */
3539 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3540 dev,
3541 xstats_names + cnt_used_entries,
3542 size - cnt_used_entries);
3543 if (cnt_driver_entries < 0)
3544 return eth_err(port_id, cnt_driver_entries);
3545 cnt_used_entries += cnt_driver_entries;
3546 }
3547
3548 for (i = 0; i < cnt_used_entries; i++)
3549 rte_eth_trace_xstats_get_names(port_id, i, &xstats_names[i],
3550 size, cnt_used_entries);
3551
3552 return cnt_used_entries;
3553 }
3554
3555
3556 static int
3557 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
3558 {
3559 struct rte_eth_dev *dev;
3560 struct rte_eth_stats eth_stats;
3561 unsigned int count = 0, i, q;
3562 uint64_t val, *stats_ptr;
3563 uint16_t nb_rxqs, nb_txqs;
3564 int ret;
3565
3566 ret = rte_eth_stats_get(port_id, &eth_stats);
3567 if (ret < 0)
3568 return ret;
3569
3570 dev = &rte_eth_devices[port_id];
3571
3572 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3573 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3574
3575 /* global stats */
3576 for (i = 0; i < RTE_NB_STATS; i++) {
3577 stats_ptr = RTE_PTR_ADD(&eth_stats,
3578 eth_dev_stats_strings[i].offset);
3579 val = *stats_ptr;
3580 xstats[count++].value = val;
3581 }
3582
3583 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3584 return count;
3585
3586 /* per-rxq stats */
3587 for (q = 0; q < nb_rxqs; q++) {
3588 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3589 stats_ptr = RTE_PTR_ADD(&eth_stats,
3590 eth_dev_rxq_stats_strings[i].offset +
3591 q * sizeof(uint64_t));
3592 val = *stats_ptr;
3593 xstats[count++].value = val;
3594 }
3595 }
3596
3597 /* per-txq stats */
3598 for (q = 0; q < nb_txqs; q++) {
3599 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3600 stats_ptr = RTE_PTR_ADD(&eth_stats,
3601 eth_dev_txq_stats_strings[i].offset +
3602 q * sizeof(uint64_t));
3603 val = *stats_ptr;
3604 xstats[count++].value = val;
3605 }
3606 }
3607 return count;
3608 }
3609
3610 /* retrieve ethdev extended statistics */
3611 int
3612 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3613 uint64_t *values, unsigned int size)
3614 {
3615 unsigned int no_basic_stat_requested = 1;
3616 unsigned int no_ext_stat_requested = 1;
3617 unsigned int num_xstats_filled;
3618 unsigned int basic_count;
3619 uint16_t expected_entries;
3620 struct rte_eth_dev *dev;
3621 unsigned int i;
3622 int ret;
3623
3624 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3625 dev = &rte_eth_devices[port_id];
3626
3627 ret = eth_dev_get_xstats_count(port_id);
3628 if (ret < 0)
3629 return ret;
3630 expected_entries = (uint16_t)ret;
3631 struct rte_eth_xstat xstats[expected_entries];
3632 basic_count = eth_dev_get_xstats_basic_count(dev);
3633
3634 /* Return max number of stats if no ids given */
3635 if (!ids) {
3636 if (!values)
3637 return expected_entries;
3638 else if (values && size < expected_entries)
3639 return expected_entries;
3640 }
3641
3642 if (ids && !values)
3643 return -EINVAL;
3644
3645 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3646 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
3647 uint64_t ids_copy[size];
3648
3649 for (i = 0; i < size; i++) {
3650 if (ids[i] < basic_count) {
3651 no_basic_stat_requested = 0;
3652 break;
3653 }
3654
3655 /*
3656 * Convert ids to xstats ids that PMD knows.
3657 * ids known by user are basic + extended stats.
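 *
 * In other words, the id space seen by the application is laid out as
 * [0, basic_count) for the generic stats, followed by the driver ids,
 * so the translation is simply:
 *
 *   driver_id = user_id - basic_count;
 *
 * e.g. with basic_count == 16, user id 20 maps to driver xstat id 4
 * (a worked example, not a value taken from any driver).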
3658 */ 3659 ids_copy[i] = ids[i] - basic_count; 3660 } 3661 3662 if (no_basic_stat_requested) 3663 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3664 values, size); 3665 } 3666 3667 if (ids) { 3668 for (i = 0; i < size; i++) { 3669 if (ids[i] >= basic_count) { 3670 no_ext_stat_requested = 0; 3671 break; 3672 } 3673 } 3674 } 3675 3676 /* Fill the xstats structure */ 3677 if (ids && no_ext_stat_requested) 3678 ret = eth_basic_stats_get(port_id, xstats); 3679 else 3680 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3681 3682 if (ret < 0) 3683 return ret; 3684 num_xstats_filled = (unsigned int)ret; 3685 3686 /* Return all stats */ 3687 if (!ids) { 3688 for (i = 0; i < num_xstats_filled; i++) 3689 values[i] = xstats[i].value; 3690 return expected_entries; 3691 } 3692 3693 /* Filter stats */ 3694 for (i = 0; i < size; i++) { 3695 if (ids[i] >= expected_entries) { 3696 RTE_ETHDEV_LOG_LINE(ERR, "Id value isn't valid"); 3697 return -1; 3698 } 3699 values[i] = xstats[ids[i]].value; 3700 } 3701 3702 rte_eth_trace_xstats_get_by_id(port_id, ids, values, size); 3703 3704 return size; 3705 } 3706 3707 int 3708 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3709 unsigned int n) 3710 { 3711 struct rte_eth_dev *dev; 3712 unsigned int count, i; 3713 signed int xcount = 0; 3714 int ret; 3715 3716 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3717 if (xstats == NULL && n > 0) 3718 return -EINVAL; 3719 dev = &rte_eth_devices[port_id]; 3720 3721 count = eth_dev_get_xstats_basic_count(dev); 3722 3723 /* implemented by the driver */ 3724 if (dev->dev_ops->xstats_get != NULL) { 3725 /* Retrieve the xstats from the driver at the end of the 3726 * xstats struct. 3727 */ 3728 xcount = (*dev->dev_ops->xstats_get)(dev, 3729 (n > count) ? xstats + count : NULL, 3730 (n > count) ? 
n - count : 0); 3731 3732 if (xcount < 0) 3733 return eth_err(port_id, xcount); 3734 } 3735 3736 if (n < count + xcount || xstats == NULL) 3737 return count + xcount; 3738 3739 /* now fill the xstats structure */ 3740 ret = eth_basic_stats_get(port_id, xstats); 3741 if (ret < 0) 3742 return ret; 3743 count = ret; 3744 3745 for (i = 0; i < count; i++) 3746 xstats[i].id = i; 3747 /* add an offset to driver-specific stats */ 3748 for ( ; i < count + xcount; i++) 3749 xstats[i].id += count; 3750 3751 for (i = 0; i < n; i++) 3752 rte_eth_trace_xstats_get(port_id, xstats[i]); 3753 3754 return count + xcount; 3755 } 3756 3757 /* reset ethdev extended statistics */ 3758 int 3759 rte_eth_xstats_reset(uint16_t port_id) 3760 { 3761 struct rte_eth_dev *dev; 3762 3763 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3764 dev = &rte_eth_devices[port_id]; 3765 3766 /* implemented by the driver */ 3767 if (dev->dev_ops->xstats_reset != NULL) { 3768 int ret = eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3769 3770 rte_eth_trace_xstats_reset(port_id, ret); 3771 3772 return ret; 3773 } 3774 3775 /* fallback to default */ 3776 return rte_eth_stats_reset(port_id); 3777 } 3778 3779 static int 3780 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3781 uint8_t stat_idx, uint8_t is_rx) 3782 { 3783 struct rte_eth_dev *dev; 3784 3785 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3786 dev = &rte_eth_devices[port_id]; 3787 3788 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3789 return -EINVAL; 3790 3791 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3792 return -EINVAL; 3793 3794 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3795 return -EINVAL; 3796 3797 if (*dev->dev_ops->queue_stats_mapping_set == NULL) 3798 return -ENOTSUP; 3799 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3800 } 3801 3802 int 3803 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3804 uint8_t stat_idx) 3805 { 3806 int ret; 3807 3808 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3809 tx_queue_id, 3810 stat_idx, STAT_QMAP_TX)); 3811 3812 rte_ethdev_trace_set_tx_queue_stats_mapping(port_id, tx_queue_id, 3813 stat_idx, ret); 3814 3815 return ret; 3816 } 3817 3818 int 3819 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3820 uint8_t stat_idx) 3821 { 3822 int ret; 3823 3824 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3825 rx_queue_id, 3826 stat_idx, STAT_QMAP_RX)); 3827 3828 rte_ethdev_trace_set_rx_queue_stats_mapping(port_id, rx_queue_id, 3829 stat_idx, ret); 3830 3831 return ret; 3832 } 3833 3834 int 3835 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3836 { 3837 struct rte_eth_dev *dev; 3838 int ret; 3839 3840 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3841 dev = &rte_eth_devices[port_id]; 3842 3843 if (fw_version == NULL && fw_size > 0) { 3844 RTE_ETHDEV_LOG_LINE(ERR, 3845 "Cannot get ethdev port %u FW version to NULL when string size is non zero", 3846 port_id); 3847 return -EINVAL; 3848 } 3849 3850 if (*dev->dev_ops->fw_version_get == NULL) 3851 return -ENOTSUP; 3852 ret = eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3853 fw_version, fw_size)); 3854 3855 rte_ethdev_trace_fw_version_get(port_id, fw_version, fw_size, ret); 3856 3857 return ret; 3858 } 3859 3860 int 3861 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3862 { 3863 struct rte_eth_dev *dev; 3864 const struct rte_eth_desc_lim lim = { 
3865 .nb_max = UINT16_MAX, 3866 .nb_min = 0, 3867 .nb_align = 1, 3868 .nb_seg_max = UINT16_MAX, 3869 .nb_mtu_seg_max = UINT16_MAX, 3870 }; 3871 int diag; 3872 3873 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3874 dev = &rte_eth_devices[port_id]; 3875 3876 if (dev_info == NULL) { 3877 RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u info to NULL", 3878 port_id); 3879 return -EINVAL; 3880 } 3881 3882 /* 3883 * Init dev_info before port_id check since caller does not have 3884 * return status and does not know if get is successful or not. 3885 */ 3886 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3887 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3888 3889 dev_info->rx_desc_lim = lim; 3890 dev_info->tx_desc_lim = lim; 3891 dev_info->device = dev->device; 3892 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3893 RTE_ETHER_CRC_LEN; 3894 dev_info->max_mtu = UINT16_MAX; 3895 dev_info->rss_algo_capa = RTE_ETH_HASH_ALGO_CAPA_MASK(DEFAULT); 3896 dev_info->max_rx_bufsize = UINT32_MAX; 3897 3898 if (*dev->dev_ops->dev_infos_get == NULL) 3899 return -ENOTSUP; 3900 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3901 if (diag != 0) { 3902 /* Cleanup already filled in device information */ 3903 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3904 return eth_err(port_id, diag); 3905 } 3906 3907 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3908 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3909 RTE_MAX_QUEUES_PER_PORT); 3910 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3911 RTE_MAX_QUEUES_PER_PORT); 3912 3913 dev_info->driver_name = dev->device->driver->name; 3914 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3915 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3916 3917 dev_info->dev_flags = &dev->data->dev_flags; 3918 3919 rte_ethdev_trace_info_get(port_id, dev_info); 3920 3921 return 0; 3922 } 3923 3924 int 3925 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3926 { 3927 struct rte_eth_dev *dev; 3928 3929 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3930 dev = &rte_eth_devices[port_id]; 3931 3932 if (dev_conf == NULL) { 3933 RTE_ETHDEV_LOG_LINE(ERR, 3934 "Cannot get ethdev port %u configuration to NULL", 3935 port_id); 3936 return -EINVAL; 3937 } 3938 3939 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3940 3941 rte_ethdev_trace_conf_get(port_id, dev_conf); 3942 3943 return 0; 3944 } 3945 3946 int 3947 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3948 uint32_t *ptypes, int num) 3949 { 3950 size_t i; 3951 int j; 3952 struct rte_eth_dev *dev; 3953 const uint32_t *all_ptypes; 3954 size_t no_of_elements = 0; 3955 3956 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3957 dev = &rte_eth_devices[port_id]; 3958 3959 if (ptypes == NULL && num > 0) { 3960 RTE_ETHDEV_LOG_LINE(ERR, 3961 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero", 3962 port_id); 3963 return -EINVAL; 3964 } 3965 3966 if (*dev->dev_ops->dev_supported_ptypes_get == NULL) 3967 return 0; 3968 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev, 3969 &no_of_elements); 3970 3971 if (!all_ptypes) 3972 return 0; 3973 3974 for (i = 0, j = 0; i < no_of_elements; ++i) 3975 if (all_ptypes[i] & ptype_mask) { 3976 if (j < num) { 3977 ptypes[j] = all_ptypes[i]; 3978 3979 rte_ethdev_trace_get_supported_ptypes(port_id, 3980 j, num, ptypes[j]); 3981 } 3982 j++; 3983 } 3984 3985 return j; 3986 } 3987 3988 int 3989 
rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3990 uint32_t *set_ptypes, unsigned int num) 3991 { 3992 const uint32_t valid_ptype_masks[] = { 3993 RTE_PTYPE_L2_MASK, 3994 RTE_PTYPE_L3_MASK, 3995 RTE_PTYPE_L4_MASK, 3996 RTE_PTYPE_TUNNEL_MASK, 3997 RTE_PTYPE_INNER_L2_MASK, 3998 RTE_PTYPE_INNER_L3_MASK, 3999 RTE_PTYPE_INNER_L4_MASK, 4000 }; 4001 const uint32_t *all_ptypes; 4002 struct rte_eth_dev *dev; 4003 uint32_t unused_mask; 4004 size_t i; 4005 unsigned int j; 4006 int ret; 4007 size_t no_of_elements = 0; 4008 4009 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4010 dev = &rte_eth_devices[port_id]; 4011 4012 if (num > 0 && set_ptypes == NULL) { 4013 RTE_ETHDEV_LOG_LINE(ERR, 4014 "Cannot get ethdev port %u set packet types to NULL when array size is non zero", 4015 port_id); 4016 return -EINVAL; 4017 } 4018 4019 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 4020 *dev->dev_ops->dev_ptypes_set == NULL) { 4021 ret = 0; 4022 goto ptype_unknown; 4023 } 4024 4025 if (ptype_mask == 0) { 4026 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 4027 ptype_mask); 4028 goto ptype_unknown; 4029 } 4030 4031 unused_mask = ptype_mask; 4032 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 4033 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 4034 if (mask && mask != valid_ptype_masks[i]) { 4035 ret = -EINVAL; 4036 goto ptype_unknown; 4037 } 4038 unused_mask &= ~valid_ptype_masks[i]; 4039 } 4040 4041 if (unused_mask) { 4042 ret = -EINVAL; 4043 goto ptype_unknown; 4044 } 4045 4046 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev, 4047 &no_of_elements); 4048 if (all_ptypes == NULL) { 4049 ret = 0; 4050 goto ptype_unknown; 4051 } 4052 4053 /* 4054 * Accommodate as many set_ptypes as possible. If the supplied 4055 * set_ptypes array is insufficient fill it partially. 
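 *
 * For illustration, a caller interested only in L3/L4 classification
 * might request (a sketch; the mask choice and array size are
 * assumptions):
 *
 *   uint32_t kept[16];
 *   ret = rte_eth_dev_set_ptypes(port_id,
 *           RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK,
 *           kept, RTE_DIM(kept));
 *
 * On success kept[] holds the ptypes the driver will keep reporting,
 * terminated by RTE_PTYPE_UNKNOWN when there is room left.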
4056 */ 4057 for (i = 0, j = 0; set_ptypes != NULL && 4058 (i < no_of_elements); ++i) { 4059 if (ptype_mask & all_ptypes[i]) { 4060 if (j < num - 1) { 4061 set_ptypes[j] = all_ptypes[i]; 4062 4063 rte_ethdev_trace_set_ptypes(port_id, j, num, 4064 set_ptypes[j]); 4065 4066 j++; 4067 continue; 4068 } 4069 break; 4070 } 4071 } 4072 4073 if (set_ptypes != NULL && j < num) 4074 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 4075 4076 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 4077 4078 ptype_unknown: 4079 if (num > 0) 4080 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 4081 4082 return ret; 4083 } 4084 4085 int 4086 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 4087 unsigned int num) 4088 { 4089 int32_t ret; 4090 struct rte_eth_dev *dev; 4091 struct rte_eth_dev_info dev_info; 4092 4093 if (ma == NULL) { 4094 RTE_ETHDEV_LOG_LINE(ERR, "%s: invalid parameters", __func__); 4095 return -EINVAL; 4096 } 4097 4098 /* will check for us that port_id is a valid one */ 4099 ret = rte_eth_dev_info_get(port_id, &dev_info); 4100 if (ret != 0) 4101 return ret; 4102 4103 dev = &rte_eth_devices[port_id]; 4104 num = RTE_MIN(dev_info.max_mac_addrs, num); 4105 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 4106 4107 rte_eth_trace_macaddrs_get(port_id, num); 4108 4109 return num; 4110 } 4111 4112 int 4113 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 4114 { 4115 struct rte_eth_dev *dev; 4116 4117 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4118 dev = &rte_eth_devices[port_id]; 4119 4120 if (mac_addr == NULL) { 4121 RTE_ETHDEV_LOG_LINE(ERR, 4122 "Cannot get ethdev port %u MAC address to NULL", 4123 port_id); 4124 return -EINVAL; 4125 } 4126 4127 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 4128 4129 rte_eth_trace_macaddr_get(port_id, mac_addr); 4130 4131 return 0; 4132 } 4133 4134 int 4135 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 4136 { 4137 struct rte_eth_dev *dev; 4138 4139 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4140 dev = &rte_eth_devices[port_id]; 4141 4142 if (mtu == NULL) { 4143 RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u MTU to NULL", 4144 port_id); 4145 return -EINVAL; 4146 } 4147 4148 *mtu = dev->data->mtu; 4149 4150 rte_ethdev_trace_get_mtu(port_id, *mtu); 4151 4152 return 0; 4153 } 4154 4155 int 4156 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 4157 { 4158 int ret; 4159 struct rte_eth_dev_info dev_info; 4160 struct rte_eth_dev *dev; 4161 4162 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4163 dev = &rte_eth_devices[port_id]; 4164 if (*dev->dev_ops->mtu_set == NULL) 4165 return -ENOTSUP; 4166 4167 /* 4168 * Check if the device supports dev_infos_get, if it does not 4169 * skip min_mtu/max_mtu validation here as this requires values 4170 * that are populated within the call to rte_eth_dev_info_get() 4171 * which relies on dev->dev_ops->dev_infos_get. 
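 *
 * A minimal caller sketch (illustrative only, not part of this file):
 * the port must already be configured, and when the driver does
 * implement dev_infos_get the new MTU has to fall inside the
 * [min_mtu, max_mtu] range it reports.
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0 &&
 *	    new_mtu >= info.min_mtu && new_mtu <= info.max_mtu)
 *		ret = rte_eth_dev_set_mtu(port_id, new_mtu);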
4172 */ 4173 if (*dev->dev_ops->dev_infos_get != NULL) { 4174 ret = rte_eth_dev_info_get(port_id, &dev_info); 4175 if (ret != 0) 4176 return ret; 4177 4178 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 4179 if (ret != 0) 4180 return ret; 4181 } 4182 4183 if (dev->data->dev_configured == 0) { 4184 RTE_ETHDEV_LOG_LINE(ERR, 4185 "Port %u must be configured before MTU set", 4186 port_id); 4187 return -EINVAL; 4188 } 4189 4190 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 4191 if (ret == 0) 4192 dev->data->mtu = mtu; 4193 4194 ret = eth_err(port_id, ret); 4195 4196 rte_ethdev_trace_set_mtu(port_id, mtu, ret); 4197 4198 return ret; 4199 } 4200 4201 int 4202 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 4203 { 4204 struct rte_eth_dev *dev; 4205 int ret; 4206 4207 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4208 dev = &rte_eth_devices[port_id]; 4209 4210 if (!(dev->data->dev_conf.rxmode.offloads & 4211 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 4212 RTE_ETHDEV_LOG_LINE(ERR, "Port %u: VLAN-filtering disabled", 4213 port_id); 4214 return -ENOSYS; 4215 } 4216 4217 if (vlan_id > 4095) { 4218 RTE_ETHDEV_LOG_LINE(ERR, "Port_id=%u invalid vlan_id=%u > 4095", 4219 port_id, vlan_id); 4220 return -EINVAL; 4221 } 4222 if (*dev->dev_ops->vlan_filter_set == NULL) 4223 return -ENOTSUP; 4224 4225 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 4226 if (ret == 0) { 4227 struct rte_vlan_filter_conf *vfc; 4228 int vidx; 4229 int vbit; 4230 4231 vfc = &dev->data->vlan_filter_conf; 4232 vidx = vlan_id / 64; 4233 vbit = vlan_id % 64; 4234 4235 if (on) 4236 vfc->ids[vidx] |= RTE_BIT64(vbit); 4237 else 4238 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 4239 } 4240 4241 ret = eth_err(port_id, ret); 4242 4243 rte_ethdev_trace_vlan_filter(port_id, vlan_id, on, ret); 4244 4245 return ret; 4246 } 4247 4248 int 4249 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 4250 int on) 4251 { 4252 struct rte_eth_dev *dev; 4253 4254 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4255 dev = &rte_eth_devices[port_id]; 4256 4257 if (rx_queue_id >= dev->data->nb_rx_queues) { 4258 RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_queue_id=%u", rx_queue_id); 4259 return -EINVAL; 4260 } 4261 4262 if (*dev->dev_ops->vlan_strip_queue_set == NULL) 4263 return -ENOTSUP; 4264 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 4265 4266 rte_ethdev_trace_set_vlan_strip_on_queue(port_id, rx_queue_id, on); 4267 4268 return 0; 4269 } 4270 4271 int 4272 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 4273 enum rte_vlan_type vlan_type, 4274 uint16_t tpid) 4275 { 4276 struct rte_eth_dev *dev; 4277 int ret; 4278 4279 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4280 dev = &rte_eth_devices[port_id]; 4281 4282 if (*dev->dev_ops->vlan_tpid_set == NULL) 4283 return -ENOTSUP; 4284 ret = eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 4285 tpid)); 4286 4287 rte_ethdev_trace_set_vlan_ether_type(port_id, vlan_type, tpid, ret); 4288 4289 return ret; 4290 } 4291 4292 int 4293 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 4294 { 4295 struct rte_eth_dev_info dev_info; 4296 struct rte_eth_dev *dev; 4297 int ret = 0; 4298 int mask = 0; 4299 int cur, org = 0; 4300 uint64_t orig_offloads; 4301 uint64_t dev_offloads; 4302 uint64_t new_offloads; 4303 4304 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4305 dev = &rte_eth_devices[port_id]; 4306 4307 /* save original values in case of failure */ 4308 orig_offloads = dev->data->dev_conf.rxmode.offloads; 4309 dev_offloads = 
orig_offloads; 4310 4311 /* check which option changed by application */ 4312 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 4313 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 4314 if (cur != org) { 4315 if (cur) 4316 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4317 else 4318 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4319 mask |= RTE_ETH_VLAN_STRIP_MASK; 4320 } 4321 4322 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 4323 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 4324 if (cur != org) { 4325 if (cur) 4326 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4327 else 4328 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4329 mask |= RTE_ETH_VLAN_FILTER_MASK; 4330 } 4331 4332 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 4333 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 4334 if (cur != org) { 4335 if (cur) 4336 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4337 else 4338 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4339 mask |= RTE_ETH_VLAN_EXTEND_MASK; 4340 } 4341 4342 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 4343 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 4344 if (cur != org) { 4345 if (cur) 4346 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4347 else 4348 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4349 mask |= RTE_ETH_QINQ_STRIP_MASK; 4350 } 4351 4352 /*no change*/ 4353 if (mask == 0) 4354 return ret; 4355 4356 ret = rte_eth_dev_info_get(port_id, &dev_info); 4357 if (ret != 0) 4358 return ret; 4359 4360 /* Rx VLAN offloading must be within its device capabilities */ 4361 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 4362 new_offloads = dev_offloads & ~orig_offloads; 4363 RTE_ETHDEV_LOG_LINE(ERR, 4364 "Ethdev port_id=%u requested new added VLAN offloads " 4365 "0x%" PRIx64 " must be within Rx offloads capabilities " 4366 "0x%" PRIx64 " in %s()", 4367 port_id, new_offloads, dev_info.rx_offload_capa, 4368 __func__); 4369 return -EINVAL; 4370 } 4371 4372 if (*dev->dev_ops->vlan_offload_set == NULL) 4373 return -ENOTSUP; 4374 dev->data->dev_conf.rxmode.offloads = dev_offloads; 4375 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 4376 if (ret) { 4377 /* hit an error restore original values */ 4378 dev->data->dev_conf.rxmode.offloads = orig_offloads; 4379 } 4380 4381 ret = eth_err(port_id, ret); 4382 4383 rte_ethdev_trace_set_vlan_offload(port_id, offload_mask, ret); 4384 4385 return ret; 4386 } 4387 4388 int 4389 rte_eth_dev_get_vlan_offload(uint16_t port_id) 4390 { 4391 struct rte_eth_dev *dev; 4392 uint64_t *dev_offloads; 4393 int ret = 0; 4394 4395 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4396 dev = &rte_eth_devices[port_id]; 4397 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 4398 4399 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 4400 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 4401 4402 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 4403 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 4404 4405 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 4406 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 4407 4408 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 4409 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 4410 4411 rte_ethdev_trace_get_vlan_offload(port_id, ret); 4412 4413 return ret; 4414 } 4415 4416 int 4417 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 4418 { 4419 struct rte_eth_dev *dev; 4420 int ret; 4421 4422 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4423 dev = &rte_eth_devices[port_id]; 4424 4425 if (*dev->dev_ops->vlan_pvid_set == NULL) 4426 
return -ENOTSUP; 4427 ret = eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 4428 4429 rte_ethdev_trace_set_vlan_pvid(port_id, pvid, on, ret); 4430 4431 return ret; 4432 } 4433 4434 int 4435 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4436 { 4437 struct rte_eth_dev *dev; 4438 int ret; 4439 4440 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4441 dev = &rte_eth_devices[port_id]; 4442 4443 if (fc_conf == NULL) { 4444 RTE_ETHDEV_LOG_LINE(ERR, 4445 "Cannot get ethdev port %u flow control config to NULL", 4446 port_id); 4447 return -EINVAL; 4448 } 4449 4450 if (*dev->dev_ops->flow_ctrl_get == NULL) 4451 return -ENOTSUP; 4452 memset(fc_conf, 0, sizeof(*fc_conf)); 4453 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 4454 4455 rte_ethdev_trace_flow_ctrl_get(port_id, fc_conf, ret); 4456 4457 return ret; 4458 } 4459 4460 int 4461 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4462 { 4463 struct rte_eth_dev *dev; 4464 int ret; 4465 4466 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4467 dev = &rte_eth_devices[port_id]; 4468 4469 if (fc_conf == NULL) { 4470 RTE_ETHDEV_LOG_LINE(ERR, 4471 "Cannot set ethdev port %u flow control from NULL config", 4472 port_id); 4473 return -EINVAL; 4474 } 4475 4476 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 4477 RTE_ETHDEV_LOG_LINE(ERR, "Invalid send_xon, only 0/1 allowed"); 4478 return -EINVAL; 4479 } 4480 4481 if (*dev->dev_ops->flow_ctrl_set == NULL) 4482 return -ENOTSUP; 4483 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 4484 4485 rte_ethdev_trace_flow_ctrl_set(port_id, fc_conf, ret); 4486 4487 return ret; 4488 } 4489 4490 int 4491 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 4492 struct rte_eth_pfc_conf *pfc_conf) 4493 { 4494 struct rte_eth_dev *dev; 4495 int ret; 4496 4497 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4498 dev = &rte_eth_devices[port_id]; 4499 4500 if (pfc_conf == NULL) { 4501 RTE_ETHDEV_LOG_LINE(ERR, 4502 "Cannot set ethdev port %u priority flow control from NULL config", 4503 port_id); 4504 return -EINVAL; 4505 } 4506 4507 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 4508 RTE_ETHDEV_LOG_LINE(ERR, "Invalid priority, only 0-7 allowed"); 4509 return -EINVAL; 4510 } 4511 4512 /* High water, low water validation are device specific */ 4513 if (*dev->dev_ops->priority_flow_ctrl_set == NULL) 4514 return -ENOTSUP; 4515 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 4516 (dev, pfc_conf)); 4517 4518 rte_ethdev_trace_priority_flow_ctrl_set(port_id, pfc_conf, ret); 4519 4520 return ret; 4521 } 4522 4523 static int 4524 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4525 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4526 { 4527 if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) || 4528 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4529 if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) { 4530 RTE_ETHDEV_LOG_LINE(ERR, 4531 "PFC Tx queue not in range for Rx pause requested:%d configured:%d", 4532 pfc_queue_conf->rx_pause.tx_qid, 4533 dev_info->nb_tx_queues); 4534 return -EINVAL; 4535 } 4536 4537 if (pfc_queue_conf->rx_pause.tc >= tc_max) { 4538 RTE_ETHDEV_LOG_LINE(ERR, 4539 "PFC TC not in range for Rx pause requested:%d max:%d", 4540 pfc_queue_conf->rx_pause.tc, tc_max); 4541 return -EINVAL; 4542 } 4543 } 4544 4545 return 0; 4546 } 4547 4548 static int 4549 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t 
tc_max, 4550 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4551 { 4552 if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) || 4553 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4554 if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) { 4555 RTE_ETHDEV_LOG_LINE(ERR, 4556 "PFC Rx queue not in range for Tx pause requested:%d configured:%d", 4557 pfc_queue_conf->tx_pause.rx_qid, 4558 dev_info->nb_rx_queues); 4559 return -EINVAL; 4560 } 4561 4562 if (pfc_queue_conf->tx_pause.tc >= tc_max) { 4563 RTE_ETHDEV_LOG_LINE(ERR, 4564 "PFC TC not in range for Tx pause requested:%d max:%d", 4565 pfc_queue_conf->tx_pause.tc, tc_max); 4566 return -EINVAL; 4567 } 4568 } 4569 4570 return 0; 4571 } 4572 4573 int 4574 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, 4575 struct rte_eth_pfc_queue_info *pfc_queue_info) 4576 { 4577 struct rte_eth_dev *dev; 4578 int ret; 4579 4580 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4581 dev = &rte_eth_devices[port_id]; 4582 4583 if (pfc_queue_info == NULL) { 4584 RTE_ETHDEV_LOG_LINE(ERR, "PFC info param is NULL for port (%u)", 4585 port_id); 4586 return -EINVAL; 4587 } 4588 4589 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL) 4590 return -ENOTSUP; 4591 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 4592 (dev, pfc_queue_info)); 4593 4594 rte_ethdev_trace_priority_flow_ctrl_queue_info_get(port_id, 4595 pfc_queue_info, ret); 4596 4597 return ret; 4598 } 4599 4600 int 4601 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, 4602 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4603 { 4604 struct rte_eth_pfc_queue_info pfc_info; 4605 struct rte_eth_dev_info dev_info; 4606 struct rte_eth_dev *dev; 4607 int ret; 4608 4609 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4610 dev = &rte_eth_devices[port_id]; 4611 4612 if (pfc_queue_conf == NULL) { 4613 RTE_ETHDEV_LOG_LINE(ERR, "PFC parameters are NULL for port (%u)", 4614 port_id); 4615 return -EINVAL; 4616 } 4617 4618 ret = rte_eth_dev_info_get(port_id, &dev_info); 4619 if (ret != 0) 4620 return ret; 4621 4622 ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info); 4623 if (ret != 0) 4624 return ret; 4625 4626 if (pfc_info.tc_max == 0) { 4627 RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port %u does not support PFC TC values", 4628 port_id); 4629 return -ENOTSUP; 4630 } 4631 4632 /* Check requested mode supported or not */ 4633 if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE && 4634 pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) { 4635 RTE_ETHDEV_LOG_LINE(ERR, "PFC Tx pause unsupported for port (%d)", 4636 port_id); 4637 return -EINVAL; 4638 } 4639 4640 if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE && 4641 pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) { 4642 RTE_ETHDEV_LOG_LINE(ERR, "PFC Rx pause unsupported for port (%d)", 4643 port_id); 4644 return -EINVAL; 4645 } 4646 4647 /* Validate Rx pause parameters */ 4648 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4649 pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) { 4650 ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max, 4651 pfc_queue_conf); 4652 if (ret != 0) 4653 return ret; 4654 } 4655 4656 /* Validate Tx pause parameters */ 4657 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4658 pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) { 4659 ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max, 4660 pfc_queue_conf); 4661 if (ret != 0) 4662 return ret; 4663 } 4664 4665 if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL) 4666 return -ENOTSUP; 4667 ret = eth_err(port_id, 
(*dev->dev_ops->priority_flow_ctrl_queue_config) 4668 (dev, pfc_queue_conf)); 4669 4670 rte_ethdev_trace_priority_flow_ctrl_queue_configure(port_id, 4671 pfc_queue_conf, ret); 4672 4673 return ret; 4674 } 4675 4676 static int 4677 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 4678 uint16_t reta_size) 4679 { 4680 uint16_t i, num; 4681 4682 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 4683 for (i = 0; i < num; i++) { 4684 if (reta_conf[i].mask) 4685 return 0; 4686 } 4687 4688 return -EINVAL; 4689 } 4690 4691 static int 4692 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 4693 uint16_t reta_size, 4694 uint16_t max_rxq) 4695 { 4696 uint16_t i, idx, shift; 4697 4698 if (max_rxq == 0) { 4699 RTE_ETHDEV_LOG_LINE(ERR, "No receive queue is available"); 4700 return -EINVAL; 4701 } 4702 4703 for (i = 0; i < reta_size; i++) { 4704 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4705 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4706 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 4707 (reta_conf[idx].reta[shift] >= max_rxq)) { 4708 RTE_ETHDEV_LOG_LINE(ERR, 4709 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u", 4710 idx, shift, 4711 reta_conf[idx].reta[shift], max_rxq); 4712 return -EINVAL; 4713 } 4714 } 4715 4716 return 0; 4717 } 4718 4719 int 4720 rte_eth_dev_rss_reta_update(uint16_t port_id, 4721 struct rte_eth_rss_reta_entry64 *reta_conf, 4722 uint16_t reta_size) 4723 { 4724 enum rte_eth_rx_mq_mode mq_mode; 4725 struct rte_eth_dev *dev; 4726 int ret; 4727 4728 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4729 dev = &rte_eth_devices[port_id]; 4730 4731 if (reta_conf == NULL) { 4732 RTE_ETHDEV_LOG_LINE(ERR, 4733 "Cannot update ethdev port %u RSS RETA to NULL", 4734 port_id); 4735 return -EINVAL; 4736 } 4737 4738 if (reta_size == 0) { 4739 RTE_ETHDEV_LOG_LINE(ERR, 4740 "Cannot update ethdev port %u RSS RETA with zero size", 4741 port_id); 4742 return -EINVAL; 4743 } 4744 4745 /* Check mask bits */ 4746 ret = eth_check_reta_mask(reta_conf, reta_size); 4747 if (ret < 0) 4748 return ret; 4749 4750 /* Check entry value */ 4751 ret = eth_check_reta_entry(reta_conf, reta_size, 4752 dev->data->nb_rx_queues); 4753 if (ret < 0) 4754 return ret; 4755 4756 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4757 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4758 RTE_ETHDEV_LOG_LINE(ERR, "Multi-queue RSS mode isn't enabled."); 4759 return -ENOTSUP; 4760 } 4761 4762 if (*dev->dev_ops->reta_update == NULL) 4763 return -ENOTSUP; 4764 ret = eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4765 reta_size)); 4766 4767 rte_ethdev_trace_rss_reta_update(port_id, reta_conf, reta_size, ret); 4768 4769 return ret; 4770 } 4771 4772 int 4773 rte_eth_dev_rss_reta_query(uint16_t port_id, 4774 struct rte_eth_rss_reta_entry64 *reta_conf, 4775 uint16_t reta_size) 4776 { 4777 struct rte_eth_dev *dev; 4778 int ret; 4779 4780 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4781 dev = &rte_eth_devices[port_id]; 4782 4783 if (reta_conf == NULL) { 4784 RTE_ETHDEV_LOG_LINE(ERR, 4785 "Cannot query ethdev port %u RSS RETA from NULL config", 4786 port_id); 4787 return -EINVAL; 4788 } 4789 4790 /* Check mask bits */ 4791 ret = eth_check_reta_mask(reta_conf, reta_size); 4792 if (ret < 0) 4793 return ret; 4794 4795 if (*dev->dev_ops->reta_query == NULL) 4796 return -ENOTSUP; 4797 ret = eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4798 reta_size)); 4799 4800 rte_ethdev_trace_rss_reta_query(port_id, reta_conf, reta_size, ret); 4801 4802 return ret; 4803 } 4804 
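/*
 * Illustrative usage sketch for the RETA update/query pair above
 * (hypothetical application code, not part of this library): spread the
 * whole redirection table across nb_queues Rx queues. dev_info.reta_size
 * and nb_queues are assumed to come from rte_eth_dev_info_get() and the
 * application configuration respectively.
 *
 *	struct rte_eth_rss_reta_entry64 reta[RTE_ETH_RSS_RETA_SIZE_512 /
 *					     RTE_ETH_RETA_GROUP_SIZE] = { 0 };
 *	uint16_t i;
 *
 *	for (i = 0; i < dev_info.reta_size; i++) {
 *		reta[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 *		reta[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 *			i % nb_queues;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta, dev_info.reta_size);
 */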
4805 int 4806 rte_eth_dev_rss_hash_update(uint16_t port_id, 4807 struct rte_eth_rss_conf *rss_conf) 4808 { 4809 struct rte_eth_dev *dev; 4810 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4811 enum rte_eth_rx_mq_mode mq_mode; 4812 int ret; 4813 4814 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4815 dev = &rte_eth_devices[port_id]; 4816 4817 if (rss_conf == NULL) { 4818 RTE_ETHDEV_LOG_LINE(ERR, 4819 "Cannot update ethdev port %u RSS hash from NULL config", 4820 port_id); 4821 return -EINVAL; 4822 } 4823 4824 ret = rte_eth_dev_info_get(port_id, &dev_info); 4825 if (ret != 0) 4826 return ret; 4827 4828 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4829 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4830 dev_info.flow_type_rss_offloads) { 4831 RTE_ETHDEV_LOG_LINE(ERR, 4832 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64, 4833 port_id, rss_conf->rss_hf, 4834 dev_info.flow_type_rss_offloads); 4835 return -EINVAL; 4836 } 4837 4838 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4839 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4840 RTE_ETHDEV_LOG_LINE(ERR, "Multi-queue RSS mode isn't enabled."); 4841 return -ENOTSUP; 4842 } 4843 4844 if (rss_conf->rss_key != NULL && 4845 rss_conf->rss_key_len != dev_info.hash_key_size) { 4846 RTE_ETHDEV_LOG_LINE(ERR, 4847 "Ethdev port_id=%u invalid RSS key len: %u, valid value: %u", 4848 port_id, rss_conf->rss_key_len, dev_info.hash_key_size); 4849 return -EINVAL; 4850 } 4851 4852 if ((size_t)rss_conf->algorithm >= CHAR_BIT * sizeof(dev_info.rss_algo_capa) || 4853 (dev_info.rss_algo_capa & 4854 RTE_ETH_HASH_ALGO_TO_CAPA(rss_conf->algorithm)) == 0) { 4855 RTE_ETHDEV_LOG_LINE(ERR, 4856 "Ethdev port_id=%u configured RSS hash algorithm (%u) " 4857 "is not in the algorithm capability (0x%" PRIx32 ")", 4858 port_id, rss_conf->algorithm, dev_info.rss_algo_capa); 4859 return -EINVAL; 4860 } 4861 4862 if (*dev->dev_ops->rss_hash_update == NULL) 4863 return -ENOTSUP; 4864 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4865 rss_conf)); 4866 4867 rte_ethdev_trace_rss_hash_update(port_id, rss_conf, ret); 4868 4869 return ret; 4870 } 4871 4872 int 4873 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4874 struct rte_eth_rss_conf *rss_conf) 4875 { 4876 struct rte_eth_dev_info dev_info = { 0 }; 4877 struct rte_eth_dev *dev; 4878 int ret; 4879 4880 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4881 dev = &rte_eth_devices[port_id]; 4882 4883 if (rss_conf == NULL) { 4884 RTE_ETHDEV_LOG_LINE(ERR, 4885 "Cannot get ethdev port %u RSS hash config to NULL", 4886 port_id); 4887 return -EINVAL; 4888 } 4889 4890 ret = rte_eth_dev_info_get(port_id, &dev_info); 4891 if (ret != 0) 4892 return ret; 4893 4894 if (rss_conf->rss_key != NULL && 4895 rss_conf->rss_key_len < dev_info.hash_key_size) { 4896 RTE_ETHDEV_LOG_LINE(ERR, 4897 "Ethdev port_id=%u invalid RSS key len: %u, should not be less than: %u", 4898 port_id, rss_conf->rss_key_len, dev_info.hash_key_size); 4899 return -EINVAL; 4900 } 4901 4902 rss_conf->algorithm = RTE_ETH_HASH_FUNCTION_DEFAULT; 4903 4904 if (*dev->dev_ops->rss_hash_conf_get == NULL) 4905 return -ENOTSUP; 4906 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4907 rss_conf)); 4908 4909 rte_ethdev_trace_rss_hash_conf_get(port_id, rss_conf, ret); 4910 4911 return ret; 4912 } 4913 4914 const char * 4915 rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo) 4916 { 4917 const char *name = "Unknown function"; 4918 unsigned int i; 4919 4920 for (i = 0; i < 
RTE_DIM(rte_eth_dev_rss_algo_names); i++) { 4921 if (rss_algo == rte_eth_dev_rss_algo_names[i].algo) 4922 return rte_eth_dev_rss_algo_names[i].name; 4923 } 4924 4925 return name; 4926 } 4927 4928 int 4929 rte_eth_find_rss_algo(const char *name, uint32_t *algo) 4930 { 4931 unsigned int i; 4932 4933 for (i = 0; i < RTE_DIM(rte_eth_dev_rss_algo_names); i++) { 4934 if (strcmp(name, rte_eth_dev_rss_algo_names[i].name) == 0) { 4935 *algo = rte_eth_dev_rss_algo_names[i].algo; 4936 return 0; 4937 } 4938 } 4939 4940 return -EINVAL; 4941 } 4942 4943 int 4944 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4945 struct rte_eth_udp_tunnel *udp_tunnel) 4946 { 4947 struct rte_eth_dev *dev; 4948 int ret; 4949 4950 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4951 dev = &rte_eth_devices[port_id]; 4952 4953 if (udp_tunnel == NULL) { 4954 RTE_ETHDEV_LOG_LINE(ERR, 4955 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel", 4956 port_id); 4957 return -EINVAL; 4958 } 4959 4960 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4961 RTE_ETHDEV_LOG_LINE(ERR, "Invalid tunnel type"); 4962 return -EINVAL; 4963 } 4964 4965 if (*dev->dev_ops->udp_tunnel_port_add == NULL) 4966 return -ENOTSUP; 4967 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4968 udp_tunnel)); 4969 4970 rte_ethdev_trace_udp_tunnel_port_add(port_id, udp_tunnel, ret); 4971 4972 return ret; 4973 } 4974 4975 int 4976 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4977 struct rte_eth_udp_tunnel *udp_tunnel) 4978 { 4979 struct rte_eth_dev *dev; 4980 int ret; 4981 4982 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4983 dev = &rte_eth_devices[port_id]; 4984 4985 if (udp_tunnel == NULL) { 4986 RTE_ETHDEV_LOG_LINE(ERR, 4987 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel", 4988 port_id); 4989 return -EINVAL; 4990 } 4991 4992 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4993 RTE_ETHDEV_LOG_LINE(ERR, "Invalid tunnel type"); 4994 return -EINVAL; 4995 } 4996 4997 if (*dev->dev_ops->udp_tunnel_port_del == NULL) 4998 return -ENOTSUP; 4999 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 5000 udp_tunnel)); 5001 5002 rte_ethdev_trace_udp_tunnel_port_delete(port_id, udp_tunnel, ret); 5003 5004 return ret; 5005 } 5006 5007 int 5008 rte_eth_led_on(uint16_t port_id) 5009 { 5010 struct rte_eth_dev *dev; 5011 int ret; 5012 5013 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5014 dev = &rte_eth_devices[port_id]; 5015 5016 if (*dev->dev_ops->dev_led_on == NULL) 5017 return -ENOTSUP; 5018 ret = eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 5019 5020 rte_eth_trace_led_on(port_id, ret); 5021 5022 return ret; 5023 } 5024 5025 int 5026 rte_eth_led_off(uint16_t port_id) 5027 { 5028 struct rte_eth_dev *dev; 5029 int ret; 5030 5031 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5032 dev = &rte_eth_devices[port_id]; 5033 5034 if (*dev->dev_ops->dev_led_off == NULL) 5035 return -ENOTSUP; 5036 ret = eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 5037 5038 rte_eth_trace_led_off(port_id, ret); 5039 5040 return ret; 5041 } 5042 5043 int 5044 rte_eth_fec_get_capability(uint16_t port_id, 5045 struct rte_eth_fec_capa *speed_fec_capa, 5046 unsigned int num) 5047 { 5048 struct rte_eth_dev *dev; 5049 int ret; 5050 5051 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5052 dev = &rte_eth_devices[port_id]; 5053 5054 if (speed_fec_capa == NULL && num > 0) { 5055 RTE_ETHDEV_LOG_LINE(ERR, 5056 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero", 5057 
port_id); 5058 return -EINVAL; 5059 } 5060 5061 if (*dev->dev_ops->fec_get_capability == NULL) 5062 return -ENOTSUP; 5063 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 5064 5065 rte_eth_trace_fec_get_capability(port_id, speed_fec_capa, num, ret); 5066 5067 return ret; 5068 } 5069 5070 int 5071 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 5072 { 5073 struct rte_eth_dev *dev; 5074 int ret; 5075 5076 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5077 dev = &rte_eth_devices[port_id]; 5078 5079 if (fec_capa == NULL) { 5080 RTE_ETHDEV_LOG_LINE(ERR, 5081 "Cannot get ethdev port %u current FEC mode to NULL", 5082 port_id); 5083 return -EINVAL; 5084 } 5085 5086 if (*dev->dev_ops->fec_get == NULL) 5087 return -ENOTSUP; 5088 ret = eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 5089 5090 rte_eth_trace_fec_get(port_id, fec_capa, ret); 5091 5092 return ret; 5093 } 5094 5095 int 5096 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 5097 { 5098 struct rte_eth_dev *dev; 5099 int ret; 5100 5101 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5102 dev = &rte_eth_devices[port_id]; 5103 5104 if (fec_capa == 0) { 5105 RTE_ETHDEV_LOG_LINE(ERR, "At least one FEC mode should be specified"); 5106 return -EINVAL; 5107 } 5108 5109 if (*dev->dev_ops->fec_set == NULL) 5110 return -ENOTSUP; 5111 ret = eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 5112 5113 rte_eth_trace_fec_set(port_id, fec_capa, ret); 5114 5115 return ret; 5116 } 5117 5118 /* 5119 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 5120 * an empty spot. 5121 */ 5122 static int 5123 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 5124 { 5125 struct rte_eth_dev_info dev_info; 5126 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5127 unsigned i; 5128 int ret; 5129 5130 ret = rte_eth_dev_info_get(port_id, &dev_info); 5131 if (ret != 0) 5132 return -1; 5133 5134 for (i = 0; i < dev_info.max_mac_addrs; i++) 5135 if (memcmp(addr, &dev->data->mac_addrs[i], 5136 RTE_ETHER_ADDR_LEN) == 0) 5137 return i; 5138 5139 return -1; 5140 } 5141 5142 static const struct rte_ether_addr null_mac_addr; 5143 5144 int 5145 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 5146 uint32_t pool) 5147 { 5148 struct rte_eth_dev *dev; 5149 int index; 5150 uint64_t pool_mask; 5151 int ret; 5152 5153 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5154 dev = &rte_eth_devices[port_id]; 5155 5156 if (addr == NULL) { 5157 RTE_ETHDEV_LOG_LINE(ERR, 5158 "Cannot add ethdev port %u MAC address from NULL address", 5159 port_id); 5160 return -EINVAL; 5161 } 5162 5163 if (*dev->dev_ops->mac_addr_add == NULL) 5164 return -ENOTSUP; 5165 5166 if (rte_is_zero_ether_addr(addr)) { 5167 RTE_ETHDEV_LOG_LINE(ERR, "Port %u: Cannot add NULL MAC address", 5168 port_id); 5169 return -EINVAL; 5170 } 5171 if (pool >= RTE_ETH_64_POOLS) { 5172 RTE_ETHDEV_LOG_LINE(ERR, "Pool ID must be 0-%d", RTE_ETH_64_POOLS - 1); 5173 return -EINVAL; 5174 } 5175 5176 index = eth_dev_get_mac_addr_index(port_id, addr); 5177 if (index < 0) { 5178 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 5179 if (index < 0) { 5180 RTE_ETHDEV_LOG_LINE(ERR, "Port %u: MAC address array full", 5181 port_id); 5182 return -ENOSPC; 5183 } 5184 } else { 5185 pool_mask = dev->data->mac_pool_sel[index]; 5186 5187 /* Check if both MAC address and pool is already there, and do nothing */ 5188 if (pool_mask & RTE_BIT64(pool)) 5189 return 0; 5190 } 5191 5192 /* Update NIC */ 5193 ret = 
(*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 5194 5195 if (ret == 0) { 5196 /* Update address in NIC data structure */ 5197 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 5198 5199 /* Update pool bitmap in NIC data structure */ 5200 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 5201 } 5202 5203 ret = eth_err(port_id, ret); 5204 5205 rte_ethdev_trace_mac_addr_add(port_id, addr, pool, ret); 5206 5207 return ret; 5208 } 5209 5210 int 5211 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 5212 { 5213 struct rte_eth_dev *dev; 5214 int index; 5215 5216 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5217 dev = &rte_eth_devices[port_id]; 5218 5219 if (addr == NULL) { 5220 RTE_ETHDEV_LOG_LINE(ERR, 5221 "Cannot remove ethdev port %u MAC address from NULL address", 5222 port_id); 5223 return -EINVAL; 5224 } 5225 5226 if (*dev->dev_ops->mac_addr_remove == NULL) 5227 return -ENOTSUP; 5228 5229 index = eth_dev_get_mac_addr_index(port_id, addr); 5230 if (index == 0) { 5231 RTE_ETHDEV_LOG_LINE(ERR, 5232 "Port %u: Cannot remove default MAC address", 5233 port_id); 5234 return -EADDRINUSE; 5235 } else if (index < 0) 5236 return 0; /* Do nothing if address wasn't found */ 5237 5238 /* Update NIC */ 5239 (*dev->dev_ops->mac_addr_remove)(dev, index); 5240 5241 /* Update address in NIC data structure */ 5242 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 5243 5244 /* reset pool bitmap */ 5245 dev->data->mac_pool_sel[index] = 0; 5246 5247 rte_ethdev_trace_mac_addr_remove(port_id, addr); 5248 5249 return 0; 5250 } 5251 5252 int 5253 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 5254 { 5255 struct rte_eth_dev *dev; 5256 int index; 5257 int ret; 5258 5259 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5260 dev = &rte_eth_devices[port_id]; 5261 5262 if (addr == NULL) { 5263 RTE_ETHDEV_LOG_LINE(ERR, 5264 "Cannot set ethdev port %u default MAC address from NULL address", 5265 port_id); 5266 return -EINVAL; 5267 } 5268 5269 if (!rte_is_valid_assigned_ether_addr(addr)) 5270 return -EINVAL; 5271 5272 if (*dev->dev_ops->mac_addr_set == NULL) 5273 return -ENOTSUP; 5274 5275 /* Keep address unique in dev->data->mac_addrs[]. */ 5276 index = eth_dev_get_mac_addr_index(port_id, addr); 5277 if (index > 0) { 5278 RTE_ETHDEV_LOG_LINE(ERR, 5279 "New default address for port %u was already in the address list. Please remove it first.", 5280 port_id); 5281 return -EEXIST; 5282 } 5283 5284 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 5285 if (ret < 0) 5286 return ret; 5287 5288 /* Update default address in NIC data structure */ 5289 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 5290 5291 rte_ethdev_trace_default_mac_addr_set(port_id, addr); 5292 5293 return 0; 5294 } 5295 5296 5297 /* 5298 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 5299 * an empty spot. 
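 *
 * The unicast-hash helper below is used exactly like the MAC address
 * variant above: first probe with the real address to detect an existing
 * entry, then with null_mac_addr to claim a free slot (illustrative
 * restatement of the logic that follows, not additional behaviour):
 *
 *	index = eth_dev_get_hash_mac_addr_index(port_id, addr);
 *	if (index < 0)
 *		index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);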
5300 */ 5301 static int 5302 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 5303 const struct rte_ether_addr *addr) 5304 { 5305 struct rte_eth_dev_info dev_info; 5306 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5307 unsigned i; 5308 int ret; 5309 5310 ret = rte_eth_dev_info_get(port_id, &dev_info); 5311 if (ret != 0) 5312 return -1; 5313 5314 if (!dev->data->hash_mac_addrs) 5315 return -1; 5316 5317 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 5318 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 5319 RTE_ETHER_ADDR_LEN) == 0) 5320 return i; 5321 5322 return -1; 5323 } 5324 5325 int 5326 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 5327 uint8_t on) 5328 { 5329 int index; 5330 int ret; 5331 struct rte_eth_dev *dev; 5332 5333 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5334 dev = &rte_eth_devices[port_id]; 5335 5336 if (addr == NULL) { 5337 RTE_ETHDEV_LOG_LINE(ERR, 5338 "Cannot set ethdev port %u unicast hash table from NULL address", 5339 port_id); 5340 return -EINVAL; 5341 } 5342 5343 if (rte_is_zero_ether_addr(addr)) { 5344 RTE_ETHDEV_LOG_LINE(ERR, "Port %u: Cannot add NULL MAC address", 5345 port_id); 5346 return -EINVAL; 5347 } 5348 5349 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 5350 /* Check if it's already there, and do nothing */ 5351 if ((index >= 0) && on) 5352 return 0; 5353 5354 if (index < 0) { 5355 if (!on) { 5356 RTE_ETHDEV_LOG_LINE(ERR, 5357 "Port %u: the MAC address was not set in UTA", 5358 port_id); 5359 return -EINVAL; 5360 } 5361 5362 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 5363 if (index < 0) { 5364 RTE_ETHDEV_LOG_LINE(ERR, "Port %u: MAC address array full", 5365 port_id); 5366 return -ENOSPC; 5367 } 5368 } 5369 5370 if (*dev->dev_ops->uc_hash_table_set == NULL) 5371 return -ENOTSUP; 5372 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 5373 if (ret == 0) { 5374 /* Update address in NIC data structure */ 5375 if (on) 5376 rte_ether_addr_copy(addr, 5377 &dev->data->hash_mac_addrs[index]); 5378 else 5379 rte_ether_addr_copy(&null_mac_addr, 5380 &dev->data->hash_mac_addrs[index]); 5381 } 5382 5383 ret = eth_err(port_id, ret); 5384 5385 rte_ethdev_trace_uc_hash_table_set(port_id, on, ret); 5386 5387 return ret; 5388 } 5389 5390 int 5391 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 5392 { 5393 struct rte_eth_dev *dev; 5394 int ret; 5395 5396 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5397 dev = &rte_eth_devices[port_id]; 5398 5399 if (*dev->dev_ops->uc_all_hash_table_set == NULL) 5400 return -ENOTSUP; 5401 ret = eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, on)); 5402 5403 rte_ethdev_trace_uc_all_hash_table_set(port_id, on, ret); 5404 5405 return ret; 5406 } 5407 5408 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 5409 uint32_t tx_rate) 5410 { 5411 struct rte_eth_dev *dev; 5412 struct rte_eth_dev_info dev_info; 5413 struct rte_eth_link link; 5414 int ret; 5415 5416 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5417 dev = &rte_eth_devices[port_id]; 5418 5419 ret = rte_eth_dev_info_get(port_id, &dev_info); 5420 if (ret != 0) 5421 return ret; 5422 5423 link = dev->data->dev_link; 5424 5425 if (queue_idx > dev_info.max_tx_queues) { 5426 RTE_ETHDEV_LOG_LINE(ERR, 5427 "Set queue rate limit:port %u: invalid queue ID=%u", 5428 port_id, queue_idx); 5429 return -EINVAL; 5430 } 5431 5432 if (tx_rate > link.link_speed) { 5433 RTE_ETHDEV_LOG_LINE(ERR, 5434 "Set queue rate limit:invalid tx_rate=%u, bigger than link 
speed= %d", 5435 tx_rate, link.link_speed); 5436 return -EINVAL; 5437 } 5438 5439 if (*dev->dev_ops->set_queue_rate_limit == NULL) 5440 return -ENOTSUP; 5441 ret = eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 5442 queue_idx, tx_rate)); 5443 5444 rte_eth_trace_set_queue_rate_limit(port_id, queue_idx, tx_rate, ret); 5445 5446 return ret; 5447 } 5448 5449 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, 5450 uint8_t avail_thresh) 5451 { 5452 struct rte_eth_dev *dev; 5453 int ret; 5454 5455 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5456 dev = &rte_eth_devices[port_id]; 5457 5458 if (queue_id > dev->data->nb_rx_queues) { 5459 RTE_ETHDEV_LOG_LINE(ERR, 5460 "Set queue avail thresh: port %u: invalid queue ID=%u.", 5461 port_id, queue_id); 5462 return -EINVAL; 5463 } 5464 5465 if (avail_thresh > 99) { 5466 RTE_ETHDEV_LOG_LINE(ERR, 5467 "Set queue avail thresh: port %u: threshold should be <= 99.", 5468 port_id); 5469 return -EINVAL; 5470 } 5471 if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL) 5472 return -ENOTSUP; 5473 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev, 5474 queue_id, avail_thresh)); 5475 5476 rte_eth_trace_rx_avail_thresh_set(port_id, queue_id, avail_thresh, ret); 5477 5478 return ret; 5479 } 5480 5481 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, 5482 uint8_t *avail_thresh) 5483 { 5484 struct rte_eth_dev *dev; 5485 int ret; 5486 5487 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5488 dev = &rte_eth_devices[port_id]; 5489 5490 if (queue_id == NULL) 5491 return -EINVAL; 5492 if (*queue_id >= dev->data->nb_rx_queues) 5493 *queue_id = 0; 5494 5495 if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL) 5496 return -ENOTSUP; 5497 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev, 5498 queue_id, avail_thresh)); 5499 5500 rte_eth_trace_rx_avail_thresh_query(port_id, *queue_id, ret); 5501 5502 return ret; 5503 } 5504 5505 RTE_INIT(eth_dev_init_fp_ops) 5506 { 5507 uint32_t i; 5508 5509 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 5510 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 5511 } 5512 5513 RTE_INIT(eth_dev_init_cb_lists) 5514 { 5515 uint16_t i; 5516 5517 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 5518 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 5519 } 5520 5521 int 5522 rte_eth_dev_callback_register(uint16_t port_id, 5523 enum rte_eth_event_type event, 5524 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5525 { 5526 struct rte_eth_dev *dev; 5527 struct rte_eth_dev_callback *user_cb; 5528 uint16_t next_port; 5529 uint16_t last_port; 5530 5531 if (cb_fn == NULL) { 5532 RTE_ETHDEV_LOG_LINE(ERR, 5533 "Cannot register ethdev port %u callback from NULL", 5534 port_id); 5535 return -EINVAL; 5536 } 5537 5538 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5539 RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%d", port_id); 5540 return -EINVAL; 5541 } 5542 5543 if (port_id == RTE_ETH_ALL) { 5544 next_port = 0; 5545 last_port = RTE_MAX_ETHPORTS - 1; 5546 } else { 5547 next_port = last_port = port_id; 5548 } 5549 5550 rte_spinlock_lock(ð_dev_cb_lock); 5551 5552 do { 5553 dev = &rte_eth_devices[next_port]; 5554 5555 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 5556 if (user_cb->cb_fn == cb_fn && 5557 user_cb->cb_arg == cb_arg && 5558 user_cb->event == event) { 5559 break; 5560 } 5561 } 5562 5563 /* create a new callback. 
*/ 5564 if (user_cb == NULL) { 5565 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 5566 sizeof(struct rte_eth_dev_callback), 0); 5567 if (user_cb != NULL) { 5568 user_cb->cb_fn = cb_fn; 5569 user_cb->cb_arg = cb_arg; 5570 user_cb->event = event; 5571 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 5572 user_cb, next); 5573 } else { 5574 rte_spinlock_unlock(&eth_dev_cb_lock); 5575 rte_eth_dev_callback_unregister(port_id, event, 5576 cb_fn, cb_arg); 5577 return -ENOMEM; 5578 } 5579 5580 } 5581 } while (++next_port <= last_port); 5582 5583 rte_spinlock_unlock(&eth_dev_cb_lock); 5584 5585 rte_ethdev_trace_callback_register(port_id, event, cb_fn, cb_arg); 5586 5587 return 0; 5588 } 5589 5590 int 5591 rte_eth_dev_callback_unregister(uint16_t port_id, 5592 enum rte_eth_event_type event, 5593 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5594 { 5595 int ret; 5596 struct rte_eth_dev *dev; 5597 struct rte_eth_dev_callback *cb, *next; 5598 uint16_t next_port; 5599 uint16_t last_port; 5600 5601 if (cb_fn == NULL) { 5602 RTE_ETHDEV_LOG_LINE(ERR, 5603 "Cannot unregister ethdev port %u callback from NULL", 5604 port_id); 5605 return -EINVAL; 5606 } 5607 5608 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5609 RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%d", port_id); 5610 return -EINVAL; 5611 } 5612 5613 if (port_id == RTE_ETH_ALL) { 5614 next_port = 0; 5615 last_port = RTE_MAX_ETHPORTS - 1; 5616 } else { 5617 next_port = last_port = port_id; 5618 } 5619 5620 rte_spinlock_lock(&eth_dev_cb_lock); 5621 5622 do { 5623 dev = &rte_eth_devices[next_port]; 5624 ret = 0; 5625 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 5626 cb = next) { 5627 5628 next = TAILQ_NEXT(cb, next); 5629 5630 if (cb->cb_fn != cb_fn || cb->event != event || 5631 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 5632 continue; 5633 5634 /* 5635 * if this callback is not executing right now, 5636 * then remove it. 
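 * Otherwise the entry is kept and -EAGAIN is reported below, so the
 * caller can retry the unregister once the event handler has finished
 * executing.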
5637 */ 5638 if (cb->active == 0) { 5639 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 5640 rte_free(cb); 5641 } else { 5642 ret = -EAGAIN; 5643 } 5644 } 5645 } while (++next_port <= last_port); 5646 5647 rte_spinlock_unlock(&eth_dev_cb_lock); 5648 5649 rte_ethdev_trace_callback_unregister(port_id, event, cb_fn, cb_arg, 5650 ret); 5651 5652 return ret; 5653 } 5654 5655 int 5656 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 5657 { 5658 uint32_t vec; 5659 struct rte_eth_dev *dev; 5660 struct rte_intr_handle *intr_handle; 5661 uint16_t qid; 5662 int rc; 5663 5664 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5665 dev = &rte_eth_devices[port_id]; 5666 5667 if (!dev->intr_handle) { 5668 RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr handle unset"); 5669 return -ENOTSUP; 5670 } 5671 5672 intr_handle = dev->intr_handle; 5673 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5674 RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr vector unset"); 5675 return -EPERM; 5676 } 5677 5678 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 5679 vec = rte_intr_vec_list_index_get(intr_handle, qid); 5680 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5681 5682 rte_ethdev_trace_rx_intr_ctl(port_id, qid, epfd, op, data, rc); 5683 5684 if (rc && rc != -EEXIST) { 5685 RTE_ETHDEV_LOG_LINE(ERR, 5686 "p %u q %u Rx ctl error op %d epfd %d vec %u", 5687 port_id, qid, op, epfd, vec); 5688 } 5689 } 5690 5691 return 0; 5692 } 5693 5694 int 5695 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 5696 { 5697 struct rte_intr_handle *intr_handle; 5698 struct rte_eth_dev *dev; 5699 unsigned int efd_idx; 5700 uint32_t vec; 5701 int fd; 5702 5703 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 5704 dev = &rte_eth_devices[port_id]; 5705 5706 if (queue_id >= dev->data->nb_rx_queues) { 5707 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id); 5708 return -1; 5709 } 5710 5711 if (!dev->intr_handle) { 5712 RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr handle unset"); 5713 return -1; 5714 } 5715 5716 intr_handle = dev->intr_handle; 5717 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5718 RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr vector unset"); 5719 return -1; 5720 } 5721 5722 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5723 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
5724 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 5725 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 5726 5727 rte_ethdev_trace_rx_intr_ctl_q_get_fd(port_id, queue_id, fd); 5728 5729 return fd; 5730 } 5731 5732 int 5733 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 5734 int epfd, int op, void *data) 5735 { 5736 uint32_t vec; 5737 struct rte_eth_dev *dev; 5738 struct rte_intr_handle *intr_handle; 5739 int rc; 5740 5741 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5742 dev = &rte_eth_devices[port_id]; 5743 5744 if (queue_id >= dev->data->nb_rx_queues) { 5745 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id); 5746 return -EINVAL; 5747 } 5748 5749 if (!dev->intr_handle) { 5750 RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr handle unset"); 5751 return -ENOTSUP; 5752 } 5753 5754 intr_handle = dev->intr_handle; 5755 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5756 RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr vector unset"); 5757 return -EPERM; 5758 } 5759 5760 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5761 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5762 5763 rte_ethdev_trace_rx_intr_ctl_q(port_id, queue_id, epfd, op, data, rc); 5764 5765 if (rc && rc != -EEXIST) { 5766 RTE_ETHDEV_LOG_LINE(ERR, 5767 "p %u q %u Rx ctl error op %d epfd %d vec %u", 5768 port_id, queue_id, op, epfd, vec); 5769 return rc; 5770 } 5771 5772 return 0; 5773 } 5774 5775 int 5776 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5777 uint16_t queue_id) 5778 { 5779 struct rte_eth_dev *dev; 5780 int ret; 5781 5782 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5783 dev = &rte_eth_devices[port_id]; 5784 5785 ret = eth_dev_validate_rx_queue(dev, queue_id); 5786 if (ret != 0) 5787 return ret; 5788 5789 if (*dev->dev_ops->rx_queue_intr_enable == NULL) 5790 return -ENOTSUP; 5791 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5792 5793 rte_ethdev_trace_rx_intr_enable(port_id, queue_id, ret); 5794 5795 return ret; 5796 } 5797 5798 int 5799 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5800 uint16_t queue_id) 5801 { 5802 struct rte_eth_dev *dev; 5803 int ret; 5804 5805 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5806 dev = &rte_eth_devices[port_id]; 5807 5808 ret = eth_dev_validate_rx_queue(dev, queue_id); 5809 if (ret != 0) 5810 return ret; 5811 5812 if (*dev->dev_ops->rx_queue_intr_disable == NULL) 5813 return -ENOTSUP; 5814 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5815 5816 rte_ethdev_trace_rx_intr_disable(port_id, queue_id, ret); 5817 5818 return ret; 5819 } 5820 5821 5822 const struct rte_eth_rxtx_callback * 5823 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5824 rte_rx_callback_fn fn, void *user_param) 5825 { 5826 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5827 rte_errno = ENOTSUP; 5828 return NULL; 5829 #endif 5830 struct rte_eth_dev *dev; 5831 5832 /* check input parameters */ 5833 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5834 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5835 rte_errno = EINVAL; 5836 return NULL; 5837 } 5838 dev = &rte_eth_devices[port_id]; 5839 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5840 rte_errno = EINVAL; 5841 return NULL; 5842 } 5843 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5844 5845 if (cb == NULL) { 5846 rte_errno = ENOMEM; 5847 return NULL; 5848 } 5849 5850 cb->fn.rx = fn; 5851 cb->param = user_param; 5852 5853 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5854 /* Add the callbacks in fifo order. 
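 * For reference, an application installs such a callback per Rx queue
 * roughly as sketched below (hypothetical callback name, illustrative
 * only) and keeps the returned pointer for a later
 * rte_eth_remove_rx_callback() call.
 *
 *	static uint16_t
 *	count_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		RTE_SET_USED(port);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*(uint64_t *)user_param += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, queue_id, count_cb, &counter);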
*/ 5855 struct rte_eth_rxtx_callback *tail = 5856 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5857 5858 if (!tail) { 5859 /* Stores to cb->fn and cb->param should complete before 5860 * cb is visible to data plane. 5861 */ 5862 rte_atomic_store_explicit( 5863 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5864 cb, rte_memory_order_release); 5865 5866 } else { 5867 while (tail->next) 5868 tail = tail->next; 5869 /* Stores to cb->fn and cb->param should complete before 5870 * cb is visible to data plane. 5871 */ 5872 rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release); 5873 } 5874 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5875 5876 rte_eth_trace_add_rx_callback(port_id, queue_id, fn, user_param, cb); 5877 5878 return cb; 5879 } 5880 5881 const struct rte_eth_rxtx_callback * 5882 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5883 rte_rx_callback_fn fn, void *user_param) 5884 { 5885 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5886 rte_errno = ENOTSUP; 5887 return NULL; 5888 #endif 5889 /* check input parameters */ 5890 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5891 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5892 rte_errno = EINVAL; 5893 return NULL; 5894 } 5895 5896 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5897 5898 if (cb == NULL) { 5899 rte_errno = ENOMEM; 5900 return NULL; 5901 } 5902 5903 cb->fn.rx = fn; 5904 cb->param = user_param; 5905 5906 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5907 /* Add the callbacks at first position */ 5908 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5909 /* Stores to cb->fn, cb->param and cb->next should complete before 5910 * cb is visible to data plane threads. 5911 */ 5912 rte_atomic_store_explicit( 5913 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5914 cb, rte_memory_order_release); 5915 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5916 5917 rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param, 5918 cb); 5919 5920 return cb; 5921 } 5922 5923 const struct rte_eth_rxtx_callback * 5924 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5925 rte_tx_callback_fn fn, void *user_param) 5926 { 5927 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5928 rte_errno = ENOTSUP; 5929 return NULL; 5930 #endif 5931 struct rte_eth_dev *dev; 5932 5933 /* check input parameters */ 5934 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5935 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5936 rte_errno = EINVAL; 5937 return NULL; 5938 } 5939 5940 dev = &rte_eth_devices[port_id]; 5941 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5942 rte_errno = EINVAL; 5943 return NULL; 5944 } 5945 5946 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5947 5948 if (cb == NULL) { 5949 rte_errno = ENOMEM; 5950 return NULL; 5951 } 5952 5953 cb->fn.tx = fn; 5954 cb->param = user_param; 5955 5956 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5957 /* Add the callbacks in fifo order. */ 5958 struct rte_eth_rxtx_callback *tail = 5959 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5960 5961 if (!tail) { 5962 /* Stores to cb->fn and cb->param should complete before 5963 * cb is visible to data plane. 5964 */ 5965 rte_atomic_store_explicit( 5966 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5967 cb, rte_memory_order_release); 5968 5969 } else { 5970 while (tail->next) 5971 tail = tail->next; 5972 /* Stores to cb->fn and cb->param should complete before 5973 * cb is visible to data plane. 
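 * The data-path threads walk this singly linked list without taking
 * eth_dev_tx_cb_lock, so the release store below (together with the
 * readers' dependent loads) is what keeps a half-initialised node from
 * ever being observed.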
5974 */ 5975 rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release); 5976 } 5977 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5978 5979 rte_eth_trace_add_tx_callback(port_id, queue_id, fn, user_param, cb); 5980 5981 return cb; 5982 } 5983 5984 int 5985 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5986 const struct rte_eth_rxtx_callback *user_cb) 5987 { 5988 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5989 return -ENOTSUP; 5990 #endif 5991 /* Check input parameters. */ 5992 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5993 if (user_cb == NULL || 5994 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5995 return -EINVAL; 5996 5997 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5998 struct rte_eth_rxtx_callback *cb; 5999 RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb; 6000 int ret = -EINVAL; 6001 6002 rte_spinlock_lock(&eth_dev_rx_cb_lock); 6003 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 6004 for (; *prev_cb != NULL; prev_cb = &cb->next) { 6005 cb = *prev_cb; 6006 if (cb == user_cb) { 6007 /* Remove the user cb from the callback list. */ 6008 rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed); 6009 ret = 0; 6010 break; 6011 } 6012 } 6013 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 6014 6015 rte_eth_trace_remove_rx_callback(port_id, queue_id, user_cb, ret); 6016 6017 return ret; 6018 } 6019 6020 int 6021 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 6022 const struct rte_eth_rxtx_callback *user_cb) 6023 { 6024 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 6025 return -ENOTSUP; 6026 #endif 6027 /* Check input parameters. */ 6028 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6029 if (user_cb == NULL || 6030 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 6031 return -EINVAL; 6032 6033 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 6034 int ret = -EINVAL; 6035 struct rte_eth_rxtx_callback *cb; 6036 RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb; 6037 6038 rte_spinlock_lock(&eth_dev_tx_cb_lock); 6039 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 6040 for (; *prev_cb != NULL; prev_cb = &cb->next) { 6041 cb = *prev_cb; 6042 if (cb == user_cb) { 6043 /* Remove the user cb from the callback list. 
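 * Note that the node is only unlinked here, not freed: a data-path
 * thread may still be traversing it. The caller owns the memory and
 * should release it (e.g. with rte_free()) only after making sure no
 * rte_eth_tx_burst() is still running on this queue.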
*/ 6044 rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed); 6045 ret = 0; 6046 break; 6047 } 6048 } 6049 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 6050 6051 rte_eth_trace_remove_tx_callback(port_id, queue_id, user_cb, ret); 6052 6053 return ret; 6054 } 6055 6056 int 6057 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 6058 struct rte_eth_rxq_info *qinfo) 6059 { 6060 struct rte_eth_dev *dev; 6061 6062 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6063 dev = &rte_eth_devices[port_id]; 6064 6065 if (queue_id >= dev->data->nb_rx_queues) { 6066 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id); 6067 return -EINVAL; 6068 } 6069 6070 if (qinfo == NULL) { 6071 RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL", 6072 port_id, queue_id); 6073 return -EINVAL; 6074 } 6075 6076 if (dev->data->rx_queues == NULL || 6077 dev->data->rx_queues[queue_id] == NULL) { 6078 RTE_ETHDEV_LOG_LINE(ERR, 6079 "Rx queue %"PRIu16" of device with port_id=%" 6080 PRIu16" has not been setup", 6081 queue_id, port_id); 6082 return -EINVAL; 6083 } 6084 6085 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 6086 RTE_ETHDEV_LOG_LINE(INFO, 6087 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16, 6088 queue_id, port_id); 6089 return -EINVAL; 6090 } 6091 6092 if (*dev->dev_ops->rxq_info_get == NULL) 6093 return -ENOTSUP; 6094 6095 memset(qinfo, 0, sizeof(*qinfo)); 6096 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 6097 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 6098 6099 rte_eth_trace_rx_queue_info_get(port_id, queue_id, qinfo); 6100 6101 return 0; 6102 } 6103 6104 int 6105 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 6106 struct rte_eth_txq_info *qinfo) 6107 { 6108 struct rte_eth_dev *dev; 6109 6110 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6111 dev = &rte_eth_devices[port_id]; 6112 6113 if (queue_id >= dev->data->nb_tx_queues) { 6114 RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", queue_id); 6115 return -EINVAL; 6116 } 6117 6118 if (qinfo == NULL) { 6119 RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL", 6120 port_id, queue_id); 6121 return -EINVAL; 6122 } 6123 6124 if (dev->data->tx_queues == NULL || 6125 dev->data->tx_queues[queue_id] == NULL) { 6126 RTE_ETHDEV_LOG_LINE(ERR, 6127 "Tx queue %"PRIu16" of device with port_id=%" 6128 PRIu16" has not been setup", 6129 queue_id, port_id); 6130 return -EINVAL; 6131 } 6132 6133 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 6134 RTE_ETHDEV_LOG_LINE(INFO, 6135 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16, 6136 queue_id, port_id); 6137 return -EINVAL; 6138 } 6139 6140 if (*dev->dev_ops->txq_info_get == NULL) 6141 return -ENOTSUP; 6142 6143 memset(qinfo, 0, sizeof(*qinfo)); 6144 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 6145 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 6146 6147 rte_eth_trace_tx_queue_info_get(port_id, queue_id, qinfo); 6148 6149 return 0; 6150 } 6151 6152 int 6153 rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 6154 struct rte_eth_recycle_rxq_info *recycle_rxq_info) 6155 { 6156 struct rte_eth_dev *dev; 6157 int ret; 6158 6159 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6160 dev = &rte_eth_devices[port_id]; 6161 6162 ret = eth_dev_validate_rx_queue(dev, queue_id); 6163 if (unlikely(ret != 0)) 6164 return ret; 6165 6166 if (*dev->dev_ops->recycle_rxq_info_get == NULL) 6167 return -ENOTSUP; 6168 6169 
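/* The queue description filled in below (ring, mempool and refill constraints) is what rte_eth_recycle_mbufs() relies on in the data path. */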

int
rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_recycle_rxq_info *recycle_rxq_info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, queue_id);
	if (unlikely(ret != 0))
		return ret;

	if (*dev->dev_ops->recycle_rxq_info_get == NULL)
		return -ENOTSUP;

	dev->dev_ops->recycle_rxq_info_get(dev, queue_id, recycle_rxq_info);

	return 0;
}

int
rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_burst_mode *mode)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
		return -EINVAL;
	}

	if (mode == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get ethdev port %u Rx queue %u burst mode to NULL",
			port_id, queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->rx_burst_mode_get == NULL)
		return -ENOTSUP;
	memset(mode, 0, sizeof(*mode));
	ret = eth_err(port_id,
		      dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));

	rte_eth_trace_rx_burst_mode_get(port_id, queue_id, mode, ret);

	return ret;
}

int
rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_burst_mode *mode)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", queue_id);
		return -EINVAL;
	}

	if (mode == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get ethdev port %u Tx queue %u burst mode to NULL",
			port_id, queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->tx_burst_mode_get == NULL)
		return -ENOTSUP;
	memset(mode, 0, sizeof(*mode));
	ret = eth_err(port_id,
		      dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));

	rte_eth_trace_tx_burst_mode_get(port_id, queue_id, mode, ret);

	return ret;
}

int
rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
		struct rte_power_monitor_cond *pmc)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
		return -EINVAL;
	}

	if (pmc == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get ethdev port %u Rx queue %u power monitor condition to NULL",
			port_id, queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_monitor_addr == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));

	rte_eth_trace_get_monitor_addr(port_id, queue_id, pmc, ret);

	return ret;
}

int
rte_eth_dev_set_mc_addr_list(uint16_t port_id,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->set_mc_addr_list == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
					mc_addr_set, nb_mc_addr));

	rte_ethdev_trace_set_mc_addr_list(port_id, mc_addr_set, nb_mc_addr,
					  ret);

	return ret;
}
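
/*
 * Illustrative usage sketch (kept disabled, not part of the library; the
 * example_* helper is made up for illustration): reporting the Rx burst mode
 * selected by the driver, e.g. to confirm that a vector path is in use.
 */
#if 0
static void
example_print_rx_burst_mode(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;

	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("port %u rxq %u burst mode: %s\n",
		       port_id, queue_id, mode.info);
}
#endif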

int
rte_eth_timesync_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->timesync_enable == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));

	rte_eth_trace_timesync_enable(port_id, ret);

	return ret;
}

int
rte_eth_timesync_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->timesync_disable == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));

	rte_eth_trace_timesync_disable(port_id, ret);

	return ret;
}

int
rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
		uint32_t flags)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (timestamp == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot read ethdev port %u Rx timestamp to NULL",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->timesync_read_rx_timestamp == NULL)
		return -ENOTSUP;

	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
			(dev, timestamp, flags));

	rte_eth_trace_timesync_read_rx_timestamp(port_id, timestamp, flags,
						  ret);

	return ret;
}

int
rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
		struct timespec *timestamp)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (timestamp == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot read ethdev port %u Tx timestamp to NULL",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->timesync_read_tx_timestamp == NULL)
		return -ENOTSUP;

	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
			(dev, timestamp));

	rte_eth_trace_timesync_read_tx_timestamp(port_id, timestamp, ret);

	return ret;
}

int
rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->timesync_adjust_time == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));

	rte_eth_trace_timesync_adjust_time(port_id, delta, ret);

	return ret;
}

int
rte_eth_timesync_adjust_freq(uint16_t port_id, int64_t ppm)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->timesync_adjust_freq == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_freq)(dev, ppm));

	rte_eth_trace_timesync_adjust_freq(port_id, ppm, ret);

	return ret;
}

int
rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (timestamp == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot read ethdev port %u timesync time to NULL",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->timesync_read_time == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
								timestamp));

	rte_eth_trace_timesync_read_time(port_id, timestamp, ret);

	return ret;
}
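
/*
 * Illustrative usage sketch (kept disabled, not part of the library; the
 * example_* helper is made up for illustration): a minimal IEEE 1588 read
 * flow. The port is assumed to have been configured and started with the PTP
 * timestamping the driver requires; error handling is elided.
 */
#if 0
static void
example_read_ptp_rx_timestamp(uint16_t port_id)
{
	struct timespec ts;

	rte_eth_timesync_enable(port_id);

	/* ... after an Rx burst returned a packet flagged with
	 * RTE_MBUF_F_RX_IEEE1588_TMST ...
	 */
	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
		printf("Rx timestamp: %jd.%09ld\n",
		       (intmax_t)ts.tv_sec, ts.tv_nsec);

	rte_eth_timesync_disable(port_id);
}
#endif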

int
rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (timestamp == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot write ethdev port %u timesync from NULL time",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->timesync_write_time == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
								timestamp));

	rte_eth_trace_timesync_write_time(port_id, timestamp, ret);

	return ret;
}

int
rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (clock == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot read ethdev port %u clock to NULL",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->read_clock == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));

	rte_eth_trace_read_clock(port_id, clock, ret);

	return ret;
}

int
rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_dev_reg_info reg_info = { 0 };
	int ret;

	if (info == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get ethdev port %u register info to NULL",
			port_id);
		return -EINVAL;
	}

	reg_info.length = info->length;
	reg_info.data = info->data;

	ret = rte_eth_dev_get_reg_info_ext(port_id, &reg_info);
	if (ret != 0)
		return ret;

	info->length = reg_info.length;
	info->width = reg_info.width;
	info->version = reg_info.version;
	info->offset = reg_info.offset;

	return 0;
}

int
rte_eth_dev_get_reg_info_ext(uint16_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;
	uint32_t i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get ethdev port %u register info to NULL",
			port_id);
		return -EINVAL;
	}

	if (info->names != NULL && info->length != 0)
		memset(info->names, 0, sizeof(struct rte_eth_reg_name) * info->length);

	if (*dev->dev_ops->get_reg == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));

	rte_ethdev_trace_get_reg_info(port_id, info, ret);

	/* Report default names if the driver does not provide them. */
	if (ret == 0 && info->names != NULL && strlen(info->names[0].name) == 0) {
		for (i = 0; i < info->length; i++)
			snprintf(info->names[i].name, RTE_ETH_REG_NAME_SIZE,
				"index_%u", info->offset + i);
	}
	return ret;
}
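
/*
 * Illustrative usage sketch (kept disabled, not part of the library; the
 * example_* helper is made up for illustration): the usual two-call pattern
 * for register dumps. The first call with data == NULL fills in length and
 * width; the second call fetches the register values into a caller buffer.
 */
#if 0
static int
example_dump_regs(uint16_t port_id)
{
	struct rte_dev_reg_info info = { .data = NULL };
	int ret;

	ret = rte_eth_dev_get_reg_info(port_id, &info);
	if (ret != 0)
		return ret;

	info.data = calloc(info.length, info.width);
	if (info.data == NULL)
		return -ENOMEM;

	ret = rte_eth_dev_get_reg_info(port_id, &info);
	free(info.data);
	return ret;
}
#endif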

int
rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->get_eeprom_length == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));

	rte_ethdev_trace_get_eeprom_length(port_id, ret);

	return ret;
}

int
rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get ethdev port %u EEPROM info to NULL",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_eeprom == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));

	rte_ethdev_trace_get_eeprom(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot set ethdev port %u EEPROM from NULL info",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->set_eeprom == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));

	rte_ethdev_trace_set_eeprom(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_get_module_info(uint16_t port_id,
			    struct rte_eth_dev_module_info *modinfo)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (modinfo == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get ethdev port %u EEPROM module info to NULL",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_module_info == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->get_module_info)(dev, modinfo);

	rte_ethdev_trace_get_module_info(port_id, modinfo, ret);

	return ret;
}

int
rte_eth_dev_get_module_eeprom(uint16_t port_id,
			      struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get ethdev port %u module EEPROM info to NULL",
			port_id);
		return -EINVAL;
	}

	if (info->data == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get ethdev port %u module EEPROM data to NULL",
			port_id);
		return -EINVAL;
	}

	if (info->length == 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get ethdev port %u module EEPROM to data with zero size",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_module_eeprom == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->get_module_eeprom)(dev, info);

	rte_ethdev_trace_get_module_eeprom(port_id, info, ret);

	return ret;
}
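
/*
 * Illustrative usage sketch (kept disabled, not part of the library; the
 * example_* helper is made up for illustration): reading a plugged module's
 * EEPROM. The module type and EEPROM length are queried first, then at most
 * that many bytes are fetched into a caller-supplied buffer.
 */
#if 0
static int
example_read_module_eeprom(uint16_t port_id, uint8_t *buf, uint32_t buf_len)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info eeprom = { 0 };
	int ret;

	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;

	eeprom.offset = 0;
	eeprom.length = RTE_MIN(modinfo.eeprom_len, buf_len);
	eeprom.data = buf;

	return rte_eth_dev_get_module_eeprom(port_id, &eeprom);
}
#endif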

int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dcb_info == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get ethdev port %u DCB info to NULL",
			port_id);
		return -EINVAL;
	}

	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	if (*dev->dev_ops->get_dcb_info == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));

	rte_ethdev_trace_get_dcb_info(port_id, dcb_info, ret);

	return ret;
}

static void
eth_dev_adjust_nb_desc(uint16_t *nb_desc,
		const struct rte_eth_desc_lim *desc_lim)
{
	/* Upcast to uint32 to avoid potential overflow with RTE_ALIGN_CEIL(). */
	uint32_t nb_desc_32 = (uint32_t)*nb_desc;

	if (desc_lim->nb_align != 0)
		nb_desc_32 = RTE_ALIGN_CEIL(nb_desc_32, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		nb_desc_32 = RTE_MIN(nb_desc_32, desc_lim->nb_max);

	nb_desc_32 = RTE_MAX(nb_desc_32, desc_lim->nb_min);

	/* Assign clipped u32 back to u16. */
	*nb_desc = (uint16_t)nb_desc_32;
}

int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (nb_rx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	rte_ethdev_trace_adjust_nb_rx_tx_desc(port_id);

	return 0;
}

int
rte_eth_dev_hairpin_capability_get(uint16_t port_id,
				   struct rte_eth_hairpin_cap *cap)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (cap == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get ethdev port %u hairpin capability to NULL",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->hairpin_cap_get == NULL)
		return -ENOTSUP;
	memset(cap, 0, sizeof(*cap));
	ret = eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));

	rte_ethdev_trace_hairpin_capability_get(port_id, cap, ret);

	return ret;
}

int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (pool == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot test ethdev port %u mempool operation from NULL pool",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->pool_ops_supported == NULL)
		return 1; /* all pools are supported */

	ret = (*dev->dev_ops->pool_ops_supported)(dev, pool);

	rte_ethdev_trace_pool_ops_supported(port_id, pool, ret);

	return ret;
}
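
/*
 * Illustrative usage sketch (kept disabled, not part of the library; the
 * example_* helper and its parameters are made up for illustration): clamping
 * the requested ring sizes to the device limits before queue setup, so that
 * out-of-range values are adjusted instead of failing queue setup.
 */
#if 0
static int
example_setup_one_rxq(uint16_t port_id, struct rte_mempool *mp)
{
	uint16_t nb_rxd = 4096, nb_txd = 4096;
	int ret;

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;

	/* Set up Rx queue 0 with the adjusted descriptor count. */
	return rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
			rte_eth_dev_socket_id(port_id), NULL, mp);
}
#endif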

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->representor_info_get == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));

	rte_eth_trace_representor_info_get(port_id, info, ret);

	return ret;
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"The port (ID=%"PRIu16") is already configured",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Invalid features (NULL)");
		return -EINVAL;
	}

	if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0 &&
			rte_flow_restore_info_dynflag_register() < 0)
		*features &= ~RTE_ETH_RX_METADATA_TUNNEL_ID;

	if (*dev->dev_ops->rx_metadata_negotiate == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		      (*dev->dev_ops->rx_metadata_negotiate)(dev, features));

	rte_eth_trace_rx_metadata_negotiate(port_id, *features, ret);

	return ret;
}

int
rte_eth_ip_reassembly_capability_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *reassembly_capa)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"port_id=%u is not configured, cannot get IP reassembly capability",
			port_id);
		return -EINVAL;
	}

	if (reassembly_capa == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get reassembly capability to NULL");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
		return -ENOTSUP;
	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));

	ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
			(dev, reassembly_capa));

	rte_eth_trace_ip_reassembly_capability_get(port_id, reassembly_capa,
						   ret);

	return ret;
}

int
rte_eth_ip_reassembly_conf_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"port_id=%u is not configured, cannot get IP reassembly configuration",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get reassembly info to NULL");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
		return -ENOTSUP;
	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
	ret = eth_err(port_id,
		      (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_get(port_id, conf, ret);

	return ret;
}
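
/*
 * Illustrative usage sketch (kept disabled, not part of the library; the
 * example_* helper is made up for illustration): negotiating delivery of flow
 * metadata before the port is configured, keeping only the features the
 * driver accepts.
 */
#if 0
static uint64_t
example_negotiate_rx_metadata(uint16_t port_id)
{
	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
			    RTE_ETH_RX_METADATA_USER_MARK;

	/* Must be called before rte_eth_dev_configure(). */
	if (rte_eth_rx_metadata_negotiate(port_id, &features) != 0)
		return 0;

	return features;
}
#endif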

int
rte_eth_ip_reassembly_conf_set(uint16_t port_id,
		const struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"port_id=%u is not configured, cannot set IP reassembly configuration",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"port_id=%u is started, cannot configure IP reassembly params.",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
				"Invalid IP reassembly configuration (NULL)");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		      (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_set(port_id, conf, ret);

	return ret;
}

int
rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (file == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Invalid file (NULL)");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_dev_priv_dump == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
}

int
rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset, uint16_t num, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
		return -EINVAL;
	}

	if (file == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Invalid file (NULL)");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_rx_descriptor_dump == NULL)
		return -ENOTSUP;

	return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev,
			queue_id, offset, num, file));
}

int
rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset, uint16_t num, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", queue_id);
		return -EINVAL;
	}

	if (file == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Invalid file (NULL)");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_tx_descriptor_dump == NULL)
		return -ENOTSUP;

	return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev,
			queue_id, offset, num, file));
}
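
/*
 * Illustrative usage sketch (kept disabled, not part of the library; the
 * example_* helper is made up for illustration): dumping the first Rx
 * descriptors of a queue to stderr, e.g. while debugging a stalled queue.
 */
#if 0
static void
example_dump_rx_descs(uint16_t port_id, uint16_t queue_id)
{
	if (rte_eth_rx_descriptor_dump(port_id, queue_id, 0, 16, stderr) != 0)
		fprintf(stderr, "descriptor dump not supported on port %u\n",
			port_id);
}
#endif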

int
rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
{
	size_t i;
	int j;
	struct rte_eth_dev *dev;
	const uint32_t *all_types;
	size_t no_of_elements = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (ptypes == NULL && num > 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get ethdev port %u supported header protocol types to NULL when array size is non zero",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL)
		return -ENOTSUP;
	all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev,
						&no_of_elements);

	if (all_types == NULL)
		return 0;

	for (i = 0, j = 0; i < no_of_elements; ++i) {
		if (j < num) {
			ptypes[j] = all_types[i];

			rte_eth_trace_buffer_split_get_supported_hdr_ptypes(
							port_id, j, ptypes[j]);
		}
		j++;
	}

	return j;
}

int rte_eth_dev_count_aggr_ports(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->count_aggr_ports == NULL)
		return 0;
	ret = eth_err(port_id, (*dev->dev_ops->count_aggr_ports)(dev));

	rte_eth_trace_count_aggr_ports(port_id, ret);

	return ret;
}

int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
				     uint8_t affinity)
{
	struct rte_eth_dev *dev;
	int aggr_ports;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", tx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->map_aggr_tx_affinity == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Port %u must be configured before Tx affinity mapping",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Port %u must be stopped to allow configuration",
			port_id);
		return -EBUSY;
	}

	aggr_ports = rte_eth_dev_count_aggr_ports(port_id);
	if (aggr_ports == 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Port %u has no aggregated port",
			port_id);
		return -ENOTSUP;
	}

	if (affinity > aggr_ports) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Port %u map invalid affinity %u exceeds the maximum number %u",
			port_id, affinity, aggr_ports);
		return -EINVAL;
	}

	ret = eth_err(port_id, (*dev->dev_ops->map_aggr_tx_affinity)(dev,
			tx_queue_id, affinity));

	rte_eth_trace_map_aggr_tx_affinity(port_id, tx_queue_id, affinity, ret);

	return ret;
}

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);