1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2017 Intel Corporation 3 */ 4 5 #include <errno.h> 6 #include <inttypes.h> 7 #include <stdbool.h> 8 #include <stdint.h> 9 #include <stdio.h> 10 #include <stdlib.h> 11 #include <string.h> 12 #include <sys/queue.h> 13 14 #include <bus_driver.h> 15 #include <rte_log.h> 16 #include <rte_interrupts.h> 17 #include <rte_kvargs.h> 18 #include <rte_memcpy.h> 19 #include <rte_common.h> 20 #include <rte_mempool.h> 21 #include <rte_malloc.h> 22 #include <rte_mbuf.h> 23 #include <rte_errno.h> 24 #include <rte_spinlock.h> 25 #include <rte_string_fns.h> 26 #include <rte_class.h> 27 #include <rte_ether.h> 28 #include <rte_telemetry.h> 29 30 #include "rte_ethdev.h" 31 #include "rte_ethdev_trace_fp.h" 32 #include "ethdev_driver.h" 33 #include "rte_flow_driver.h" 34 #include "ethdev_profile.h" 35 #include "ethdev_private.h" 36 #include "ethdev_trace.h" 37 #include "sff_telemetry.h" 38 39 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS]; 40 41 /* public fast-path API */ 42 struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS]; 43 44 /* spinlock for add/remove Rx callbacks */ 45 static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER; 46 47 /* spinlock for add/remove Tx callbacks */ 48 static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER; 49 50 /* store statistics names and its offset in stats structure */ 51 struct rte_eth_xstats_name_off { 52 char name[RTE_ETH_XSTATS_NAME_SIZE]; 53 unsigned offset; 54 }; 55 56 static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = { 57 {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)}, 58 {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)}, 59 {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)}, 60 {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)}, 61 {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)}, 62 {"rx_errors", offsetof(struct rte_eth_stats, ierrors)}, 63 {"tx_errors", offsetof(struct rte_eth_stats, oerrors)}, 64 {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats, 65 rx_nombuf)}, 66 }; 67 68 #define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings) 69 70 static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = { 71 {"packets", offsetof(struct rte_eth_stats, q_ipackets)}, 72 {"bytes", offsetof(struct rte_eth_stats, q_ibytes)}, 73 {"errors", offsetof(struct rte_eth_stats, q_errors)}, 74 }; 75 76 #define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings) 77 78 static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = { 79 {"packets", offsetof(struct rte_eth_stats, q_opackets)}, 80 {"bytes", offsetof(struct rte_eth_stats, q_obytes)}, 81 }; 82 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings) 83 84 #define RTE_RX_OFFLOAD_BIT2STR(_name) \ 85 { RTE_ETH_RX_OFFLOAD_##_name, #_name } 86 87 static const struct { 88 uint64_t offload; 89 const char *name; 90 } eth_dev_rx_offload_names[] = { 91 RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP), 92 RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM), 93 RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM), 94 RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM), 95 RTE_RX_OFFLOAD_BIT2STR(TCP_LRO), 96 RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP), 97 RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), 98 RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP), 99 RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER), 100 RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND), 101 RTE_RX_OFFLOAD_BIT2STR(SCATTER), 102 RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP), 103 RTE_RX_OFFLOAD_BIT2STR(SECURITY), 104 RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC), 105 RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM), 106 
RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM), 107 RTE_RX_OFFLOAD_BIT2STR(RSS_HASH), 108 RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT), 109 }; 110 111 #undef RTE_RX_OFFLOAD_BIT2STR 112 #undef RTE_ETH_RX_OFFLOAD_BIT2STR 113 114 #define RTE_TX_OFFLOAD_BIT2STR(_name) \ 115 { RTE_ETH_TX_OFFLOAD_##_name, #_name } 116 117 static const struct { 118 uint64_t offload; 119 const char *name; 120 } eth_dev_tx_offload_names[] = { 121 RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT), 122 RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM), 123 RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM), 124 RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM), 125 RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM), 126 RTE_TX_OFFLOAD_BIT2STR(TCP_TSO), 127 RTE_TX_OFFLOAD_BIT2STR(UDP_TSO), 128 RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), 129 RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT), 130 RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO), 131 RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO), 132 RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO), 133 RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO), 134 RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT), 135 RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE), 136 RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS), 137 RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE), 138 RTE_TX_OFFLOAD_BIT2STR(SECURITY), 139 RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO), 140 RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO), 141 RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM), 142 RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP), 143 }; 144 145 #undef RTE_TX_OFFLOAD_BIT2STR 146 147 static const struct { 148 uint64_t offload; 149 const char *name; 150 } rte_eth_dev_capa_names[] = { 151 {RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"}, 152 {RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"}, 153 {RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"}, 154 {RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"}, 155 {RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"}, 156 }; 157 158 enum { 159 STAT_QMAP_TX = 0, 160 STAT_QMAP_RX 161 }; 162 163 int 164 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str) 165 { 166 int ret; 167 struct rte_devargs devargs; 168 const char *bus_param_key; 169 char *bus_str = NULL; 170 char *cls_str = NULL; 171 int str_size; 172 173 if (iter == NULL) { 174 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n"); 175 return -EINVAL; 176 } 177 178 if (devargs_str == NULL) { 179 RTE_ETHDEV_LOG(ERR, 180 "Cannot initialize iterator from NULL device description string\n"); 181 return -EINVAL; 182 } 183 184 memset(iter, 0, sizeof(*iter)); 185 memset(&devargs, 0, sizeof(devargs)); 186 187 /* 188 * The devargs string may use various syntaxes: 189 * - 0000:08:00.0,representor=[1-3] 190 * - pci:0000:06:00.0,representor=[0,5] 191 * - class=eth,mac=00:11:22:33:44:55 192 * - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z 193 */ 194 195 /* 196 * Handle pure class filter (i.e. without any bus-level argument), 197 * from future new syntax. 198 * rte_devargs_parse() is not yet supporting the new syntax, 199 * that's why this simple case is temporarily parsed here. 200 */ 201 #define iter_anybus_str "class=eth," 202 if (strncmp(devargs_str, iter_anybus_str, 203 strlen(iter_anybus_str)) == 0) { 204 iter->cls_str = devargs_str + strlen(iter_anybus_str); 205 goto end; 206 } 207 208 /* Split bus, device and parameters. */ 209 ret = rte_devargs_parse(&devargs, devargs_str); 210 if (ret != 0) 211 goto error; 212 213 /* 214 * Assume parameters of old syntax can match only at ethdev level. 215 * Extra parameters will be ignored, thanks to "+" prefix. 
216 */ 217 str_size = strlen(devargs.args) + 2; 218 cls_str = malloc(str_size); 219 if (cls_str == NULL) { 220 ret = -ENOMEM; 221 goto error; 222 } 223 ret = snprintf(cls_str, str_size, "+%s", devargs.args); 224 if (ret != str_size - 1) { 225 ret = -EINVAL; 226 goto error; 227 } 228 iter->cls_str = cls_str; 229 230 iter->bus = devargs.bus; 231 if (iter->bus->dev_iterate == NULL) { 232 ret = -ENOTSUP; 233 goto error; 234 } 235 236 /* Convert bus args to new syntax for use with new API dev_iterate. */ 237 if ((strcmp(iter->bus->name, "vdev") == 0) || 238 (strcmp(iter->bus->name, "fslmc") == 0) || 239 (strcmp(iter->bus->name, "dpaa_bus") == 0)) { 240 bus_param_key = "name"; 241 } else if (strcmp(iter->bus->name, "pci") == 0) { 242 bus_param_key = "addr"; 243 } else { 244 ret = -ENOTSUP; 245 goto error; 246 } 247 str_size = strlen(bus_param_key) + strlen(devargs.name) + 2; 248 bus_str = malloc(str_size); 249 if (bus_str == NULL) { 250 ret = -ENOMEM; 251 goto error; 252 } 253 ret = snprintf(bus_str, str_size, "%s=%s", 254 bus_param_key, devargs.name); 255 if (ret != str_size - 1) { 256 ret = -EINVAL; 257 goto error; 258 } 259 iter->bus_str = bus_str; 260 261 end: 262 iter->cls = rte_class_find_by_name("eth"); 263 rte_devargs_reset(&devargs); 264 265 rte_eth_trace_iterator_init(devargs_str); 266 267 return 0; 268 269 error: 270 if (ret == -ENOTSUP) 271 RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n", 272 iter->bus->name); 273 rte_devargs_reset(&devargs); 274 free(bus_str); 275 free(cls_str); 276 return ret; 277 } 278 279 uint16_t 280 rte_eth_iterator_next(struct rte_dev_iterator *iter) 281 { 282 if (iter == NULL) { 283 RTE_ETHDEV_LOG(ERR, 284 "Cannot get next device from NULL iterator\n"); 285 return RTE_MAX_ETHPORTS; 286 } 287 288 if (iter->cls == NULL) /* invalid ethdev iterator */ 289 return RTE_MAX_ETHPORTS; 290 291 do { /* loop to try all matching rte_device */ 292 /* If not pure ethdev filter and */ 293 if (iter->bus != NULL && 294 /* not in middle of rte_eth_dev iteration, */ 295 iter->class_device == NULL) { 296 /* get next rte_device to try. */ 297 iter->device = iter->bus->dev_iterate( 298 iter->device, iter->bus_str, iter); 299 if (iter->device == NULL) 300 break; /* no more rte_device candidate */ 301 } 302 /* A device is matching bus part, need to check ethdev part. */ 303 iter->class_device = iter->cls->dev_iterate( 304 iter->class_device, iter->cls_str, iter); 305 if (iter->class_device != NULL) { 306 uint16_t id = eth_dev_to_id(iter->class_device); 307 308 rte_eth_trace_iterator_next(iter, id); 309 310 return id; /* match */ 311 } 312 } while (iter->bus != NULL); /* need to try next rte_device */ 313 314 /* No more ethdev port to iterate. 
*/ 315 rte_eth_iterator_cleanup(iter); 316 return RTE_MAX_ETHPORTS; 317 } 318 319 void 320 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter) 321 { 322 if (iter == NULL) { 323 RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n"); 324 return; 325 } 326 327 if (iter->bus_str == NULL) 328 return; /* nothing to free in pure class filter */ 329 330 rte_eth_trace_iterator_cleanup(iter); 331 332 free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */ 333 free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */ 334 memset(iter, 0, sizeof(*iter)); 335 } 336 337 uint16_t 338 rte_eth_find_next(uint16_t port_id) 339 { 340 while (port_id < RTE_MAX_ETHPORTS && 341 rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED) 342 port_id++; 343 344 if (port_id >= RTE_MAX_ETHPORTS) 345 return RTE_MAX_ETHPORTS; 346 347 rte_eth_trace_find_next(port_id); 348 349 return port_id; 350 } 351 352 /* 353 * Macro to iterate over all valid ports for internal usage. 354 * Note: RTE_ETH_FOREACH_DEV is different because filtering owned ports. 355 */ 356 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \ 357 for (port_id = rte_eth_find_next(0); \ 358 port_id < RTE_MAX_ETHPORTS; \ 359 port_id = rte_eth_find_next(port_id + 1)) 360 361 uint16_t 362 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent) 363 { 364 port_id = rte_eth_find_next(port_id); 365 while (port_id < RTE_MAX_ETHPORTS && 366 rte_eth_devices[port_id].device != parent) 367 port_id = rte_eth_find_next(port_id + 1); 368 369 rte_eth_trace_find_next_of(port_id, parent); 370 371 return port_id; 372 } 373 374 uint16_t 375 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id) 376 { 377 uint16_t ret; 378 379 RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS); 380 ret = rte_eth_find_next_of(port_id, 381 rte_eth_devices[ref_port_id].device); 382 383 rte_eth_trace_find_next_sibling(port_id, ref_port_id, ret); 384 385 return ret; 386 } 387 388 static bool 389 eth_dev_is_allocated(const struct rte_eth_dev *ethdev) 390 { 391 return ethdev->data->name[0] != '\0'; 392 } 393 394 int 395 rte_eth_dev_is_valid_port(uint16_t port_id) 396 { 397 int is_valid; 398 399 if (port_id >= RTE_MAX_ETHPORTS || 400 (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)) 401 is_valid = 0; 402 else 403 is_valid = 1; 404 405 rte_ethdev_trace_is_valid_port(port_id, is_valid); 406 407 return is_valid; 408 } 409 410 static int 411 eth_is_valid_owner_id(uint64_t owner_id) 412 { 413 if (owner_id == RTE_ETH_DEV_NO_OWNER || 414 eth_dev_shared_data->next_owner_id <= owner_id) 415 return 0; 416 return 1; 417 } 418 419 uint64_t 420 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id) 421 { 422 port_id = rte_eth_find_next(port_id); 423 while (port_id < RTE_MAX_ETHPORTS && 424 rte_eth_devices[port_id].data->owner.id != owner_id) 425 port_id = rte_eth_find_next(port_id + 1); 426 427 rte_eth_trace_find_next_owned_by(port_id, owner_id); 428 429 return port_id; 430 } 431 432 int 433 rte_eth_dev_owner_new(uint64_t *owner_id) 434 { 435 if (owner_id == NULL) { 436 RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n"); 437 return -EINVAL; 438 } 439 440 eth_dev_shared_data_prepare(); 441 442 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 443 444 *owner_id = eth_dev_shared_data->next_owner_id++; 445 446 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 447 448 rte_ethdev_trace_owner_new(*owner_id); 449 450 return 0; 451 } 452 453 static int 454 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id, 
455 const struct rte_eth_dev_owner *new_owner) 456 { 457 struct rte_eth_dev *ethdev = &rte_eth_devices[port_id]; 458 struct rte_eth_dev_owner *port_owner; 459 460 if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) { 461 RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n", 462 port_id); 463 return -ENODEV; 464 } 465 466 if (new_owner == NULL) { 467 RTE_ETHDEV_LOG(ERR, 468 "Cannot set ethdev port %u owner from NULL owner\n", 469 port_id); 470 return -EINVAL; 471 } 472 473 if (!eth_is_valid_owner_id(new_owner->id) && 474 !eth_is_valid_owner_id(old_owner_id)) { 475 RTE_ETHDEV_LOG(ERR, 476 "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n", 477 old_owner_id, new_owner->id); 478 return -EINVAL; 479 } 480 481 port_owner = &rte_eth_devices[port_id].data->owner; 482 if (port_owner->id != old_owner_id) { 483 RTE_ETHDEV_LOG(ERR, 484 "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n", 485 port_id, port_owner->name, port_owner->id); 486 return -EPERM; 487 } 488 489 /* can not truncate (same structure) */ 490 strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN); 491 492 port_owner->id = new_owner->id; 493 494 RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n", 495 port_id, new_owner->name, new_owner->id); 496 497 return 0; 498 } 499 500 int 501 rte_eth_dev_owner_set(const uint16_t port_id, 502 const struct rte_eth_dev_owner *owner) 503 { 504 int ret; 505 506 eth_dev_shared_data_prepare(); 507 508 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 509 510 ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner); 511 512 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 513 514 rte_ethdev_trace_owner_set(port_id, owner, ret); 515 516 return ret; 517 } 518 519 int 520 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id) 521 { 522 const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner) 523 {.id = RTE_ETH_DEV_NO_OWNER, .name = ""}; 524 int ret; 525 526 eth_dev_shared_data_prepare(); 527 528 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 529 530 ret = eth_dev_owner_set(port_id, owner_id, &new_owner); 531 532 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 533 534 rte_ethdev_trace_owner_unset(port_id, owner_id, ret); 535 536 return ret; 537 } 538 539 int 540 rte_eth_dev_owner_delete(const uint64_t owner_id) 541 { 542 uint16_t port_id; 543 int ret = 0; 544 545 eth_dev_shared_data_prepare(); 546 547 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 548 549 if (eth_is_valid_owner_id(owner_id)) { 550 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) { 551 struct rte_eth_dev_data *data = 552 rte_eth_devices[port_id].data; 553 if (data != NULL && data->owner.id == owner_id) 554 memset(&data->owner, 0, 555 sizeof(struct rte_eth_dev_owner)); 556 } 557 RTE_ETHDEV_LOG(NOTICE, 558 "All port owners owned by %016"PRIx64" identifier have removed\n", 559 owner_id); 560 } else { 561 RTE_ETHDEV_LOG(ERR, 562 "Invalid owner ID=%016"PRIx64"\n", 563 owner_id); 564 ret = -EINVAL; 565 } 566 567 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 568 569 rte_ethdev_trace_owner_delete(owner_id, ret); 570 571 return ret; 572 } 573 574 int 575 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner) 576 { 577 struct rte_eth_dev *ethdev; 578 579 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 580 ethdev = &rte_eth_devices[port_id]; 581 582 if (!eth_dev_is_allocated(ethdev)) { 583 RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n", 584 port_id); 585 return 
-ENODEV; 586 } 587 588 if (owner == NULL) { 589 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n", 590 port_id); 591 return -EINVAL; 592 } 593 594 eth_dev_shared_data_prepare(); 595 596 rte_spinlock_lock(ð_dev_shared_data->ownership_lock); 597 rte_memcpy(owner, ðdev->data->owner, sizeof(*owner)); 598 rte_spinlock_unlock(ð_dev_shared_data->ownership_lock); 599 600 rte_ethdev_trace_owner_get(port_id, owner); 601 602 return 0; 603 } 604 605 int 606 rte_eth_dev_socket_id(uint16_t port_id) 607 { 608 int socket_id = SOCKET_ID_ANY; 609 610 if (!rte_eth_dev_is_valid_port(port_id)) { 611 rte_errno = EINVAL; 612 } else { 613 socket_id = rte_eth_devices[port_id].data->numa_node; 614 if (socket_id == SOCKET_ID_ANY) 615 rte_errno = 0; 616 } 617 618 rte_ethdev_trace_socket_id(port_id, socket_id); 619 620 return socket_id; 621 } 622 623 void * 624 rte_eth_dev_get_sec_ctx(uint16_t port_id) 625 { 626 void *ctx; 627 628 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL); 629 ctx = rte_eth_devices[port_id].security_ctx; 630 631 rte_ethdev_trace_get_sec_ctx(port_id, ctx); 632 633 return ctx; 634 } 635 636 uint16_t 637 rte_eth_dev_count_avail(void) 638 { 639 uint16_t p; 640 uint16_t count; 641 642 count = 0; 643 644 RTE_ETH_FOREACH_DEV(p) 645 count++; 646 647 rte_ethdev_trace_count_avail(count); 648 649 return count; 650 } 651 652 uint16_t 653 rte_eth_dev_count_total(void) 654 { 655 uint16_t port, count = 0; 656 657 RTE_ETH_FOREACH_VALID_DEV(port) 658 count++; 659 660 rte_ethdev_trace_count_total(count); 661 662 return count; 663 } 664 665 int 666 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name) 667 { 668 char *tmp; 669 670 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 671 672 if (name == NULL) { 673 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n", 674 port_id); 675 return -EINVAL; 676 } 677 678 /* shouldn't check 'rte_eth_devices[i].data', 679 * because it might be overwritten by VDEV PMD */ 680 tmp = eth_dev_shared_data->data[port_id].name; 681 strcpy(name, tmp); 682 683 rte_ethdev_trace_get_name_by_port(port_id, name); 684 685 return 0; 686 } 687 688 int 689 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) 690 { 691 uint16_t pid; 692 693 if (name == NULL) { 694 RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name"); 695 return -EINVAL; 696 } 697 698 if (port_id == NULL) { 699 RTE_ETHDEV_LOG(ERR, 700 "Cannot get port ID to NULL for %s\n", name); 701 return -EINVAL; 702 } 703 704 RTE_ETH_FOREACH_VALID_DEV(pid) 705 if (!strcmp(name, eth_dev_shared_data->data[pid].name)) { 706 *port_id = pid; 707 708 rte_ethdev_trace_get_port_by_name(name, *port_id); 709 710 return 0; 711 } 712 713 return -ENODEV; 714 } 715 716 int 717 eth_err(uint16_t port_id, int ret) 718 { 719 if (ret == 0) 720 return 0; 721 if (rte_eth_dev_is_removed(port_id)) 722 return -EIO; 723 return ret; 724 } 725 726 static int 727 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id) 728 { 729 uint16_t port_id; 730 731 if (rx_queue_id >= dev->data->nb_rx_queues) { 732 port_id = dev->data->port_id; 733 RTE_ETHDEV_LOG(ERR, 734 "Invalid Rx queue_id=%u of device with port_id=%u\n", 735 rx_queue_id, port_id); 736 return -EINVAL; 737 } 738 739 if (dev->data->rx_queues[rx_queue_id] == NULL) { 740 port_id = dev->data->port_id; 741 RTE_ETHDEV_LOG(ERR, 742 "Queue %u of device with port_id=%u has not been setup\n", 743 rx_queue_id, port_id); 744 return -EINVAL; 745 } 746 747 return 0; 748 } 749 750 static int 751 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t 
tx_queue_id) 752 { 753 uint16_t port_id; 754 755 if (tx_queue_id >= dev->data->nb_tx_queues) { 756 port_id = dev->data->port_id; 757 RTE_ETHDEV_LOG(ERR, 758 "Invalid Tx queue_id=%u of device with port_id=%u\n", 759 tx_queue_id, port_id); 760 return -EINVAL; 761 } 762 763 if (dev->data->tx_queues[tx_queue_id] == NULL) { 764 port_id = dev->data->port_id; 765 RTE_ETHDEV_LOG(ERR, 766 "Queue %u of device with port_id=%u has not been setup\n", 767 tx_queue_id, port_id); 768 return -EINVAL; 769 } 770 771 return 0; 772 } 773 774 int 775 rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id) 776 { 777 struct rte_eth_dev *dev; 778 779 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 780 dev = &rte_eth_devices[port_id]; 781 782 return eth_dev_validate_rx_queue(dev, queue_id); 783 } 784 785 int 786 rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id) 787 { 788 struct rte_eth_dev *dev; 789 790 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 791 dev = &rte_eth_devices[port_id]; 792 793 return eth_dev_validate_tx_queue(dev, queue_id); 794 } 795 796 int 797 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id) 798 { 799 struct rte_eth_dev *dev; 800 int ret; 801 802 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 803 dev = &rte_eth_devices[port_id]; 804 805 if (!dev->data->dev_started) { 806 RTE_ETHDEV_LOG(ERR, 807 "Port %u must be started before start any queue\n", 808 port_id); 809 return -EINVAL; 810 } 811 812 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 813 if (ret != 0) 814 return ret; 815 816 if (*dev->dev_ops->rx_queue_start == NULL) 817 return -ENOTSUP; 818 819 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 820 RTE_ETHDEV_LOG(INFO, 821 "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 822 rx_queue_id, port_id); 823 return -EINVAL; 824 } 825 826 if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 827 RTE_ETHDEV_LOG(INFO, 828 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 829 rx_queue_id, port_id); 830 return 0; 831 } 832 833 ret = eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id)); 834 835 rte_ethdev_trace_rx_queue_start(port_id, rx_queue_id, ret); 836 837 return ret; 838 } 839 840 int 841 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id) 842 { 843 struct rte_eth_dev *dev; 844 int ret; 845 846 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 847 dev = &rte_eth_devices[port_id]; 848 849 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 850 if (ret != 0) 851 return ret; 852 853 if (*dev->dev_ops->rx_queue_stop == NULL) 854 return -ENOTSUP; 855 856 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 857 RTE_ETHDEV_LOG(INFO, 858 "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 859 rx_queue_id, port_id); 860 return -EINVAL; 861 } 862 863 if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 864 RTE_ETHDEV_LOG(INFO, 865 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 866 rx_queue_id, port_id); 867 return 0; 868 } 869 870 ret = eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id)); 871 872 rte_ethdev_trace_rx_queue_stop(port_id, rx_queue_id, ret); 873 874 return ret; 875 } 876 877 int 878 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id) 879 { 880 struct rte_eth_dev *dev; 881 int ret; 882 883 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 884 dev = &rte_eth_devices[port_id]; 885 886 if (!dev->data->dev_started) { 887 RTE_ETHDEV_LOG(ERR, 888 
"Port %u must be started before start any queue\n", 889 port_id); 890 return -EINVAL; 891 } 892 893 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 894 if (ret != 0) 895 return ret; 896 897 if (*dev->dev_ops->tx_queue_start == NULL) 898 return -ENOTSUP; 899 900 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 901 RTE_ETHDEV_LOG(INFO, 902 "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 903 tx_queue_id, port_id); 904 return -EINVAL; 905 } 906 907 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 908 RTE_ETHDEV_LOG(INFO, 909 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 910 tx_queue_id, port_id); 911 return 0; 912 } 913 914 ret = eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id)); 915 916 rte_ethdev_trace_tx_queue_start(port_id, tx_queue_id, ret); 917 918 return ret; 919 } 920 921 int 922 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id) 923 { 924 struct rte_eth_dev *dev; 925 int ret; 926 927 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 928 dev = &rte_eth_devices[port_id]; 929 930 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 931 if (ret != 0) 932 return ret; 933 934 if (*dev->dev_ops->tx_queue_stop == NULL) 935 return -ENOTSUP; 936 937 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 938 RTE_ETHDEV_LOG(INFO, 939 "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 940 tx_queue_id, port_id); 941 return -EINVAL; 942 } 943 944 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 945 RTE_ETHDEV_LOG(INFO, 946 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 947 tx_queue_id, port_id); 948 return 0; 949 } 950 951 ret = eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); 952 953 rte_ethdev_trace_tx_queue_stop(port_id, tx_queue_id, ret); 954 955 return ret; 956 } 957 958 uint32_t 959 rte_eth_speed_bitflag(uint32_t speed, int duplex) 960 { 961 uint32_t ret; 962 963 switch (speed) { 964 case RTE_ETH_SPEED_NUM_10M: 965 ret = duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD; 966 break; 967 case RTE_ETH_SPEED_NUM_100M: 968 ret = duplex ? 
RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD; 969 break; 970 case RTE_ETH_SPEED_NUM_1G: 971 ret = RTE_ETH_LINK_SPEED_1G; 972 break; 973 case RTE_ETH_SPEED_NUM_2_5G: 974 ret = RTE_ETH_LINK_SPEED_2_5G; 975 break; 976 case RTE_ETH_SPEED_NUM_5G: 977 ret = RTE_ETH_LINK_SPEED_5G; 978 break; 979 case RTE_ETH_SPEED_NUM_10G: 980 ret = RTE_ETH_LINK_SPEED_10G; 981 break; 982 case RTE_ETH_SPEED_NUM_20G: 983 ret = RTE_ETH_LINK_SPEED_20G; 984 break; 985 case RTE_ETH_SPEED_NUM_25G: 986 ret = RTE_ETH_LINK_SPEED_25G; 987 break; 988 case RTE_ETH_SPEED_NUM_40G: 989 ret = RTE_ETH_LINK_SPEED_40G; 990 break; 991 case RTE_ETH_SPEED_NUM_50G: 992 ret = RTE_ETH_LINK_SPEED_50G; 993 break; 994 case RTE_ETH_SPEED_NUM_56G: 995 ret = RTE_ETH_LINK_SPEED_56G; 996 break; 997 case RTE_ETH_SPEED_NUM_100G: 998 ret = RTE_ETH_LINK_SPEED_100G; 999 break; 1000 case RTE_ETH_SPEED_NUM_200G: 1001 ret = RTE_ETH_LINK_SPEED_200G; 1002 break; 1003 case RTE_ETH_SPEED_NUM_400G: 1004 ret = RTE_ETH_LINK_SPEED_400G; 1005 break; 1006 default: 1007 ret = 0; 1008 } 1009 1010 rte_eth_trace_speed_bitflag(speed, duplex, ret); 1011 1012 return ret; 1013 } 1014 1015 const char * 1016 rte_eth_dev_rx_offload_name(uint64_t offload) 1017 { 1018 const char *name = "UNKNOWN"; 1019 unsigned int i; 1020 1021 for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) { 1022 if (offload == eth_dev_rx_offload_names[i].offload) { 1023 name = eth_dev_rx_offload_names[i].name; 1024 break; 1025 } 1026 } 1027 1028 rte_ethdev_trace_rx_offload_name(offload, name); 1029 1030 return name; 1031 } 1032 1033 const char * 1034 rte_eth_dev_tx_offload_name(uint64_t offload) 1035 { 1036 const char *name = "UNKNOWN"; 1037 unsigned int i; 1038 1039 for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) { 1040 if (offload == eth_dev_tx_offload_names[i].offload) { 1041 name = eth_dev_tx_offload_names[i].name; 1042 break; 1043 } 1044 } 1045 1046 rte_ethdev_trace_tx_offload_name(offload, name); 1047 1048 return name; 1049 } 1050 1051 static char * 1052 eth_dev_offload_names(uint64_t bitmask, char *buf, size_t size, 1053 const char *(*offload_name)(uint64_t)) 1054 { 1055 unsigned int pos = 0; 1056 int ret; 1057 1058 /* There should be at least enough space to handle those cases */ 1059 RTE_ASSERT(size >= sizeof("none") && size >= sizeof("...")); 1060 1061 if (bitmask == 0) { 1062 ret = snprintf(&buf[pos], size - pos, "none"); 1063 if (ret < 0 || pos + ret >= size) 1064 ret = 0; 1065 pos += ret; 1066 goto out; 1067 } 1068 1069 while (bitmask != 0) { 1070 uint64_t offload = RTE_BIT64(rte_ctz64(bitmask)); 1071 const char *name = offload_name(offload); 1072 1073 ret = snprintf(&buf[pos], size - pos, "%s,", name); 1074 if (ret < 0 || pos + ret >= size) { 1075 if (pos + sizeof("...") >= size) 1076 pos = size - sizeof("..."); 1077 ret = snprintf(&buf[pos], size - pos, "..."); 1078 if (ret > 0 && pos + ret < size) 1079 pos += ret; 1080 goto out; 1081 } 1082 1083 pos += ret; 1084 bitmask &= ~offload; 1085 } 1086 1087 /* Eliminate trailing comma */ 1088 pos--; 1089 out: 1090 buf[pos] = '\0'; 1091 return buf; 1092 } 1093 1094 const char * 1095 rte_eth_dev_capability_name(uint64_t capability) 1096 { 1097 const char *name = "UNKNOWN"; 1098 unsigned int i; 1099 1100 for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) { 1101 if (capability == rte_eth_dev_capa_names[i].offload) { 1102 name = rte_eth_dev_capa_names[i].name; 1103 break; 1104 } 1105 } 1106 1107 rte_ethdev_trace_capability_name(capability, name); 1108 1109 return name; 1110 } 1111 1112 static inline int 1113 
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size, 1114 uint32_t max_rx_pkt_len, uint32_t dev_info_size) 1115 { 1116 int ret = 0; 1117 1118 if (dev_info_size == 0) { 1119 if (config_size != max_rx_pkt_len) { 1120 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size" 1121 " %u != %u is not allowed\n", 1122 port_id, config_size, max_rx_pkt_len); 1123 ret = -EINVAL; 1124 } 1125 } else if (config_size > dev_info_size) { 1126 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1127 "> max allowed value %u\n", port_id, config_size, 1128 dev_info_size); 1129 ret = -EINVAL; 1130 } else if (config_size < RTE_ETHER_MIN_LEN) { 1131 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1132 "< min allowed value %u\n", port_id, config_size, 1133 (unsigned int)RTE_ETHER_MIN_LEN); 1134 ret = -EINVAL; 1135 } 1136 return ret; 1137 } 1138 1139 /* 1140 * Validate offloads that are requested through rte_eth_dev_configure against 1141 * the offloads successfully set by the Ethernet device. 1142 * 1143 * @param port_id 1144 * The port identifier of the Ethernet device. 1145 * @param req_offloads 1146 * The offloads that have been requested through `rte_eth_dev_configure`. 1147 * @param set_offloads 1148 * The offloads successfully set by the Ethernet device. 1149 * @param offload_type 1150 * The offload type i.e. Rx/Tx string. 1151 * @param offload_name 1152 * The function that prints the offload name. 1153 * @return 1154 * - (0) if validation successful. 1155 * - (-EINVAL) if requested offload has been silently disabled. 1156 */ 1157 static int 1158 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads, 1159 uint64_t set_offloads, const char *offload_type, 1160 const char *(*offload_name)(uint64_t)) 1161 { 1162 uint64_t offloads_diff = req_offloads ^ set_offloads; 1163 uint64_t offload; 1164 int ret = 0; 1165 1166 while (offloads_diff != 0) { 1167 /* Check if any offload is requested but not enabled. */ 1168 offload = RTE_BIT64(rte_ctz64(offloads_diff)); 1169 if (offload & req_offloads) { 1170 RTE_ETHDEV_LOG(ERR, 1171 "Port %u failed to enable %s offload %s\n", 1172 port_id, offload_type, offload_name(offload)); 1173 ret = -EINVAL; 1174 } 1175 1176 /* Check if offload couldn't be disabled. 
*/ 1177 if (offload & set_offloads) { 1178 RTE_ETHDEV_LOG(DEBUG, 1179 "Port %u %s offload %s is not requested but enabled\n", 1180 port_id, offload_type, offload_name(offload)); 1181 } 1182 1183 offloads_diff &= ~offload; 1184 } 1185 1186 return ret; 1187 } 1188 1189 static uint32_t 1190 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1191 { 1192 uint32_t overhead_len; 1193 1194 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1195 overhead_len = max_rx_pktlen - max_mtu; 1196 else 1197 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1198 1199 return overhead_len; 1200 } 1201 1202 /* rte_eth_dev_info_get() should be called prior to this function */ 1203 static int 1204 eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info, 1205 uint16_t mtu) 1206 { 1207 uint32_t overhead_len; 1208 uint32_t frame_size; 1209 1210 if (mtu < dev_info->min_mtu) { 1211 RTE_ETHDEV_LOG(ERR, 1212 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1213 mtu, dev_info->min_mtu, port_id); 1214 return -EINVAL; 1215 } 1216 if (mtu > dev_info->max_mtu) { 1217 RTE_ETHDEV_LOG(ERR, 1218 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1219 mtu, dev_info->max_mtu, port_id); 1220 return -EINVAL; 1221 } 1222 1223 overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen, 1224 dev_info->max_mtu); 1225 frame_size = mtu + overhead_len; 1226 if (frame_size < RTE_ETHER_MIN_LEN) { 1227 RTE_ETHDEV_LOG(ERR, 1228 "Frame size (%u) < min frame size (%u) for port_id %u\n", 1229 frame_size, RTE_ETHER_MIN_LEN, port_id); 1230 return -EINVAL; 1231 } 1232 1233 if (frame_size > dev_info->max_rx_pktlen) { 1234 RTE_ETHDEV_LOG(ERR, 1235 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1236 frame_size, dev_info->max_rx_pktlen, port_id); 1237 return -EINVAL; 1238 } 1239 1240 return 0; 1241 } 1242 1243 int 1244 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, 1245 const struct rte_eth_conf *dev_conf) 1246 { 1247 struct rte_eth_dev *dev; 1248 struct rte_eth_dev_info dev_info; 1249 struct rte_eth_conf orig_conf; 1250 int diag; 1251 int ret; 1252 uint16_t old_mtu; 1253 1254 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1255 dev = &rte_eth_devices[port_id]; 1256 1257 if (dev_conf == NULL) { 1258 RTE_ETHDEV_LOG(ERR, 1259 "Cannot configure ethdev port %u from NULL config\n", 1260 port_id); 1261 return -EINVAL; 1262 } 1263 1264 if (*dev->dev_ops->dev_configure == NULL) 1265 return -ENOTSUP; 1266 1267 if (dev->data->dev_started) { 1268 RTE_ETHDEV_LOG(ERR, 1269 "Port %u must be stopped to allow configuration\n", 1270 port_id); 1271 return -EBUSY; 1272 } 1273 1274 /* 1275 * Ensure that "dev_configured" is always 0 each time prepare to do 1276 * dev_configure() to avoid any non-anticipated behaviour. 1277 * And set to 1 when dev_configure() is executed successfully. 1278 */ 1279 dev->data->dev_configured = 0; 1280 1281 /* Store original config, as rollback required on failure */ 1282 memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf)); 1283 1284 /* 1285 * Copy the dev_conf parameter into the dev structure. 
1286 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get 1287 */ 1288 if (dev_conf != &dev->data->dev_conf) 1289 memcpy(&dev->data->dev_conf, dev_conf, 1290 sizeof(dev->data->dev_conf)); 1291 1292 /* Backup mtu for rollback */ 1293 old_mtu = dev->data->mtu; 1294 1295 ret = rte_eth_dev_info_get(port_id, &dev_info); 1296 if (ret != 0) 1297 goto rollback; 1298 1299 /* If number of queues specified by application for both Rx and Tx is 1300 * zero, use driver preferred values. This cannot be done individually 1301 * as it is valid for either Tx or Rx (but not both) to be zero. 1302 * If driver does not provide any preferred valued, fall back on 1303 * EAL defaults. 1304 */ 1305 if (nb_rx_q == 0 && nb_tx_q == 0) { 1306 nb_rx_q = dev_info.default_rxportconf.nb_queues; 1307 if (nb_rx_q == 0) 1308 nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES; 1309 nb_tx_q = dev_info.default_txportconf.nb_queues; 1310 if (nb_tx_q == 0) 1311 nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES; 1312 } 1313 1314 if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) { 1315 RTE_ETHDEV_LOG(ERR, 1316 "Number of Rx queues requested (%u) is greater than max supported(%d)\n", 1317 nb_rx_q, RTE_MAX_QUEUES_PER_PORT); 1318 ret = -EINVAL; 1319 goto rollback; 1320 } 1321 1322 if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) { 1323 RTE_ETHDEV_LOG(ERR, 1324 "Number of Tx queues requested (%u) is greater than max supported(%d)\n", 1325 nb_tx_q, RTE_MAX_QUEUES_PER_PORT); 1326 ret = -EINVAL; 1327 goto rollback; 1328 } 1329 1330 /* 1331 * Check that the numbers of Rx and Tx queues are not greater 1332 * than the maximum number of Rx and Tx queues supported by the 1333 * configured device. 1334 */ 1335 if (nb_rx_q > dev_info.max_rx_queues) { 1336 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n", 1337 port_id, nb_rx_q, dev_info.max_rx_queues); 1338 ret = -EINVAL; 1339 goto rollback; 1340 } 1341 1342 if (nb_tx_q > dev_info.max_tx_queues) { 1343 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n", 1344 port_id, nb_tx_q, dev_info.max_tx_queues); 1345 ret = -EINVAL; 1346 goto rollback; 1347 } 1348 1349 /* Check that the device supports requested interrupts */ 1350 if ((dev_conf->intr_conf.lsc == 1) && 1351 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { 1352 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n", 1353 dev->device->driver->name); 1354 ret = -EINVAL; 1355 goto rollback; 1356 } 1357 if ((dev_conf->intr_conf.rmv == 1) && 1358 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { 1359 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n", 1360 dev->device->driver->name); 1361 ret = -EINVAL; 1362 goto rollback; 1363 } 1364 1365 if (dev_conf->rxmode.mtu == 0) 1366 dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU; 1367 1368 ret = eth_dev_validate_mtu(port_id, &dev_info, 1369 dev->data->dev_conf.rxmode.mtu); 1370 if (ret != 0) 1371 goto rollback; 1372 1373 dev->data->mtu = dev->data->dev_conf.rxmode.mtu; 1374 1375 /* 1376 * If LRO is enabled, check that the maximum aggregated packet 1377 * size is supported by the configured device. 
1378 */ 1379 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 1380 uint32_t max_rx_pktlen; 1381 uint32_t overhead_len; 1382 1383 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1384 dev_info.max_mtu); 1385 max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len; 1386 if (dev_conf->rxmode.max_lro_pkt_size == 0) 1387 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 1388 ret = eth_dev_check_lro_pkt_size(port_id, 1389 dev->data->dev_conf.rxmode.max_lro_pkt_size, 1390 max_rx_pktlen, 1391 dev_info.max_lro_pkt_size); 1392 if (ret != 0) 1393 goto rollback; 1394 } 1395 1396 /* Any requested offloading must be within its device capabilities */ 1397 if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) != 1398 dev_conf->rxmode.offloads) { 1399 char buffer[512]; 1400 1401 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u does not support Rx offloads %s\n", 1402 port_id, eth_dev_offload_names( 1403 dev_conf->rxmode.offloads & ~dev_info.rx_offload_capa, 1404 buffer, sizeof(buffer), rte_eth_dev_rx_offload_name)); 1405 RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u was requested Rx offloads %s\n", 1406 port_id, eth_dev_offload_names(dev_conf->rxmode.offloads, 1407 buffer, sizeof(buffer), rte_eth_dev_rx_offload_name)); 1408 RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u supports Rx offloads %s\n", 1409 port_id, eth_dev_offload_names(dev_info.rx_offload_capa, 1410 buffer, sizeof(buffer), rte_eth_dev_rx_offload_name)); 1411 1412 ret = -EINVAL; 1413 goto rollback; 1414 } 1415 if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) != 1416 dev_conf->txmode.offloads) { 1417 char buffer[512]; 1418 1419 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u does not support Tx offloads %s\n", 1420 port_id, eth_dev_offload_names( 1421 dev_conf->txmode.offloads & ~dev_info.tx_offload_capa, 1422 buffer, sizeof(buffer), rte_eth_dev_tx_offload_name)); 1423 RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u was requested Tx offloads %s\n", 1424 port_id, eth_dev_offload_names(dev_conf->txmode.offloads, 1425 buffer, sizeof(buffer), rte_eth_dev_tx_offload_name)); 1426 RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u supports Tx offloads %s\n", 1427 port_id, eth_dev_offload_names(dev_info.tx_offload_capa, 1428 buffer, sizeof(buffer), rte_eth_dev_tx_offload_name)); 1429 ret = -EINVAL; 1430 goto rollback; 1431 } 1432 1433 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 1434 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf); 1435 1436 /* Check that device supports requested rss hash functions. */ 1437 if ((dev_info.flow_type_rss_offloads | 1438 dev_conf->rx_adv_conf.rss_conf.rss_hf) != 1439 dev_info.flow_type_rss_offloads) { 1440 RTE_ETHDEV_LOG(ERR, 1441 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 1442 port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf, 1443 dev_info.flow_type_rss_offloads); 1444 ret = -EINVAL; 1445 goto rollback; 1446 } 1447 1448 /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */ 1449 if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) && 1450 (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) { 1451 RTE_ETHDEV_LOG(ERR, 1452 "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n", 1453 port_id, 1454 rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH)); 1455 ret = -EINVAL; 1456 goto rollback; 1457 } 1458 1459 /* 1460 * Setup new number of Rx/Tx queues and reconfigure device. 
1461 */ 1462 diag = eth_dev_rx_queue_config(dev, nb_rx_q); 1463 if (diag != 0) { 1464 RTE_ETHDEV_LOG(ERR, 1465 "Port%u eth_dev_rx_queue_config = %d\n", 1466 port_id, diag); 1467 ret = diag; 1468 goto rollback; 1469 } 1470 1471 diag = eth_dev_tx_queue_config(dev, nb_tx_q); 1472 if (diag != 0) { 1473 RTE_ETHDEV_LOG(ERR, 1474 "Port%u eth_dev_tx_queue_config = %d\n", 1475 port_id, diag); 1476 eth_dev_rx_queue_config(dev, 0); 1477 ret = diag; 1478 goto rollback; 1479 } 1480 1481 diag = (*dev->dev_ops->dev_configure)(dev); 1482 if (diag != 0) { 1483 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n", 1484 port_id, diag); 1485 ret = eth_err(port_id, diag); 1486 goto reset_queues; 1487 } 1488 1489 /* Initialize Rx profiling if enabled at compilation time. */ 1490 diag = __rte_eth_dev_profile_init(port_id, dev); 1491 if (diag != 0) { 1492 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n", 1493 port_id, diag); 1494 ret = eth_err(port_id, diag); 1495 goto reset_queues; 1496 } 1497 1498 /* Validate Rx offloads. */ 1499 diag = eth_dev_validate_offloads(port_id, 1500 dev_conf->rxmode.offloads, 1501 dev->data->dev_conf.rxmode.offloads, "Rx", 1502 rte_eth_dev_rx_offload_name); 1503 if (diag != 0) { 1504 ret = diag; 1505 goto reset_queues; 1506 } 1507 1508 /* Validate Tx offloads. */ 1509 diag = eth_dev_validate_offloads(port_id, 1510 dev_conf->txmode.offloads, 1511 dev->data->dev_conf.txmode.offloads, "Tx", 1512 rte_eth_dev_tx_offload_name); 1513 if (diag != 0) { 1514 ret = diag; 1515 goto reset_queues; 1516 } 1517 1518 dev->data->dev_configured = 1; 1519 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0); 1520 return 0; 1521 reset_queues: 1522 eth_dev_rx_queue_config(dev, 0); 1523 eth_dev_tx_queue_config(dev, 0); 1524 rollback: 1525 memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf)); 1526 if (old_mtu != dev->data->mtu) 1527 dev->data->mtu = old_mtu; 1528 1529 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret); 1530 return ret; 1531 } 1532 1533 static void 1534 eth_dev_mac_restore(struct rte_eth_dev *dev, 1535 struct rte_eth_dev_info *dev_info) 1536 { 1537 struct rte_ether_addr *addr; 1538 uint16_t i; 1539 uint32_t pool = 0; 1540 uint64_t pool_mask; 1541 1542 /* replay MAC address configuration including default MAC */ 1543 addr = &dev->data->mac_addrs[0]; 1544 if (*dev->dev_ops->mac_addr_set != NULL) 1545 (*dev->dev_ops->mac_addr_set)(dev, addr); 1546 else if (*dev->dev_ops->mac_addr_add != NULL) 1547 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); 1548 1549 if (*dev->dev_ops->mac_addr_add != NULL) { 1550 for (i = 1; i < dev_info->max_mac_addrs; i++) { 1551 addr = &dev->data->mac_addrs[i]; 1552 1553 /* skip zero address */ 1554 if (rte_is_zero_ether_addr(addr)) 1555 continue; 1556 1557 pool = 0; 1558 pool_mask = dev->data->mac_pool_sel[i]; 1559 1560 do { 1561 if (pool_mask & UINT64_C(1)) 1562 (*dev->dev_ops->mac_addr_add)(dev, 1563 addr, i, pool); 1564 pool_mask >>= 1; 1565 pool++; 1566 } while (pool_mask); 1567 } 1568 } 1569 } 1570 1571 static int 1572 eth_dev_config_restore(struct rte_eth_dev *dev, 1573 struct rte_eth_dev_info *dev_info, uint16_t port_id) 1574 { 1575 int ret; 1576 1577 if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)) 1578 eth_dev_mac_restore(dev, dev_info); 1579 1580 /* replay promiscuous configuration */ 1581 /* 1582 * use callbacks directly since we don't need port_id check and 1583 * would like to bypass the same value set 1584 */ 1585 if (rte_eth_promiscuous_get(port_id) == 1 && 1586 
*dev->dev_ops->promiscuous_enable != NULL) { 1587 ret = eth_err(port_id, 1588 (*dev->dev_ops->promiscuous_enable)(dev)); 1589 if (ret != 0 && ret != -ENOTSUP) { 1590 RTE_ETHDEV_LOG(ERR, 1591 "Failed to enable promiscuous mode for device (port %u): %s\n", 1592 port_id, rte_strerror(-ret)); 1593 return ret; 1594 } 1595 } else if (rte_eth_promiscuous_get(port_id) == 0 && 1596 *dev->dev_ops->promiscuous_disable != NULL) { 1597 ret = eth_err(port_id, 1598 (*dev->dev_ops->promiscuous_disable)(dev)); 1599 if (ret != 0 && ret != -ENOTSUP) { 1600 RTE_ETHDEV_LOG(ERR, 1601 "Failed to disable promiscuous mode for device (port %u): %s\n", 1602 port_id, rte_strerror(-ret)); 1603 return ret; 1604 } 1605 } 1606 1607 /* replay all multicast configuration */ 1608 /* 1609 * use callbacks directly since we don't need port_id check and 1610 * would like to bypass the same value set 1611 */ 1612 if (rte_eth_allmulticast_get(port_id) == 1 && 1613 *dev->dev_ops->allmulticast_enable != NULL) { 1614 ret = eth_err(port_id, 1615 (*dev->dev_ops->allmulticast_enable)(dev)); 1616 if (ret != 0 && ret != -ENOTSUP) { 1617 RTE_ETHDEV_LOG(ERR, 1618 "Failed to enable allmulticast mode for device (port %u): %s\n", 1619 port_id, rte_strerror(-ret)); 1620 return ret; 1621 } 1622 } else if (rte_eth_allmulticast_get(port_id) == 0 && 1623 *dev->dev_ops->allmulticast_disable != NULL) { 1624 ret = eth_err(port_id, 1625 (*dev->dev_ops->allmulticast_disable)(dev)); 1626 if (ret != 0 && ret != -ENOTSUP) { 1627 RTE_ETHDEV_LOG(ERR, 1628 "Failed to disable allmulticast mode for device (port %u): %s\n", 1629 port_id, rte_strerror(-ret)); 1630 return ret; 1631 } 1632 } 1633 1634 return 0; 1635 } 1636 1637 int 1638 rte_eth_dev_start(uint16_t port_id) 1639 { 1640 struct rte_eth_dev *dev; 1641 struct rte_eth_dev_info dev_info; 1642 int diag; 1643 int ret, ret_stop; 1644 1645 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1646 dev = &rte_eth_devices[port_id]; 1647 1648 if (*dev->dev_ops->dev_start == NULL) 1649 return -ENOTSUP; 1650 1651 if (dev->data->dev_configured == 0) { 1652 RTE_ETHDEV_LOG(INFO, 1653 "Device with port_id=%"PRIu16" is not configured.\n", 1654 port_id); 1655 return -EINVAL; 1656 } 1657 1658 if (dev->data->dev_started != 0) { 1659 RTE_ETHDEV_LOG(INFO, 1660 "Device with port_id=%"PRIu16" already started\n", 1661 port_id); 1662 return 0; 1663 } 1664 1665 ret = rte_eth_dev_info_get(port_id, &dev_info); 1666 if (ret != 0) 1667 return ret; 1668 1669 /* Lets restore MAC now if device does not support live change */ 1670 if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR) 1671 eth_dev_mac_restore(dev, &dev_info); 1672 1673 diag = (*dev->dev_ops->dev_start)(dev); 1674 if (diag == 0) 1675 dev->data->dev_started = 1; 1676 else 1677 return eth_err(port_id, diag); 1678 1679 ret = eth_dev_config_restore(dev, &dev_info, port_id); 1680 if (ret != 0) { 1681 RTE_ETHDEV_LOG(ERR, 1682 "Error during restoring configuration for device (port %u): %s\n", 1683 port_id, rte_strerror(-ret)); 1684 ret_stop = rte_eth_dev_stop(port_id); 1685 if (ret_stop != 0) { 1686 RTE_ETHDEV_LOG(ERR, 1687 "Failed to stop device (port %u): %s\n", 1688 port_id, rte_strerror(-ret_stop)); 1689 } 1690 1691 return ret; 1692 } 1693 1694 if (dev->data->dev_conf.intr_conf.lsc == 0) { 1695 if (*dev->dev_ops->link_update == NULL) 1696 return -ENOTSUP; 1697 (*dev->dev_ops->link_update)(dev, 0); 1698 } 1699 1700 /* expose selection of PMD fast-path functions */ 1701 eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev); 1702 1703 rte_ethdev_trace_start(port_id); 1704 return 0; 
1705 } 1706 1707 int 1708 rte_eth_dev_stop(uint16_t port_id) 1709 { 1710 struct rte_eth_dev *dev; 1711 int ret; 1712 1713 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1714 dev = &rte_eth_devices[port_id]; 1715 1716 if (*dev->dev_ops->dev_stop == NULL) 1717 return -ENOTSUP; 1718 1719 if (dev->data->dev_started == 0) { 1720 RTE_ETHDEV_LOG(INFO, 1721 "Device with port_id=%"PRIu16" already stopped\n", 1722 port_id); 1723 return 0; 1724 } 1725 1726 /* point fast-path functions to dummy ones */ 1727 eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id); 1728 1729 ret = (*dev->dev_ops->dev_stop)(dev); 1730 if (ret == 0) 1731 dev->data->dev_started = 0; 1732 rte_ethdev_trace_stop(port_id, ret); 1733 1734 return ret; 1735 } 1736 1737 int 1738 rte_eth_dev_set_link_up(uint16_t port_id) 1739 { 1740 struct rte_eth_dev *dev; 1741 int ret; 1742 1743 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1744 dev = &rte_eth_devices[port_id]; 1745 1746 if (*dev->dev_ops->dev_set_link_up == NULL) 1747 return -ENOTSUP; 1748 ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev)); 1749 1750 rte_ethdev_trace_set_link_up(port_id, ret); 1751 1752 return ret; 1753 } 1754 1755 int 1756 rte_eth_dev_set_link_down(uint16_t port_id) 1757 { 1758 struct rte_eth_dev *dev; 1759 int ret; 1760 1761 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1762 dev = &rte_eth_devices[port_id]; 1763 1764 if (*dev->dev_ops->dev_set_link_down == NULL) 1765 return -ENOTSUP; 1766 ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev)); 1767 1768 rte_ethdev_trace_set_link_down(port_id, ret); 1769 1770 return ret; 1771 } 1772 1773 int 1774 rte_eth_dev_close(uint16_t port_id) 1775 { 1776 struct rte_eth_dev *dev; 1777 int firsterr, binerr; 1778 int *lasterr = &firsterr; 1779 1780 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1781 dev = &rte_eth_devices[port_id]; 1782 1783 /* 1784 * Secondary process needs to close device to release process private 1785 * resources. But secondary process should not be obliged to wait 1786 * for device stop before closing ethdev. 
1787 */ 1788 if (rte_eal_process_type() == RTE_PROC_PRIMARY && 1789 dev->data->dev_started) { 1790 RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n", 1791 port_id); 1792 return -EINVAL; 1793 } 1794 1795 if (*dev->dev_ops->dev_close == NULL) 1796 return -ENOTSUP; 1797 *lasterr = (*dev->dev_ops->dev_close)(dev); 1798 if (*lasterr != 0) 1799 lasterr = &binerr; 1800 1801 rte_ethdev_trace_close(port_id); 1802 *lasterr = rte_eth_dev_release_port(dev); 1803 1804 return firsterr; 1805 } 1806 1807 int 1808 rte_eth_dev_reset(uint16_t port_id) 1809 { 1810 struct rte_eth_dev *dev; 1811 int ret; 1812 1813 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1814 dev = &rte_eth_devices[port_id]; 1815 1816 if (*dev->dev_ops->dev_reset == NULL) 1817 return -ENOTSUP; 1818 1819 ret = rte_eth_dev_stop(port_id); 1820 if (ret != 0) { 1821 RTE_ETHDEV_LOG(ERR, 1822 "Failed to stop device (port %u) before reset: %s - ignore\n", 1823 port_id, rte_strerror(-ret)); 1824 } 1825 ret = eth_err(port_id, dev->dev_ops->dev_reset(dev)); 1826 1827 rte_ethdev_trace_reset(port_id, ret); 1828 1829 return ret; 1830 } 1831 1832 int 1833 rte_eth_dev_is_removed(uint16_t port_id) 1834 { 1835 struct rte_eth_dev *dev; 1836 int ret; 1837 1838 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); 1839 dev = &rte_eth_devices[port_id]; 1840 1841 if (dev->state == RTE_ETH_DEV_REMOVED) 1842 return 1; 1843 1844 if (*dev->dev_ops->is_removed == NULL) 1845 return 0; 1846 1847 ret = dev->dev_ops->is_removed(dev); 1848 if (ret != 0) 1849 /* Device is physically removed. */ 1850 dev->state = RTE_ETH_DEV_REMOVED; 1851 1852 rte_ethdev_trace_is_removed(port_id, ret); 1853 1854 return ret; 1855 } 1856 1857 static int 1858 rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset, 1859 uint16_t min_length) 1860 { 1861 uint16_t data_room_size; 1862 1863 /* 1864 * Check the size of the mbuf data buffer, this value 1865 * must be provided in the private data of the memory pool. 1866 * First check that the memory pool(s) has a valid private data. 
1867 */ 1868 if (mp->private_data_size < 1869 sizeof(struct rte_pktmbuf_pool_private)) { 1870 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 1871 mp->name, mp->private_data_size, 1872 (unsigned int) 1873 sizeof(struct rte_pktmbuf_pool_private)); 1874 return -ENOSPC; 1875 } 1876 data_room_size = rte_pktmbuf_data_room_size(mp); 1877 if (data_room_size < offset + min_length) { 1878 RTE_ETHDEV_LOG(ERR, 1879 "%s mbuf_data_room_size %u < %u (%u + %u)\n", 1880 mp->name, data_room_size, 1881 offset + min_length, offset, min_length); 1882 return -EINVAL; 1883 } 1884 return 0; 1885 } 1886 1887 static int 1888 eth_dev_buffer_split_get_supported_hdrs_helper(uint16_t port_id, uint32_t **ptypes) 1889 { 1890 int cnt; 1891 1892 cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0); 1893 if (cnt <= 0) 1894 return cnt; 1895 1896 *ptypes = malloc(sizeof(uint32_t) * cnt); 1897 if (*ptypes == NULL) 1898 return -ENOMEM; 1899 1900 cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, *ptypes, cnt); 1901 if (cnt <= 0) { 1902 free(*ptypes); 1903 *ptypes = NULL; 1904 } 1905 return cnt; 1906 } 1907 1908 static int 1909 rte_eth_rx_queue_check_split(uint16_t port_id, 1910 const struct rte_eth_rxseg_split *rx_seg, 1911 uint16_t n_seg, uint32_t *mbp_buf_size, 1912 const struct rte_eth_dev_info *dev_info) 1913 { 1914 const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; 1915 struct rte_mempool *mp_first; 1916 uint32_t offset_mask; 1917 uint16_t seg_idx; 1918 int ret = 0; 1919 int ptype_cnt; 1920 uint32_t *ptypes; 1921 uint32_t prev_proto_hdrs = RTE_PTYPE_UNKNOWN; 1922 int i; 1923 1924 if (n_seg > seg_capa->max_nseg) { 1925 RTE_ETHDEV_LOG(ERR, 1926 "Requested Rx segments %u exceed supported %u\n", 1927 n_seg, seg_capa->max_nseg); 1928 return -EINVAL; 1929 } 1930 /* 1931 * Check the sizes and offsets against buffer sizes 1932 * for each segment specified in extended configuration. 1933 */ 1934 mp_first = rx_seg[0].mp; 1935 offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1; 1936 1937 ptypes = NULL; 1938 ptype_cnt = eth_dev_buffer_split_get_supported_hdrs_helper(port_id, &ptypes); 1939 1940 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 1941 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 1942 uint32_t length = rx_seg[seg_idx].length; 1943 uint32_t offset = rx_seg[seg_idx].offset; 1944 uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr; 1945 1946 if (mpl == NULL) { 1947 RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); 1948 ret = -EINVAL; 1949 goto out; 1950 } 1951 if (seg_idx != 0 && mp_first != mpl && 1952 seg_capa->multi_pools == 0) { 1953 RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n"); 1954 ret = -ENOTSUP; 1955 goto out; 1956 } 1957 if (offset != 0) { 1958 if (seg_capa->offset_allowed == 0) { 1959 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n"); 1960 ret = -ENOTSUP; 1961 goto out; 1962 } 1963 if (offset & offset_mask) { 1964 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n", 1965 offset, 1966 seg_capa->offset_align_log2); 1967 ret = -EINVAL; 1968 goto out; 1969 } 1970 } 1971 1972 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 1973 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 1974 if (proto_hdr != 0) { 1975 /* Split based on protocol headers. 
*/ 1976 if (length != 0) { 1977 RTE_ETHDEV_LOG(ERR, 1978 "Do not set length split and protocol split within a segment\n" 1979 ); 1980 ret = -EINVAL; 1981 goto out; 1982 } 1983 if ((proto_hdr & prev_proto_hdrs) != 0) { 1984 RTE_ETHDEV_LOG(ERR, 1985 "Repeat with previous protocol headers or proto-split after length-based split\n" 1986 ); 1987 ret = -EINVAL; 1988 goto out; 1989 } 1990 if (ptype_cnt <= 0) { 1991 RTE_ETHDEV_LOG(ERR, 1992 "Port %u failed to get supported buffer split header protocols\n", 1993 port_id); 1994 ret = -ENOTSUP; 1995 goto out; 1996 } 1997 for (i = 0; i < ptype_cnt; i++) { 1998 if ((prev_proto_hdrs | proto_hdr) == ptypes[i]) 1999 break; 2000 } 2001 if (i == ptype_cnt) { 2002 RTE_ETHDEV_LOG(ERR, 2003 "Requested Rx split header protocols 0x%x is not supported.\n", 2004 proto_hdr); 2005 ret = -EINVAL; 2006 goto out; 2007 } 2008 prev_proto_hdrs |= proto_hdr; 2009 } else { 2010 /* Split at fixed length. */ 2011 length = length != 0 ? length : *mbp_buf_size; 2012 prev_proto_hdrs = RTE_PTYPE_ALL_MASK; 2013 } 2014 2015 ret = rte_eth_check_rx_mempool(mpl, offset, length); 2016 if (ret != 0) 2017 goto out; 2018 } 2019 out: 2020 free(ptypes); 2021 return ret; 2022 } 2023 2024 static int 2025 rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools, 2026 uint16_t n_mempools, uint32_t *min_buf_size, 2027 const struct rte_eth_dev_info *dev_info) 2028 { 2029 uint16_t pool_idx; 2030 int ret; 2031 2032 if (n_mempools > dev_info->max_rx_mempools) { 2033 RTE_ETHDEV_LOG(ERR, 2034 "Too many Rx mempools %u vs maximum %u\n", 2035 n_mempools, dev_info->max_rx_mempools); 2036 return -EINVAL; 2037 } 2038 2039 for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) { 2040 struct rte_mempool *mp = rx_mempools[pool_idx]; 2041 2042 if (mp == NULL) { 2043 RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n"); 2044 return -EINVAL; 2045 } 2046 2047 ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM, 2048 dev_info->min_rx_bufsize); 2049 if (ret != 0) 2050 return ret; 2051 2052 *min_buf_size = RTE_MIN(*min_buf_size, 2053 rte_pktmbuf_data_room_size(mp)); 2054 } 2055 2056 return 0; 2057 } 2058 2059 int 2060 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2061 uint16_t nb_rx_desc, unsigned int socket_id, 2062 const struct rte_eth_rxconf *rx_conf, 2063 struct rte_mempool *mp) 2064 { 2065 int ret; 2066 uint64_t rx_offloads; 2067 uint32_t mbp_buf_size = UINT32_MAX; 2068 struct rte_eth_dev *dev; 2069 struct rte_eth_dev_info dev_info; 2070 struct rte_eth_rxconf local_conf; 2071 2072 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2073 dev = &rte_eth_devices[port_id]; 2074 2075 if (rx_queue_id >= dev->data->nb_rx_queues) { 2076 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2077 return -EINVAL; 2078 } 2079 2080 if (*dev->dev_ops->rx_queue_setup == NULL) 2081 return -ENOTSUP; 2082 2083 ret = rte_eth_dev_info_get(port_id, &dev_info); 2084 if (ret != 0) 2085 return ret; 2086 2087 rx_offloads = dev->data->dev_conf.rxmode.offloads; 2088 if (rx_conf != NULL) 2089 rx_offloads |= rx_conf->offloads; 2090 2091 /* Ensure that we have one and only one source of Rx buffers */ 2092 if ((mp != NULL) + 2093 (rx_conf != NULL && rx_conf->rx_nseg > 0) + 2094 (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) { 2095 RTE_ETHDEV_LOG(ERR, 2096 "Ambiguous Rx mempools configuration\n"); 2097 return -EINVAL; 2098 } 2099 2100 if (mp != NULL) { 2101 /* Single pool configuration check. 
*/ 2102 ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM, 2103 dev_info.min_rx_bufsize); 2104 if (ret != 0) 2105 return ret; 2106 2107 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2108 } else if (rx_conf != NULL && rx_conf->rx_nseg > 0) { 2109 const struct rte_eth_rxseg_split *rx_seg; 2110 uint16_t n_seg; 2111 2112 /* Extended multi-segment configuration check. */ 2113 if (rx_conf->rx_seg == NULL) { 2114 RTE_ETHDEV_LOG(ERR, 2115 "Memory pool is null and no multi-segment configuration provided\n"); 2116 return -EINVAL; 2117 } 2118 2119 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2120 n_seg = rx_conf->rx_nseg; 2121 2122 if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2123 ret = rte_eth_rx_queue_check_split(port_id, rx_seg, n_seg, 2124 &mbp_buf_size, 2125 &dev_info); 2126 if (ret != 0) 2127 return ret; 2128 } else { 2129 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2130 return -EINVAL; 2131 } 2132 } else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) { 2133 /* Extended multi-pool configuration check. */ 2134 if (rx_conf->rx_mempools == NULL) { 2135 RTE_ETHDEV_LOG(ERR, "Memory pools array is null\n"); 2136 return -EINVAL; 2137 } 2138 2139 ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools, 2140 rx_conf->rx_nmempool, 2141 &mbp_buf_size, 2142 &dev_info); 2143 if (ret != 0) 2144 return ret; 2145 } else { 2146 RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n"); 2147 return -EINVAL; 2148 } 2149 2150 /* Use default specified by driver, if nb_rx_desc is zero */ 2151 if (nb_rx_desc == 0) { 2152 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2153 /* If driver default is also zero, fall back on EAL default */ 2154 if (nb_rx_desc == 0) 2155 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2156 } 2157 2158 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2159 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2160 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2161 2162 RTE_ETHDEV_LOG(ERR, 2163 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2164 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2165 dev_info.rx_desc_lim.nb_min, 2166 dev_info.rx_desc_lim.nb_align); 2167 return -EINVAL; 2168 } 2169 2170 if (dev->data->dev_started && 2171 !(dev_info.dev_capa & 2172 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2173 return -EBUSY; 2174 2175 if (dev->data->dev_started && 2176 (dev->data->rx_queue_state[rx_queue_id] != 2177 RTE_ETH_QUEUE_STATE_STOPPED)) 2178 return -EBUSY; 2179 2180 eth_dev_rxq_release(dev, rx_queue_id); 2181 2182 if (rx_conf == NULL) 2183 rx_conf = &dev_info.default_rxconf; 2184 2185 local_conf = *rx_conf; 2186 2187 /* 2188 * If an offloading has already been enabled in 2189 * rte_eth_dev_configure(), it has been enabled on all queues, 2190 * so there is no need to enable it in this queue again. 2191 * The local_conf.offloads input to underlying PMD only carries 2192 * those offloadings which are only enabled on this queue and 2193 * not enabled on all queues. 2194 */ 2195 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2196 2197 /* 2198 * New added offloadings for this queue are those not enabled in 2199 * rte_eth_dev_configure() and they must be per-queue type. 2200 * A pure per-port offloading can't be enabled on a queue while 2201 * disabled on another queue. A pure per-port offloading can't 2202 * be enabled for any queue as new added one if it hasn't been 2203 * enabled in rte_eth_dev_configure(). 
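	 *
	 * For example (illustrative, capability depends on the PMD): requesting
	 * RTE_ETH_RX_OFFLOAD_SCATTER only for this queue, without having set it
	 * in rte_eth_dev_configure(), is accepted only if the bit is present in
	 * dev_info.rx_queue_offload_capa; otherwise the check below fails with
	 * -EINVAL.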
2204 */ 2205 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2206 local_conf.offloads) { 2207 RTE_ETHDEV_LOG(ERR, 2208 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2209 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2210 port_id, rx_queue_id, local_conf.offloads, 2211 dev_info.rx_queue_offload_capa, 2212 __func__); 2213 return -EINVAL; 2214 } 2215 2216 if (local_conf.share_group > 0 && 2217 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 2218 RTE_ETHDEV_LOG(ERR, 2219 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n", 2220 port_id, rx_queue_id, local_conf.share_group); 2221 return -EINVAL; 2222 } 2223 2224 /* 2225 * If LRO is enabled, check that the maximum aggregated packet 2226 * size is supported by the configured device. 2227 */ 2228 /* Get the real Ethernet overhead length */ 2229 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 2230 uint32_t overhead_len; 2231 uint32_t max_rx_pktlen; 2232 int ret; 2233 2234 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2235 dev_info.max_mtu); 2236 max_rx_pktlen = dev->data->mtu + overhead_len; 2237 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2238 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2239 ret = eth_dev_check_lro_pkt_size(port_id, 2240 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2241 max_rx_pktlen, 2242 dev_info.max_lro_pkt_size); 2243 if (ret != 0) 2244 return ret; 2245 } 2246 2247 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2248 socket_id, &local_conf, mp); 2249 if (!ret) { 2250 if (!dev->data->min_rx_buf_size || 2251 dev->data->min_rx_buf_size > mbp_buf_size) 2252 dev->data->min_rx_buf_size = mbp_buf_size; 2253 } 2254 2255 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2256 rx_conf, ret); 2257 return eth_err(port_id, ret); 2258 } 2259 2260 int 2261 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2262 uint16_t nb_rx_desc, 2263 const struct rte_eth_hairpin_conf *conf) 2264 { 2265 int ret; 2266 struct rte_eth_dev *dev; 2267 struct rte_eth_hairpin_cap cap; 2268 int i; 2269 int count; 2270 2271 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2272 dev = &rte_eth_devices[port_id]; 2273 2274 if (rx_queue_id >= dev->data->nb_rx_queues) { 2275 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2276 return -EINVAL; 2277 } 2278 2279 if (conf == NULL) { 2280 RTE_ETHDEV_LOG(ERR, 2281 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2282 port_id); 2283 return -EINVAL; 2284 } 2285 2286 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2287 if (ret != 0) 2288 return ret; 2289 if (*dev->dev_ops->rx_hairpin_queue_setup == NULL) 2290 return -ENOTSUP; 2291 /* if nb_rx_desc is zero use max number of desc from the driver. 
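	 *
	 * Illustrative sketch of a single-peer configuration (field values are
	 * hypothetical), binding this Rx queue to Tx queue 0 of the same port:
	 *
	 *   struct rte_eth_hairpin_conf hconf = {
	 *       .peer_count = 1,
	 *       .peers[0] = { .port = port_id, .queue = 0 },
	 *   };
	 *   rte_eth_rx_hairpin_queue_setup(port_id, rx_queue_id, 0, &hconf);
	 *
	 * Passing nb_rx_desc == 0 selects cap.max_nb_desc, as handled below.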
 */
	if (nb_rx_desc == 0)
		nb_rx_desc = cap.max_nb_desc;
	if (nb_rx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
			nb_rx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_rx_2_tx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
			conf->peer_count, cap.max_rx_2_tx);
		return -EINVAL;
	}
	if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use locked device memory for Rx queue, which is not supported\n");
		return -EINVAL;
	}
	if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use DPDK memory for Rx queue, which is not supported\n");
		return -EINVAL;
	}
	if (conf->use_locked_device_memory && conf->use_rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use mutually exclusive memory settings for Rx queue\n");
		return -EINVAL;
	}
	if (conf->force_memory &&
	    !conf->use_locked_device_memory &&
	    !conf->use_rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to force Rx queue memory settings, but none is set\n");
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
			cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	eth_dev_rxq_release(dev, rx_queue_id);
	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
			nb_rx_desc, conf);
	if (ret == 0)
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	ret = eth_err(port_id, ret);

	rte_eth_trace_rx_hairpin_queue_setup(port_id, rx_queue_id, nb_rx_desc,
					     conf, ret);

	return ret;
}

int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->tx_queue_setup == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Use default specified by driver, if nb_tx_desc is zero */
	if (nb_tx_desc == 0) {
		nb_tx_desc = dev_info.default_txportconf.ring_size;
		/* If driver default is zero, fall back on EAL default */
		if (nb_tx_desc == 0)
			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	}
	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
			dev_info.tx_desc_lim.nb_min,
			dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
	    !(dev_info.dev_capa &
	      RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
	    (dev->data->tx_queue_state[tx_queue_id] !=
	     RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_txq_release(dev, tx_queue_id);

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	local_conf = *tx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, tx_queue_id, local_conf.offloads,
			dev_info.tx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
			tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}

int
rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
			       uint16_t nb_tx_desc,
			       const struct rte_eth_hairpin_conf *conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_hairpin_cap cap;
	int i;
	int count;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
			port_id);
		return -EINVAL;
	}

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;
	if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
		return -ENOTSUP;
	/* if nb_tx_desc is zero use max number of desc from the driver. */
	if (nb_tx_desc == 0)
		nb_tx_desc = cap.max_nb_desc;
	if (nb_tx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
			nb_tx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_tx_2_rx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
			conf->peer_count, cap.max_tx_2_rx);
		return -EINVAL;
	}
	if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use locked device memory for Tx queue, which is not supported\n");
		return -EINVAL;
	}
	if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use DPDK memory for Tx queue, which is not supported\n");
		return -EINVAL;
	}
	if (conf->use_locked_device_memory && conf->use_rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to use mutually exclusive memory settings for Tx queue\n");
		return -EINVAL;
	}
	if (conf->force_memory &&
	    !conf->use_locked_device_memory &&
	    !conf->use_rte_memory) {
		RTE_ETHDEV_LOG(ERR,
			"Attempt to force Tx queue memory settings, but none is set\n");
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
			cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	eth_dev_txq_release(dev, tx_queue_id);
	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
		(dev, tx_queue_id, nb_tx_desc, conf);
	if (ret == 0)
		dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	ret = eth_err(port_id, ret);

	rte_eth_trace_tx_hairpin_queue_setup(port_id, tx_queue_id, nb_tx_desc,
					     conf, ret);

	return ret;
}

int
rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
	dev = &rte_eth_devices[tx_port];

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
		return -EBUSY;
	}

	if (*dev->dev_ops->hairpin_bind == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
	if (ret != 0)
		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
			       " to Rx %d (%d - all ports)\n",
			       tx_port, rx_port, RTE_MAX_ETHPORTS);

	rte_eth_trace_hairpin_bind(tx_port, rx_port, ret);

	return ret;
}

int
rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
	dev = &rte_eth_devices[tx_port];

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
		return -EBUSY;
	}

	if (*dev->dev_ops->hairpin_unbind == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
	if (ret != 0)
RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2600 " from Rx %d (%d - all ports)\n", 2601 tx_port, rx_port, RTE_MAX_ETHPORTS); 2602 2603 rte_eth_trace_hairpin_unbind(tx_port, rx_port, ret); 2604 2605 return ret; 2606 } 2607 2608 int 2609 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2610 size_t len, uint32_t direction) 2611 { 2612 struct rte_eth_dev *dev; 2613 int ret; 2614 2615 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2616 dev = &rte_eth_devices[port_id]; 2617 2618 if (peer_ports == NULL) { 2619 RTE_ETHDEV_LOG(ERR, 2620 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2621 port_id); 2622 return -EINVAL; 2623 } 2624 2625 if (len == 0) { 2626 RTE_ETHDEV_LOG(ERR, 2627 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2628 port_id); 2629 return -EINVAL; 2630 } 2631 2632 if (*dev->dev_ops->hairpin_get_peer_ports == NULL) 2633 return -ENOTSUP; 2634 2635 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2636 len, direction); 2637 if (ret < 0) 2638 RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2639 port_id, direction ? "Rx" : "Tx"); 2640 2641 rte_eth_trace_hairpin_get_peer_ports(port_id, peer_ports, len, 2642 direction, ret); 2643 2644 return ret; 2645 } 2646 2647 void 2648 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2649 void *userdata __rte_unused) 2650 { 2651 rte_pktmbuf_free_bulk(pkts, unsent); 2652 2653 rte_eth_trace_tx_buffer_drop_callback((void **)pkts, unsent); 2654 } 2655 2656 void 2657 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2658 void *userdata) 2659 { 2660 uint64_t *count = userdata; 2661 2662 rte_pktmbuf_free_bulk(pkts, unsent); 2663 *count += unsent; 2664 2665 rte_eth_trace_tx_buffer_count_callback((void **)pkts, unsent, *count); 2666 } 2667 2668 int 2669 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2670 buffer_tx_error_fn cbfn, void *userdata) 2671 { 2672 if (buffer == NULL) { 2673 RTE_ETHDEV_LOG(ERR, 2674 "Cannot set Tx buffer error callback to NULL buffer\n"); 2675 return -EINVAL; 2676 } 2677 2678 buffer->error_callback = cbfn; 2679 buffer->error_userdata = userdata; 2680 2681 rte_eth_trace_tx_buffer_set_err_callback(buffer); 2682 2683 return 0; 2684 } 2685 2686 int 2687 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2688 { 2689 int ret = 0; 2690 2691 if (buffer == NULL) { 2692 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2693 return -EINVAL; 2694 } 2695 2696 buffer->size = size; 2697 if (buffer->error_callback == NULL) { 2698 ret = rte_eth_tx_buffer_set_err_callback( 2699 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2700 } 2701 2702 rte_eth_trace_tx_buffer_init(buffer, size, ret); 2703 2704 return ret; 2705 } 2706 2707 int 2708 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2709 { 2710 struct rte_eth_dev *dev; 2711 int ret; 2712 2713 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2714 dev = &rte_eth_devices[port_id]; 2715 2716 if (*dev->dev_ops->tx_done_cleanup == NULL) 2717 return -ENOTSUP; 2718 2719 /* Call driver to free pending mbufs. 
*/ 2720 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2721 free_cnt); 2722 ret = eth_err(port_id, ret); 2723 2724 rte_eth_trace_tx_done_cleanup(port_id, queue_id, free_cnt, ret); 2725 2726 return ret; 2727 } 2728 2729 int 2730 rte_eth_promiscuous_enable(uint16_t port_id) 2731 { 2732 struct rte_eth_dev *dev; 2733 int diag = 0; 2734 2735 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2736 dev = &rte_eth_devices[port_id]; 2737 2738 if (dev->data->promiscuous == 1) 2739 return 0; 2740 2741 if (*dev->dev_ops->promiscuous_enable == NULL) 2742 return -ENOTSUP; 2743 2744 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2745 dev->data->promiscuous = (diag == 0) ? 1 : 0; 2746 2747 diag = eth_err(port_id, diag); 2748 2749 rte_eth_trace_promiscuous_enable(port_id, dev->data->promiscuous, 2750 diag); 2751 2752 return diag; 2753 } 2754 2755 int 2756 rte_eth_promiscuous_disable(uint16_t port_id) 2757 { 2758 struct rte_eth_dev *dev; 2759 int diag = 0; 2760 2761 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2762 dev = &rte_eth_devices[port_id]; 2763 2764 if (dev->data->promiscuous == 0) 2765 return 0; 2766 2767 if (*dev->dev_ops->promiscuous_disable == NULL) 2768 return -ENOTSUP; 2769 2770 dev->data->promiscuous = 0; 2771 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2772 if (diag != 0) 2773 dev->data->promiscuous = 1; 2774 2775 diag = eth_err(port_id, diag); 2776 2777 rte_eth_trace_promiscuous_disable(port_id, dev->data->promiscuous, 2778 diag); 2779 2780 return diag; 2781 } 2782 2783 int 2784 rte_eth_promiscuous_get(uint16_t port_id) 2785 { 2786 struct rte_eth_dev *dev; 2787 2788 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2789 dev = &rte_eth_devices[port_id]; 2790 2791 rte_eth_trace_promiscuous_get(port_id, dev->data->promiscuous); 2792 2793 return dev->data->promiscuous; 2794 } 2795 2796 int 2797 rte_eth_allmulticast_enable(uint16_t port_id) 2798 { 2799 struct rte_eth_dev *dev; 2800 int diag; 2801 2802 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2803 dev = &rte_eth_devices[port_id]; 2804 2805 if (dev->data->all_multicast == 1) 2806 return 0; 2807 2808 if (*dev->dev_ops->allmulticast_enable == NULL) 2809 return -ENOTSUP; 2810 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2811 dev->data->all_multicast = (diag == 0) ? 
1 : 0; 2812 2813 diag = eth_err(port_id, diag); 2814 2815 rte_eth_trace_allmulticast_enable(port_id, dev->data->all_multicast, 2816 diag); 2817 2818 return diag; 2819 } 2820 2821 int 2822 rte_eth_allmulticast_disable(uint16_t port_id) 2823 { 2824 struct rte_eth_dev *dev; 2825 int diag; 2826 2827 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2828 dev = &rte_eth_devices[port_id]; 2829 2830 if (dev->data->all_multicast == 0) 2831 return 0; 2832 2833 if (*dev->dev_ops->allmulticast_disable == NULL) 2834 return -ENOTSUP; 2835 dev->data->all_multicast = 0; 2836 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2837 if (diag != 0) 2838 dev->data->all_multicast = 1; 2839 2840 diag = eth_err(port_id, diag); 2841 2842 rte_eth_trace_allmulticast_disable(port_id, dev->data->all_multicast, 2843 diag); 2844 2845 return diag; 2846 } 2847 2848 int 2849 rte_eth_allmulticast_get(uint16_t port_id) 2850 { 2851 struct rte_eth_dev *dev; 2852 2853 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2854 dev = &rte_eth_devices[port_id]; 2855 2856 rte_eth_trace_allmulticast_get(port_id, dev->data->all_multicast); 2857 2858 return dev->data->all_multicast; 2859 } 2860 2861 int 2862 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2863 { 2864 struct rte_eth_dev *dev; 2865 2866 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2867 dev = &rte_eth_devices[port_id]; 2868 2869 if (eth_link == NULL) { 2870 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2871 port_id); 2872 return -EINVAL; 2873 } 2874 2875 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2876 rte_eth_linkstatus_get(dev, eth_link); 2877 else { 2878 if (*dev->dev_ops->link_update == NULL) 2879 return -ENOTSUP; 2880 (*dev->dev_ops->link_update)(dev, 1); 2881 *eth_link = dev->data->dev_link; 2882 } 2883 2884 rte_eth_trace_link_get(port_id, eth_link); 2885 2886 return 0; 2887 } 2888 2889 int 2890 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2891 { 2892 struct rte_eth_dev *dev; 2893 2894 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2895 dev = &rte_eth_devices[port_id]; 2896 2897 if (eth_link == NULL) { 2898 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2899 port_id); 2900 return -EINVAL; 2901 } 2902 2903 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2904 rte_eth_linkstatus_get(dev, eth_link); 2905 else { 2906 if (*dev->dev_ops->link_update == NULL) 2907 return -ENOTSUP; 2908 (*dev->dev_ops->link_update)(dev, 0); 2909 *eth_link = dev->data->dev_link; 2910 } 2911 2912 rte_eth_trace_link_get_nowait(port_id, eth_link); 2913 2914 return 0; 2915 } 2916 2917 const char * 2918 rte_eth_link_speed_to_str(uint32_t link_speed) 2919 { 2920 const char *ret; 2921 2922 switch (link_speed) { 2923 case RTE_ETH_SPEED_NUM_NONE: 2924 ret = "None"; 2925 break; 2926 case RTE_ETH_SPEED_NUM_10M: 2927 ret = "10 Mbps"; 2928 break; 2929 case RTE_ETH_SPEED_NUM_100M: 2930 ret = "100 Mbps"; 2931 break; 2932 case RTE_ETH_SPEED_NUM_1G: 2933 ret = "1 Gbps"; 2934 break; 2935 case RTE_ETH_SPEED_NUM_2_5G: 2936 ret = "2.5 Gbps"; 2937 break; 2938 case RTE_ETH_SPEED_NUM_5G: 2939 ret = "5 Gbps"; 2940 break; 2941 case RTE_ETH_SPEED_NUM_10G: 2942 ret = "10 Gbps"; 2943 break; 2944 case RTE_ETH_SPEED_NUM_20G: 2945 ret = "20 Gbps"; 2946 break; 2947 case RTE_ETH_SPEED_NUM_25G: 2948 ret = "25 Gbps"; 2949 break; 2950 case RTE_ETH_SPEED_NUM_40G: 2951 ret = "40 Gbps"; 2952 break; 2953 case RTE_ETH_SPEED_NUM_50G: 2954 ret = "50 Gbps"; 2955 break; 2956 case RTE_ETH_SPEED_NUM_56G: 2957 ret 
= "56 Gbps"; 2958 break; 2959 case RTE_ETH_SPEED_NUM_100G: 2960 ret = "100 Gbps"; 2961 break; 2962 case RTE_ETH_SPEED_NUM_200G: 2963 ret = "200 Gbps"; 2964 break; 2965 case RTE_ETH_SPEED_NUM_400G: 2966 ret = "400 Gbps"; 2967 break; 2968 case RTE_ETH_SPEED_NUM_UNKNOWN: 2969 ret = "Unknown"; 2970 break; 2971 default: 2972 ret = "Invalid"; 2973 } 2974 2975 rte_eth_trace_link_speed_to_str(link_speed, ret); 2976 2977 return ret; 2978 } 2979 2980 int 2981 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2982 { 2983 int ret; 2984 2985 if (str == NULL) { 2986 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2987 return -EINVAL; 2988 } 2989 2990 if (len == 0) { 2991 RTE_ETHDEV_LOG(ERR, 2992 "Cannot convert link to string with zero size\n"); 2993 return -EINVAL; 2994 } 2995 2996 if (eth_link == NULL) { 2997 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2998 return -EINVAL; 2999 } 3000 3001 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 3002 ret = snprintf(str, len, "Link down"); 3003 else 3004 ret = snprintf(str, len, "Link up at %s %s %s", 3005 rte_eth_link_speed_to_str(eth_link->link_speed), 3006 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 3007 "FDX" : "HDX", 3008 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 3009 "Autoneg" : "Fixed"); 3010 3011 rte_eth_trace_link_to_str(len, eth_link, str, ret); 3012 3013 return ret; 3014 } 3015 3016 int 3017 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 3018 { 3019 struct rte_eth_dev *dev; 3020 int ret; 3021 3022 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3023 dev = &rte_eth_devices[port_id]; 3024 3025 if (stats == NULL) { 3026 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 3027 port_id); 3028 return -EINVAL; 3029 } 3030 3031 memset(stats, 0, sizeof(*stats)); 3032 3033 if (*dev->dev_ops->stats_get == NULL) 3034 return -ENOTSUP; 3035 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 3036 ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 3037 3038 rte_eth_trace_stats_get(port_id, stats, ret); 3039 3040 return ret; 3041 } 3042 3043 int 3044 rte_eth_stats_reset(uint16_t port_id) 3045 { 3046 struct rte_eth_dev *dev; 3047 int ret; 3048 3049 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3050 dev = &rte_eth_devices[port_id]; 3051 3052 if (*dev->dev_ops->stats_reset == NULL) 3053 return -ENOTSUP; 3054 ret = (*dev->dev_ops->stats_reset)(dev); 3055 if (ret != 0) 3056 return eth_err(port_id, ret); 3057 3058 dev->data->rx_mbuf_alloc_failed = 0; 3059 3060 rte_eth_trace_stats_reset(port_id); 3061 3062 return 0; 3063 } 3064 3065 static inline int 3066 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 3067 { 3068 uint16_t nb_rxqs, nb_txqs; 3069 int count; 3070 3071 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3072 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3073 3074 count = RTE_NB_STATS; 3075 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 3076 count += nb_rxqs * RTE_NB_RXQ_STATS; 3077 count += nb_txqs * RTE_NB_TXQ_STATS; 3078 } 3079 3080 return count; 3081 } 3082 3083 static int 3084 eth_dev_get_xstats_count(uint16_t port_id) 3085 { 3086 struct rte_eth_dev *dev; 3087 int count; 3088 3089 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3090 dev = &rte_eth_devices[port_id]; 3091 if (dev->dev_ops->xstats_get_names != NULL) { 3092 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 3093 if (count < 0) 3094 return eth_err(port_id, count); 3095 } else 
3096 count = 0; 3097 3098 3099 count += eth_dev_get_xstats_basic_count(dev); 3100 3101 return count; 3102 } 3103 3104 int 3105 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 3106 uint64_t *id) 3107 { 3108 int cnt_xstats, idx_xstat; 3109 3110 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3111 3112 if (xstat_name == NULL) { 3113 RTE_ETHDEV_LOG(ERR, 3114 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 3115 port_id); 3116 return -ENOMEM; 3117 } 3118 3119 if (id == NULL) { 3120 RTE_ETHDEV_LOG(ERR, 3121 "Cannot get ethdev port %u xstats ID to NULL\n", 3122 port_id); 3123 return -ENOMEM; 3124 } 3125 3126 /* Get count */ 3127 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 3128 if (cnt_xstats < 0) { 3129 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 3130 return -ENODEV; 3131 } 3132 3133 /* Get id-name lookup table */ 3134 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 3135 3136 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 3137 port_id, xstats_names, cnt_xstats, NULL)) { 3138 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 3139 return -1; 3140 } 3141 3142 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 3143 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 3144 *id = idx_xstat; 3145 3146 rte_eth_trace_xstats_get_id_by_name(port_id, 3147 xstat_name, *id); 3148 3149 return 0; 3150 }; 3151 } 3152 3153 return -EINVAL; 3154 } 3155 3156 /* retrieve basic stats names */ 3157 static int 3158 eth_basic_stats_get_names(struct rte_eth_dev *dev, 3159 struct rte_eth_xstat_name *xstats_names) 3160 { 3161 int cnt_used_entries = 0; 3162 uint32_t idx, id_queue; 3163 uint16_t num_q; 3164 3165 for (idx = 0; idx < RTE_NB_STATS; idx++) { 3166 strlcpy(xstats_names[cnt_used_entries].name, 3167 eth_dev_stats_strings[idx].name, 3168 sizeof(xstats_names[0].name)); 3169 cnt_used_entries++; 3170 } 3171 3172 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3173 return cnt_used_entries; 3174 3175 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3176 for (id_queue = 0; id_queue < num_q; id_queue++) { 3177 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 3178 snprintf(xstats_names[cnt_used_entries].name, 3179 sizeof(xstats_names[0].name), 3180 "rx_q%u_%s", 3181 id_queue, eth_dev_rxq_stats_strings[idx].name); 3182 cnt_used_entries++; 3183 } 3184 3185 } 3186 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3187 for (id_queue = 0; id_queue < num_q; id_queue++) { 3188 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 3189 snprintf(xstats_names[cnt_used_entries].name, 3190 sizeof(xstats_names[0].name), 3191 "tx_q%u_%s", 3192 id_queue, eth_dev_txq_stats_strings[idx].name); 3193 cnt_used_entries++; 3194 } 3195 } 3196 return cnt_used_entries; 3197 } 3198 3199 /* retrieve ethdev extended statistics names */ 3200 int 3201 rte_eth_xstats_get_names_by_id(uint16_t port_id, 3202 struct rte_eth_xstat_name *xstats_names, unsigned int size, 3203 uint64_t *ids) 3204 { 3205 struct rte_eth_xstat_name *xstats_names_copy; 3206 unsigned int no_basic_stat_requested = 1; 3207 unsigned int no_ext_stat_requested = 1; 3208 unsigned int expected_entries; 3209 unsigned int basic_count; 3210 struct rte_eth_dev *dev; 3211 unsigned int i; 3212 int ret; 3213 3214 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3215 dev = &rte_eth_devices[port_id]; 3216 3217 basic_count = eth_dev_get_xstats_basic_count(dev); 3218 ret = eth_dev_get_xstats_count(port_id); 3219 if (ret < 0) 3220 return ret; 3221 
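	/*
	 * Note on id numbering (worked example, not driver specific): ids
	 * visible to the application count the basic stats first, then the
	 * driver xstats. With the 8 basic stats above and no per-queue
	 * stats, id 10 therefore refers to the third driver xstat, which is
	 * why ids are rebased by basic_count before being passed to the
	 * PMD callback further below.
	 */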
expected_entries = (unsigned int)ret; 3222 3223 /* Return max number of stats if no ids given */ 3224 if (!ids) { 3225 if (!xstats_names) 3226 return expected_entries; 3227 else if (xstats_names && size < expected_entries) 3228 return expected_entries; 3229 } 3230 3231 if (ids && !xstats_names) 3232 return -EINVAL; 3233 3234 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3235 uint64_t ids_copy[size]; 3236 3237 for (i = 0; i < size; i++) { 3238 if (ids[i] < basic_count) { 3239 no_basic_stat_requested = 0; 3240 break; 3241 } 3242 3243 /* 3244 * Convert ids to xstats ids that PMD knows. 3245 * ids known by user are basic + extended stats. 3246 */ 3247 ids_copy[i] = ids[i] - basic_count; 3248 } 3249 3250 if (no_basic_stat_requested) 3251 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3252 ids_copy, xstats_names, size); 3253 } 3254 3255 /* Retrieve all stats */ 3256 if (!ids) { 3257 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3258 expected_entries); 3259 if (num_stats < 0 || num_stats > (int)expected_entries) 3260 return num_stats; 3261 else 3262 return expected_entries; 3263 } 3264 3265 xstats_names_copy = calloc(expected_entries, 3266 sizeof(struct rte_eth_xstat_name)); 3267 3268 if (!xstats_names_copy) { 3269 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3270 return -ENOMEM; 3271 } 3272 3273 if (ids) { 3274 for (i = 0; i < size; i++) { 3275 if (ids[i] >= basic_count) { 3276 no_ext_stat_requested = 0; 3277 break; 3278 } 3279 } 3280 } 3281 3282 /* Fill xstats_names_copy structure */ 3283 if (ids && no_ext_stat_requested) { 3284 eth_basic_stats_get_names(dev, xstats_names_copy); 3285 } else { 3286 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3287 expected_entries); 3288 if (ret < 0) { 3289 free(xstats_names_copy); 3290 return ret; 3291 } 3292 } 3293 3294 /* Filter stats */ 3295 for (i = 0; i < size; i++) { 3296 if (ids[i] >= expected_entries) { 3297 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3298 free(xstats_names_copy); 3299 return -1; 3300 } 3301 xstats_names[i] = xstats_names_copy[ids[i]]; 3302 3303 rte_eth_trace_xstats_get_names_by_id(port_id, &xstats_names[i], 3304 ids[i]); 3305 } 3306 3307 free(xstats_names_copy); 3308 return size; 3309 } 3310 3311 int 3312 rte_eth_xstats_get_names(uint16_t port_id, 3313 struct rte_eth_xstat_name *xstats_names, 3314 unsigned int size) 3315 { 3316 struct rte_eth_dev *dev; 3317 int cnt_used_entries; 3318 int cnt_expected_entries; 3319 int cnt_driver_entries; 3320 int i; 3321 3322 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3323 if (xstats_names == NULL || cnt_expected_entries < 0 || 3324 (int)size < cnt_expected_entries) 3325 return cnt_expected_entries; 3326 3327 /* port_id checked in eth_dev_get_xstats_count() */ 3328 dev = &rte_eth_devices[port_id]; 3329 3330 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3331 3332 if (dev->dev_ops->xstats_get_names != NULL) { 3333 /* If there are any driver-specific xstats, append them 3334 * to end of list. 
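		 * (These names are PMD-defined; a NIC driver might, for
		 * example, expose something like "rx_size_64_packets".
		 * Illustrative only.)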
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return eth_err(port_id, cnt_driver_entries);
		cnt_used_entries += cnt_driver_entries;
	}

	for (i = 0; i < cnt_used_entries; i++)
		rte_eth_trace_xstats_get_names(port_id, i, &xstats_names[i],
					       size, cnt_used_entries);

	return cnt_used_entries;
}


static int
eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
{
	struct rte_eth_dev *dev;
	struct rte_eth_stats eth_stats;
	unsigned int count = 0, i, q;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;
	int ret;

	ret = rte_eth_stats_get(port_id, &eth_stats);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
		return count;

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					eth_dev_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}
	return count;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
			 uint64_t *values, unsigned int size)
{
	unsigned int no_basic_stat_requested = 1;
	unsigned int no_ext_stat_requested = 1;
	unsigned int num_xstats_filled;
	unsigned int basic_count;
	uint16_t expected_entries;
	struct rte_eth_dev *dev;
	unsigned int i;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_get_xstats_count(port_id);
	if (ret < 0)
		return ret;
	expected_entries = (uint16_t)ret;
	struct rte_eth_xstat xstats[expected_entries];
	basic_count = eth_dev_get_xstats_basic_count(dev);

	/* Return max number of stats if no ids given */
	if (!ids) {
		if (!values)
			return expected_entries;
		else if (values && size < expected_entries)
			return expected_entries;
	}

	if (ids && !values)
		return -EINVAL;

	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
		unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
		uint64_t ids_copy[size];

		for (i = 0; i < size; i++) {
			if (ids[i] < basic_count) {
				no_basic_stat_requested = 0;
				break;
			}

			/*
			 * Convert ids to xstats ids that PMD knows.
			 * ids known by user are basic + extended stats.
3455 */ 3456 ids_copy[i] = ids[i] - basic_count; 3457 } 3458 3459 if (no_basic_stat_requested) 3460 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3461 values, size); 3462 } 3463 3464 if (ids) { 3465 for (i = 0; i < size; i++) { 3466 if (ids[i] >= basic_count) { 3467 no_ext_stat_requested = 0; 3468 break; 3469 } 3470 } 3471 } 3472 3473 /* Fill the xstats structure */ 3474 if (ids && no_ext_stat_requested) 3475 ret = eth_basic_stats_get(port_id, xstats); 3476 else 3477 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3478 3479 if (ret < 0) 3480 return ret; 3481 num_xstats_filled = (unsigned int)ret; 3482 3483 /* Return all stats */ 3484 if (!ids) { 3485 for (i = 0; i < num_xstats_filled; i++) 3486 values[i] = xstats[i].value; 3487 return expected_entries; 3488 } 3489 3490 /* Filter stats */ 3491 for (i = 0; i < size; i++) { 3492 if (ids[i] >= expected_entries) { 3493 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3494 return -1; 3495 } 3496 values[i] = xstats[ids[i]].value; 3497 } 3498 3499 rte_eth_trace_xstats_get_by_id(port_id, ids, values, size); 3500 3501 return size; 3502 } 3503 3504 int 3505 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3506 unsigned int n) 3507 { 3508 struct rte_eth_dev *dev; 3509 unsigned int count, i; 3510 signed int xcount = 0; 3511 int ret; 3512 3513 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3514 if (xstats == NULL && n > 0) 3515 return -EINVAL; 3516 dev = &rte_eth_devices[port_id]; 3517 3518 count = eth_dev_get_xstats_basic_count(dev); 3519 3520 /* implemented by the driver */ 3521 if (dev->dev_ops->xstats_get != NULL) { 3522 /* Retrieve the xstats from the driver at the end of the 3523 * xstats struct. 3524 */ 3525 xcount = (*dev->dev_ops->xstats_get)(dev, 3526 (n > count) ? xstats + count : NULL, 3527 (n > count) ? 
n - count : 0); 3528 3529 if (xcount < 0) 3530 return eth_err(port_id, xcount); 3531 } 3532 3533 if (n < count + xcount || xstats == NULL) 3534 return count + xcount; 3535 3536 /* now fill the xstats structure */ 3537 ret = eth_basic_stats_get(port_id, xstats); 3538 if (ret < 0) 3539 return ret; 3540 count = ret; 3541 3542 for (i = 0; i < count; i++) 3543 xstats[i].id = i; 3544 /* add an offset to driver-specific stats */ 3545 for ( ; i < count + xcount; i++) 3546 xstats[i].id += count; 3547 3548 for (i = 0; i < n; i++) 3549 rte_eth_trace_xstats_get(port_id, xstats[i]); 3550 3551 return count + xcount; 3552 } 3553 3554 /* reset ethdev extended statistics */ 3555 int 3556 rte_eth_xstats_reset(uint16_t port_id) 3557 { 3558 struct rte_eth_dev *dev; 3559 3560 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3561 dev = &rte_eth_devices[port_id]; 3562 3563 /* implemented by the driver */ 3564 if (dev->dev_ops->xstats_reset != NULL) { 3565 int ret = eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3566 3567 rte_eth_trace_xstats_reset(port_id, ret); 3568 3569 return ret; 3570 } 3571 3572 /* fallback to default */ 3573 return rte_eth_stats_reset(port_id); 3574 } 3575 3576 static int 3577 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3578 uint8_t stat_idx, uint8_t is_rx) 3579 { 3580 struct rte_eth_dev *dev; 3581 3582 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3583 dev = &rte_eth_devices[port_id]; 3584 3585 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3586 return -EINVAL; 3587 3588 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3589 return -EINVAL; 3590 3591 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3592 return -EINVAL; 3593 3594 if (*dev->dev_ops->queue_stats_mapping_set == NULL) 3595 return -ENOTSUP; 3596 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3597 } 3598 3599 int 3600 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3601 uint8_t stat_idx) 3602 { 3603 int ret; 3604 3605 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3606 tx_queue_id, 3607 stat_idx, STAT_QMAP_TX)); 3608 3609 rte_ethdev_trace_set_tx_queue_stats_mapping(port_id, tx_queue_id, 3610 stat_idx, ret); 3611 3612 return ret; 3613 } 3614 3615 int 3616 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3617 uint8_t stat_idx) 3618 { 3619 int ret; 3620 3621 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3622 rx_queue_id, 3623 stat_idx, STAT_QMAP_RX)); 3624 3625 rte_ethdev_trace_set_rx_queue_stats_mapping(port_id, rx_queue_id, 3626 stat_idx, ret); 3627 3628 return ret; 3629 } 3630 3631 int 3632 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3633 { 3634 struct rte_eth_dev *dev; 3635 int ret; 3636 3637 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3638 dev = &rte_eth_devices[port_id]; 3639 3640 if (fw_version == NULL && fw_size > 0) { 3641 RTE_ETHDEV_LOG(ERR, 3642 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3643 port_id); 3644 return -EINVAL; 3645 } 3646 3647 if (*dev->dev_ops->fw_version_get == NULL) 3648 return -ENOTSUP; 3649 ret = eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3650 fw_version, fw_size)); 3651 3652 rte_ethdev_trace_fw_version_get(port_id, fw_version, fw_size, ret); 3653 3654 return ret; 3655 } 3656 3657 int 3658 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3659 { 3660 struct rte_eth_dev *dev; 3661 const struct rte_eth_desc_lim lim = { 3662 
.nb_max = UINT16_MAX, 3663 .nb_min = 0, 3664 .nb_align = 1, 3665 .nb_seg_max = UINT16_MAX, 3666 .nb_mtu_seg_max = UINT16_MAX, 3667 }; 3668 int diag; 3669 3670 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3671 dev = &rte_eth_devices[port_id]; 3672 3673 if (dev_info == NULL) { 3674 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3675 port_id); 3676 return -EINVAL; 3677 } 3678 3679 /* 3680 * Init dev_info before port_id check since caller does not have 3681 * return status and does not know if get is successful or not. 3682 */ 3683 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3684 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3685 3686 dev_info->rx_desc_lim = lim; 3687 dev_info->tx_desc_lim = lim; 3688 dev_info->device = dev->device; 3689 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3690 RTE_ETHER_CRC_LEN; 3691 dev_info->max_mtu = UINT16_MAX; 3692 3693 if (*dev->dev_ops->dev_infos_get == NULL) 3694 return -ENOTSUP; 3695 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3696 if (diag != 0) { 3697 /* Cleanup already filled in device information */ 3698 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3699 return eth_err(port_id, diag); 3700 } 3701 3702 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3703 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3704 RTE_MAX_QUEUES_PER_PORT); 3705 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3706 RTE_MAX_QUEUES_PER_PORT); 3707 3708 dev_info->driver_name = dev->device->driver->name; 3709 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3710 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3711 3712 dev_info->dev_flags = &dev->data->dev_flags; 3713 3714 rte_ethdev_trace_info_get(port_id, dev_info); 3715 3716 return 0; 3717 } 3718 3719 int 3720 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3721 { 3722 struct rte_eth_dev *dev; 3723 3724 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3725 dev = &rte_eth_devices[port_id]; 3726 3727 if (dev_conf == NULL) { 3728 RTE_ETHDEV_LOG(ERR, 3729 "Cannot get ethdev port %u configuration to NULL\n", 3730 port_id); 3731 return -EINVAL; 3732 } 3733 3734 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3735 3736 rte_ethdev_trace_conf_get(port_id, dev_conf); 3737 3738 return 0; 3739 } 3740 3741 int 3742 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3743 uint32_t *ptypes, int num) 3744 { 3745 int i, j; 3746 struct rte_eth_dev *dev; 3747 const uint32_t *all_ptypes; 3748 3749 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3750 dev = &rte_eth_devices[port_id]; 3751 3752 if (ptypes == NULL && num > 0) { 3753 RTE_ETHDEV_LOG(ERR, 3754 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3755 port_id); 3756 return -EINVAL; 3757 } 3758 3759 if (*dev->dev_ops->dev_supported_ptypes_get == NULL) 3760 return 0; 3761 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3762 3763 if (!all_ptypes) 3764 return 0; 3765 3766 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3767 if (all_ptypes[i] & ptype_mask) { 3768 if (j < num) { 3769 ptypes[j] = all_ptypes[i]; 3770 3771 rte_ethdev_trace_get_supported_ptypes(port_id, 3772 j, num, ptypes[j]); 3773 } 3774 j++; 3775 } 3776 3777 return j; 3778 } 3779 3780 int 3781 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3782 uint32_t *set_ptypes, unsigned int num) 3783 { 3784 const uint32_t valid_ptype_masks[] = { 3785 RTE_PTYPE_L2_MASK, 
3786 RTE_PTYPE_L3_MASK, 3787 RTE_PTYPE_L4_MASK, 3788 RTE_PTYPE_TUNNEL_MASK, 3789 RTE_PTYPE_INNER_L2_MASK, 3790 RTE_PTYPE_INNER_L3_MASK, 3791 RTE_PTYPE_INNER_L4_MASK, 3792 }; 3793 const uint32_t *all_ptypes; 3794 struct rte_eth_dev *dev; 3795 uint32_t unused_mask; 3796 unsigned int i, j; 3797 int ret; 3798 3799 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3800 dev = &rte_eth_devices[port_id]; 3801 3802 if (num > 0 && set_ptypes == NULL) { 3803 RTE_ETHDEV_LOG(ERR, 3804 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3805 port_id); 3806 return -EINVAL; 3807 } 3808 3809 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3810 *dev->dev_ops->dev_ptypes_set == NULL) { 3811 ret = 0; 3812 goto ptype_unknown; 3813 } 3814 3815 if (ptype_mask == 0) { 3816 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3817 ptype_mask); 3818 goto ptype_unknown; 3819 } 3820 3821 unused_mask = ptype_mask; 3822 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3823 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3824 if (mask && mask != valid_ptype_masks[i]) { 3825 ret = -EINVAL; 3826 goto ptype_unknown; 3827 } 3828 unused_mask &= ~valid_ptype_masks[i]; 3829 } 3830 3831 if (unused_mask) { 3832 ret = -EINVAL; 3833 goto ptype_unknown; 3834 } 3835 3836 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3837 if (all_ptypes == NULL) { 3838 ret = 0; 3839 goto ptype_unknown; 3840 } 3841 3842 /* 3843 * Accommodate as many set_ptypes as possible. If the supplied 3844 * set_ptypes array is insufficient fill it partially. 3845 */ 3846 for (i = 0, j = 0; set_ptypes != NULL && 3847 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3848 if (ptype_mask & all_ptypes[i]) { 3849 if (j < num - 1) { 3850 set_ptypes[j] = all_ptypes[i]; 3851 3852 rte_ethdev_trace_set_ptypes(port_id, j, num, 3853 set_ptypes[j]); 3854 3855 j++; 3856 continue; 3857 } 3858 break; 3859 } 3860 } 3861 3862 if (set_ptypes != NULL && j < num) 3863 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3864 3865 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3866 3867 ptype_unknown: 3868 if (num > 0) 3869 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3870 3871 return ret; 3872 } 3873 3874 int 3875 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3876 unsigned int num) 3877 { 3878 int32_t ret; 3879 struct rte_eth_dev *dev; 3880 struct rte_eth_dev_info dev_info; 3881 3882 if (ma == NULL) { 3883 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3884 return -EINVAL; 3885 } 3886 3887 /* will check for us that port_id is a valid one */ 3888 ret = rte_eth_dev_info_get(port_id, &dev_info); 3889 if (ret != 0) 3890 return ret; 3891 3892 dev = &rte_eth_devices[port_id]; 3893 num = RTE_MIN(dev_info.max_mac_addrs, num); 3894 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3895 3896 rte_eth_trace_macaddrs_get(port_id, num); 3897 3898 return num; 3899 } 3900 3901 int 3902 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3903 { 3904 struct rte_eth_dev *dev; 3905 3906 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3907 dev = &rte_eth_devices[port_id]; 3908 3909 if (mac_addr == NULL) { 3910 RTE_ETHDEV_LOG(ERR, 3911 "Cannot get ethdev port %u MAC address to NULL\n", 3912 port_id); 3913 return -EINVAL; 3914 } 3915 3916 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3917 3918 rte_eth_trace_macaddr_get(port_id, mac_addr); 3919 3920 return 0; 3921 } 3922 3923 int 3924 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3925 { 3926 struct rte_eth_dev *dev; 3927 3928 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3929 dev = &rte_eth_devices[port_id]; 3930 3931 if (mtu == NULL) { 3932 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3933 port_id); 3934 return -EINVAL; 3935 } 3936 3937 *mtu = dev->data->mtu; 3938 3939 rte_ethdev_trace_get_mtu(port_id, *mtu); 3940 3941 return 0; 3942 } 3943 3944 int 3945 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3946 { 3947 int ret; 3948 struct rte_eth_dev_info dev_info; 3949 struct rte_eth_dev *dev; 3950 3951 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3952 dev = &rte_eth_devices[port_id]; 3953 if (*dev->dev_ops->mtu_set == NULL) 3954 return -ENOTSUP; 3955 3956 /* 3957 * Check if the device supports dev_infos_get, if it does not 3958 * skip min_mtu/max_mtu validation here as this requires values 3959 * that are populated within the call to rte_eth_dev_info_get() 3960 * which relies on dev->dev_ops->dev_infos_get. 3961 */ 3962 if (*dev->dev_ops->dev_infos_get != NULL) { 3963 ret = rte_eth_dev_info_get(port_id, &dev_info); 3964 if (ret != 0) 3965 return ret; 3966 3967 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3968 if (ret != 0) 3969 return ret; 3970 } 3971 3972 if (dev->data->dev_configured == 0) { 3973 RTE_ETHDEV_LOG(ERR, 3974 "Port %u must be configured before MTU set\n", 3975 port_id); 3976 return -EINVAL; 3977 } 3978 3979 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3980 if (ret == 0) 3981 dev->data->mtu = mtu; 3982 3983 ret = eth_err(port_id, ret); 3984 3985 rte_ethdev_trace_set_mtu(port_id, mtu, ret); 3986 3987 return ret; 3988 } 3989 3990 int 3991 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3992 { 3993 struct rte_eth_dev *dev; 3994 int ret; 3995 3996 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3997 dev = &rte_eth_devices[port_id]; 3998 3999 if (!(dev->data->dev_conf.rxmode.offloads & 4000 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 4001 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 4002 port_id); 4003 return -ENOSYS; 4004 } 4005 4006 if (vlan_id > 4095) { 4007 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 4008 port_id, vlan_id); 4009 return -EINVAL; 4010 } 4011 if (*dev->dev_ops->vlan_filter_set == NULL) 4012 return -ENOTSUP; 4013 4014 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 4015 if (ret == 0) { 4016 struct rte_vlan_filter_conf *vfc; 4017 int vidx; 4018 int vbit; 4019 4020 vfc = &dev->data->vlan_filter_conf; 4021 vidx = vlan_id / 64; 4022 vbit = vlan_id % 64; 4023 4024 if (on) 4025 vfc->ids[vidx] |= RTE_BIT64(vbit); 4026 else 4027 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 4028 } 4029 4030 ret = eth_err(port_id, ret); 4031 4032 rte_ethdev_trace_vlan_filter(port_id, vlan_id, on, ret); 4033 4034 return ret; 4035 } 4036 4037 int 4038 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 4039 int on) 4040 { 4041 struct rte_eth_dev *dev; 4042 4043 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4044 dev = &rte_eth_devices[port_id]; 4045 4046 if (rx_queue_id >= dev->data->nb_rx_queues) { 4047 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 4048 return -EINVAL; 4049 } 4050 4051 if (*dev->dev_ops->vlan_strip_queue_set == NULL) 4052 return -ENOTSUP; 4053 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 4054 4055 rte_ethdev_trace_set_vlan_strip_on_queue(port_id, rx_queue_id, on); 4056 4057 return 0; 4058 } 4059 4060 int 4061 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 4062 enum rte_vlan_type vlan_type, 4063 uint16_t tpid) 4064 { 4065 struct rte_eth_dev *dev; 4066 int 
ret; 4067 4068 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4069 dev = &rte_eth_devices[port_id]; 4070 4071 if (*dev->dev_ops->vlan_tpid_set == NULL) 4072 return -ENOTSUP; 4073 ret = eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 4074 tpid)); 4075 4076 rte_ethdev_trace_set_vlan_ether_type(port_id, vlan_type, tpid, ret); 4077 4078 return ret; 4079 } 4080 4081 int 4082 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 4083 { 4084 struct rte_eth_dev_info dev_info; 4085 struct rte_eth_dev *dev; 4086 int ret = 0; 4087 int mask = 0; 4088 int cur, org = 0; 4089 uint64_t orig_offloads; 4090 uint64_t dev_offloads; 4091 uint64_t new_offloads; 4092 4093 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4094 dev = &rte_eth_devices[port_id]; 4095 4096 /* save original values in case of failure */ 4097 orig_offloads = dev->data->dev_conf.rxmode.offloads; 4098 dev_offloads = orig_offloads; 4099 4100 /* check which option changed by application */ 4101 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 4102 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 4103 if (cur != org) { 4104 if (cur) 4105 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4106 else 4107 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4108 mask |= RTE_ETH_VLAN_STRIP_MASK; 4109 } 4110 4111 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 4112 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 4113 if (cur != org) { 4114 if (cur) 4115 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4116 else 4117 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4118 mask |= RTE_ETH_VLAN_FILTER_MASK; 4119 } 4120 4121 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 4122 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 4123 if (cur != org) { 4124 if (cur) 4125 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4126 else 4127 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4128 mask |= RTE_ETH_VLAN_EXTEND_MASK; 4129 } 4130 4131 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 4132 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 4133 if (cur != org) { 4134 if (cur) 4135 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4136 else 4137 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4138 mask |= RTE_ETH_QINQ_STRIP_MASK; 4139 } 4140 4141 /*no change*/ 4142 if (mask == 0) 4143 return ret; 4144 4145 ret = rte_eth_dev_info_get(port_id, &dev_info); 4146 if (ret != 0) 4147 return ret; 4148 4149 /* Rx VLAN offloading must be within its device capabilities */ 4150 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 4151 new_offloads = dev_offloads & ~orig_offloads; 4152 RTE_ETHDEV_LOG(ERR, 4153 "Ethdev port_id=%u requested new added VLAN offloads " 4154 "0x%" PRIx64 " must be within Rx offloads capabilities " 4155 "0x%" PRIx64 " in %s()\n", 4156 port_id, new_offloads, dev_info.rx_offload_capa, 4157 __func__); 4158 return -EINVAL; 4159 } 4160 4161 if (*dev->dev_ops->vlan_offload_set == NULL) 4162 return -ENOTSUP; 4163 dev->data->dev_conf.rxmode.offloads = dev_offloads; 4164 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 4165 if (ret) { 4166 /* hit an error restore original values */ 4167 dev->data->dev_conf.rxmode.offloads = orig_offloads; 4168 } 4169 4170 ret = eth_err(port_id, ret); 4171 4172 rte_ethdev_trace_set_vlan_offload(port_id, offload_mask, ret); 4173 4174 return ret; 4175 } 4176 4177 int 4178 rte_eth_dev_get_vlan_offload(uint16_t port_id) 4179 { 4180 struct rte_eth_dev *dev; 4181 uint64_t *dev_offloads; 4182 int ret = 0; 4183 4184 
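	/*
	 * The returned mask uses the same bits accepted by
	 * rte_eth_dev_set_vlan_offload(); e.g. an application enabling
	 * stripping plus filtering would call (illustrative)
	 * rte_eth_dev_set_vlan_offload(port_id, RTE_ETH_VLAN_STRIP_OFFLOAD |
	 * RTE_ETH_VLAN_FILTER_OFFLOAD) and can read the result back here.
	 */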
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4185 dev = &rte_eth_devices[port_id]; 4186 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 4187 4188 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 4189 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 4190 4191 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 4192 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 4193 4194 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 4195 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 4196 4197 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 4198 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 4199 4200 rte_ethdev_trace_get_vlan_offload(port_id, ret); 4201 4202 return ret; 4203 } 4204 4205 int 4206 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 4207 { 4208 struct rte_eth_dev *dev; 4209 int ret; 4210 4211 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4212 dev = &rte_eth_devices[port_id]; 4213 4214 if (*dev->dev_ops->vlan_pvid_set == NULL) 4215 return -ENOTSUP; 4216 ret = eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 4217 4218 rte_ethdev_trace_set_vlan_pvid(port_id, pvid, on, ret); 4219 4220 return ret; 4221 } 4222 4223 int 4224 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4225 { 4226 struct rte_eth_dev *dev; 4227 int ret; 4228 4229 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4230 dev = &rte_eth_devices[port_id]; 4231 4232 if (fc_conf == NULL) { 4233 RTE_ETHDEV_LOG(ERR, 4234 "Cannot get ethdev port %u flow control config to NULL\n", 4235 port_id); 4236 return -EINVAL; 4237 } 4238 4239 if (*dev->dev_ops->flow_ctrl_get == NULL) 4240 return -ENOTSUP; 4241 memset(fc_conf, 0, sizeof(*fc_conf)); 4242 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 4243 4244 rte_ethdev_trace_flow_ctrl_get(port_id, fc_conf, ret); 4245 4246 return ret; 4247 } 4248 4249 int 4250 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4251 { 4252 struct rte_eth_dev *dev; 4253 int ret; 4254 4255 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4256 dev = &rte_eth_devices[port_id]; 4257 4258 if (fc_conf == NULL) { 4259 RTE_ETHDEV_LOG(ERR, 4260 "Cannot set ethdev port %u flow control from NULL config\n", 4261 port_id); 4262 return -EINVAL; 4263 } 4264 4265 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 4266 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 4267 return -EINVAL; 4268 } 4269 4270 if (*dev->dev_ops->flow_ctrl_set == NULL) 4271 return -ENOTSUP; 4272 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 4273 4274 rte_ethdev_trace_flow_ctrl_set(port_id, fc_conf, ret); 4275 4276 return ret; 4277 } 4278 4279 int 4280 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 4281 struct rte_eth_pfc_conf *pfc_conf) 4282 { 4283 struct rte_eth_dev *dev; 4284 int ret; 4285 4286 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4287 dev = &rte_eth_devices[port_id]; 4288 4289 if (pfc_conf == NULL) { 4290 RTE_ETHDEV_LOG(ERR, 4291 "Cannot set ethdev port %u priority flow control from NULL config\n", 4292 port_id); 4293 return -EINVAL; 4294 } 4295 4296 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 4297 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 4298 return -EINVAL; 4299 } 4300 4301 /* High water, low water validation are device specific */ 4302 if (*dev->dev_ops->priority_flow_ctrl_set == NULL) 4303 return -ENOTSUP; 4304 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 4305 (dev, pfc_conf)); 4306 4307 rte_ethdev_trace_priority_flow_ctrl_set(port_id, 
pfc_conf, ret); 4308 4309 return ret; 4310 } 4311 4312 static int 4313 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4314 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4315 { 4316 if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) || 4317 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4318 if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) { 4319 RTE_ETHDEV_LOG(ERR, 4320 "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n", 4321 pfc_queue_conf->rx_pause.tx_qid, 4322 dev_info->nb_tx_queues); 4323 return -EINVAL; 4324 } 4325 4326 if (pfc_queue_conf->rx_pause.tc >= tc_max) { 4327 RTE_ETHDEV_LOG(ERR, 4328 "PFC TC not in range for Rx pause requested:%d max:%d\n", 4329 pfc_queue_conf->rx_pause.tc, tc_max); 4330 return -EINVAL; 4331 } 4332 } 4333 4334 return 0; 4335 } 4336 4337 static int 4338 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4339 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4340 { 4341 if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) || 4342 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4343 if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) { 4344 RTE_ETHDEV_LOG(ERR, 4345 "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n", 4346 pfc_queue_conf->tx_pause.rx_qid, 4347 dev_info->nb_rx_queues); 4348 return -EINVAL; 4349 } 4350 4351 if (pfc_queue_conf->tx_pause.tc >= tc_max) { 4352 RTE_ETHDEV_LOG(ERR, 4353 "PFC TC not in range for Tx pause requested:%d max:%d\n", 4354 pfc_queue_conf->tx_pause.tc, tc_max); 4355 return -EINVAL; 4356 } 4357 } 4358 4359 return 0; 4360 } 4361 4362 int 4363 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, 4364 struct rte_eth_pfc_queue_info *pfc_queue_info) 4365 { 4366 struct rte_eth_dev *dev; 4367 int ret; 4368 4369 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4370 dev = &rte_eth_devices[port_id]; 4371 4372 if (pfc_queue_info == NULL) { 4373 RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n", 4374 port_id); 4375 return -EINVAL; 4376 } 4377 4378 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL) 4379 return -ENOTSUP; 4380 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 4381 (dev, pfc_queue_info)); 4382 4383 rte_ethdev_trace_priority_flow_ctrl_queue_info_get(port_id, 4384 pfc_queue_info, ret); 4385 4386 return ret; 4387 } 4388 4389 int 4390 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, 4391 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4392 { 4393 struct rte_eth_pfc_queue_info pfc_info; 4394 struct rte_eth_dev_info dev_info; 4395 struct rte_eth_dev *dev; 4396 int ret; 4397 4398 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4399 dev = &rte_eth_devices[port_id]; 4400 4401 if (pfc_queue_conf == NULL) { 4402 RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n", 4403 port_id); 4404 return -EINVAL; 4405 } 4406 4407 ret = rte_eth_dev_info_get(port_id, &dev_info); 4408 if (ret != 0) 4409 return ret; 4410 4411 ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info); 4412 if (ret != 0) 4413 return ret; 4414 4415 if (pfc_info.tc_max == 0) { 4416 RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n", 4417 port_id); 4418 return -ENOTSUP; 4419 } 4420 4421 /* Check requested mode supported or not */ 4422 if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE && 4423 pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) { 4424 RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n", 4425 port_id); 4426 return -EINVAL; 4427 } 4428 4429 if 
(pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE && 4430 pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) { 4431 RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n", 4432 port_id); 4433 return -EINVAL; 4434 } 4435 4436 /* Validate Rx pause parameters */ 4437 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4438 pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) { 4439 ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max, 4440 pfc_queue_conf); 4441 if (ret != 0) 4442 return ret; 4443 } 4444 4445 /* Validate Tx pause parameters */ 4446 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4447 pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) { 4448 ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max, 4449 pfc_queue_conf); 4450 if (ret != 0) 4451 return ret; 4452 } 4453 4454 if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL) 4455 return -ENOTSUP; 4456 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_config) 4457 (dev, pfc_queue_conf)); 4458 4459 rte_ethdev_trace_priority_flow_ctrl_queue_configure(port_id, 4460 pfc_queue_conf, ret); 4461 4462 return ret; 4463 } 4464 4465 static int 4466 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 4467 uint16_t reta_size) 4468 { 4469 uint16_t i, num; 4470 4471 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 4472 for (i = 0; i < num; i++) { 4473 if (reta_conf[i].mask) 4474 return 0; 4475 } 4476 4477 return -EINVAL; 4478 } 4479 4480 static int 4481 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 4482 uint16_t reta_size, 4483 uint16_t max_rxq) 4484 { 4485 uint16_t i, idx, shift; 4486 4487 if (max_rxq == 0) { 4488 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4489 return -EINVAL; 4490 } 4491 4492 for (i = 0; i < reta_size; i++) { 4493 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4494 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4495 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 4496 (reta_conf[idx].reta[shift] >= max_rxq)) { 4497 RTE_ETHDEV_LOG(ERR, 4498 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4499 idx, shift, 4500 reta_conf[idx].reta[shift], max_rxq); 4501 return -EINVAL; 4502 } 4503 } 4504 4505 return 0; 4506 } 4507 4508 int 4509 rte_eth_dev_rss_reta_update(uint16_t port_id, 4510 struct rte_eth_rss_reta_entry64 *reta_conf, 4511 uint16_t reta_size) 4512 { 4513 enum rte_eth_rx_mq_mode mq_mode; 4514 struct rte_eth_dev *dev; 4515 int ret; 4516 4517 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4518 dev = &rte_eth_devices[port_id]; 4519 4520 if (reta_conf == NULL) { 4521 RTE_ETHDEV_LOG(ERR, 4522 "Cannot update ethdev port %u RSS RETA to NULL\n", 4523 port_id); 4524 return -EINVAL; 4525 } 4526 4527 if (reta_size == 0) { 4528 RTE_ETHDEV_LOG(ERR, 4529 "Cannot update ethdev port %u RSS RETA with zero size\n", 4530 port_id); 4531 return -EINVAL; 4532 } 4533 4534 /* Check mask bits */ 4535 ret = eth_check_reta_mask(reta_conf, reta_size); 4536 if (ret < 0) 4537 return ret; 4538 4539 /* Check entry value */ 4540 ret = eth_check_reta_entry(reta_conf, reta_size, 4541 dev->data->nb_rx_queues); 4542 if (ret < 0) 4543 return ret; 4544 4545 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4546 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4547 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4548 return -ENOTSUP; 4549 } 4550 4551 if (*dev->dev_ops->reta_update == NULL) 4552 return -ENOTSUP; 4553 ret = eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4554 reta_size)); 4555 4556 rte_ethdev_trace_rss_reta_update(port_id, reta_conf, reta_size, ret); 4557 4558 return 
ret; 4559 } 4560 4561 int 4562 rte_eth_dev_rss_reta_query(uint16_t port_id, 4563 struct rte_eth_rss_reta_entry64 *reta_conf, 4564 uint16_t reta_size) 4565 { 4566 struct rte_eth_dev *dev; 4567 int ret; 4568 4569 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4570 dev = &rte_eth_devices[port_id]; 4571 4572 if (reta_conf == NULL) { 4573 RTE_ETHDEV_LOG(ERR, 4574 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4575 port_id); 4576 return -EINVAL; 4577 } 4578 4579 /* Check mask bits */ 4580 ret = eth_check_reta_mask(reta_conf, reta_size); 4581 if (ret < 0) 4582 return ret; 4583 4584 if (*dev->dev_ops->reta_query == NULL) 4585 return -ENOTSUP; 4586 ret = eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4587 reta_size)); 4588 4589 rte_ethdev_trace_rss_reta_query(port_id, reta_conf, reta_size, ret); 4590 4591 return ret; 4592 } 4593 4594 int 4595 rte_eth_dev_rss_hash_update(uint16_t port_id, 4596 struct rte_eth_rss_conf *rss_conf) 4597 { 4598 struct rte_eth_dev *dev; 4599 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4600 enum rte_eth_rx_mq_mode mq_mode; 4601 int ret; 4602 4603 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4604 dev = &rte_eth_devices[port_id]; 4605 4606 if (rss_conf == NULL) { 4607 RTE_ETHDEV_LOG(ERR, 4608 "Cannot update ethdev port %u RSS hash from NULL config\n", 4609 port_id); 4610 return -EINVAL; 4611 } 4612 4613 ret = rte_eth_dev_info_get(port_id, &dev_info); 4614 if (ret != 0) 4615 return ret; 4616 4617 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4618 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4619 dev_info.flow_type_rss_offloads) { 4620 RTE_ETHDEV_LOG(ERR, 4621 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4622 port_id, rss_conf->rss_hf, 4623 dev_info.flow_type_rss_offloads); 4624 return -EINVAL; 4625 } 4626 4627 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4628 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4629 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4630 return -ENOTSUP; 4631 } 4632 4633 if (*dev->dev_ops->rss_hash_update == NULL) 4634 return -ENOTSUP; 4635 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4636 rss_conf)); 4637 4638 rte_ethdev_trace_rss_hash_update(port_id, rss_conf, ret); 4639 4640 return ret; 4641 } 4642 4643 int 4644 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4645 struct rte_eth_rss_conf *rss_conf) 4646 { 4647 struct rte_eth_dev *dev; 4648 int ret; 4649 4650 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4651 dev = &rte_eth_devices[port_id]; 4652 4653 if (rss_conf == NULL) { 4654 RTE_ETHDEV_LOG(ERR, 4655 "Cannot get ethdev port %u RSS hash config to NULL\n", 4656 port_id); 4657 return -EINVAL; 4658 } 4659 4660 if (*dev->dev_ops->rss_hash_conf_get == NULL) 4661 return -ENOTSUP; 4662 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4663 rss_conf)); 4664 4665 rte_ethdev_trace_rss_hash_conf_get(port_id, rss_conf, ret); 4666 4667 return ret; 4668 } 4669 4670 int 4671 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4672 struct rte_eth_udp_tunnel *udp_tunnel) 4673 { 4674 struct rte_eth_dev *dev; 4675 int ret; 4676 4677 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4678 dev = &rte_eth_devices[port_id]; 4679 4680 if (udp_tunnel == NULL) { 4681 RTE_ETHDEV_LOG(ERR, 4682 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4683 port_id); 4684 return -EINVAL; 4685 } 4686 4687 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4688 RTE_ETHDEV_LOG(ERR, "Invalid tunnel 
type\n"); 4689 return -EINVAL; 4690 } 4691 4692 if (*dev->dev_ops->udp_tunnel_port_add == NULL) 4693 return -ENOTSUP; 4694 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4695 udp_tunnel)); 4696 4697 rte_ethdev_trace_udp_tunnel_port_add(port_id, udp_tunnel, ret); 4698 4699 return ret; 4700 } 4701 4702 int 4703 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4704 struct rte_eth_udp_tunnel *udp_tunnel) 4705 { 4706 struct rte_eth_dev *dev; 4707 int ret; 4708 4709 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4710 dev = &rte_eth_devices[port_id]; 4711 4712 if (udp_tunnel == NULL) { 4713 RTE_ETHDEV_LOG(ERR, 4714 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4715 port_id); 4716 return -EINVAL; 4717 } 4718 4719 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4720 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4721 return -EINVAL; 4722 } 4723 4724 if (*dev->dev_ops->udp_tunnel_port_del == NULL) 4725 return -ENOTSUP; 4726 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4727 udp_tunnel)); 4728 4729 rte_ethdev_trace_udp_tunnel_port_delete(port_id, udp_tunnel, ret); 4730 4731 return ret; 4732 } 4733 4734 int 4735 rte_eth_led_on(uint16_t port_id) 4736 { 4737 struct rte_eth_dev *dev; 4738 int ret; 4739 4740 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4741 dev = &rte_eth_devices[port_id]; 4742 4743 if (*dev->dev_ops->dev_led_on == NULL) 4744 return -ENOTSUP; 4745 ret = eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4746 4747 rte_eth_trace_led_on(port_id, ret); 4748 4749 return ret; 4750 } 4751 4752 int 4753 rte_eth_led_off(uint16_t port_id) 4754 { 4755 struct rte_eth_dev *dev; 4756 int ret; 4757 4758 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4759 dev = &rte_eth_devices[port_id]; 4760 4761 if (*dev->dev_ops->dev_led_off == NULL) 4762 return -ENOTSUP; 4763 ret = eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4764 4765 rte_eth_trace_led_off(port_id, ret); 4766 4767 return ret; 4768 } 4769 4770 int 4771 rte_eth_fec_get_capability(uint16_t port_id, 4772 struct rte_eth_fec_capa *speed_fec_capa, 4773 unsigned int num) 4774 { 4775 struct rte_eth_dev *dev; 4776 int ret; 4777 4778 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4779 dev = &rte_eth_devices[port_id]; 4780 4781 if (speed_fec_capa == NULL && num > 0) { 4782 RTE_ETHDEV_LOG(ERR, 4783 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4784 port_id); 4785 return -EINVAL; 4786 } 4787 4788 if (*dev->dev_ops->fec_get_capability == NULL) 4789 return -ENOTSUP; 4790 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4791 4792 rte_eth_trace_fec_get_capability(port_id, speed_fec_capa, num, ret); 4793 4794 return ret; 4795 } 4796 4797 int 4798 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4799 { 4800 struct rte_eth_dev *dev; 4801 int ret; 4802 4803 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4804 dev = &rte_eth_devices[port_id]; 4805 4806 if (fec_capa == NULL) { 4807 RTE_ETHDEV_LOG(ERR, 4808 "Cannot get ethdev port %u current FEC mode to NULL\n", 4809 port_id); 4810 return -EINVAL; 4811 } 4812 4813 if (*dev->dev_ops->fec_get == NULL) 4814 return -ENOTSUP; 4815 ret = eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4816 4817 rte_eth_trace_fec_get(port_id, fec_capa, ret); 4818 4819 return ret; 4820 } 4821 4822 int 4823 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4824 { 4825 struct rte_eth_dev *dev; 4826 int ret; 4827 4828 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4829 dev 
= &rte_eth_devices[port_id]; 4830 4831 if (fec_capa == 0) { 4832 RTE_ETHDEV_LOG(ERR, "At least one FEC mode should be specified\n"); 4833 return -EINVAL; 4834 } 4835 4836 if (*dev->dev_ops->fec_set == NULL) 4837 return -ENOTSUP; 4838 ret = eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4839 4840 rte_eth_trace_fec_set(port_id, fec_capa, ret); 4841 4842 return ret; 4843 } 4844 4845 /* 4846 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4847 * an empty spot. 4848 */ 4849 static int 4850 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4851 { 4852 struct rte_eth_dev_info dev_info; 4853 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4854 unsigned i; 4855 int ret; 4856 4857 ret = rte_eth_dev_info_get(port_id, &dev_info); 4858 if (ret != 0) 4859 return -1; 4860 4861 for (i = 0; i < dev_info.max_mac_addrs; i++) 4862 if (memcmp(addr, &dev->data->mac_addrs[i], 4863 RTE_ETHER_ADDR_LEN) == 0) 4864 return i; 4865 4866 return -1; 4867 } 4868 4869 static const struct rte_ether_addr null_mac_addr; 4870 4871 int 4872 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4873 uint32_t pool) 4874 { 4875 struct rte_eth_dev *dev; 4876 int index; 4877 uint64_t pool_mask; 4878 int ret; 4879 4880 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4881 dev = &rte_eth_devices[port_id]; 4882 4883 if (addr == NULL) { 4884 RTE_ETHDEV_LOG(ERR, 4885 "Cannot add ethdev port %u MAC address from NULL address\n", 4886 port_id); 4887 return -EINVAL; 4888 } 4889 4890 if (*dev->dev_ops->mac_addr_add == NULL) 4891 return -ENOTSUP; 4892 4893 if (rte_is_zero_ether_addr(addr)) { 4894 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4895 port_id); 4896 return -EINVAL; 4897 } 4898 if (pool >= RTE_ETH_64_POOLS) { 4899 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 4900 return -EINVAL; 4901 } 4902 4903 index = eth_dev_get_mac_addr_index(port_id, addr); 4904 if (index < 0) { 4905 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4906 if (index < 0) { 4907 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4908 port_id); 4909 return -ENOSPC; 4910 } 4911 } else { 4912 pool_mask = dev->data->mac_pool_sel[index]; 4913 4914 /* Check if both MAC address and pool is already there, and do nothing */ 4915 if (pool_mask & RTE_BIT64(pool)) 4916 return 0; 4917 } 4918 4919 /* Update NIC */ 4920 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4921 4922 if (ret == 0) { 4923 /* Update address in NIC data structure */ 4924 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4925 4926 /* Update pool bitmap in NIC data structure */ 4927 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4928 } 4929 4930 ret = eth_err(port_id, ret); 4931 4932 rte_ethdev_trace_mac_addr_add(port_id, addr, pool, ret); 4933 4934 return ret; 4935 } 4936 4937 int 4938 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4939 { 4940 struct rte_eth_dev *dev; 4941 int index; 4942 4943 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4944 dev = &rte_eth_devices[port_id]; 4945 4946 if (addr == NULL) { 4947 RTE_ETHDEV_LOG(ERR, 4948 "Cannot remove ethdev port %u MAC address from NULL address\n", 4949 port_id); 4950 return -EINVAL; 4951 } 4952 4953 if (*dev->dev_ops->mac_addr_remove == NULL) 4954 return -ENOTSUP; 4955 4956 index = eth_dev_get_mac_addr_index(port_id, addr); 4957 if (index == 0) { 4958 RTE_ETHDEV_LOG(ERR, 4959 "Port %u: Cannot remove default MAC address\n", 4960 port_id); 4961 
return -EADDRINUSE; 4962 } else if (index < 0) 4963 return 0; /* Do nothing if address wasn't found */ 4964 4965 /* Update NIC */ 4966 (*dev->dev_ops->mac_addr_remove)(dev, index); 4967 4968 /* Update address in NIC data structure */ 4969 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4970 4971 /* reset pool bitmap */ 4972 dev->data->mac_pool_sel[index] = 0; 4973 4974 rte_ethdev_trace_mac_addr_remove(port_id, addr); 4975 4976 return 0; 4977 } 4978 4979 int 4980 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4981 { 4982 struct rte_eth_dev *dev; 4983 int index; 4984 int ret; 4985 4986 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4987 dev = &rte_eth_devices[port_id]; 4988 4989 if (addr == NULL) { 4990 RTE_ETHDEV_LOG(ERR, 4991 "Cannot set ethdev port %u default MAC address from NULL address\n", 4992 port_id); 4993 return -EINVAL; 4994 } 4995 4996 if (!rte_is_valid_assigned_ether_addr(addr)) 4997 return -EINVAL; 4998 4999 if (*dev->dev_ops->mac_addr_set == NULL) 5000 return -ENOTSUP; 5001 5002 /* Keep address unique in dev->data->mac_addrs[]. */ 5003 index = eth_dev_get_mac_addr_index(port_id, addr); 5004 if (index > 0) { 5005 RTE_ETHDEV_LOG(ERR, 5006 "New default address for port %u was already in the address list. Please remove it first.\n", 5007 port_id); 5008 return -EEXIST; 5009 } 5010 5011 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 5012 if (ret < 0) 5013 return ret; 5014 5015 /* Update default address in NIC data structure */ 5016 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 5017 5018 rte_ethdev_trace_default_mac_addr_set(port_id, addr); 5019 5020 return 0; 5021 } 5022 5023 5024 /* 5025 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 5026 * an empty spot. 
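 * Unlike eth_dev_get_mac_addr_index() above, this helper searches
 * dev->data->hash_mac_addrs, the unicast hash table that is managed by
 * rte_eth_dev_uc_hash_table_set().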
5027 */ 5028 static int 5029 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 5030 const struct rte_ether_addr *addr) 5031 { 5032 struct rte_eth_dev_info dev_info; 5033 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5034 unsigned i; 5035 int ret; 5036 5037 ret = rte_eth_dev_info_get(port_id, &dev_info); 5038 if (ret != 0) 5039 return -1; 5040 5041 if (!dev->data->hash_mac_addrs) 5042 return -1; 5043 5044 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 5045 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 5046 RTE_ETHER_ADDR_LEN) == 0) 5047 return i; 5048 5049 return -1; 5050 } 5051 5052 int 5053 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 5054 uint8_t on) 5055 { 5056 int index; 5057 int ret; 5058 struct rte_eth_dev *dev; 5059 5060 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5061 dev = &rte_eth_devices[port_id]; 5062 5063 if (addr == NULL) { 5064 RTE_ETHDEV_LOG(ERR, 5065 "Cannot set ethdev port %u unicast hash table from NULL address\n", 5066 port_id); 5067 return -EINVAL; 5068 } 5069 5070 if (rte_is_zero_ether_addr(addr)) { 5071 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 5072 port_id); 5073 return -EINVAL; 5074 } 5075 5076 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 5077 /* Check if it's already there, and do nothing */ 5078 if ((index >= 0) && on) 5079 return 0; 5080 5081 if (index < 0) { 5082 if (!on) { 5083 RTE_ETHDEV_LOG(ERR, 5084 "Port %u: the MAC address was not set in UTA\n", 5085 port_id); 5086 return -EINVAL; 5087 } 5088 5089 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 5090 if (index < 0) { 5091 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 5092 port_id); 5093 return -ENOSPC; 5094 } 5095 } 5096 5097 if (*dev->dev_ops->uc_hash_table_set == NULL) 5098 return -ENOTSUP; 5099 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 5100 if (ret == 0) { 5101 /* Update address in NIC data structure */ 5102 if (on) 5103 rte_ether_addr_copy(addr, 5104 &dev->data->hash_mac_addrs[index]); 5105 else 5106 rte_ether_addr_copy(&null_mac_addr, 5107 &dev->data->hash_mac_addrs[index]); 5108 } 5109 5110 ret = eth_err(port_id, ret); 5111 5112 rte_ethdev_trace_uc_hash_table_set(port_id, on, ret); 5113 5114 return ret; 5115 } 5116 5117 int 5118 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 5119 { 5120 struct rte_eth_dev *dev; 5121 int ret; 5122 5123 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5124 dev = &rte_eth_devices[port_id]; 5125 5126 if (*dev->dev_ops->uc_all_hash_table_set == NULL) 5127 return -ENOTSUP; 5128 ret = eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, on)); 5129 5130 rte_ethdev_trace_uc_all_hash_table_set(port_id, on, ret); 5131 5132 return ret; 5133 } 5134 5135 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 5136 uint32_t tx_rate) 5137 { 5138 struct rte_eth_dev *dev; 5139 struct rte_eth_dev_info dev_info; 5140 struct rte_eth_link link; 5141 int ret; 5142 5143 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5144 dev = &rte_eth_devices[port_id]; 5145 5146 ret = rte_eth_dev_info_get(port_id, &dev_info); 5147 if (ret != 0) 5148 return ret; 5149 5150 link = dev->data->dev_link; 5151 5152 if (queue_idx > dev_info.max_tx_queues) { 5153 RTE_ETHDEV_LOG(ERR, 5154 "Set queue rate limit:port %u: invalid queue ID=%u\n", 5155 port_id, queue_idx); 5156 return -EINVAL; 5157 } 5158 5159 if (tx_rate > link.link_speed) { 5160 RTE_ETHDEV_LOG(ERR, 5161 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 5162 
tx_rate, link.link_speed); 5163 return -EINVAL; 5164 } 5165 5166 if (*dev->dev_ops->set_queue_rate_limit == NULL) 5167 return -ENOTSUP; 5168 ret = eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 5169 queue_idx, tx_rate)); 5170 5171 rte_eth_trace_set_queue_rate_limit(port_id, queue_idx, tx_rate, ret); 5172 5173 return ret; 5174 } 5175 5176 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, 5177 uint8_t avail_thresh) 5178 { 5179 struct rte_eth_dev *dev; 5180 int ret; 5181 5182 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5183 dev = &rte_eth_devices[port_id]; 5184 5185 if (queue_id > dev->data->nb_rx_queues) { 5186 RTE_ETHDEV_LOG(ERR, 5187 "Set queue avail thresh: port %u: invalid queue ID=%u.\n", 5188 port_id, queue_id); 5189 return -EINVAL; 5190 } 5191 5192 if (avail_thresh > 99) { 5193 RTE_ETHDEV_LOG(ERR, 5194 "Set queue avail thresh: port %u: threshold should be <= 99.\n", 5195 port_id); 5196 return -EINVAL; 5197 } 5198 if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL) 5199 return -ENOTSUP; 5200 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev, 5201 queue_id, avail_thresh)); 5202 5203 rte_eth_trace_rx_avail_thresh_set(port_id, queue_id, avail_thresh, ret); 5204 5205 return ret; 5206 } 5207 5208 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, 5209 uint8_t *avail_thresh) 5210 { 5211 struct rte_eth_dev *dev; 5212 int ret; 5213 5214 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5215 dev = &rte_eth_devices[port_id]; 5216 5217 if (queue_id == NULL) 5218 return -EINVAL; 5219 if (*queue_id >= dev->data->nb_rx_queues) 5220 *queue_id = 0; 5221 5222 if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL) 5223 return -ENOTSUP; 5224 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev, 5225 queue_id, avail_thresh)); 5226 5227 rte_eth_trace_rx_avail_thresh_query(port_id, *queue_id, ret); 5228 5229 return ret; 5230 } 5231 5232 RTE_INIT(eth_dev_init_fp_ops) 5233 { 5234 uint32_t i; 5235 5236 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 5237 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 5238 } 5239 5240 RTE_INIT(eth_dev_init_cb_lists) 5241 { 5242 uint16_t i; 5243 5244 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 5245 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 5246 } 5247 5248 int 5249 rte_eth_dev_callback_register(uint16_t port_id, 5250 enum rte_eth_event_type event, 5251 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5252 { 5253 struct rte_eth_dev *dev; 5254 struct rte_eth_dev_callback *user_cb; 5255 uint16_t next_port; 5256 uint16_t last_port; 5257 5258 if (cb_fn == NULL) { 5259 RTE_ETHDEV_LOG(ERR, 5260 "Cannot register ethdev port %u callback from NULL\n", 5261 port_id); 5262 return -EINVAL; 5263 } 5264 5265 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5266 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 5267 return -EINVAL; 5268 } 5269 5270 if (port_id == RTE_ETH_ALL) { 5271 next_port = 0; 5272 last_port = RTE_MAX_ETHPORTS - 1; 5273 } else { 5274 next_port = last_port = port_id; 5275 } 5276 5277 rte_spinlock_lock(&eth_dev_cb_lock); 5278 5279 do { 5280 dev = &rte_eth_devices[next_port]; 5281 5282 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 5283 if (user_cb->cb_fn == cb_fn && 5284 user_cb->cb_arg == cb_arg && 5285 user_cb->event == event) { 5286 break; 5287 } 5288 } 5289 5290 /* create a new callback. 
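 * If the allocation fails while registering for RTE_ETH_ALL, the
 * callbacks already added to earlier ports are unregistered again
 * before -ENOMEM is returned.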
*/ 5291 if (user_cb == NULL) { 5292 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 5293 sizeof(struct rte_eth_dev_callback), 0); 5294 if (user_cb != NULL) { 5295 user_cb->cb_fn = cb_fn; 5296 user_cb->cb_arg = cb_arg; 5297 user_cb->event = event; 5298 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 5299 user_cb, next); 5300 } else { 5301 rte_spinlock_unlock(&eth_dev_cb_lock); 5302 rte_eth_dev_callback_unregister(port_id, event, 5303 cb_fn, cb_arg); 5304 return -ENOMEM; 5305 } 5306 5307 } 5308 } while (++next_port <= last_port); 5309 5310 rte_spinlock_unlock(&eth_dev_cb_lock); 5311 5312 rte_ethdev_trace_callback_register(port_id, event, cb_fn, cb_arg); 5313 5314 return 0; 5315 } 5316 5317 int 5318 rte_eth_dev_callback_unregister(uint16_t port_id, 5319 enum rte_eth_event_type event, 5320 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5321 { 5322 int ret; 5323 struct rte_eth_dev *dev; 5324 struct rte_eth_dev_callback *cb, *next; 5325 uint16_t next_port; 5326 uint16_t last_port; 5327 5328 if (cb_fn == NULL) { 5329 RTE_ETHDEV_LOG(ERR, 5330 "Cannot unregister ethdev port %u callback from NULL\n", 5331 port_id); 5332 return -EINVAL; 5333 } 5334 5335 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5336 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 5337 return -EINVAL; 5338 } 5339 5340 if (port_id == RTE_ETH_ALL) { 5341 next_port = 0; 5342 last_port = RTE_MAX_ETHPORTS - 1; 5343 } else { 5344 next_port = last_port = port_id; 5345 } 5346 5347 rte_spinlock_lock(&eth_dev_cb_lock); 5348 5349 do { 5350 dev = &rte_eth_devices[next_port]; 5351 ret = 0; 5352 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 5353 cb = next) { 5354 5355 next = TAILQ_NEXT(cb, next); 5356 5357 if (cb->cb_fn != cb_fn || cb->event != event || 5358 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 5359 continue; 5360 5361 /* 5362 * if this callback is not executing right now, 5363 * then remove it. 
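 * Otherwise leave it in place and report -EAGAIN so the caller
 * can retry once the callback has finished executing.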
5364 */ 5365 if (cb->active == 0) { 5366 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 5367 rte_free(cb); 5368 } else { 5369 ret = -EAGAIN; 5370 } 5371 } 5372 } while (++next_port <= last_port); 5373 5374 rte_spinlock_unlock(&eth_dev_cb_lock); 5375 5376 rte_ethdev_trace_callback_unregister(port_id, event, cb_fn, cb_arg, 5377 ret); 5378 5379 return ret; 5380 } 5381 5382 int 5383 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 5384 { 5385 uint32_t vec; 5386 struct rte_eth_dev *dev; 5387 struct rte_intr_handle *intr_handle; 5388 uint16_t qid; 5389 int rc; 5390 5391 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5392 dev = &rte_eth_devices[port_id]; 5393 5394 if (!dev->intr_handle) { 5395 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5396 return -ENOTSUP; 5397 } 5398 5399 intr_handle = dev->intr_handle; 5400 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5401 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5402 return -EPERM; 5403 } 5404 5405 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 5406 vec = rte_intr_vec_list_index_get(intr_handle, qid); 5407 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5408 5409 rte_ethdev_trace_rx_intr_ctl(port_id, qid, epfd, op, data, rc); 5410 5411 if (rc && rc != -EEXIST) { 5412 RTE_ETHDEV_LOG(ERR, 5413 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5414 port_id, qid, op, epfd, vec); 5415 } 5416 } 5417 5418 return 0; 5419 } 5420 5421 int 5422 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 5423 { 5424 struct rte_intr_handle *intr_handle; 5425 struct rte_eth_dev *dev; 5426 unsigned int efd_idx; 5427 uint32_t vec; 5428 int fd; 5429 5430 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 5431 dev = &rte_eth_devices[port_id]; 5432 5433 if (queue_id >= dev->data->nb_rx_queues) { 5434 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5435 return -1; 5436 } 5437 5438 if (!dev->intr_handle) { 5439 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5440 return -1; 5441 } 5442 5443 intr_handle = dev->intr_handle; 5444 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5445 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5446 return -1; 5447 } 5448 5449 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5450 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
5451 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 5452 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 5453 5454 rte_ethdev_trace_rx_intr_ctl_q_get_fd(port_id, queue_id, fd); 5455 5456 return fd; 5457 } 5458 5459 int 5460 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 5461 int epfd, int op, void *data) 5462 { 5463 uint32_t vec; 5464 struct rte_eth_dev *dev; 5465 struct rte_intr_handle *intr_handle; 5466 int rc; 5467 5468 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5469 dev = &rte_eth_devices[port_id]; 5470 5471 if (queue_id >= dev->data->nb_rx_queues) { 5472 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5473 return -EINVAL; 5474 } 5475 5476 if (!dev->intr_handle) { 5477 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5478 return -ENOTSUP; 5479 } 5480 5481 intr_handle = dev->intr_handle; 5482 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5483 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5484 return -EPERM; 5485 } 5486 5487 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5488 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5489 5490 rte_ethdev_trace_rx_intr_ctl_q(port_id, queue_id, epfd, op, data, rc); 5491 5492 if (rc && rc != -EEXIST) { 5493 RTE_ETHDEV_LOG(ERR, 5494 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5495 port_id, queue_id, op, epfd, vec); 5496 return rc; 5497 } 5498 5499 return 0; 5500 } 5501 5502 int 5503 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5504 uint16_t queue_id) 5505 { 5506 struct rte_eth_dev *dev; 5507 int ret; 5508 5509 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5510 dev = &rte_eth_devices[port_id]; 5511 5512 ret = eth_dev_validate_rx_queue(dev, queue_id); 5513 if (ret != 0) 5514 return ret; 5515 5516 if (*dev->dev_ops->rx_queue_intr_enable == NULL) 5517 return -ENOTSUP; 5518 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5519 5520 rte_ethdev_trace_rx_intr_enable(port_id, queue_id, ret); 5521 5522 return ret; 5523 } 5524 5525 int 5526 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5527 uint16_t queue_id) 5528 { 5529 struct rte_eth_dev *dev; 5530 int ret; 5531 5532 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5533 dev = &rte_eth_devices[port_id]; 5534 5535 ret = eth_dev_validate_rx_queue(dev, queue_id); 5536 if (ret != 0) 5537 return ret; 5538 5539 if (*dev->dev_ops->rx_queue_intr_disable == NULL) 5540 return -ENOTSUP; 5541 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5542 5543 rte_ethdev_trace_rx_intr_disable(port_id, queue_id, ret); 5544 5545 return ret; 5546 } 5547 5548 5549 const struct rte_eth_rxtx_callback * 5550 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5551 rte_rx_callback_fn fn, void *user_param) 5552 { 5553 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5554 rte_errno = ENOTSUP; 5555 return NULL; 5556 #endif 5557 struct rte_eth_dev *dev; 5558 5559 /* check input parameters */ 5560 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5561 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5562 rte_errno = EINVAL; 5563 return NULL; 5564 } 5565 dev = &rte_eth_devices[port_id]; 5566 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5567 rte_errno = EINVAL; 5568 return NULL; 5569 } 5570 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5571 5572 if (cb == NULL) { 5573 rte_errno = ENOMEM; 5574 return NULL; 5575 } 5576 5577 cb->fn.rx = fn; 5578 cb->param = user_param; 5579 5580 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5581 /* Add the callbacks in fifo order. 
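 * The list is only appended while holding eth_dev_rx_cb_lock, but the
 * data path may traverse it concurrently without the lock; the release
 * stores below therefore publish a fully initialised entry.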
*/ 5582 struct rte_eth_rxtx_callback *tail = 5583 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5584 5585 if (!tail) { 5586 /* Stores to cb->fn and cb->param should complete before 5587 * cb is visible to data plane. 5588 */ 5589 __atomic_store_n( 5590 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5591 cb, __ATOMIC_RELEASE); 5592 5593 } else { 5594 while (tail->next) 5595 tail = tail->next; 5596 /* Stores to cb->fn and cb->param should complete before 5597 * cb is visible to data plane. 5598 */ 5599 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5600 } 5601 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5602 5603 rte_eth_trace_add_rx_callback(port_id, queue_id, fn, user_param, cb); 5604 5605 return cb; 5606 } 5607 5608 const struct rte_eth_rxtx_callback * 5609 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5610 rte_rx_callback_fn fn, void *user_param) 5611 { 5612 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5613 rte_errno = ENOTSUP; 5614 return NULL; 5615 #endif 5616 /* check input parameters */ 5617 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5618 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5619 rte_errno = EINVAL; 5620 return NULL; 5621 } 5622 5623 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5624 5625 if (cb == NULL) { 5626 rte_errno = ENOMEM; 5627 return NULL; 5628 } 5629 5630 cb->fn.rx = fn; 5631 cb->param = user_param; 5632 5633 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5634 /* Add the callbacks at first position */ 5635 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5636 /* Stores to cb->fn, cb->param and cb->next should complete before 5637 * cb is visible to data plane threads. 5638 */ 5639 __atomic_store_n( 5640 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5641 cb, __ATOMIC_RELEASE); 5642 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5643 5644 rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param, 5645 cb); 5646 5647 return cb; 5648 } 5649 5650 const struct rte_eth_rxtx_callback * 5651 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5652 rte_tx_callback_fn fn, void *user_param) 5653 { 5654 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5655 rte_errno = ENOTSUP; 5656 return NULL; 5657 #endif 5658 struct rte_eth_dev *dev; 5659 5660 /* check input parameters */ 5661 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5662 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5663 rte_errno = EINVAL; 5664 return NULL; 5665 } 5666 5667 dev = &rte_eth_devices[port_id]; 5668 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5669 rte_errno = EINVAL; 5670 return NULL; 5671 } 5672 5673 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5674 5675 if (cb == NULL) { 5676 rte_errno = ENOMEM; 5677 return NULL; 5678 } 5679 5680 cb->fn.tx = fn; 5681 cb->param = user_param; 5682 5683 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5684 /* Add the callbacks in fifo order. */ 5685 struct rte_eth_rxtx_callback *tail = 5686 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5687 5688 if (!tail) { 5689 /* Stores to cb->fn and cb->param should complete before 5690 * cb is visible to data plane. 5691 */ 5692 __atomic_store_n( 5693 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5694 cb, __ATOMIC_RELEASE); 5695 5696 } else { 5697 while (tail->next) 5698 tail = tail->next; 5699 /* Stores to cb->fn and cb->param should complete before 5700 * cb is visible to data plane. 
5701 */ 5702 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5703 } 5704 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5705 5706 rte_eth_trace_add_tx_callback(port_id, queue_id, fn, user_param, cb); 5707 5708 return cb; 5709 } 5710 5711 int 5712 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5713 const struct rte_eth_rxtx_callback *user_cb) 5714 { 5715 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5716 return -ENOTSUP; 5717 #endif 5718 /* Check input parameters. */ 5719 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5720 if (user_cb == NULL || 5721 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5722 return -EINVAL; 5723 5724 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5725 struct rte_eth_rxtx_callback *cb; 5726 struct rte_eth_rxtx_callback **prev_cb; 5727 int ret = -EINVAL; 5728 5729 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5730 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5731 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5732 cb = *prev_cb; 5733 if (cb == user_cb) { 5734 /* Remove the user cb from the callback list. */ 5735 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5736 ret = 0; 5737 break; 5738 } 5739 } 5740 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5741 5742 rte_eth_trace_remove_rx_callback(port_id, queue_id, user_cb, ret); 5743 5744 return ret; 5745 } 5746 5747 int 5748 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5749 const struct rte_eth_rxtx_callback *user_cb) 5750 { 5751 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5752 return -ENOTSUP; 5753 #endif 5754 /* Check input parameters. */ 5755 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5756 if (user_cb == NULL || 5757 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5758 return -EINVAL; 5759 5760 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5761 int ret = -EINVAL; 5762 struct rte_eth_rxtx_callback *cb; 5763 struct rte_eth_rxtx_callback **prev_cb; 5764 5765 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5766 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5767 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5768 cb = *prev_cb; 5769 if (cb == user_cb) { 5770 /* Remove the user cb from the callback list. 
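 * Note that the callback structure is only unlinked here, not
 * freed; the caller may free it only once no data-plane thread
 * can still be executing it.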
*/ 5771 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5772 ret = 0; 5773 break; 5774 } 5775 } 5776 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5777 5778 rte_eth_trace_remove_tx_callback(port_id, queue_id, user_cb, ret); 5779 5780 return ret; 5781 } 5782 5783 int 5784 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5785 struct rte_eth_rxq_info *qinfo) 5786 { 5787 struct rte_eth_dev *dev; 5788 5789 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5790 dev = &rte_eth_devices[port_id]; 5791 5792 if (queue_id >= dev->data->nb_rx_queues) { 5793 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5794 return -EINVAL; 5795 } 5796 5797 if (qinfo == NULL) { 5798 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5799 port_id, queue_id); 5800 return -EINVAL; 5801 } 5802 5803 if (dev->data->rx_queues == NULL || 5804 dev->data->rx_queues[queue_id] == NULL) { 5805 RTE_ETHDEV_LOG(ERR, 5806 "Rx queue %"PRIu16" of device with port_id=%" 5807 PRIu16" has not been setup\n", 5808 queue_id, port_id); 5809 return -EINVAL; 5810 } 5811 5812 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5813 RTE_ETHDEV_LOG(INFO, 5814 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5815 queue_id, port_id); 5816 return -EINVAL; 5817 } 5818 5819 if (*dev->dev_ops->rxq_info_get == NULL) 5820 return -ENOTSUP; 5821 5822 memset(qinfo, 0, sizeof(*qinfo)); 5823 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5824 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5825 5826 rte_eth_trace_rx_queue_info_get(port_id, queue_id, qinfo); 5827 5828 return 0; 5829 } 5830 5831 int 5832 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5833 struct rte_eth_txq_info *qinfo) 5834 { 5835 struct rte_eth_dev *dev; 5836 5837 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5838 dev = &rte_eth_devices[port_id]; 5839 5840 if (queue_id >= dev->data->nb_tx_queues) { 5841 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5842 return -EINVAL; 5843 } 5844 5845 if (qinfo == NULL) { 5846 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5847 port_id, queue_id); 5848 return -EINVAL; 5849 } 5850 5851 if (dev->data->tx_queues == NULL || 5852 dev->data->tx_queues[queue_id] == NULL) { 5853 RTE_ETHDEV_LOG(ERR, 5854 "Tx queue %"PRIu16" of device with port_id=%" 5855 PRIu16" has not been setup\n", 5856 queue_id, port_id); 5857 return -EINVAL; 5858 } 5859 5860 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5861 RTE_ETHDEV_LOG(INFO, 5862 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5863 queue_id, port_id); 5864 return -EINVAL; 5865 } 5866 5867 if (*dev->dev_ops->txq_info_get == NULL) 5868 return -ENOTSUP; 5869 5870 memset(qinfo, 0, sizeof(*qinfo)); 5871 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5872 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5873 5874 rte_eth_trace_tx_queue_info_get(port_id, queue_id, qinfo); 5875 5876 return 0; 5877 } 5878 5879 int 5880 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5881 struct rte_eth_burst_mode *mode) 5882 { 5883 struct rte_eth_dev *dev; 5884 int ret; 5885 5886 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5887 dev = &rte_eth_devices[port_id]; 5888 5889 if (queue_id >= dev->data->nb_rx_queues) { 5890 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5891 return -EINVAL; 5892 } 5893 5894 if (mode == NULL) { 5895 RTE_ETHDEV_LOG(ERR, 5896 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 
5897 port_id, queue_id); 5898 return -EINVAL; 5899 } 5900 5901 if (*dev->dev_ops->rx_burst_mode_get == NULL) 5902 return -ENOTSUP; 5903 memset(mode, 0, sizeof(*mode)); 5904 ret = eth_err(port_id, 5905 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5906 5907 rte_eth_trace_rx_burst_mode_get(port_id, queue_id, mode, ret); 5908 5909 return ret; 5910 } 5911 5912 int 5913 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5914 struct rte_eth_burst_mode *mode) 5915 { 5916 struct rte_eth_dev *dev; 5917 int ret; 5918 5919 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5920 dev = &rte_eth_devices[port_id]; 5921 5922 if (queue_id >= dev->data->nb_tx_queues) { 5923 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5924 return -EINVAL; 5925 } 5926 5927 if (mode == NULL) { 5928 RTE_ETHDEV_LOG(ERR, 5929 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5930 port_id, queue_id); 5931 return -EINVAL; 5932 } 5933 5934 if (*dev->dev_ops->tx_burst_mode_get == NULL) 5935 return -ENOTSUP; 5936 memset(mode, 0, sizeof(*mode)); 5937 ret = eth_err(port_id, 5938 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5939 5940 rte_eth_trace_tx_burst_mode_get(port_id, queue_id, mode, ret); 5941 5942 return ret; 5943 } 5944 5945 int 5946 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5947 struct rte_power_monitor_cond *pmc) 5948 { 5949 struct rte_eth_dev *dev; 5950 int ret; 5951 5952 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5953 dev = &rte_eth_devices[port_id]; 5954 5955 if (queue_id >= dev->data->nb_rx_queues) { 5956 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5957 return -EINVAL; 5958 } 5959 5960 if (pmc == NULL) { 5961 RTE_ETHDEV_LOG(ERR, 5962 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5963 port_id, queue_id); 5964 return -EINVAL; 5965 } 5966 5967 if (*dev->dev_ops->get_monitor_addr == NULL) 5968 return -ENOTSUP; 5969 ret = eth_err(port_id, 5970 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5971 5972 rte_eth_trace_get_monitor_addr(port_id, queue_id, pmc, ret); 5973 5974 return ret; 5975 } 5976 5977 int 5978 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5979 struct rte_ether_addr *mc_addr_set, 5980 uint32_t nb_mc_addr) 5981 { 5982 struct rte_eth_dev *dev; 5983 int ret; 5984 5985 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5986 dev = &rte_eth_devices[port_id]; 5987 5988 if (*dev->dev_ops->set_mc_addr_list == NULL) 5989 return -ENOTSUP; 5990 ret = eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5991 mc_addr_set, nb_mc_addr)); 5992 5993 rte_ethdev_trace_set_mc_addr_list(port_id, mc_addr_set, nb_mc_addr, 5994 ret); 5995 5996 return ret; 5997 } 5998 5999 int 6000 rte_eth_timesync_enable(uint16_t port_id) 6001 { 6002 struct rte_eth_dev *dev; 6003 int ret; 6004 6005 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6006 dev = &rte_eth_devices[port_id]; 6007 6008 if (*dev->dev_ops->timesync_enable == NULL) 6009 return -ENOTSUP; 6010 ret = eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 6011 6012 rte_eth_trace_timesync_enable(port_id, ret); 6013 6014 return ret; 6015 } 6016 6017 int 6018 rte_eth_timesync_disable(uint16_t port_id) 6019 { 6020 struct rte_eth_dev *dev; 6021 int ret; 6022 6023 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6024 dev = &rte_eth_devices[port_id]; 6025 6026 if (*dev->dev_ops->timesync_disable == NULL) 6027 return -ENOTSUP; 6028 ret = eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 6029 6030 
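	/*
	 * Illustrative sketch (not part of the library itself): the usual
	 * IEEE 1588/PTP flow is to enable timesync once and then poll the
	 * timestamp registers after matching packets.  "port_id" is assumed
	 * valid and "flags" is the device-specific value documented for
	 * rte_eth_timesync_read_rx_timestamp() (e.g. a timestamp register
	 * index); error handling is omitted.
	 *
	 *	struct timespec ts;
	 *
	 *	rte_eth_timesync_enable(port_id);
	 *	... receive or transmit a PTP packet ...
	 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, flags) == 0)
	 *		... ts now holds the hardware Rx timestamp ...
	 */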
rte_eth_trace_timesync_disable(port_id, ret); 6031 6032 return ret; 6033 } 6034 6035 int 6036 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 6037 uint32_t flags) 6038 { 6039 struct rte_eth_dev *dev; 6040 int ret; 6041 6042 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6043 dev = &rte_eth_devices[port_id]; 6044 6045 if (timestamp == NULL) { 6046 RTE_ETHDEV_LOG(ERR, 6047 "Cannot read ethdev port %u Rx timestamp to NULL\n", 6048 port_id); 6049 return -EINVAL; 6050 } 6051 6052 if (*dev->dev_ops->timesync_read_rx_timestamp == NULL) 6053 return -ENOTSUP; 6054 6055 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 6056 (dev, timestamp, flags)); 6057 6058 rte_eth_trace_timesync_read_rx_timestamp(port_id, timestamp, flags, 6059 ret); 6060 6061 return ret; 6062 } 6063 6064 int 6065 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 6066 struct timespec *timestamp) 6067 { 6068 struct rte_eth_dev *dev; 6069 int ret; 6070 6071 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6072 dev = &rte_eth_devices[port_id]; 6073 6074 if (timestamp == NULL) { 6075 RTE_ETHDEV_LOG(ERR, 6076 "Cannot read ethdev port %u Tx timestamp to NULL\n", 6077 port_id); 6078 return -EINVAL; 6079 } 6080 6081 if (*dev->dev_ops->timesync_read_tx_timestamp == NULL) 6082 return -ENOTSUP; 6083 6084 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 6085 (dev, timestamp)); 6086 6087 rte_eth_trace_timesync_read_tx_timestamp(port_id, timestamp, ret); 6088 6089 return ret; 6090 6091 } 6092 6093 int 6094 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 6095 { 6096 struct rte_eth_dev *dev; 6097 int ret; 6098 6099 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6100 dev = &rte_eth_devices[port_id]; 6101 6102 if (*dev->dev_ops->timesync_adjust_time == NULL) 6103 return -ENOTSUP; 6104 ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 6105 6106 rte_eth_trace_timesync_adjust_time(port_id, delta, ret); 6107 6108 return ret; 6109 } 6110 6111 int 6112 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 6113 { 6114 struct rte_eth_dev *dev; 6115 int ret; 6116 6117 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6118 dev = &rte_eth_devices[port_id]; 6119 6120 if (timestamp == NULL) { 6121 RTE_ETHDEV_LOG(ERR, 6122 "Cannot read ethdev port %u timesync time to NULL\n", 6123 port_id); 6124 return -EINVAL; 6125 } 6126 6127 if (*dev->dev_ops->timesync_read_time == NULL) 6128 return -ENOTSUP; 6129 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 6130 timestamp)); 6131 6132 rte_eth_trace_timesync_read_time(port_id, timestamp, ret); 6133 6134 return ret; 6135 } 6136 6137 int 6138 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 6139 { 6140 struct rte_eth_dev *dev; 6141 int ret; 6142 6143 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6144 dev = &rte_eth_devices[port_id]; 6145 6146 if (timestamp == NULL) { 6147 RTE_ETHDEV_LOG(ERR, 6148 "Cannot write ethdev port %u timesync from NULL time\n", 6149 port_id); 6150 return -EINVAL; 6151 } 6152 6153 if (*dev->dev_ops->timesync_write_time == NULL) 6154 return -ENOTSUP; 6155 ret = eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 6156 timestamp)); 6157 6158 rte_eth_trace_timesync_write_time(port_id, timestamp, ret); 6159 6160 return ret; 6161 } 6162 6163 int 6164 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 6165 { 6166 struct rte_eth_dev *dev; 6167 int ret; 6168 6169 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 
-ENODEV); 6170 dev = &rte_eth_devices[port_id]; 6171 6172 if (clock == NULL) { 6173 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 6174 port_id); 6175 return -EINVAL; 6176 } 6177 6178 if (*dev->dev_ops->read_clock == NULL) 6179 return -ENOTSUP; 6180 ret = eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 6181 6182 rte_eth_trace_read_clock(port_id, clock, ret); 6183 6184 return ret; 6185 } 6186 6187 int 6188 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 6189 { 6190 struct rte_eth_dev *dev; 6191 int ret; 6192 6193 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6194 dev = &rte_eth_devices[port_id]; 6195 6196 if (info == NULL) { 6197 RTE_ETHDEV_LOG(ERR, 6198 "Cannot get ethdev port %u register info to NULL\n", 6199 port_id); 6200 return -EINVAL; 6201 } 6202 6203 if (*dev->dev_ops->get_reg == NULL) 6204 return -ENOTSUP; 6205 ret = eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 6206 6207 rte_ethdev_trace_get_reg_info(port_id, info, ret); 6208 6209 return ret; 6210 } 6211 6212 int 6213 rte_eth_dev_get_eeprom_length(uint16_t port_id) 6214 { 6215 struct rte_eth_dev *dev; 6216 int ret; 6217 6218 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6219 dev = &rte_eth_devices[port_id]; 6220 6221 if (*dev->dev_ops->get_eeprom_length == NULL) 6222 return -ENOTSUP; 6223 ret = eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 6224 6225 rte_ethdev_trace_get_eeprom_length(port_id, ret); 6226 6227 return ret; 6228 } 6229 6230 int 6231 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 6232 { 6233 struct rte_eth_dev *dev; 6234 int ret; 6235 6236 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6237 dev = &rte_eth_devices[port_id]; 6238 6239 if (info == NULL) { 6240 RTE_ETHDEV_LOG(ERR, 6241 "Cannot get ethdev port %u EEPROM info to NULL\n", 6242 port_id); 6243 return -EINVAL; 6244 } 6245 6246 if (*dev->dev_ops->get_eeprom == NULL) 6247 return -ENOTSUP; 6248 ret = eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 6249 6250 rte_ethdev_trace_get_eeprom(port_id, info, ret); 6251 6252 return ret; 6253 } 6254 6255 int 6256 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 6257 { 6258 struct rte_eth_dev *dev; 6259 int ret; 6260 6261 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6262 dev = &rte_eth_devices[port_id]; 6263 6264 if (info == NULL) { 6265 RTE_ETHDEV_LOG(ERR, 6266 "Cannot set ethdev port %u EEPROM from NULL info\n", 6267 port_id); 6268 return -EINVAL; 6269 } 6270 6271 if (*dev->dev_ops->set_eeprom == NULL) 6272 return -ENOTSUP; 6273 ret = eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 6274 6275 rte_ethdev_trace_set_eeprom(port_id, info, ret); 6276 6277 return ret; 6278 } 6279 6280 int 6281 rte_eth_dev_get_module_info(uint16_t port_id, 6282 struct rte_eth_dev_module_info *modinfo) 6283 { 6284 struct rte_eth_dev *dev; 6285 int ret; 6286 6287 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6288 dev = &rte_eth_devices[port_id]; 6289 6290 if (modinfo == NULL) { 6291 RTE_ETHDEV_LOG(ERR, 6292 "Cannot get ethdev port %u EEPROM module info to NULL\n", 6293 port_id); 6294 return -EINVAL; 6295 } 6296 6297 if (*dev->dev_ops->get_module_info == NULL) 6298 return -ENOTSUP; 6299 ret = (*dev->dev_ops->get_module_info)(dev, modinfo); 6300 6301 rte_ethdev_trace_get_module_info(port_id, modinfo, ret); 6302 6303 return ret; 6304 } 6305 6306 int 6307 rte_eth_dev_get_module_eeprom(uint16_t port_id, 6308 struct rte_dev_eeprom_info *info) 6309 { 6310 struct rte_eth_dev *dev; 6311 
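	/*
	 * Illustrative sketch (not part of the library itself): callers
	 * normally query the plugged module type and EEPROM size first with
	 * rte_eth_dev_get_module_info(), size the buffer accordingly and
	 * only then read the contents.  "port_id" is assumed valid and
	 * error handling is omitted.
	 *
	 *	struct rte_eth_dev_module_info minfo;
	 *	struct rte_dev_eeprom_info einfo;
	 *
	 *	rte_eth_dev_get_module_info(port_id, &minfo);
	 *	einfo.offset = 0;
	 *	einfo.length = minfo.eeprom_len;
	 *	einfo.data = malloc(minfo.eeprom_len);
	 *	rte_eth_dev_get_module_eeprom(port_id, &einfo);
	 */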
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (info->data == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM data to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (info->length == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM to data with zero size\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_module_eeprom == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->get_module_eeprom)(dev, info);

	rte_ethdev_trace_get_module_eeprom(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dcb_info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u DCB info to NULL\n",
			port_id);
		return -EINVAL;
	}

	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	if (*dev->dev_ops->get_dcb_info == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));

	rte_ethdev_trace_get_dcb_info(port_id, dcb_info, ret);

	return ret;
}

/*
 * Clamp a requested descriptor count to the device limits:
 * align it up first, then cap it at nb_max and raise it to nb_min.
 */
static void
eth_dev_adjust_nb_desc(uint16_t *nb_desc,
		       const struct rte_eth_desc_lim *desc_lim)
{
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}

int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (nb_rx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	rte_ethdev_trace_adjust_nb_rx_tx_desc(port_id);

	return 0;
}

int
rte_eth_dev_hairpin_capability_get(uint16_t port_id,
				   struct rte_eth_hairpin_cap *cap)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (cap == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u hairpin capability to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->hairpin_cap_get == NULL)
		return -ENOTSUP;
	memset(cap, 0, sizeof(*cap));
	ret = eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));

	rte_ethdev_trace_hairpin_capability_get(port_id, cap, ret);

	return ret;
}

int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (pool == NULL) {
		RTE_ETHDEV_LOG(ERR,
"Cannot test ethdev port %u mempool operation from NULL pool\n", 6451 port_id); 6452 return -EINVAL; 6453 } 6454 6455 if (*dev->dev_ops->pool_ops_supported == NULL) 6456 return 1; /* all pools are supported */ 6457 6458 ret = (*dev->dev_ops->pool_ops_supported)(dev, pool); 6459 6460 rte_ethdev_trace_pool_ops_supported(port_id, pool, ret); 6461 6462 return ret; 6463 } 6464 6465 int 6466 rte_eth_representor_info_get(uint16_t port_id, 6467 struct rte_eth_representor_info *info) 6468 { 6469 struct rte_eth_dev *dev; 6470 int ret; 6471 6472 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6473 dev = &rte_eth_devices[port_id]; 6474 6475 if (*dev->dev_ops->representor_info_get == NULL) 6476 return -ENOTSUP; 6477 ret = eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info)); 6478 6479 rte_eth_trace_representor_info_get(port_id, info, ret); 6480 6481 return ret; 6482 } 6483 6484 int 6485 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features) 6486 { 6487 struct rte_eth_dev *dev; 6488 int ret; 6489 6490 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6491 dev = &rte_eth_devices[port_id]; 6492 6493 if (dev->data->dev_configured != 0) { 6494 RTE_ETHDEV_LOG(ERR, 6495 "The port (ID=%"PRIu16") is already configured\n", 6496 port_id); 6497 return -EBUSY; 6498 } 6499 6500 if (features == NULL) { 6501 RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n"); 6502 return -EINVAL; 6503 } 6504 6505 if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0 && 6506 rte_flow_restore_info_dynflag_register() < 0) 6507 *features &= ~RTE_ETH_RX_METADATA_TUNNEL_ID; 6508 6509 if (*dev->dev_ops->rx_metadata_negotiate == NULL) 6510 return -ENOTSUP; 6511 ret = eth_err(port_id, 6512 (*dev->dev_ops->rx_metadata_negotiate)(dev, features)); 6513 6514 rte_eth_trace_rx_metadata_negotiate(port_id, *features, ret); 6515 6516 return ret; 6517 } 6518 6519 int 6520 rte_eth_ip_reassembly_capability_get(uint16_t port_id, 6521 struct rte_eth_ip_reassembly_params *reassembly_capa) 6522 { 6523 struct rte_eth_dev *dev; 6524 int ret; 6525 6526 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6527 dev = &rte_eth_devices[port_id]; 6528 6529 if (dev->data->dev_configured == 0) { 6530 RTE_ETHDEV_LOG(ERR, 6531 "Device with port_id=%u is not configured.\n" 6532 "Cannot get IP reassembly capability\n", 6533 port_id); 6534 return -EINVAL; 6535 } 6536 6537 if (reassembly_capa == NULL) { 6538 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL"); 6539 return -EINVAL; 6540 } 6541 6542 if (*dev->dev_ops->ip_reassembly_capability_get == NULL) 6543 return -ENOTSUP; 6544 memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params)); 6545 6546 ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get) 6547 (dev, reassembly_capa)); 6548 6549 rte_eth_trace_ip_reassembly_capability_get(port_id, reassembly_capa, 6550 ret); 6551 6552 return ret; 6553 } 6554 6555 int 6556 rte_eth_ip_reassembly_conf_get(uint16_t port_id, 6557 struct rte_eth_ip_reassembly_params *conf) 6558 { 6559 struct rte_eth_dev *dev; 6560 int ret; 6561 6562 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6563 dev = &rte_eth_devices[port_id]; 6564 6565 if (dev->data->dev_configured == 0) { 6566 RTE_ETHDEV_LOG(ERR, 6567 "Device with port_id=%u is not configured.\n" 6568 "Cannot get IP reassembly configuration\n", 6569 port_id); 6570 return -EINVAL; 6571 } 6572 6573 if (conf == NULL) { 6574 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL"); 6575 return -EINVAL; 6576 } 6577 6578 if (*dev->dev_ops->ip_reassembly_conf_get == NULL) 6579 
		return -ENOTSUP;
	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
	ret = eth_err(port_id,
		      (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_get(port_id, conf, ret);

	return ret;
}

int
rte_eth_ip_reassembly_conf_set(uint16_t port_id,
			       const struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot set IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u started, cannot configure IP reassembly params\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid IP reassembly configuration (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		      (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_set(port_id, conf, ret);

	return ret;
}

int
rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_dev_priv_dump == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
}

int
rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset, uint16_t num, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_rx_descriptor_dump == NULL)
		return -ENOTSUP;

	return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev,
			queue_id, offset, num, file));
}

int
rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset, uint16_t num, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_tx_descriptor_dump == NULL)
		return -ENOTSUP;

	return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev,
			queue_id, offset, num, file));
}

int
rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_types;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (ptypes == NULL && num > 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u supported header protocol types to NULL when array size is non zero\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL)
		return -ENOTSUP;
	all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev);

	if (all_types == NULL)
		return 0;

	for (i = 0, j = 0; all_types[i] != RTE_PTYPE_UNKNOWN; ++i) {
		if (j < num) {
			ptypes[j] = all_types[i];

			rte_eth_trace_buffer_split_get_supported_hdr_ptypes(
							port_id, j, ptypes[j]);
		}
		j++;
	}

	return j;
}

int rte_eth_dev_count_aggr_ports(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->count_aggr_ports == NULL)
		return 0;
	ret = eth_err(port_id, (*dev->dev_ops->count_aggr_ports)(dev));

	rte_eth_trace_count_aggr_ports(port_id, ret);

	return ret;
}

int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
				     uint8_t affinity)
{
	struct rte_eth_dev *dev;
	int aggr_ports;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->map_aggr_tx_affinity == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be configured before Tx affinity mapping\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	aggr_ports = rte_eth_dev_count_aggr_ports(port_id);
	if (aggr_ports == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u has no aggregated port\n",
			port_id);
		return -ENOTSUP;
	}

	if (affinity > aggr_ports) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u: invalid affinity %u, exceeds the number of aggregated ports %u\n",
			port_id, affinity, aggr_ports);
		return -EINVAL;
	}

	ret = eth_err(port_id, (*dev->dev_ops->map_aggr_tx_affinity)(dev,
			tx_queue_id, affinity));

	rte_eth_trace_map_aggr_tx_affinity(port_id, tx_queue_id, affinity, ret);

	return ret;
}

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
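
/*
 * Illustrative usage sketch (not part of the library): an application would
 * typically pass its preferred ring sizes through
 * rte_eth_dev_adjust_nb_rx_tx_desc() so they are aligned to nb_align and
 * clamped to [nb_min, nb_max] before queue setup.  The descriptor counts and
 * the mb_pool name below are placeholders for the example only.
 *
 *	uint16_t nb_rxd = 1024;
 *	uint16_t nb_txd = 1024;
 *	int ret;
 *
 *	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	if (ret != 0)
 *		return ret;
 *	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *			rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *	if (ret != 0)
 *		return ret;
 *	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
 *			rte_eth_dev_socket_id(port_id), NULL);
 */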