/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <bus_driver.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_kvargs.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev.h"
#include "rte_ethdev_trace_fp.h"
#include "ethdev_driver.h"
#include "rte_flow_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "ethdev_trace.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
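/*
 * Illustrative sketch (not part of the library sources): the name/offset
 * tables above let generic xstats code read any basic counter out of
 * struct rte_eth_stats without naming each field.  A minimal usage sketch,
 * assuming 'port_id' is a valid, started port:
 *
 *	struct rte_eth_stats stats;
 *	unsigned int i;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0) {
 *		for (i = 0; i < RTE_NB_STATS; i++) {
 *			uint64_t v = *(const uint64_t *)
 *				((const char *)&stats +
 *				 eth_dev_stats_strings[i].offset);
 *			printf("%s: %" PRIu64 "\n",
 *			       eth_dev_stats_strings[i].name, v);
 *		}
 *	}
 */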
#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};
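/*
 * Illustrative sketch (not part of the library sources): the bit-to-string
 * tables above back rte_eth_dev_rx_offload_name()/rte_eth_dev_tx_offload_name(),
 * which an application can use to print a capability mask one bit at a time.
 * 'port_id' is assumed to be a valid, probed port:
 *
 *	struct rte_eth_dev_info info;
 *	uint64_t capa;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0) {
 *		for (capa = info.rx_offload_capa; capa != 0;
 *		     capa &= ~RTE_BIT64(rte_ctz64(capa)))
 *			printf("Rx offload: %s\n",
 *			       rte_eth_dev_rx_offload_name(
 *					RTE_BIT64(rte_ctz64(capa))));
 *	}
 */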
int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() is not yet supporting the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);

	rte_eth_trace_iterator_init(devargs_str);

	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL) {
			uint16_t id = eth_dev_to_id(iter->class_device);

			rte_eth_trace_iterator_next(iter, id);

			return id; /* match */
		}
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */

	rte_eth_trace_iterator_cleanup(iter);

	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
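/*
 * Illustrative sketch (not part of the library sources): a typical caller
 * walks matching ports with the iterator trio above (or with the
 * RTE_ETH_FOREACH_MATCHING_DEV() convenience macro from rte_ethdev.h).
 * The devargs string below is only an example:
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (port_id = rte_eth_iterator_next(&iter);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iter))
 *			printf("matched port %u\n", port_id);
 *		rte_eth_iterator_cleanup(&iter);
 *	}
 */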
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	rte_eth_trace_find_next(port_id);

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_of(port_id, parent);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	uint16_t ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	ret = rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);

	rte_eth_trace_find_next_sibling(port_id, ref_port_id, ret);

	return ret;
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	int is_valid;

	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		is_valid = 0;
	else
		is_valid = 1;

	rte_ethdev_trace_is_valid_port(port_id, is_valid);

	return is_valid;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_owned_by(port_id, owner_id);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_new(*owner_id);

	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_set(port_id, owner, ret);

	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_unset(port_id, owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_delete(owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	rte_ethdev_trace_owner_get(port_id, owner);

	return 0;
}
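/*
 * Illustrative sketch (not part of the library sources): a control process
 * that wants exclusive use of a port typically allocates an owner ID, claims
 * the port, and later releases it.  'port_id' is assumed to be valid and the
 * owner name "my-app" is only an example:
 *
 *	struct rte_eth_dev_owner owner = { .name = "my-app" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		... use the port exclusively ...
 *		rte_eth_dev_owner_unset(port_id, owner.id);
 *	}
 */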
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	int socket_id = SOCKET_ID_ANY;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_errno = EINVAL;
	} else {
		socket_id = rte_eth_devices[port_id].data->numa_node;
		if (socket_id == SOCKET_ID_ANY)
			rte_errno = 0;
	}

	rte_ethdev_trace_socket_id(port_id, socket_id);

	return socket_id;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	void *ctx;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	ctx = rte_eth_devices[port_id].security_ctx;

	rte_ethdev_trace_get_sec_ctx(port_id, ctx);

	return ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	rte_ethdev_trace_count_avail(count);

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	rte_ethdev_trace_count_total(count);

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);

	rte_ethdev_trace_get_name_by_port(port_id, name);

	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;

			rte_ethdev_trace_get_port_by_name(name, *port_id);

			return 0;
		}

	return -ENODEV;
}
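/*
 * Illustrative sketch (not part of the library sources): the two lookups
 * above are inverses of each other.  The PCI device name below is only an
 * example:
 *
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *	uint16_t port_id;
 *
 *	if (rte_eth_dev_get_port_by_name("0000:08:00.0", &port_id) == 0 &&
 *	    rte_eth_dev_get_name_by_port(port_id, name) == 0)
 *		printf("port %u is %s\n", port_id, name);
 */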
int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Rx queue_id=%u of device with port_id=%u\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t
tx_queue_id) 752 { 753 uint16_t port_id; 754 755 if (tx_queue_id >= dev->data->nb_tx_queues) { 756 port_id = dev->data->port_id; 757 RTE_ETHDEV_LOG(ERR, 758 "Invalid Tx queue_id=%u of device with port_id=%u\n", 759 tx_queue_id, port_id); 760 return -EINVAL; 761 } 762 763 if (dev->data->tx_queues[tx_queue_id] == NULL) { 764 port_id = dev->data->port_id; 765 RTE_ETHDEV_LOG(ERR, 766 "Queue %u of device with port_id=%u has not been setup\n", 767 tx_queue_id, port_id); 768 return -EINVAL; 769 } 770 771 return 0; 772 } 773 774 int 775 rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id) 776 { 777 struct rte_eth_dev *dev; 778 779 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 780 dev = &rte_eth_devices[port_id]; 781 782 return eth_dev_validate_rx_queue(dev, queue_id); 783 } 784 785 int 786 rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id) 787 { 788 struct rte_eth_dev *dev; 789 790 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 791 dev = &rte_eth_devices[port_id]; 792 793 return eth_dev_validate_tx_queue(dev, queue_id); 794 } 795 796 int 797 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id) 798 { 799 struct rte_eth_dev *dev; 800 int ret; 801 802 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 803 dev = &rte_eth_devices[port_id]; 804 805 if (!dev->data->dev_started) { 806 RTE_ETHDEV_LOG(ERR, 807 "Port %u must be started before start any queue\n", 808 port_id); 809 return -EINVAL; 810 } 811 812 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 813 if (ret != 0) 814 return ret; 815 816 if (*dev->dev_ops->rx_queue_start == NULL) 817 return -ENOTSUP; 818 819 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 820 RTE_ETHDEV_LOG(INFO, 821 "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 822 rx_queue_id, port_id); 823 return -EINVAL; 824 } 825 826 if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 827 RTE_ETHDEV_LOG(INFO, 828 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 829 rx_queue_id, port_id); 830 return 0; 831 } 832 833 ret = eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id)); 834 835 rte_ethdev_trace_rx_queue_start(port_id, rx_queue_id, ret); 836 837 return ret; 838 } 839 840 int 841 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id) 842 { 843 struct rte_eth_dev *dev; 844 int ret; 845 846 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 847 dev = &rte_eth_devices[port_id]; 848 849 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 850 if (ret != 0) 851 return ret; 852 853 if (*dev->dev_ops->rx_queue_stop == NULL) 854 return -ENOTSUP; 855 856 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 857 RTE_ETHDEV_LOG(INFO, 858 "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 859 rx_queue_id, port_id); 860 return -EINVAL; 861 } 862 863 if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 864 RTE_ETHDEV_LOG(INFO, 865 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 866 rx_queue_id, port_id); 867 return 0; 868 } 869 870 ret = eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id)); 871 872 rte_ethdev_trace_rx_queue_stop(port_id, rx_queue_id, ret); 873 874 return ret; 875 } 876 877 int 878 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id) 879 { 880 struct rte_eth_dev *dev; 881 int ret; 882 883 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 884 dev = &rte_eth_devices[port_id]; 885 886 if (!dev->data->dev_started) { 887 RTE_ETHDEV_LOG(ERR, 888 
"Port %u must be started before start any queue\n", 889 port_id); 890 return -EINVAL; 891 } 892 893 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 894 if (ret != 0) 895 return ret; 896 897 if (*dev->dev_ops->tx_queue_start == NULL) 898 return -ENOTSUP; 899 900 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 901 RTE_ETHDEV_LOG(INFO, 902 "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 903 tx_queue_id, port_id); 904 return -EINVAL; 905 } 906 907 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 908 RTE_ETHDEV_LOG(INFO, 909 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 910 tx_queue_id, port_id); 911 return 0; 912 } 913 914 ret = eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id)); 915 916 rte_ethdev_trace_tx_queue_start(port_id, tx_queue_id, ret); 917 918 return ret; 919 } 920 921 int 922 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id) 923 { 924 struct rte_eth_dev *dev; 925 int ret; 926 927 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 928 dev = &rte_eth_devices[port_id]; 929 930 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 931 if (ret != 0) 932 return ret; 933 934 if (*dev->dev_ops->tx_queue_stop == NULL) 935 return -ENOTSUP; 936 937 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 938 RTE_ETHDEV_LOG(INFO, 939 "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 940 tx_queue_id, port_id); 941 return -EINVAL; 942 } 943 944 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 945 RTE_ETHDEV_LOG(INFO, 946 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 947 tx_queue_id, port_id); 948 return 0; 949 } 950 951 ret = eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); 952 953 rte_ethdev_trace_tx_queue_stop(port_id, tx_queue_id, ret); 954 955 return ret; 956 } 957 958 uint32_t 959 rte_eth_speed_bitflag(uint32_t speed, int duplex) 960 { 961 uint32_t ret; 962 963 switch (speed) { 964 case RTE_ETH_SPEED_NUM_10M: 965 ret = duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD; 966 break; 967 case RTE_ETH_SPEED_NUM_100M: 968 ret = duplex ? 
RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD; 969 break; 970 case RTE_ETH_SPEED_NUM_1G: 971 ret = RTE_ETH_LINK_SPEED_1G; 972 break; 973 case RTE_ETH_SPEED_NUM_2_5G: 974 ret = RTE_ETH_LINK_SPEED_2_5G; 975 break; 976 case RTE_ETH_SPEED_NUM_5G: 977 ret = RTE_ETH_LINK_SPEED_5G; 978 break; 979 case RTE_ETH_SPEED_NUM_10G: 980 ret = RTE_ETH_LINK_SPEED_10G; 981 break; 982 case RTE_ETH_SPEED_NUM_20G: 983 ret = RTE_ETH_LINK_SPEED_20G; 984 break; 985 case RTE_ETH_SPEED_NUM_25G: 986 ret = RTE_ETH_LINK_SPEED_25G; 987 break; 988 case RTE_ETH_SPEED_NUM_40G: 989 ret = RTE_ETH_LINK_SPEED_40G; 990 break; 991 case RTE_ETH_SPEED_NUM_50G: 992 ret = RTE_ETH_LINK_SPEED_50G; 993 break; 994 case RTE_ETH_SPEED_NUM_56G: 995 ret = RTE_ETH_LINK_SPEED_56G; 996 break; 997 case RTE_ETH_SPEED_NUM_100G: 998 ret = RTE_ETH_LINK_SPEED_100G; 999 break; 1000 case RTE_ETH_SPEED_NUM_200G: 1001 ret = RTE_ETH_LINK_SPEED_200G; 1002 break; 1003 case RTE_ETH_SPEED_NUM_400G: 1004 ret = RTE_ETH_LINK_SPEED_400G; 1005 break; 1006 default: 1007 ret = 0; 1008 } 1009 1010 rte_eth_trace_speed_bitflag(speed, duplex, ret); 1011 1012 return ret; 1013 } 1014 1015 const char * 1016 rte_eth_dev_rx_offload_name(uint64_t offload) 1017 { 1018 const char *name = "UNKNOWN"; 1019 unsigned int i; 1020 1021 for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) { 1022 if (offload == eth_dev_rx_offload_names[i].offload) { 1023 name = eth_dev_rx_offload_names[i].name; 1024 break; 1025 } 1026 } 1027 1028 rte_ethdev_trace_rx_offload_name(offload, name); 1029 1030 return name; 1031 } 1032 1033 const char * 1034 rte_eth_dev_tx_offload_name(uint64_t offload) 1035 { 1036 const char *name = "UNKNOWN"; 1037 unsigned int i; 1038 1039 for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) { 1040 if (offload == eth_dev_tx_offload_names[i].offload) { 1041 name = eth_dev_tx_offload_names[i].name; 1042 break; 1043 } 1044 } 1045 1046 rte_ethdev_trace_tx_offload_name(offload, name); 1047 1048 return name; 1049 } 1050 1051 static char * 1052 eth_dev_offload_names(uint64_t bitmask, char *buf, size_t size, 1053 const char *(*offload_name)(uint64_t)) 1054 { 1055 unsigned int pos = 0; 1056 int ret; 1057 1058 /* There should be at least enough space to handle those cases */ 1059 RTE_ASSERT(size >= sizeof("none") && size >= sizeof("...")); 1060 1061 if (bitmask == 0) { 1062 ret = snprintf(&buf[pos], size - pos, "none"); 1063 if (ret < 0 || pos + ret >= size) 1064 ret = 0; 1065 pos += ret; 1066 goto out; 1067 } 1068 1069 while (bitmask != 0) { 1070 uint64_t offload = RTE_BIT64(rte_ctz64(bitmask)); 1071 const char *name = offload_name(offload); 1072 1073 ret = snprintf(&buf[pos], size - pos, "%s,", name); 1074 if (ret < 0 || pos + ret >= size) { 1075 if (pos + sizeof("...") >= size) 1076 pos = size - sizeof("..."); 1077 ret = snprintf(&buf[pos], size - pos, "..."); 1078 if (ret > 0 && pos + ret < size) 1079 pos += ret; 1080 goto out; 1081 } 1082 1083 pos += ret; 1084 bitmask &= ~offload; 1085 } 1086 1087 /* Eliminate trailing comma */ 1088 pos--; 1089 out: 1090 buf[pos] = '\0'; 1091 return buf; 1092 } 1093 1094 const char * 1095 rte_eth_dev_capability_name(uint64_t capability) 1096 { 1097 const char *name = "UNKNOWN"; 1098 unsigned int i; 1099 1100 for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) { 1101 if (capability == rte_eth_dev_capa_names[i].offload) { 1102 name = rte_eth_dev_capa_names[i].name; 1103 break; 1104 } 1105 } 1106 1107 rte_ethdev_trace_capability_name(capability, name); 1108 1109 return name; 1110 } 1111 1112 static inline int 1113 
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size, 1114 uint32_t max_rx_pkt_len, uint32_t dev_info_size) 1115 { 1116 int ret = 0; 1117 1118 if (dev_info_size == 0) { 1119 if (config_size != max_rx_pkt_len) { 1120 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size" 1121 " %u != %u is not allowed\n", 1122 port_id, config_size, max_rx_pkt_len); 1123 ret = -EINVAL; 1124 } 1125 } else if (config_size > dev_info_size) { 1126 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1127 "> max allowed value %u\n", port_id, config_size, 1128 dev_info_size); 1129 ret = -EINVAL; 1130 } else if (config_size < RTE_ETHER_MIN_LEN) { 1131 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 1132 "< min allowed value %u\n", port_id, config_size, 1133 (unsigned int)RTE_ETHER_MIN_LEN); 1134 ret = -EINVAL; 1135 } 1136 return ret; 1137 } 1138 1139 /* 1140 * Validate offloads that are requested through rte_eth_dev_configure against 1141 * the offloads successfully set by the Ethernet device. 1142 * 1143 * @param port_id 1144 * The port identifier of the Ethernet device. 1145 * @param req_offloads 1146 * The offloads that have been requested through `rte_eth_dev_configure`. 1147 * @param set_offloads 1148 * The offloads successfully set by the Ethernet device. 1149 * @param offload_type 1150 * The offload type i.e. Rx/Tx string. 1151 * @param offload_name 1152 * The function that prints the offload name. 1153 * @return 1154 * - (0) if validation successful. 1155 * - (-EINVAL) if requested offload has been silently disabled. 1156 */ 1157 static int 1158 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads, 1159 uint64_t set_offloads, const char *offload_type, 1160 const char *(*offload_name)(uint64_t)) 1161 { 1162 uint64_t offloads_diff = req_offloads ^ set_offloads; 1163 uint64_t offload; 1164 int ret = 0; 1165 1166 while (offloads_diff != 0) { 1167 /* Check if any offload is requested but not enabled. */ 1168 offload = RTE_BIT64(rte_ctz64(offloads_diff)); 1169 if (offload & req_offloads) { 1170 RTE_ETHDEV_LOG(ERR, 1171 "Port %u failed to enable %s offload %s\n", 1172 port_id, offload_type, offload_name(offload)); 1173 ret = -EINVAL; 1174 } 1175 1176 /* Check if offload couldn't be disabled. 
*/ 1177 if (offload & set_offloads) { 1178 RTE_ETHDEV_LOG(DEBUG, 1179 "Port %u %s offload %s is not requested but enabled\n", 1180 port_id, offload_type, offload_name(offload)); 1181 } 1182 1183 offloads_diff &= ~offload; 1184 } 1185 1186 return ret; 1187 } 1188 1189 static uint32_t 1190 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1191 { 1192 uint32_t overhead_len; 1193 1194 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1195 overhead_len = max_rx_pktlen - max_mtu; 1196 else 1197 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1198 1199 return overhead_len; 1200 } 1201 1202 /* rte_eth_dev_info_get() should be called prior to this function */ 1203 static int 1204 eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info, 1205 uint16_t mtu) 1206 { 1207 uint32_t overhead_len; 1208 uint32_t frame_size; 1209 1210 if (mtu < dev_info->min_mtu) { 1211 RTE_ETHDEV_LOG(ERR, 1212 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1213 mtu, dev_info->min_mtu, port_id); 1214 return -EINVAL; 1215 } 1216 if (mtu > dev_info->max_mtu) { 1217 RTE_ETHDEV_LOG(ERR, 1218 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1219 mtu, dev_info->max_mtu, port_id); 1220 return -EINVAL; 1221 } 1222 1223 overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen, 1224 dev_info->max_mtu); 1225 frame_size = mtu + overhead_len; 1226 if (frame_size < RTE_ETHER_MIN_LEN) { 1227 RTE_ETHDEV_LOG(ERR, 1228 "Frame size (%u) < min frame size (%u) for port_id %u\n", 1229 frame_size, RTE_ETHER_MIN_LEN, port_id); 1230 return -EINVAL; 1231 } 1232 1233 if (frame_size > dev_info->max_rx_pktlen) { 1234 RTE_ETHDEV_LOG(ERR, 1235 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1236 frame_size, dev_info->max_rx_pktlen, port_id); 1237 return -EINVAL; 1238 } 1239 1240 return 0; 1241 } 1242 1243 int 1244 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, 1245 const struct rte_eth_conf *dev_conf) 1246 { 1247 struct rte_eth_dev *dev; 1248 struct rte_eth_dev_info dev_info; 1249 struct rte_eth_conf orig_conf; 1250 int diag; 1251 int ret; 1252 uint16_t old_mtu; 1253 1254 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1255 dev = &rte_eth_devices[port_id]; 1256 1257 if (dev_conf == NULL) { 1258 RTE_ETHDEV_LOG(ERR, 1259 "Cannot configure ethdev port %u from NULL config\n", 1260 port_id); 1261 return -EINVAL; 1262 } 1263 1264 if (*dev->dev_ops->dev_configure == NULL) 1265 return -ENOTSUP; 1266 1267 if (dev->data->dev_started) { 1268 RTE_ETHDEV_LOG(ERR, 1269 "Port %u must be stopped to allow configuration\n", 1270 port_id); 1271 return -EBUSY; 1272 } 1273 1274 /* 1275 * Ensure that "dev_configured" is always 0 each time prepare to do 1276 * dev_configure() to avoid any non-anticipated behaviour. 1277 * And set to 1 when dev_configure() is executed successfully. 1278 */ 1279 dev->data->dev_configured = 0; 1280 1281 /* Store original config, as rollback required on failure */ 1282 memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf)); 1283 1284 /* 1285 * Copy the dev_conf parameter into the dev structure. 
1286 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get 1287 */ 1288 if (dev_conf != &dev->data->dev_conf) 1289 memcpy(&dev->data->dev_conf, dev_conf, 1290 sizeof(dev->data->dev_conf)); 1291 1292 /* Backup mtu for rollback */ 1293 old_mtu = dev->data->mtu; 1294 1295 /* fields must be zero to reserve them for future ABI changes */ 1296 if (dev_conf->rxmode.reserved_64s[0] != 0 || 1297 dev_conf->rxmode.reserved_64s[1] != 0 || 1298 dev_conf->rxmode.reserved_ptrs[0] != NULL || 1299 dev_conf->rxmode.reserved_ptrs[1] != NULL) { 1300 RTE_ETHDEV_LOG(ERR, "Rxmode reserved fields not zero\n"); 1301 ret = -EINVAL; 1302 goto rollback; 1303 } 1304 1305 if (dev_conf->txmode.reserved_64s[0] != 0 || 1306 dev_conf->txmode.reserved_64s[1] != 0 || 1307 dev_conf->txmode.reserved_ptrs[0] != NULL || 1308 dev_conf->txmode.reserved_ptrs[1] != NULL) { 1309 RTE_ETHDEV_LOG(ERR, "txmode reserved fields not zero\n"); 1310 ret = -EINVAL; 1311 goto rollback; 1312 } 1313 1314 ret = rte_eth_dev_info_get(port_id, &dev_info); 1315 if (ret != 0) 1316 goto rollback; 1317 1318 /* If number of queues specified by application for both Rx and Tx is 1319 * zero, use driver preferred values. This cannot be done individually 1320 * as it is valid for either Tx or Rx (but not both) to be zero. 1321 * If driver does not provide any preferred valued, fall back on 1322 * EAL defaults. 1323 */ 1324 if (nb_rx_q == 0 && nb_tx_q == 0) { 1325 nb_rx_q = dev_info.default_rxportconf.nb_queues; 1326 if (nb_rx_q == 0) 1327 nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES; 1328 nb_tx_q = dev_info.default_txportconf.nb_queues; 1329 if (nb_tx_q == 0) 1330 nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES; 1331 } 1332 1333 if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) { 1334 RTE_ETHDEV_LOG(ERR, 1335 "Number of Rx queues requested (%u) is greater than max supported(%d)\n", 1336 nb_rx_q, RTE_MAX_QUEUES_PER_PORT); 1337 ret = -EINVAL; 1338 goto rollback; 1339 } 1340 1341 if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) { 1342 RTE_ETHDEV_LOG(ERR, 1343 "Number of Tx queues requested (%u) is greater than max supported(%d)\n", 1344 nb_tx_q, RTE_MAX_QUEUES_PER_PORT); 1345 ret = -EINVAL; 1346 goto rollback; 1347 } 1348 1349 /* 1350 * Check that the numbers of Rx and Tx queues are not greater 1351 * than the maximum number of Rx and Tx queues supported by the 1352 * configured device. 
1353 */ 1354 if (nb_rx_q > dev_info.max_rx_queues) { 1355 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n", 1356 port_id, nb_rx_q, dev_info.max_rx_queues); 1357 ret = -EINVAL; 1358 goto rollback; 1359 } 1360 1361 if (nb_tx_q > dev_info.max_tx_queues) { 1362 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n", 1363 port_id, nb_tx_q, dev_info.max_tx_queues); 1364 ret = -EINVAL; 1365 goto rollback; 1366 } 1367 1368 /* Check that the device supports requested interrupts */ 1369 if ((dev_conf->intr_conf.lsc == 1) && 1370 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { 1371 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n", 1372 dev->device->driver->name); 1373 ret = -EINVAL; 1374 goto rollback; 1375 } 1376 if ((dev_conf->intr_conf.rmv == 1) && 1377 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { 1378 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n", 1379 dev->device->driver->name); 1380 ret = -EINVAL; 1381 goto rollback; 1382 } 1383 1384 if (dev_conf->rxmode.mtu == 0) 1385 dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU; 1386 1387 ret = eth_dev_validate_mtu(port_id, &dev_info, 1388 dev->data->dev_conf.rxmode.mtu); 1389 if (ret != 0) 1390 goto rollback; 1391 1392 dev->data->mtu = dev->data->dev_conf.rxmode.mtu; 1393 1394 /* 1395 * If LRO is enabled, check that the maximum aggregated packet 1396 * size is supported by the configured device. 1397 */ 1398 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 1399 uint32_t max_rx_pktlen; 1400 uint32_t overhead_len; 1401 1402 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1403 dev_info.max_mtu); 1404 max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len; 1405 if (dev_conf->rxmode.max_lro_pkt_size == 0) 1406 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 1407 ret = eth_dev_check_lro_pkt_size(port_id, 1408 dev->data->dev_conf.rxmode.max_lro_pkt_size, 1409 max_rx_pktlen, 1410 dev_info.max_lro_pkt_size); 1411 if (ret != 0) 1412 goto rollback; 1413 } 1414 1415 /* Any requested offloading must be within its device capabilities */ 1416 if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) != 1417 dev_conf->rxmode.offloads) { 1418 char buffer[512]; 1419 1420 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u does not support Rx offloads %s\n", 1421 port_id, eth_dev_offload_names( 1422 dev_conf->rxmode.offloads & ~dev_info.rx_offload_capa, 1423 buffer, sizeof(buffer), rte_eth_dev_rx_offload_name)); 1424 RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u was requested Rx offloads %s\n", 1425 port_id, eth_dev_offload_names(dev_conf->rxmode.offloads, 1426 buffer, sizeof(buffer), rte_eth_dev_rx_offload_name)); 1427 RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u supports Rx offloads %s\n", 1428 port_id, eth_dev_offload_names(dev_info.rx_offload_capa, 1429 buffer, sizeof(buffer), rte_eth_dev_rx_offload_name)); 1430 1431 ret = -EINVAL; 1432 goto rollback; 1433 } 1434 if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) != 1435 dev_conf->txmode.offloads) { 1436 char buffer[512]; 1437 1438 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u does not support Tx offloads %s\n", 1439 port_id, eth_dev_offload_names( 1440 dev_conf->txmode.offloads & ~dev_info.tx_offload_capa, 1441 buffer, sizeof(buffer), rte_eth_dev_tx_offload_name)); 1442 RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u was requested Tx offloads %s\n", 1443 port_id, eth_dev_offload_names(dev_conf->txmode.offloads, 1444 buffer, sizeof(buffer), rte_eth_dev_tx_offload_name)); 1445 RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u supports Tx offloads %s\n", 1446 
port_id, eth_dev_offload_names(dev_info.tx_offload_capa, 1447 buffer, sizeof(buffer), rte_eth_dev_tx_offload_name)); 1448 ret = -EINVAL; 1449 goto rollback; 1450 } 1451 1452 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 1453 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf); 1454 1455 /* Check that device supports requested rss hash functions. */ 1456 if ((dev_info.flow_type_rss_offloads | 1457 dev_conf->rx_adv_conf.rss_conf.rss_hf) != 1458 dev_info.flow_type_rss_offloads) { 1459 RTE_ETHDEV_LOG(ERR, 1460 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 1461 port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf, 1462 dev_info.flow_type_rss_offloads); 1463 ret = -EINVAL; 1464 goto rollback; 1465 } 1466 1467 /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */ 1468 if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) && 1469 (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) { 1470 RTE_ETHDEV_LOG(ERR, 1471 "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n", 1472 port_id, 1473 rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH)); 1474 ret = -EINVAL; 1475 goto rollback; 1476 } 1477 1478 /* 1479 * Setup new number of Rx/Tx queues and reconfigure device. 1480 */ 1481 diag = eth_dev_rx_queue_config(dev, nb_rx_q); 1482 if (diag != 0) { 1483 RTE_ETHDEV_LOG(ERR, 1484 "Port%u eth_dev_rx_queue_config = %d\n", 1485 port_id, diag); 1486 ret = diag; 1487 goto rollback; 1488 } 1489 1490 diag = eth_dev_tx_queue_config(dev, nb_tx_q); 1491 if (diag != 0) { 1492 RTE_ETHDEV_LOG(ERR, 1493 "Port%u eth_dev_tx_queue_config = %d\n", 1494 port_id, diag); 1495 eth_dev_rx_queue_config(dev, 0); 1496 ret = diag; 1497 goto rollback; 1498 } 1499 1500 diag = (*dev->dev_ops->dev_configure)(dev); 1501 if (diag != 0) { 1502 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n", 1503 port_id, diag); 1504 ret = eth_err(port_id, diag); 1505 goto reset_queues; 1506 } 1507 1508 /* Initialize Rx profiling if enabled at compilation time. */ 1509 diag = __rte_eth_dev_profile_init(port_id, dev); 1510 if (diag != 0) { 1511 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n", 1512 port_id, diag); 1513 ret = eth_err(port_id, diag); 1514 goto reset_queues; 1515 } 1516 1517 /* Validate Rx offloads. */ 1518 diag = eth_dev_validate_offloads(port_id, 1519 dev_conf->rxmode.offloads, 1520 dev->data->dev_conf.rxmode.offloads, "Rx", 1521 rte_eth_dev_rx_offload_name); 1522 if (diag != 0) { 1523 ret = diag; 1524 goto reset_queues; 1525 } 1526 1527 /* Validate Tx offloads. 
*/ 1528 diag = eth_dev_validate_offloads(port_id, 1529 dev_conf->txmode.offloads, 1530 dev->data->dev_conf.txmode.offloads, "Tx", 1531 rte_eth_dev_tx_offload_name); 1532 if (diag != 0) { 1533 ret = diag; 1534 goto reset_queues; 1535 } 1536 1537 dev->data->dev_configured = 1; 1538 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0); 1539 return 0; 1540 reset_queues: 1541 eth_dev_rx_queue_config(dev, 0); 1542 eth_dev_tx_queue_config(dev, 0); 1543 rollback: 1544 memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf)); 1545 if (old_mtu != dev->data->mtu) 1546 dev->data->mtu = old_mtu; 1547 1548 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret); 1549 return ret; 1550 } 1551 1552 static void 1553 eth_dev_mac_restore(struct rte_eth_dev *dev, 1554 struct rte_eth_dev_info *dev_info) 1555 { 1556 struct rte_ether_addr *addr; 1557 uint16_t i; 1558 uint32_t pool = 0; 1559 uint64_t pool_mask; 1560 1561 /* replay MAC address configuration including default MAC */ 1562 addr = &dev->data->mac_addrs[0]; 1563 if (*dev->dev_ops->mac_addr_set != NULL) 1564 (*dev->dev_ops->mac_addr_set)(dev, addr); 1565 else if (*dev->dev_ops->mac_addr_add != NULL) 1566 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); 1567 1568 if (*dev->dev_ops->mac_addr_add != NULL) { 1569 for (i = 1; i < dev_info->max_mac_addrs; i++) { 1570 addr = &dev->data->mac_addrs[i]; 1571 1572 /* skip zero address */ 1573 if (rte_is_zero_ether_addr(addr)) 1574 continue; 1575 1576 pool = 0; 1577 pool_mask = dev->data->mac_pool_sel[i]; 1578 1579 do { 1580 if (pool_mask & UINT64_C(1)) 1581 (*dev->dev_ops->mac_addr_add)(dev, 1582 addr, i, pool); 1583 pool_mask >>= 1; 1584 pool++; 1585 } while (pool_mask); 1586 } 1587 } 1588 } 1589 1590 static int 1591 eth_dev_config_restore(struct rte_eth_dev *dev, 1592 struct rte_eth_dev_info *dev_info, uint16_t port_id) 1593 { 1594 int ret; 1595 1596 if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)) 1597 eth_dev_mac_restore(dev, dev_info); 1598 1599 /* replay promiscuous configuration */ 1600 /* 1601 * use callbacks directly since we don't need port_id check and 1602 * would like to bypass the same value set 1603 */ 1604 if (rte_eth_promiscuous_get(port_id) == 1 && 1605 *dev->dev_ops->promiscuous_enable != NULL) { 1606 ret = eth_err(port_id, 1607 (*dev->dev_ops->promiscuous_enable)(dev)); 1608 if (ret != 0 && ret != -ENOTSUP) { 1609 RTE_ETHDEV_LOG(ERR, 1610 "Failed to enable promiscuous mode for device (port %u): %s\n", 1611 port_id, rte_strerror(-ret)); 1612 return ret; 1613 } 1614 } else if (rte_eth_promiscuous_get(port_id) == 0 && 1615 *dev->dev_ops->promiscuous_disable != NULL) { 1616 ret = eth_err(port_id, 1617 (*dev->dev_ops->promiscuous_disable)(dev)); 1618 if (ret != 0 && ret != -ENOTSUP) { 1619 RTE_ETHDEV_LOG(ERR, 1620 "Failed to disable promiscuous mode for device (port %u): %s\n", 1621 port_id, rte_strerror(-ret)); 1622 return ret; 1623 } 1624 } 1625 1626 /* replay all multicast configuration */ 1627 /* 1628 * use callbacks directly since we don't need port_id check and 1629 * would like to bypass the same value set 1630 */ 1631 if (rte_eth_allmulticast_get(port_id) == 1 && 1632 *dev->dev_ops->allmulticast_enable != NULL) { 1633 ret = eth_err(port_id, 1634 (*dev->dev_ops->allmulticast_enable)(dev)); 1635 if (ret != 0 && ret != -ENOTSUP) { 1636 RTE_ETHDEV_LOG(ERR, 1637 "Failed to enable allmulticast mode for device (port %u): %s\n", 1638 port_id, rte_strerror(-ret)); 1639 return ret; 1640 } 1641 } else if (rte_eth_allmulticast_get(port_id) == 0 && 1642 
*dev->dev_ops->allmulticast_disable != NULL) { 1643 ret = eth_err(port_id, 1644 (*dev->dev_ops->allmulticast_disable)(dev)); 1645 if (ret != 0 && ret != -ENOTSUP) { 1646 RTE_ETHDEV_LOG(ERR, 1647 "Failed to disable allmulticast mode for device (port %u): %s\n", 1648 port_id, rte_strerror(-ret)); 1649 return ret; 1650 } 1651 } 1652 1653 return 0; 1654 } 1655 1656 int 1657 rte_eth_dev_start(uint16_t port_id) 1658 { 1659 struct rte_eth_dev *dev; 1660 struct rte_eth_dev_info dev_info; 1661 int diag; 1662 int ret, ret_stop; 1663 1664 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1665 dev = &rte_eth_devices[port_id]; 1666 1667 if (*dev->dev_ops->dev_start == NULL) 1668 return -ENOTSUP; 1669 1670 if (dev->data->dev_configured == 0) { 1671 RTE_ETHDEV_LOG(INFO, 1672 "Device with port_id=%"PRIu16" is not configured.\n", 1673 port_id); 1674 return -EINVAL; 1675 } 1676 1677 if (dev->data->dev_started != 0) { 1678 RTE_ETHDEV_LOG(INFO, 1679 "Device with port_id=%"PRIu16" already started\n", 1680 port_id); 1681 return 0; 1682 } 1683 1684 ret = rte_eth_dev_info_get(port_id, &dev_info); 1685 if (ret != 0) 1686 return ret; 1687 1688 /* Lets restore MAC now if device does not support live change */ 1689 if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR) 1690 eth_dev_mac_restore(dev, &dev_info); 1691 1692 diag = (*dev->dev_ops->dev_start)(dev); 1693 if (diag == 0) 1694 dev->data->dev_started = 1; 1695 else 1696 return eth_err(port_id, diag); 1697 1698 ret = eth_dev_config_restore(dev, &dev_info, port_id); 1699 if (ret != 0) { 1700 RTE_ETHDEV_LOG(ERR, 1701 "Error during restoring configuration for device (port %u): %s\n", 1702 port_id, rte_strerror(-ret)); 1703 ret_stop = rte_eth_dev_stop(port_id); 1704 if (ret_stop != 0) { 1705 RTE_ETHDEV_LOG(ERR, 1706 "Failed to stop device (port %u): %s\n", 1707 port_id, rte_strerror(-ret_stop)); 1708 } 1709 1710 return ret; 1711 } 1712 1713 if (dev->data->dev_conf.intr_conf.lsc == 0) { 1714 if (*dev->dev_ops->link_update == NULL) 1715 return -ENOTSUP; 1716 (*dev->dev_ops->link_update)(dev, 0); 1717 } 1718 1719 /* expose selection of PMD fast-path functions */ 1720 eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev); 1721 1722 rte_ethdev_trace_start(port_id); 1723 return 0; 1724 } 1725 1726 int 1727 rte_eth_dev_stop(uint16_t port_id) 1728 { 1729 struct rte_eth_dev *dev; 1730 int ret; 1731 1732 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1733 dev = &rte_eth_devices[port_id]; 1734 1735 if (*dev->dev_ops->dev_stop == NULL) 1736 return -ENOTSUP; 1737 1738 if (dev->data->dev_started == 0) { 1739 RTE_ETHDEV_LOG(INFO, 1740 "Device with port_id=%"PRIu16" already stopped\n", 1741 port_id); 1742 return 0; 1743 } 1744 1745 /* point fast-path functions to dummy ones */ 1746 eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id); 1747 1748 ret = (*dev->dev_ops->dev_stop)(dev); 1749 if (ret == 0) 1750 dev->data->dev_started = 0; 1751 rte_ethdev_trace_stop(port_id, ret); 1752 1753 return ret; 1754 } 1755 1756 int 1757 rte_eth_dev_set_link_up(uint16_t port_id) 1758 { 1759 struct rte_eth_dev *dev; 1760 int ret; 1761 1762 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1763 dev = &rte_eth_devices[port_id]; 1764 1765 if (*dev->dev_ops->dev_set_link_up == NULL) 1766 return -ENOTSUP; 1767 ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev)); 1768 1769 rte_ethdev_trace_set_link_up(port_id, ret); 1770 1771 return ret; 1772 } 1773 1774 int 1775 rte_eth_dev_set_link_down(uint16_t port_id) 1776 { 1777 struct rte_eth_dev *dev; 1778 int ret; 1779 1780 
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1781 dev = &rte_eth_devices[port_id]; 1782 1783 if (*dev->dev_ops->dev_set_link_down == NULL) 1784 return -ENOTSUP; 1785 ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev)); 1786 1787 rte_ethdev_trace_set_link_down(port_id, ret); 1788 1789 return ret; 1790 } 1791 1792 int 1793 rte_eth_dev_close(uint16_t port_id) 1794 { 1795 struct rte_eth_dev *dev; 1796 int firsterr, binerr; 1797 int *lasterr = &firsterr; 1798 1799 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1800 dev = &rte_eth_devices[port_id]; 1801 1802 /* 1803 * Secondary process needs to close device to release process private 1804 * resources. But secondary process should not be obliged to wait 1805 * for device stop before closing ethdev. 1806 */ 1807 if (rte_eal_process_type() == RTE_PROC_PRIMARY && 1808 dev->data->dev_started) { 1809 RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n", 1810 port_id); 1811 return -EINVAL; 1812 } 1813 1814 if (*dev->dev_ops->dev_close == NULL) 1815 return -ENOTSUP; 1816 *lasterr = (*dev->dev_ops->dev_close)(dev); 1817 if (*lasterr != 0) 1818 lasterr = &binerr; 1819 1820 rte_ethdev_trace_close(port_id); 1821 *lasterr = rte_eth_dev_release_port(dev); 1822 1823 return firsterr; 1824 } 1825 1826 int 1827 rte_eth_dev_reset(uint16_t port_id) 1828 { 1829 struct rte_eth_dev *dev; 1830 int ret; 1831 1832 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1833 dev = &rte_eth_devices[port_id]; 1834 1835 if (*dev->dev_ops->dev_reset == NULL) 1836 return -ENOTSUP; 1837 1838 ret = rte_eth_dev_stop(port_id); 1839 if (ret != 0) { 1840 RTE_ETHDEV_LOG(ERR, 1841 "Failed to stop device (port %u) before reset: %s - ignore\n", 1842 port_id, rte_strerror(-ret)); 1843 } 1844 ret = eth_err(port_id, dev->dev_ops->dev_reset(dev)); 1845 1846 rte_ethdev_trace_reset(port_id, ret); 1847 1848 return ret; 1849 } 1850 1851 int 1852 rte_eth_dev_is_removed(uint16_t port_id) 1853 { 1854 struct rte_eth_dev *dev; 1855 int ret; 1856 1857 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); 1858 dev = &rte_eth_devices[port_id]; 1859 1860 if (dev->state == RTE_ETH_DEV_REMOVED) 1861 return 1; 1862 1863 if (*dev->dev_ops->is_removed == NULL) 1864 return 0; 1865 1866 ret = dev->dev_ops->is_removed(dev); 1867 if (ret != 0) 1868 /* Device is physically removed. */ 1869 dev->state = RTE_ETH_DEV_REMOVED; 1870 1871 rte_ethdev_trace_is_removed(port_id, ret); 1872 1873 return ret; 1874 } 1875 1876 static int 1877 rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset, 1878 uint16_t min_length) 1879 { 1880 uint16_t data_room_size; 1881 1882 /* 1883 * Check the size of the mbuf data buffer, this value 1884 * must be provided in the private data of the memory pool. 1885 * First check that the memory pool(s) has a valid private data. 
1886 */ 1887 if (mp->private_data_size < 1888 sizeof(struct rte_pktmbuf_pool_private)) { 1889 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 1890 mp->name, mp->private_data_size, 1891 (unsigned int) 1892 sizeof(struct rte_pktmbuf_pool_private)); 1893 return -ENOSPC; 1894 } 1895 data_room_size = rte_pktmbuf_data_room_size(mp); 1896 if (data_room_size < offset + min_length) { 1897 RTE_ETHDEV_LOG(ERR, 1898 "%s mbuf_data_room_size %u < %u (%u + %u)\n", 1899 mp->name, data_room_size, 1900 offset + min_length, offset, min_length); 1901 return -EINVAL; 1902 } 1903 return 0; 1904 } 1905 1906 static int 1907 eth_dev_buffer_split_get_supported_hdrs_helper(uint16_t port_id, uint32_t **ptypes) 1908 { 1909 int cnt; 1910 1911 cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0); 1912 if (cnt <= 0) 1913 return cnt; 1914 1915 *ptypes = malloc(sizeof(uint32_t) * cnt); 1916 if (*ptypes == NULL) 1917 return -ENOMEM; 1918 1919 cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, *ptypes, cnt); 1920 if (cnt <= 0) { 1921 free(*ptypes); 1922 *ptypes = NULL; 1923 } 1924 return cnt; 1925 } 1926 1927 static int 1928 rte_eth_rx_queue_check_split(uint16_t port_id, 1929 const struct rte_eth_rxseg_split *rx_seg, 1930 uint16_t n_seg, uint32_t *mbp_buf_size, 1931 const struct rte_eth_dev_info *dev_info) 1932 { 1933 const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; 1934 struct rte_mempool *mp_first; 1935 uint32_t offset_mask; 1936 uint16_t seg_idx; 1937 int ret = 0; 1938 int ptype_cnt; 1939 uint32_t *ptypes; 1940 uint32_t prev_proto_hdrs = RTE_PTYPE_UNKNOWN; 1941 int i; 1942 1943 if (n_seg > seg_capa->max_nseg) { 1944 RTE_ETHDEV_LOG(ERR, 1945 "Requested Rx segments %u exceed supported %u\n", 1946 n_seg, seg_capa->max_nseg); 1947 return -EINVAL; 1948 } 1949 /* 1950 * Check the sizes and offsets against buffer sizes 1951 * for each segment specified in extended configuration. 1952 */ 1953 mp_first = rx_seg[0].mp; 1954 offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1; 1955 1956 ptypes = NULL; 1957 ptype_cnt = eth_dev_buffer_split_get_supported_hdrs_helper(port_id, &ptypes); 1958 1959 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 1960 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 1961 uint32_t length = rx_seg[seg_idx].length; 1962 uint32_t offset = rx_seg[seg_idx].offset; 1963 uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr; 1964 1965 if (mpl == NULL) { 1966 RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); 1967 ret = -EINVAL; 1968 goto out; 1969 } 1970 if (seg_idx != 0 && mp_first != mpl && 1971 seg_capa->multi_pools == 0) { 1972 RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n"); 1973 ret = -ENOTSUP; 1974 goto out; 1975 } 1976 if (offset != 0) { 1977 if (seg_capa->offset_allowed == 0) { 1978 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n"); 1979 ret = -ENOTSUP; 1980 goto out; 1981 } 1982 if (offset & offset_mask) { 1983 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n", 1984 offset, 1985 seg_capa->offset_align_log2); 1986 ret = -EINVAL; 1987 goto out; 1988 } 1989 } 1990 1991 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 1992 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 1993 if (proto_hdr != 0) { 1994 /* Split based on protocol headers. 
*/ 1995 if (length != 0) { 1996 RTE_ETHDEV_LOG(ERR, 1997 "Do not set length split and protocol split within a segment\n" 1998 ); 1999 ret = -EINVAL; 2000 goto out; 2001 } 2002 if ((proto_hdr & prev_proto_hdrs) != 0) { 2003 RTE_ETHDEV_LOG(ERR, 2004 "Repeat with previous protocol headers or proto-split after length-based split\n" 2005 ); 2006 ret = -EINVAL; 2007 goto out; 2008 } 2009 if (ptype_cnt <= 0) { 2010 RTE_ETHDEV_LOG(ERR, 2011 "Port %u failed to get supported buffer split header protocols\n", 2012 port_id); 2013 ret = -ENOTSUP; 2014 goto out; 2015 } 2016 for (i = 0; i < ptype_cnt; i++) { 2017 if ((prev_proto_hdrs | proto_hdr) == ptypes[i]) 2018 break; 2019 } 2020 if (i == ptype_cnt) { 2021 RTE_ETHDEV_LOG(ERR, 2022 "Requested Rx split header protocols 0x%x is not supported.\n", 2023 proto_hdr); 2024 ret = -EINVAL; 2025 goto out; 2026 } 2027 prev_proto_hdrs |= proto_hdr; 2028 } else { 2029 /* Split at fixed length. */ 2030 length = length != 0 ? length : *mbp_buf_size; 2031 prev_proto_hdrs = RTE_PTYPE_ALL_MASK; 2032 } 2033 2034 ret = rte_eth_check_rx_mempool(mpl, offset, length); 2035 if (ret != 0) 2036 goto out; 2037 } 2038 out: 2039 free(ptypes); 2040 return ret; 2041 } 2042 2043 static int 2044 rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools, 2045 uint16_t n_mempools, uint32_t *min_buf_size, 2046 const struct rte_eth_dev_info *dev_info) 2047 { 2048 uint16_t pool_idx; 2049 int ret; 2050 2051 if (n_mempools > dev_info->max_rx_mempools) { 2052 RTE_ETHDEV_LOG(ERR, 2053 "Too many Rx mempools %u vs maximum %u\n", 2054 n_mempools, dev_info->max_rx_mempools); 2055 return -EINVAL; 2056 } 2057 2058 for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) { 2059 struct rte_mempool *mp = rx_mempools[pool_idx]; 2060 2061 if (mp == NULL) { 2062 RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n"); 2063 return -EINVAL; 2064 } 2065 2066 ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM, 2067 dev_info->min_rx_bufsize); 2068 if (ret != 0) 2069 return ret; 2070 2071 *min_buf_size = RTE_MIN(*min_buf_size, 2072 rte_pktmbuf_data_room_size(mp)); 2073 } 2074 2075 return 0; 2076 } 2077 2078 int 2079 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2080 uint16_t nb_rx_desc, unsigned int socket_id, 2081 const struct rte_eth_rxconf *rx_conf, 2082 struct rte_mempool *mp) 2083 { 2084 int ret; 2085 uint64_t rx_offloads; 2086 uint32_t mbp_buf_size = UINT32_MAX; 2087 struct rte_eth_dev *dev; 2088 struct rte_eth_dev_info dev_info; 2089 struct rte_eth_rxconf local_conf; 2090 2091 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2092 dev = &rte_eth_devices[port_id]; 2093 2094 if (rx_queue_id >= dev->data->nb_rx_queues) { 2095 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2096 return -EINVAL; 2097 } 2098 2099 if (*dev->dev_ops->rx_queue_setup == NULL) 2100 return -ENOTSUP; 2101 2102 if (rx_conf != NULL && 2103 (rx_conf->reserved_64s[0] != 0 || 2104 rx_conf->reserved_64s[1] != 0 || 2105 rx_conf->reserved_ptrs[0] != NULL || 2106 rx_conf->reserved_ptrs[1] != NULL)) { 2107 RTE_ETHDEV_LOG(ERR, "Rx conf reserved fields not zero\n"); 2108 return -EINVAL; 2109 } 2110 2111 ret = rte_eth_dev_info_get(port_id, &dev_info); 2112 if (ret != 0) 2113 return ret; 2114 2115 rx_offloads = dev->data->dev_conf.rxmode.offloads; 2116 if (rx_conf != NULL) 2117 rx_offloads |= rx_conf->offloads; 2118 2119 /* Ensure that we have one and only one source of Rx buffers */ 2120 if ((mp != NULL) + 2121 (rx_conf != NULL && rx_conf->rx_nseg > 0) + 2122 (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) { 
2123 RTE_ETHDEV_LOG(ERR, 2124 "Ambiguous Rx mempools configuration\n"); 2125 return -EINVAL; 2126 } 2127 2128 if (mp != NULL) { 2129 /* Single pool configuration check. */ 2130 ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM, 2131 dev_info.min_rx_bufsize); 2132 if (ret != 0) 2133 return ret; 2134 2135 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 2136 } else if (rx_conf != NULL && rx_conf->rx_nseg > 0) { 2137 const struct rte_eth_rxseg_split *rx_seg; 2138 uint16_t n_seg; 2139 2140 /* Extended multi-segment configuration check. */ 2141 if (rx_conf->rx_seg == NULL) { 2142 RTE_ETHDEV_LOG(ERR, 2143 "Memory pool is null and no multi-segment configuration provided\n"); 2144 return -EINVAL; 2145 } 2146 2147 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 2148 n_seg = rx_conf->rx_nseg; 2149 2150 if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 2151 ret = rte_eth_rx_queue_check_split(port_id, rx_seg, n_seg, 2152 &mbp_buf_size, 2153 &dev_info); 2154 if (ret != 0) 2155 return ret; 2156 } else { 2157 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 2158 return -EINVAL; 2159 } 2160 } else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) { 2161 /* Extended multi-pool configuration check. */ 2162 if (rx_conf->rx_mempools == NULL) { 2163 RTE_ETHDEV_LOG(ERR, "Memory pools array is null\n"); 2164 return -EINVAL; 2165 } 2166 2167 ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools, 2168 rx_conf->rx_nmempool, 2169 &mbp_buf_size, 2170 &dev_info); 2171 if (ret != 0) 2172 return ret; 2173 } else { 2174 RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n"); 2175 return -EINVAL; 2176 } 2177 2178 /* Use default specified by driver, if nb_rx_desc is zero */ 2179 if (nb_rx_desc == 0) { 2180 nb_rx_desc = dev_info.default_rxportconf.ring_size; 2181 /* If driver default is also zero, fall back on EAL default */ 2182 if (nb_rx_desc == 0) 2183 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 2184 } 2185 2186 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 2187 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 2188 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 2189 2190 RTE_ETHDEV_LOG(ERR, 2191 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2192 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 2193 dev_info.rx_desc_lim.nb_min, 2194 dev_info.rx_desc_lim.nb_align); 2195 return -EINVAL; 2196 } 2197 2198 if (dev->data->dev_started && 2199 !(dev_info.dev_capa & 2200 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 2201 return -EBUSY; 2202 2203 if (dev->data->dev_started && 2204 (dev->data->rx_queue_state[rx_queue_id] != 2205 RTE_ETH_QUEUE_STATE_STOPPED)) 2206 return -EBUSY; 2207 2208 eth_dev_rxq_release(dev, rx_queue_id); 2209 2210 if (rx_conf == NULL) 2211 rx_conf = &dev_info.default_rxconf; 2212 2213 local_conf = *rx_conf; 2214 2215 /* 2216 * If an offloading has already been enabled in 2217 * rte_eth_dev_configure(), it has been enabled on all queues, 2218 * so there is no need to enable it in this queue again. 2219 * The local_conf.offloads input to underlying PMD only carries 2220 * those offloadings which are only enabled on this queue and 2221 * not enabled on all queues. 2222 */ 2223 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 2224 2225 /* 2226 * New added offloadings for this queue are those not enabled in 2227 * rte_eth_dev_configure() and they must be per-queue type. 2228 * A pure per-port offloading can't be enabled on a queue while 2229 * disabled on another queue. 
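 *
 * A minimal sketch of that split, assuming a hypothetical port 0 where
 * RTE_ETH_RX_OFFLOAD_TCP_CKSUM is enabled port-wide via
 * rte_eth_dev_configure() and RTE_ETH_RX_OFFLOAD_SCATTER is added only
 * on this queue (mb_pool, ring size and port/queue numbers are
 * illustrative, error handling omitted):
 *
 *     struct rte_eth_conf port_conf = { 0 };
 *     struct rte_eth_dev_info info;
 *     struct rte_eth_rxconf rxq_conf;
 *
 *     port_conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 *     rte_eth_dev_configure(0, 1, 1, &port_conf);
 *     rte_eth_dev_info_get(0, &info);
 *     rxq_conf = info.default_rxconf;
 *     rxq_conf.offloads = RTE_ETH_RX_OFFLOAD_SCATTER;
 *     rte_eth_rx_queue_setup(0, 0, 512, rte_socket_id(), &rxq_conf, mb_pool);
 *
 * Only the queue-local addition (SCATTER here) must appear in
 * dev_info.rx_queue_offload_capa for the check below to pass; the
 * port-wide TCP_CKSUM has already been masked out of
 * local_conf.offloads above.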
A pure per-port offloading can't 2230 * be enabled for any queue as new added one if it hasn't been 2231 * enabled in rte_eth_dev_configure(). 2232 */ 2233 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 2234 local_conf.offloads) { 2235 RTE_ETHDEV_LOG(ERR, 2236 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2237 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2238 port_id, rx_queue_id, local_conf.offloads, 2239 dev_info.rx_queue_offload_capa, 2240 __func__); 2241 return -EINVAL; 2242 } 2243 2244 if (local_conf.share_group > 0 && 2245 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 2246 RTE_ETHDEV_LOG(ERR, 2247 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n", 2248 port_id, rx_queue_id, local_conf.share_group); 2249 return -EINVAL; 2250 } 2251 2252 /* 2253 * If LRO is enabled, check that the maximum aggregated packet 2254 * size is supported by the configured device. 2255 */ 2256 /* Get the real Ethernet overhead length */ 2257 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 2258 uint32_t overhead_len; 2259 uint32_t max_rx_pktlen; 2260 int ret; 2261 2262 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 2263 dev_info.max_mtu); 2264 max_rx_pktlen = dev->data->mtu + overhead_len; 2265 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 2266 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 2267 ret = eth_dev_check_lro_pkt_size(port_id, 2268 dev->data->dev_conf.rxmode.max_lro_pkt_size, 2269 max_rx_pktlen, 2270 dev_info.max_lro_pkt_size); 2271 if (ret != 0) 2272 return ret; 2273 } 2274 2275 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 2276 socket_id, &local_conf, mp); 2277 if (!ret) { 2278 if (!dev->data->min_rx_buf_size || 2279 dev->data->min_rx_buf_size > mbp_buf_size) 2280 dev->data->min_rx_buf_size = mbp_buf_size; 2281 } 2282 2283 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 2284 rx_conf, ret); 2285 return eth_err(port_id, ret); 2286 } 2287 2288 int 2289 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 2290 uint16_t nb_rx_desc, 2291 const struct rte_eth_hairpin_conf *conf) 2292 { 2293 int ret; 2294 struct rte_eth_dev *dev; 2295 struct rte_eth_hairpin_cap cap; 2296 int i; 2297 int count; 2298 2299 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2300 dev = &rte_eth_devices[port_id]; 2301 2302 if (rx_queue_id >= dev->data->nb_rx_queues) { 2303 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 2304 return -EINVAL; 2305 } 2306 2307 if (conf == NULL) { 2308 RTE_ETHDEV_LOG(ERR, 2309 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 2310 port_id); 2311 return -EINVAL; 2312 } 2313 2314 if (conf->reserved != 0) { 2315 RTE_ETHDEV_LOG(ERR, 2316 "Rx hairpin reserved field not zero\n"); 2317 return -EINVAL; 2318 } 2319 2320 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2321 if (ret != 0) 2322 return ret; 2323 if (*dev->dev_ops->rx_hairpin_queue_setup == NULL) 2324 return -ENOTSUP; 2325 /* if nb_rx_desc is zero use max number of desc from the driver. 
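 *
 * A minimal caller-side sketch, assuming a single-port hairpin where Rx
 * hairpin queue 1 of port 0 is peered with Tx hairpin queue 1 of the
 * same port (port/queue numbers are illustrative, error handling
 * omitted):
 *
 *     struct rte_eth_hairpin_conf hp_conf = {
 *             .peer_count = 1,
 *             .peers[0] = { .port = 0, .queue = 1 },
 *     };
 *
 *     rte_eth_rx_hairpin_queue_setup(0, 1, 0, &hp_conf);
 *
 * Passing nb_rx_desc as 0, as in the sketch, selects cap.max_nb_desc
 * through the fallback below.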
*/ 2326 if (nb_rx_desc == 0) 2327 nb_rx_desc = cap.max_nb_desc; 2328 if (nb_rx_desc > cap.max_nb_desc) { 2329 RTE_ETHDEV_LOG(ERR, 2330 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu", 2331 nb_rx_desc, cap.max_nb_desc); 2332 return -EINVAL; 2333 } 2334 if (conf->peer_count > cap.max_rx_2_tx) { 2335 RTE_ETHDEV_LOG(ERR, 2336 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu", 2337 conf->peer_count, cap.max_rx_2_tx); 2338 return -EINVAL; 2339 } 2340 if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) { 2341 RTE_ETHDEV_LOG(ERR, 2342 "Attempt to use locked device memory for Rx queue, which is not supported"); 2343 return -EINVAL; 2344 } 2345 if (conf->use_rte_memory && !cap.rx_cap.rte_memory) { 2346 RTE_ETHDEV_LOG(ERR, 2347 "Attempt to use DPDK memory for Rx queue, which is not supported"); 2348 return -EINVAL; 2349 } 2350 if (conf->use_locked_device_memory && conf->use_rte_memory) { 2351 RTE_ETHDEV_LOG(ERR, 2352 "Attempt to use mutually exclusive memory settings for Rx queue"); 2353 return -EINVAL; 2354 } 2355 if (conf->force_memory && 2356 !conf->use_locked_device_memory && 2357 !conf->use_rte_memory) { 2358 RTE_ETHDEV_LOG(ERR, 2359 "Attempt to force Rx queue memory settings, but none is set"); 2360 return -EINVAL; 2361 } 2362 if (conf->peer_count == 0) { 2363 RTE_ETHDEV_LOG(ERR, 2364 "Invalid value for number of peers for Rx queue(=%u), should be: > 0", 2365 conf->peer_count); 2366 return -EINVAL; 2367 } 2368 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 2369 cap.max_nb_queues != UINT16_MAX; i++) { 2370 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 2371 count++; 2372 } 2373 if (count > cap.max_nb_queues) { 2374 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d", 2375 cap.max_nb_queues); 2376 return -EINVAL; 2377 } 2378 if (dev->data->dev_started) 2379 return -EBUSY; 2380 eth_dev_rxq_release(dev, rx_queue_id); 2381 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 2382 nb_rx_desc, conf); 2383 if (ret == 0) 2384 dev->data->rx_queue_state[rx_queue_id] = 2385 RTE_ETH_QUEUE_STATE_HAIRPIN; 2386 ret = eth_err(port_id, ret); 2387 2388 rte_eth_trace_rx_hairpin_queue_setup(port_id, rx_queue_id, nb_rx_desc, 2389 conf, ret); 2390 2391 return ret; 2392 } 2393 2394 int 2395 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2396 uint16_t nb_tx_desc, unsigned int socket_id, 2397 const struct rte_eth_txconf *tx_conf) 2398 { 2399 struct rte_eth_dev *dev; 2400 struct rte_eth_dev_info dev_info; 2401 struct rte_eth_txconf local_conf; 2402 int ret; 2403 2404 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2405 dev = &rte_eth_devices[port_id]; 2406 2407 if (tx_queue_id >= dev->data->nb_tx_queues) { 2408 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2409 return -EINVAL; 2410 } 2411 2412 if (*dev->dev_ops->tx_queue_setup == NULL) 2413 return -ENOTSUP; 2414 2415 if (tx_conf != NULL && 2416 (tx_conf->reserved_64s[0] != 0 || 2417 tx_conf->reserved_64s[1] != 0 || 2418 tx_conf->reserved_ptrs[0] != NULL || 2419 tx_conf->reserved_ptrs[1] != NULL)) { 2420 RTE_ETHDEV_LOG(ERR, "Tx conf reserved fields not zero\n"); 2421 return -EINVAL; 2422 } 2423 2424 ret = rte_eth_dev_info_get(port_id, &dev_info); 2425 if (ret != 0) 2426 return ret; 2427 2428 /* Use default specified by driver, if nb_tx_desc is zero */ 2429 if (nb_tx_desc == 0) { 2430 nb_tx_desc = dev_info.default_txportconf.ring_size; 2431 /* If driver default is zero, fall back on EAL default */ 2432 if (nb_tx_desc == 0) 2433 nb_tx_desc =
RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2434 } 2435 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2436 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2437 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2438 RTE_ETHDEV_LOG(ERR, 2439 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2440 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2441 dev_info.tx_desc_lim.nb_min, 2442 dev_info.tx_desc_lim.nb_align); 2443 return -EINVAL; 2444 } 2445 2446 if (dev->data->dev_started && 2447 !(dev_info.dev_capa & 2448 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2449 return -EBUSY; 2450 2451 if (dev->data->dev_started && 2452 (dev->data->tx_queue_state[tx_queue_id] != 2453 RTE_ETH_QUEUE_STATE_STOPPED)) 2454 return -EBUSY; 2455 2456 eth_dev_txq_release(dev, tx_queue_id); 2457 2458 if (tx_conf == NULL) 2459 tx_conf = &dev_info.default_txconf; 2460 2461 local_conf = *tx_conf; 2462 2463 /* 2464 * If an offloading has already been enabled in 2465 * rte_eth_dev_configure(), it has been enabled on all queues, 2466 * so there is no need to enable it in this queue again. 2467 * The local_conf.offloads input to underlying PMD only carries 2468 * those offloadings which are only enabled on this queue and 2469 * not enabled on all queues. 2470 */ 2471 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2472 2473 /* 2474 * New added offloadings for this queue are those not enabled in 2475 * rte_eth_dev_configure() and they must be per-queue type. 2476 * A pure per-port offloading can't be enabled on a queue while 2477 * disabled on another queue. A pure per-port offloading can't 2478 * be enabled for any queue as new added one if it hasn't been 2479 * enabled in rte_eth_dev_configure(). 2480 */ 2481 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2482 local_conf.offloads) { 2483 RTE_ETHDEV_LOG(ERR, 2484 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2485 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2486 port_id, tx_queue_id, local_conf.offloads, 2487 dev_info.tx_queue_offload_capa, 2488 __func__); 2489 return -EINVAL; 2490 } 2491 2492 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf); 2493 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2494 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2495 } 2496 2497 int 2498 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2499 uint16_t nb_tx_desc, 2500 const struct rte_eth_hairpin_conf *conf) 2501 { 2502 struct rte_eth_dev *dev; 2503 struct rte_eth_hairpin_cap cap; 2504 int i; 2505 int count; 2506 int ret; 2507 2508 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2509 dev = &rte_eth_devices[port_id]; 2510 2511 if (tx_queue_id >= dev->data->nb_tx_queues) { 2512 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2513 return -EINVAL; 2514 } 2515 2516 if (conf == NULL) { 2517 RTE_ETHDEV_LOG(ERR, 2518 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n", 2519 port_id); 2520 return -EINVAL; 2521 } 2522 2523 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2524 if (ret != 0) 2525 return ret; 2526 if (*dev->dev_ops->tx_hairpin_queue_setup == NULL) 2527 return -ENOTSUP; 2528 /* if nb_tx_desc is zero use max number of desc from the driver.
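 *
 * For the common two-port case the Tx side mirrors the Rx side. A rough
 * sketch, assuming Tx hairpin queue 1 on port 0 peered with Rx hairpin
 * queue 1 on port 1 and manual binding (all port/queue numbers are
 * illustrative, error handling omitted):
 *
 *     struct rte_eth_hairpin_conf hp_conf = {
 *             .peer_count = 1,
 *             .manual_bind = 1,
 *             .tx_explicit = 1,
 *             .peers[0] = { .port = 1, .queue = 1 },
 *     };
 *
 *     rte_eth_tx_hairpin_queue_setup(0, 1, 0, &hp_conf);
 *     // ...start both ports, then:
 *     rte_eth_hairpin_bind(0, 1);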
*/ 2529 if (nb_tx_desc == 0) 2530 nb_tx_desc = cap.max_nb_desc; 2531 if (nb_tx_desc > cap.max_nb_desc) { 2532 RTE_ETHDEV_LOG(ERR, 2533 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu", 2534 nb_tx_desc, cap.max_nb_desc); 2535 return -EINVAL; 2536 } 2537 if (conf->peer_count > cap.max_tx_2_rx) { 2538 RTE_ETHDEV_LOG(ERR, 2539 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu", 2540 conf->peer_count, cap.max_tx_2_rx); 2541 return -EINVAL; 2542 } 2543 if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) { 2544 RTE_ETHDEV_LOG(ERR, 2545 "Attempt to use locked device memory for Tx queue, which is not supported"); 2546 return -EINVAL; 2547 } 2548 if (conf->use_rte_memory && !cap.tx_cap.rte_memory) { 2549 RTE_ETHDEV_LOG(ERR, 2550 "Attempt to use DPDK memory for Tx queue, which is not supported"); 2551 return -EINVAL; 2552 } 2553 if (conf->use_locked_device_memory && conf->use_rte_memory) { 2554 RTE_ETHDEV_LOG(ERR, 2555 "Attempt to use mutually exclusive memory settings for Tx queue"); 2556 return -EINVAL; 2557 } 2558 if (conf->force_memory && 2559 !conf->use_locked_device_memory && 2560 !conf->use_rte_memory) { 2561 RTE_ETHDEV_LOG(ERR, 2562 "Attempt to force Tx queue memory settings, but none is set"); 2563 return -EINVAL; 2564 } 2565 if (conf->peer_count == 0) { 2566 RTE_ETHDEV_LOG(ERR, 2567 "Invalid value for number of peers for Tx queue(=%u), should be: > 0", 2568 conf->peer_count); 2569 return -EINVAL; 2570 } 2571 for (i = 0, count = 0; i < dev->data->nb_tx_queues && 2572 cap.max_nb_queues != UINT16_MAX; i++) { 2573 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) 2574 count++; 2575 } 2576 if (count > cap.max_nb_queues) { 2577 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d", 2578 cap.max_nb_queues); 2579 return -EINVAL; 2580 } 2581 if (dev->data->dev_started) 2582 return -EBUSY; 2583 eth_dev_txq_release(dev, tx_queue_id); 2584 ret = (*dev->dev_ops->tx_hairpin_queue_setup) 2585 (dev, tx_queue_id, nb_tx_desc, conf); 2586 if (ret == 0) 2587 dev->data->tx_queue_state[tx_queue_id] = 2588 RTE_ETH_QUEUE_STATE_HAIRPIN; 2589 ret = eth_err(port_id, ret); 2590 2591 rte_eth_trace_tx_hairpin_queue_setup(port_id, tx_queue_id, nb_tx_desc, 2592 conf, ret); 2593 2594 return ret; 2595 } 2596 2597 int 2598 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port) 2599 { 2600 struct rte_eth_dev *dev; 2601 int ret; 2602 2603 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2604 dev = &rte_eth_devices[tx_port]; 2605 2606 if (dev->data->dev_started == 0) { 2607 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port); 2608 return -EBUSY; 2609 } 2610 2611 if (*dev->dev_ops->hairpin_bind == NULL) 2612 return -ENOTSUP; 2613 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); 2614 if (ret != 0) 2615 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d" 2616 " to Rx %d (%d - all ports)\n", 2617 tx_port, rx_port, RTE_MAX_ETHPORTS); 2618 2619 rte_eth_trace_hairpin_bind(tx_port, rx_port, ret); 2620 2621 return ret; 2622 } 2623 2624 int 2625 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port) 2626 { 2627 struct rte_eth_dev *dev; 2628 int ret; 2629 2630 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2631 dev = &rte_eth_devices[tx_port]; 2632 2633 if (dev->data->dev_started == 0) { 2634 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port); 2635 return -EBUSY; 2636 } 2637 2638 if (*dev->dev_ops->hairpin_unbind == NULL) 2639 return -ENOTSUP; 2640 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); 2641 if (ret != 0) 2642
RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2643 " from Rx %d (%d - all ports)\n", 2644 tx_port, rx_port, RTE_MAX_ETHPORTS); 2645 2646 rte_eth_trace_hairpin_unbind(tx_port, rx_port, ret); 2647 2648 return ret; 2649 } 2650 2651 int 2652 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2653 size_t len, uint32_t direction) 2654 { 2655 struct rte_eth_dev *dev; 2656 int ret; 2657 2658 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2659 dev = &rte_eth_devices[port_id]; 2660 2661 if (peer_ports == NULL) { 2662 RTE_ETHDEV_LOG(ERR, 2663 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2664 port_id); 2665 return -EINVAL; 2666 } 2667 2668 if (len == 0) { 2669 RTE_ETHDEV_LOG(ERR, 2670 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2671 port_id); 2672 return -EINVAL; 2673 } 2674 2675 if (*dev->dev_ops->hairpin_get_peer_ports == NULL) 2676 return -ENOTSUP; 2677 2678 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2679 len, direction); 2680 if (ret < 0) 2681 RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2682 port_id, direction ? "Rx" : "Tx"); 2683 2684 rte_eth_trace_hairpin_get_peer_ports(port_id, peer_ports, len, 2685 direction, ret); 2686 2687 return ret; 2688 } 2689 2690 void 2691 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2692 void *userdata __rte_unused) 2693 { 2694 rte_pktmbuf_free_bulk(pkts, unsent); 2695 2696 rte_eth_trace_tx_buffer_drop_callback((void **)pkts, unsent); 2697 } 2698 2699 void 2700 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2701 void *userdata) 2702 { 2703 uint64_t *count = userdata; 2704 2705 rte_pktmbuf_free_bulk(pkts, unsent); 2706 *count += unsent; 2707 2708 rte_eth_trace_tx_buffer_count_callback((void **)pkts, unsent, *count); 2709 } 2710 2711 int 2712 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2713 buffer_tx_error_fn cbfn, void *userdata) 2714 { 2715 if (buffer == NULL) { 2716 RTE_ETHDEV_LOG(ERR, 2717 "Cannot set Tx buffer error callback to NULL buffer\n"); 2718 return -EINVAL; 2719 } 2720 2721 buffer->error_callback = cbfn; 2722 buffer->error_userdata = userdata; 2723 2724 rte_eth_trace_tx_buffer_set_err_callback(buffer); 2725 2726 return 0; 2727 } 2728 2729 int 2730 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2731 { 2732 int ret = 0; 2733 2734 if (buffer == NULL) { 2735 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2736 return -EINVAL; 2737 } 2738 2739 buffer->size = size; 2740 if (buffer->error_callback == NULL) { 2741 ret = rte_eth_tx_buffer_set_err_callback( 2742 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2743 } 2744 2745 rte_eth_trace_tx_buffer_init(buffer, size, ret); 2746 2747 return ret; 2748 } 2749 2750 int 2751 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2752 { 2753 struct rte_eth_dev *dev; 2754 int ret; 2755 2756 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2757 dev = &rte_eth_devices[port_id]; 2758 2759 if (*dev->dev_ops->tx_done_cleanup == NULL) 2760 return -ENOTSUP; 2761 2762 /* Call driver to free pending mbufs. 
*/ 2763 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2764 free_cnt); 2765 ret = eth_err(port_id, ret); 2766 2767 rte_eth_trace_tx_done_cleanup(port_id, queue_id, free_cnt, ret); 2768 2769 return ret; 2770 } 2771 2772 int 2773 rte_eth_promiscuous_enable(uint16_t port_id) 2774 { 2775 struct rte_eth_dev *dev; 2776 int diag = 0; 2777 2778 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2779 dev = &rte_eth_devices[port_id]; 2780 2781 if (dev->data->promiscuous == 1) 2782 return 0; 2783 2784 if (*dev->dev_ops->promiscuous_enable == NULL) 2785 return -ENOTSUP; 2786 2787 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2788 dev->data->promiscuous = (diag == 0) ? 1 : 0; 2789 2790 diag = eth_err(port_id, diag); 2791 2792 rte_eth_trace_promiscuous_enable(port_id, dev->data->promiscuous, 2793 diag); 2794 2795 return diag; 2796 } 2797 2798 int 2799 rte_eth_promiscuous_disable(uint16_t port_id) 2800 { 2801 struct rte_eth_dev *dev; 2802 int diag = 0; 2803 2804 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2805 dev = &rte_eth_devices[port_id]; 2806 2807 if (dev->data->promiscuous == 0) 2808 return 0; 2809 2810 if (*dev->dev_ops->promiscuous_disable == NULL) 2811 return -ENOTSUP; 2812 2813 dev->data->promiscuous = 0; 2814 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2815 if (diag != 0) 2816 dev->data->promiscuous = 1; 2817 2818 diag = eth_err(port_id, diag); 2819 2820 rte_eth_trace_promiscuous_disable(port_id, dev->data->promiscuous, 2821 diag); 2822 2823 return diag; 2824 } 2825 2826 int 2827 rte_eth_promiscuous_get(uint16_t port_id) 2828 { 2829 struct rte_eth_dev *dev; 2830 2831 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2832 dev = &rte_eth_devices[port_id]; 2833 2834 rte_eth_trace_promiscuous_get(port_id, dev->data->promiscuous); 2835 2836 return dev->data->promiscuous; 2837 } 2838 2839 int 2840 rte_eth_allmulticast_enable(uint16_t port_id) 2841 { 2842 struct rte_eth_dev *dev; 2843 int diag; 2844 2845 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2846 dev = &rte_eth_devices[port_id]; 2847 2848 if (dev->data->all_multicast == 1) 2849 return 0; 2850 2851 if (*dev->dev_ops->allmulticast_enable == NULL) 2852 return -ENOTSUP; 2853 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2854 dev->data->all_multicast = (diag == 0) ? 
1 : 0; 2855 2856 diag = eth_err(port_id, diag); 2857 2858 rte_eth_trace_allmulticast_enable(port_id, dev->data->all_multicast, 2859 diag); 2860 2861 return diag; 2862 } 2863 2864 int 2865 rte_eth_allmulticast_disable(uint16_t port_id) 2866 { 2867 struct rte_eth_dev *dev; 2868 int diag; 2869 2870 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2871 dev = &rte_eth_devices[port_id]; 2872 2873 if (dev->data->all_multicast == 0) 2874 return 0; 2875 2876 if (*dev->dev_ops->allmulticast_disable == NULL) 2877 return -ENOTSUP; 2878 dev->data->all_multicast = 0; 2879 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2880 if (diag != 0) 2881 dev->data->all_multicast = 1; 2882 2883 diag = eth_err(port_id, diag); 2884 2885 rte_eth_trace_allmulticast_disable(port_id, dev->data->all_multicast, 2886 diag); 2887 2888 return diag; 2889 } 2890 2891 int 2892 rte_eth_allmulticast_get(uint16_t port_id) 2893 { 2894 struct rte_eth_dev *dev; 2895 2896 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2897 dev = &rte_eth_devices[port_id]; 2898 2899 rte_eth_trace_allmulticast_get(port_id, dev->data->all_multicast); 2900 2901 return dev->data->all_multicast; 2902 } 2903 2904 int 2905 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2906 { 2907 struct rte_eth_dev *dev; 2908 2909 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2910 dev = &rte_eth_devices[port_id]; 2911 2912 if (eth_link == NULL) { 2913 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2914 port_id); 2915 return -EINVAL; 2916 } 2917 2918 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2919 rte_eth_linkstatus_get(dev, eth_link); 2920 else { 2921 if (*dev->dev_ops->link_update == NULL) 2922 return -ENOTSUP; 2923 (*dev->dev_ops->link_update)(dev, 1); 2924 *eth_link = dev->data->dev_link; 2925 } 2926 2927 rte_eth_trace_link_get(port_id, eth_link); 2928 2929 return 0; 2930 } 2931 2932 int 2933 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2934 { 2935 struct rte_eth_dev *dev; 2936 2937 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2938 dev = &rte_eth_devices[port_id]; 2939 2940 if (eth_link == NULL) { 2941 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2942 port_id); 2943 return -EINVAL; 2944 } 2945 2946 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2947 rte_eth_linkstatus_get(dev, eth_link); 2948 else { 2949 if (*dev->dev_ops->link_update == NULL) 2950 return -ENOTSUP; 2951 (*dev->dev_ops->link_update)(dev, 0); 2952 *eth_link = dev->data->dev_link; 2953 } 2954 2955 rte_eth_trace_link_get_nowait(port_id, eth_link); 2956 2957 return 0; 2958 } 2959 2960 const char * 2961 rte_eth_link_speed_to_str(uint32_t link_speed) 2962 { 2963 const char *ret; 2964 2965 switch (link_speed) { 2966 case RTE_ETH_SPEED_NUM_NONE: 2967 ret = "None"; 2968 break; 2969 case RTE_ETH_SPEED_NUM_10M: 2970 ret = "10 Mbps"; 2971 break; 2972 case RTE_ETH_SPEED_NUM_100M: 2973 ret = "100 Mbps"; 2974 break; 2975 case RTE_ETH_SPEED_NUM_1G: 2976 ret = "1 Gbps"; 2977 break; 2978 case RTE_ETH_SPEED_NUM_2_5G: 2979 ret = "2.5 Gbps"; 2980 break; 2981 case RTE_ETH_SPEED_NUM_5G: 2982 ret = "5 Gbps"; 2983 break; 2984 case RTE_ETH_SPEED_NUM_10G: 2985 ret = "10 Gbps"; 2986 break; 2987 case RTE_ETH_SPEED_NUM_20G: 2988 ret = "20 Gbps"; 2989 break; 2990 case RTE_ETH_SPEED_NUM_25G: 2991 ret = "25 Gbps"; 2992 break; 2993 case RTE_ETH_SPEED_NUM_40G: 2994 ret = "40 Gbps"; 2995 break; 2996 case RTE_ETH_SPEED_NUM_50G: 2997 ret = "50 Gbps"; 2998 break; 2999 case RTE_ETH_SPEED_NUM_56G: 3000 ret 
= "56 Gbps"; 3001 break; 3002 case RTE_ETH_SPEED_NUM_100G: 3003 ret = "100 Gbps"; 3004 break; 3005 case RTE_ETH_SPEED_NUM_200G: 3006 ret = "200 Gbps"; 3007 break; 3008 case RTE_ETH_SPEED_NUM_400G: 3009 ret = "400 Gbps"; 3010 break; 3011 case RTE_ETH_SPEED_NUM_UNKNOWN: 3012 ret = "Unknown"; 3013 break; 3014 default: 3015 ret = "Invalid"; 3016 } 3017 3018 rte_eth_trace_link_speed_to_str(link_speed, ret); 3019 3020 return ret; 3021 } 3022 3023 int 3024 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 3025 { 3026 int ret; 3027 3028 if (str == NULL) { 3029 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 3030 return -EINVAL; 3031 } 3032 3033 if (len == 0) { 3034 RTE_ETHDEV_LOG(ERR, 3035 "Cannot convert link to string with zero size\n"); 3036 return -EINVAL; 3037 } 3038 3039 if (eth_link == NULL) { 3040 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 3041 return -EINVAL; 3042 } 3043 3044 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 3045 ret = snprintf(str, len, "Link down"); 3046 else 3047 ret = snprintf(str, len, "Link up at %s %s %s", 3048 rte_eth_link_speed_to_str(eth_link->link_speed), 3049 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 3050 "FDX" : "HDX", 3051 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 3052 "Autoneg" : "Fixed"); 3053 3054 rte_eth_trace_link_to_str(len, eth_link, str, ret); 3055 3056 return ret; 3057 } 3058 3059 int 3060 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 3061 { 3062 struct rte_eth_dev *dev; 3063 int ret; 3064 3065 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3066 dev = &rte_eth_devices[port_id]; 3067 3068 if (stats == NULL) { 3069 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 3070 port_id); 3071 return -EINVAL; 3072 } 3073 3074 memset(stats, 0, sizeof(*stats)); 3075 3076 if (*dev->dev_ops->stats_get == NULL) 3077 return -ENOTSUP; 3078 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 3079 ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 3080 3081 rte_eth_trace_stats_get(port_id, stats, ret); 3082 3083 return ret; 3084 } 3085 3086 int 3087 rte_eth_stats_reset(uint16_t port_id) 3088 { 3089 struct rte_eth_dev *dev; 3090 int ret; 3091 3092 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3093 dev = &rte_eth_devices[port_id]; 3094 3095 if (*dev->dev_ops->stats_reset == NULL) 3096 return -ENOTSUP; 3097 ret = (*dev->dev_ops->stats_reset)(dev); 3098 if (ret != 0) 3099 return eth_err(port_id, ret); 3100 3101 dev->data->rx_mbuf_alloc_failed = 0; 3102 3103 rte_eth_trace_stats_reset(port_id); 3104 3105 return 0; 3106 } 3107 3108 static inline int 3109 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 3110 { 3111 uint16_t nb_rxqs, nb_txqs; 3112 int count; 3113 3114 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3115 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3116 3117 count = RTE_NB_STATS; 3118 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 3119 count += nb_rxqs * RTE_NB_RXQ_STATS; 3120 count += nb_txqs * RTE_NB_TXQ_STATS; 3121 } 3122 3123 return count; 3124 } 3125 3126 static int 3127 eth_dev_get_xstats_count(uint16_t port_id) 3128 { 3129 struct rte_eth_dev *dev; 3130 int count; 3131 3132 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3133 dev = &rte_eth_devices[port_id]; 3134 if (dev->dev_ops->xstats_get_names != NULL) { 3135 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 3136 if (count < 0) 3137 return eth_err(port_id, count); 3138 } else 
3139 count = 0; 3140 3141 3142 count += eth_dev_get_xstats_basic_count(dev); 3143 3144 return count; 3145 } 3146 3147 int 3148 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 3149 uint64_t *id) 3150 { 3151 int cnt_xstats, idx_xstat; 3152 3153 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3154 3155 if (xstat_name == NULL) { 3156 RTE_ETHDEV_LOG(ERR, 3157 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 3158 port_id); 3159 return -ENOMEM; 3160 } 3161 3162 if (id == NULL) { 3163 RTE_ETHDEV_LOG(ERR, 3164 "Cannot get ethdev port %u xstats ID to NULL\n", 3165 port_id); 3166 return -ENOMEM; 3167 } 3168 3169 /* Get count */ 3170 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 3171 if (cnt_xstats < 0) { 3172 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 3173 return -ENODEV; 3174 } 3175 3176 /* Get id-name lookup table */ 3177 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 3178 3179 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 3180 port_id, xstats_names, cnt_xstats, NULL)) { 3181 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 3182 return -1; 3183 } 3184 3185 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 3186 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 3187 *id = idx_xstat; 3188 3189 rte_eth_trace_xstats_get_id_by_name(port_id, 3190 xstat_name, *id); 3191 3192 return 0; 3193 }; 3194 } 3195 3196 return -EINVAL; 3197 } 3198 3199 /* retrieve basic stats names */ 3200 static int 3201 eth_basic_stats_get_names(struct rte_eth_dev *dev, 3202 struct rte_eth_xstat_name *xstats_names) 3203 { 3204 int cnt_used_entries = 0; 3205 uint32_t idx, id_queue; 3206 uint16_t num_q; 3207 3208 for (idx = 0; idx < RTE_NB_STATS; idx++) { 3209 strlcpy(xstats_names[cnt_used_entries].name, 3210 eth_dev_stats_strings[idx].name, 3211 sizeof(xstats_names[0].name)); 3212 cnt_used_entries++; 3213 } 3214 3215 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3216 return cnt_used_entries; 3217 3218 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3219 for (id_queue = 0; id_queue < num_q; id_queue++) { 3220 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 3221 snprintf(xstats_names[cnt_used_entries].name, 3222 sizeof(xstats_names[0].name), 3223 "rx_q%u_%s", 3224 id_queue, eth_dev_rxq_stats_strings[idx].name); 3225 cnt_used_entries++; 3226 } 3227 3228 } 3229 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3230 for (id_queue = 0; id_queue < num_q; id_queue++) { 3231 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 3232 snprintf(xstats_names[cnt_used_entries].name, 3233 sizeof(xstats_names[0].name), 3234 "tx_q%u_%s", 3235 id_queue, eth_dev_txq_stats_strings[idx].name); 3236 cnt_used_entries++; 3237 } 3238 } 3239 return cnt_used_entries; 3240 } 3241 3242 /* retrieve ethdev extended statistics names */ 3243 int 3244 rte_eth_xstats_get_names_by_id(uint16_t port_id, 3245 struct rte_eth_xstat_name *xstats_names, unsigned int size, 3246 uint64_t *ids) 3247 { 3248 struct rte_eth_xstat_name *xstats_names_copy; 3249 unsigned int no_basic_stat_requested = 1; 3250 unsigned int no_ext_stat_requested = 1; 3251 unsigned int expected_entries; 3252 unsigned int basic_count; 3253 struct rte_eth_dev *dev; 3254 unsigned int i; 3255 int ret; 3256 3257 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3258 dev = &rte_eth_devices[port_id]; 3259 3260 basic_count = eth_dev_get_xstats_basic_count(dev); 3261 ret = eth_dev_get_xstats_count(port_id); 3262 if (ret < 0) 3263 return ret; 3264 
expected_entries = (unsigned int)ret; 3265 3266 /* Return max number of stats if no ids given */ 3267 if (!ids) { 3268 if (!xstats_names) 3269 return expected_entries; 3270 else if (xstats_names && size < expected_entries) 3271 return expected_entries; 3272 } 3273 3274 if (ids && !xstats_names) 3275 return -EINVAL; 3276 3277 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 3278 uint64_t ids_copy[size]; 3279 3280 for (i = 0; i < size; i++) { 3281 if (ids[i] < basic_count) { 3282 no_basic_stat_requested = 0; 3283 break; 3284 } 3285 3286 /* 3287 * Convert ids to xstats ids that PMD knows. 3288 * ids known by user are basic + extended stats. 3289 */ 3290 ids_copy[i] = ids[i] - basic_count; 3291 } 3292 3293 if (no_basic_stat_requested) 3294 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 3295 ids_copy, xstats_names, size); 3296 } 3297 3298 /* Retrieve all stats */ 3299 if (!ids) { 3300 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 3301 expected_entries); 3302 if (num_stats < 0 || num_stats > (int)expected_entries) 3303 return num_stats; 3304 else 3305 return expected_entries; 3306 } 3307 3308 xstats_names_copy = calloc(expected_entries, 3309 sizeof(struct rte_eth_xstat_name)); 3310 3311 if (!xstats_names_copy) { 3312 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 3313 return -ENOMEM; 3314 } 3315 3316 if (ids) { 3317 for (i = 0; i < size; i++) { 3318 if (ids[i] >= basic_count) { 3319 no_ext_stat_requested = 0; 3320 break; 3321 } 3322 } 3323 } 3324 3325 /* Fill xstats_names_copy structure */ 3326 if (ids && no_ext_stat_requested) { 3327 eth_basic_stats_get_names(dev, xstats_names_copy); 3328 } else { 3329 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 3330 expected_entries); 3331 if (ret < 0) { 3332 free(xstats_names_copy); 3333 return ret; 3334 } 3335 } 3336 3337 /* Filter stats */ 3338 for (i = 0; i < size; i++) { 3339 if (ids[i] >= expected_entries) { 3340 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3341 free(xstats_names_copy); 3342 return -1; 3343 } 3344 xstats_names[i] = xstats_names_copy[ids[i]]; 3345 3346 rte_eth_trace_xstats_get_names_by_id(port_id, &xstats_names[i], 3347 ids[i]); 3348 } 3349 3350 free(xstats_names_copy); 3351 return size; 3352 } 3353 3354 int 3355 rte_eth_xstats_get_names(uint16_t port_id, 3356 struct rte_eth_xstat_name *xstats_names, 3357 unsigned int size) 3358 { 3359 struct rte_eth_dev *dev; 3360 int cnt_used_entries; 3361 int cnt_expected_entries; 3362 int cnt_driver_entries; 3363 int i; 3364 3365 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 3366 if (xstats_names == NULL || cnt_expected_entries < 0 || 3367 (int)size < cnt_expected_entries) 3368 return cnt_expected_entries; 3369 3370 /* port_id checked in eth_dev_get_xstats_count() */ 3371 dev = &rte_eth_devices[port_id]; 3372 3373 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 3374 3375 if (dev->dev_ops->xstats_get_names != NULL) { 3376 /* If there are any driver-specific xstats, append them 3377 * to end of list. 
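 *
 * The combined name list built here is normally consumed with a
 * two-pass query; a caller-side sketch (port 0 and the unchecked
 * calloc() calls are illustrative only):
 *
 *     int n = rte_eth_xstats_get_names(0, NULL, 0);
 *     struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *     struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
 *
 *     rte_eth_xstats_get_names(0, names, n);
 *     rte_eth_xstats_get(0, vals, n);
 *     // vals[i].id indexes names[]; vals[i].value is the counter.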
3378 */ 3379 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 3380 dev, 3381 xstats_names + cnt_used_entries, 3382 size - cnt_used_entries); 3383 if (cnt_driver_entries < 0) 3384 return eth_err(port_id, cnt_driver_entries); 3385 cnt_used_entries += cnt_driver_entries; 3386 } 3387 3388 for (i = 0; i < cnt_used_entries; i++) 3389 rte_eth_trace_xstats_get_names(port_id, i, &xstats_names[i], 3390 size, cnt_used_entries); 3391 3392 return cnt_used_entries; 3393 } 3394 3395 3396 static int 3397 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 3398 { 3399 struct rte_eth_dev *dev; 3400 struct rte_eth_stats eth_stats; 3401 unsigned int count = 0, i, q; 3402 uint64_t val, *stats_ptr; 3403 uint16_t nb_rxqs, nb_txqs; 3404 int ret; 3405 3406 ret = rte_eth_stats_get(port_id, &eth_stats); 3407 if (ret < 0) 3408 return ret; 3409 3410 dev = &rte_eth_devices[port_id]; 3411 3412 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3413 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 3414 3415 /* global stats */ 3416 for (i = 0; i < RTE_NB_STATS; i++) { 3417 stats_ptr = RTE_PTR_ADD(&eth_stats, 3418 eth_dev_stats_strings[i].offset); 3419 val = *stats_ptr; 3420 xstats[count++].value = val; 3421 } 3422 3423 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 3424 return count; 3425 3426 /* per-rxq stats */ 3427 for (q = 0; q < nb_rxqs; q++) { 3428 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 3429 stats_ptr = RTE_PTR_ADD(&eth_stats, 3430 eth_dev_rxq_stats_strings[i].offset + 3431 q * sizeof(uint64_t)); 3432 val = *stats_ptr; 3433 xstats[count++].value = val; 3434 } 3435 } 3436 3437 /* per-txq stats */ 3438 for (q = 0; q < nb_txqs; q++) { 3439 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 3440 stats_ptr = RTE_PTR_ADD(&eth_stats, 3441 eth_dev_txq_stats_strings[i].offset + 3442 q * sizeof(uint64_t)); 3443 val = *stats_ptr; 3444 xstats[count++].value = val; 3445 } 3446 } 3447 return count; 3448 } 3449 3450 /* retrieve ethdev extended statistics */ 3451 int 3452 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 3453 uint64_t *values, unsigned int size) 3454 { 3455 unsigned int no_basic_stat_requested = 1; 3456 unsigned int no_ext_stat_requested = 1; 3457 unsigned int num_xstats_filled; 3458 unsigned int basic_count; 3459 uint16_t expected_entries; 3460 struct rte_eth_dev *dev; 3461 unsigned int i; 3462 int ret; 3463 3464 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3465 dev = &rte_eth_devices[port_id]; 3466 3467 ret = eth_dev_get_xstats_count(port_id); 3468 if (ret < 0) 3469 return ret; 3470 expected_entries = (uint16_t)ret; 3471 struct rte_eth_xstat xstats[expected_entries]; 3472 basic_count = eth_dev_get_xstats_basic_count(dev); 3473 3474 /* Return max number of stats if no ids given */ 3475 if (!ids) { 3476 if (!values) 3477 return expected_entries; 3478 else if (values && size < expected_entries) 3479 return expected_entries; 3480 } 3481 3482 if (ids && !values) 3483 return -EINVAL; 3484 3485 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 3486 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); 3487 uint64_t ids_copy[size]; 3488 3489 for (i = 0; i < size; i++) { 3490 if (ids[i] < basic_count) { 3491 no_basic_stat_requested = 0; 3492 break; 3493 } 3494 3495 /* 3496 * Convert ids to xstats ids that PMD knows. 3497 * ids known by user are basic + extended stats.
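 *
 * For example (numbers purely illustrative): with basic_count == 10, a
 * caller-visible id of 13 refers to the driver's own stat index
 * 13 - 10 = 3, which is the value ids_copy[] passes to the PMD below.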
3498 */ 3499 ids_copy[i] = ids[i] - basic_count; 3500 } 3501 3502 if (no_basic_stat_requested) 3503 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 3504 values, size); 3505 } 3506 3507 if (ids) { 3508 for (i = 0; i < size; i++) { 3509 if (ids[i] >= basic_count) { 3510 no_ext_stat_requested = 0; 3511 break; 3512 } 3513 } 3514 } 3515 3516 /* Fill the xstats structure */ 3517 if (ids && no_ext_stat_requested) 3518 ret = eth_basic_stats_get(port_id, xstats); 3519 else 3520 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 3521 3522 if (ret < 0) 3523 return ret; 3524 num_xstats_filled = (unsigned int)ret; 3525 3526 /* Return all stats */ 3527 if (!ids) { 3528 for (i = 0; i < num_xstats_filled; i++) 3529 values[i] = xstats[i].value; 3530 return expected_entries; 3531 } 3532 3533 /* Filter stats */ 3534 for (i = 0; i < size; i++) { 3535 if (ids[i] >= expected_entries) { 3536 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 3537 return -1; 3538 } 3539 values[i] = xstats[ids[i]].value; 3540 } 3541 3542 rte_eth_trace_xstats_get_by_id(port_id, ids, values, size); 3543 3544 return size; 3545 } 3546 3547 int 3548 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 3549 unsigned int n) 3550 { 3551 struct rte_eth_dev *dev; 3552 unsigned int count, i; 3553 signed int xcount = 0; 3554 int ret; 3555 3556 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3557 if (xstats == NULL && n > 0) 3558 return -EINVAL; 3559 dev = &rte_eth_devices[port_id]; 3560 3561 count = eth_dev_get_xstats_basic_count(dev); 3562 3563 /* implemented by the driver */ 3564 if (dev->dev_ops->xstats_get != NULL) { 3565 /* Retrieve the xstats from the driver at the end of the 3566 * xstats struct. 3567 */ 3568 xcount = (*dev->dev_ops->xstats_get)(dev, 3569 (n > count) ? xstats + count : NULL, 3570 (n > count) ? 
n - count : 0); 3571 3572 if (xcount < 0) 3573 return eth_err(port_id, xcount); 3574 } 3575 3576 if (n < count + xcount || xstats == NULL) 3577 return count + xcount; 3578 3579 /* now fill the xstats structure */ 3580 ret = eth_basic_stats_get(port_id, xstats); 3581 if (ret < 0) 3582 return ret; 3583 count = ret; 3584 3585 for (i = 0; i < count; i++) 3586 xstats[i].id = i; 3587 /* add an offset to driver-specific stats */ 3588 for ( ; i < count + xcount; i++) 3589 xstats[i].id += count; 3590 3591 for (i = 0; i < n; i++) 3592 rte_eth_trace_xstats_get(port_id, xstats[i]); 3593 3594 return count + xcount; 3595 } 3596 3597 /* reset ethdev extended statistics */ 3598 int 3599 rte_eth_xstats_reset(uint16_t port_id) 3600 { 3601 struct rte_eth_dev *dev; 3602 3603 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3604 dev = &rte_eth_devices[port_id]; 3605 3606 /* implemented by the driver */ 3607 if (dev->dev_ops->xstats_reset != NULL) { 3608 int ret = eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3609 3610 rte_eth_trace_xstats_reset(port_id, ret); 3611 3612 return ret; 3613 } 3614 3615 /* fallback to default */ 3616 return rte_eth_stats_reset(port_id); 3617 } 3618 3619 static int 3620 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3621 uint8_t stat_idx, uint8_t is_rx) 3622 { 3623 struct rte_eth_dev *dev; 3624 3625 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3626 dev = &rte_eth_devices[port_id]; 3627 3628 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3629 return -EINVAL; 3630 3631 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3632 return -EINVAL; 3633 3634 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3635 return -EINVAL; 3636 3637 if (*dev->dev_ops->queue_stats_mapping_set == NULL) 3638 return -ENOTSUP; 3639 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3640 } 3641 3642 int 3643 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3644 uint8_t stat_idx) 3645 { 3646 int ret; 3647 3648 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3649 tx_queue_id, 3650 stat_idx, STAT_QMAP_TX)); 3651 3652 rte_ethdev_trace_set_tx_queue_stats_mapping(port_id, tx_queue_id, 3653 stat_idx, ret); 3654 3655 return ret; 3656 } 3657 3658 int 3659 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3660 uint8_t stat_idx) 3661 { 3662 int ret; 3663 3664 ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3665 rx_queue_id, 3666 stat_idx, STAT_QMAP_RX)); 3667 3668 rte_ethdev_trace_set_rx_queue_stats_mapping(port_id, rx_queue_id, 3669 stat_idx, ret); 3670 3671 return ret; 3672 } 3673 3674 int 3675 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3676 { 3677 struct rte_eth_dev *dev; 3678 int ret; 3679 3680 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3681 dev = &rte_eth_devices[port_id]; 3682 3683 if (fw_version == NULL && fw_size > 0) { 3684 RTE_ETHDEV_LOG(ERR, 3685 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3686 port_id); 3687 return -EINVAL; 3688 } 3689 3690 if (*dev->dev_ops->fw_version_get == NULL) 3691 return -ENOTSUP; 3692 ret = eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3693 fw_version, fw_size)); 3694 3695 rte_ethdev_trace_fw_version_get(port_id, fw_version, fw_size, ret); 3696 3697 return ret; 3698 } 3699 3700 int 3701 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3702 { 3703 struct rte_eth_dev *dev; 3704 const struct rte_eth_desc_lim lim = { 3705 
.nb_max = UINT16_MAX, 3706 .nb_min = 0, 3707 .nb_align = 1, 3708 .nb_seg_max = UINT16_MAX, 3709 .nb_mtu_seg_max = UINT16_MAX, 3710 }; 3711 int diag; 3712 3713 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3714 dev = &rte_eth_devices[port_id]; 3715 3716 if (dev_info == NULL) { 3717 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3718 port_id); 3719 return -EINVAL; 3720 } 3721 3722 /* 3723 * Init dev_info before port_id check since caller does not have 3724 * return status and does not know if get is successful or not. 3725 */ 3726 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3727 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3728 3729 dev_info->rx_desc_lim = lim; 3730 dev_info->tx_desc_lim = lim; 3731 dev_info->device = dev->device; 3732 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3733 RTE_ETHER_CRC_LEN; 3734 dev_info->max_mtu = UINT16_MAX; 3735 3736 if (*dev->dev_ops->dev_infos_get == NULL) 3737 return -ENOTSUP; 3738 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3739 if (diag != 0) { 3740 /* Cleanup already filled in device information */ 3741 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3742 return eth_err(port_id, diag); 3743 } 3744 3745 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3746 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3747 RTE_MAX_QUEUES_PER_PORT); 3748 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3749 RTE_MAX_QUEUES_PER_PORT); 3750 3751 dev_info->driver_name = dev->device->driver->name; 3752 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3753 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3754 3755 dev_info->dev_flags = &dev->data->dev_flags; 3756 3757 rte_ethdev_trace_info_get(port_id, dev_info); 3758 3759 return 0; 3760 } 3761 3762 int 3763 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3764 { 3765 struct rte_eth_dev *dev; 3766 3767 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3768 dev = &rte_eth_devices[port_id]; 3769 3770 if (dev_conf == NULL) { 3771 RTE_ETHDEV_LOG(ERR, 3772 "Cannot get ethdev port %u configuration to NULL\n", 3773 port_id); 3774 return -EINVAL; 3775 } 3776 3777 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3778 3779 rte_ethdev_trace_conf_get(port_id, dev_conf); 3780 3781 return 0; 3782 } 3783 3784 int 3785 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3786 uint32_t *ptypes, int num) 3787 { 3788 int i, j; 3789 struct rte_eth_dev *dev; 3790 const uint32_t *all_ptypes; 3791 3792 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3793 dev = &rte_eth_devices[port_id]; 3794 3795 if (ptypes == NULL && num > 0) { 3796 RTE_ETHDEV_LOG(ERR, 3797 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3798 port_id); 3799 return -EINVAL; 3800 } 3801 3802 if (*dev->dev_ops->dev_supported_ptypes_get == NULL) 3803 return 0; 3804 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3805 3806 if (!all_ptypes) 3807 return 0; 3808 3809 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3810 if (all_ptypes[i] & ptype_mask) { 3811 if (j < num) { 3812 ptypes[j] = all_ptypes[i]; 3813 3814 rte_ethdev_trace_get_supported_ptypes(port_id, 3815 j, num, ptypes[j]); 3816 } 3817 j++; 3818 } 3819 3820 return j; 3821 } 3822 3823 int 3824 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3825 uint32_t *set_ptypes, unsigned int num) 3826 { 3827 const uint32_t valid_ptype_masks[] = { 3828 RTE_PTYPE_L2_MASK, 
3829 RTE_PTYPE_L3_MASK, 3830 RTE_PTYPE_L4_MASK, 3831 RTE_PTYPE_TUNNEL_MASK, 3832 RTE_PTYPE_INNER_L2_MASK, 3833 RTE_PTYPE_INNER_L3_MASK, 3834 RTE_PTYPE_INNER_L4_MASK, 3835 }; 3836 const uint32_t *all_ptypes; 3837 struct rte_eth_dev *dev; 3838 uint32_t unused_mask; 3839 unsigned int i, j; 3840 int ret; 3841 3842 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3843 dev = &rte_eth_devices[port_id]; 3844 3845 if (num > 0 && set_ptypes == NULL) { 3846 RTE_ETHDEV_LOG(ERR, 3847 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3848 port_id); 3849 return -EINVAL; 3850 } 3851 3852 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3853 *dev->dev_ops->dev_ptypes_set == NULL) { 3854 ret = 0; 3855 goto ptype_unknown; 3856 } 3857 3858 if (ptype_mask == 0) { 3859 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3860 ptype_mask); 3861 goto ptype_unknown; 3862 } 3863 3864 unused_mask = ptype_mask; 3865 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3866 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3867 if (mask && mask != valid_ptype_masks[i]) { 3868 ret = -EINVAL; 3869 goto ptype_unknown; 3870 } 3871 unused_mask &= ~valid_ptype_masks[i]; 3872 } 3873 3874 if (unused_mask) { 3875 ret = -EINVAL; 3876 goto ptype_unknown; 3877 } 3878 3879 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3880 if (all_ptypes == NULL) { 3881 ret = 0; 3882 goto ptype_unknown; 3883 } 3884 3885 /* 3886 * Accommodate as many set_ptypes as possible. If the supplied 3887 * set_ptypes array is insufficient fill it partially. 3888 */ 3889 for (i = 0, j = 0; set_ptypes != NULL && 3890 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3891 if (ptype_mask & all_ptypes[i]) { 3892 if (j < num - 1) { 3893 set_ptypes[j] = all_ptypes[i]; 3894 3895 rte_ethdev_trace_set_ptypes(port_id, j, num, 3896 set_ptypes[j]); 3897 3898 j++; 3899 continue; 3900 } 3901 break; 3902 } 3903 } 3904 3905 if (set_ptypes != NULL && j < num) 3906 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3907 3908 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3909 3910 ptype_unknown: 3911 if (num > 0) 3912 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3913 3914 return ret; 3915 } 3916 3917 int 3918 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3919 unsigned int num) 3920 { 3921 int32_t ret; 3922 struct rte_eth_dev *dev; 3923 struct rte_eth_dev_info dev_info; 3924 3925 if (ma == NULL) { 3926 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3927 return -EINVAL; 3928 } 3929 3930 /* will check for us that port_id is a valid one */ 3931 ret = rte_eth_dev_info_get(port_id, &dev_info); 3932 if (ret != 0) 3933 return ret; 3934 3935 dev = &rte_eth_devices[port_id]; 3936 num = RTE_MIN(dev_info.max_mac_addrs, num); 3937 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3938 3939 rte_eth_trace_macaddrs_get(port_id, num); 3940 3941 return num; 3942 } 3943 3944 int 3945 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3946 { 3947 struct rte_eth_dev *dev; 3948 3949 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3950 dev = &rte_eth_devices[port_id]; 3951 3952 if (mac_addr == NULL) { 3953 RTE_ETHDEV_LOG(ERR, 3954 "Cannot get ethdev port %u MAC address to NULL\n", 3955 port_id); 3956 return -EINVAL; 3957 } 3958 3959 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3960 3961 rte_eth_trace_macaddr_get(port_id, mac_addr); 3962 3963 return 0; 3964 } 3965 3966 int 3967 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3968 { 3969 struct rte_eth_dev *dev; 3970 3971 
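/*
 * A minimal caller-side sketch for the MTU accessors, assuming port 0
 * and a 9000-byte target (values illustrative, error handling omitted;
 * rte_eth_dev_set_mtu() additionally requires the port to be configured
 * and the driver to accept the value):
 *
 *     uint16_t mtu;
 *
 *     rte_eth_dev_get_mtu(0, &mtu);
 *     if (mtu < 9000)
 *             rte_eth_dev_set_mtu(0, 9000);
 */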
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3972 dev = &rte_eth_devices[port_id]; 3973 3974 if (mtu == NULL) { 3975 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3976 port_id); 3977 return -EINVAL; 3978 } 3979 3980 *mtu = dev->data->mtu; 3981 3982 rte_ethdev_trace_get_mtu(port_id, *mtu); 3983 3984 return 0; 3985 } 3986 3987 int 3988 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3989 { 3990 int ret; 3991 struct rte_eth_dev_info dev_info; 3992 struct rte_eth_dev *dev; 3993 3994 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3995 dev = &rte_eth_devices[port_id]; 3996 if (*dev->dev_ops->mtu_set == NULL) 3997 return -ENOTSUP; 3998 3999 /* 4000 * Check if the device supports dev_infos_get, if it does not 4001 * skip min_mtu/max_mtu validation here as this requires values 4002 * that are populated within the call to rte_eth_dev_info_get() 4003 * which relies on dev->dev_ops->dev_infos_get. 4004 */ 4005 if (*dev->dev_ops->dev_infos_get != NULL) { 4006 ret = rte_eth_dev_info_get(port_id, &dev_info); 4007 if (ret != 0) 4008 return ret; 4009 4010 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 4011 if (ret != 0) 4012 return ret; 4013 } 4014 4015 if (dev->data->dev_configured == 0) { 4016 RTE_ETHDEV_LOG(ERR, 4017 "Port %u must be configured before MTU set\n", 4018 port_id); 4019 return -EINVAL; 4020 } 4021 4022 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 4023 if (ret == 0) 4024 dev->data->mtu = mtu; 4025 4026 ret = eth_err(port_id, ret); 4027 4028 rte_ethdev_trace_set_mtu(port_id, mtu, ret); 4029 4030 return ret; 4031 } 4032 4033 int 4034 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 4035 { 4036 struct rte_eth_dev *dev; 4037 int ret; 4038 4039 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4040 dev = &rte_eth_devices[port_id]; 4041 4042 if (!(dev->data->dev_conf.rxmode.offloads & 4043 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 4044 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 4045 port_id); 4046 return -ENOSYS; 4047 } 4048 4049 if (vlan_id > 4095) { 4050 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 4051 port_id, vlan_id); 4052 return -EINVAL; 4053 } 4054 if (*dev->dev_ops->vlan_filter_set == NULL) 4055 return -ENOTSUP; 4056 4057 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 4058 if (ret == 0) { 4059 struct rte_vlan_filter_conf *vfc; 4060 int vidx; 4061 int vbit; 4062 4063 vfc = &dev->data->vlan_filter_conf; 4064 vidx = vlan_id / 64; 4065 vbit = vlan_id % 64; 4066 4067 if (on) 4068 vfc->ids[vidx] |= RTE_BIT64(vbit); 4069 else 4070 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 4071 } 4072 4073 ret = eth_err(port_id, ret); 4074 4075 rte_ethdev_trace_vlan_filter(port_id, vlan_id, on, ret); 4076 4077 return ret; 4078 } 4079 4080 int 4081 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 4082 int on) 4083 { 4084 struct rte_eth_dev *dev; 4085 4086 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4087 dev = &rte_eth_devices[port_id]; 4088 4089 if (rx_queue_id >= dev->data->nb_rx_queues) { 4090 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 4091 return -EINVAL; 4092 } 4093 4094 if (*dev->dev_ops->vlan_strip_queue_set == NULL) 4095 return -ENOTSUP; 4096 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 4097 4098 rte_ethdev_trace_set_vlan_strip_on_queue(port_id, rx_queue_id, on); 4099 4100 return 0; 4101 } 4102 4103 int 4104 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 4105 enum rte_vlan_type vlan_type, 4106 uint16_t tpid) 4107 { 4108 struct rte_eth_dev *dev; 4109 int 
ret; 4110 4111 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4112 dev = &rte_eth_devices[port_id]; 4113 4114 if (*dev->dev_ops->vlan_tpid_set == NULL) 4115 return -ENOTSUP; 4116 ret = eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 4117 tpid)); 4118 4119 rte_ethdev_trace_set_vlan_ether_type(port_id, vlan_type, tpid, ret); 4120 4121 return ret; 4122 } 4123 4124 int 4125 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 4126 { 4127 struct rte_eth_dev_info dev_info; 4128 struct rte_eth_dev *dev; 4129 int ret = 0; 4130 int mask = 0; 4131 int cur, org = 0; 4132 uint64_t orig_offloads; 4133 uint64_t dev_offloads; 4134 uint64_t new_offloads; 4135 4136 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4137 dev = &rte_eth_devices[port_id]; 4138 4139 /* save original values in case of failure */ 4140 orig_offloads = dev->data->dev_conf.rxmode.offloads; 4141 dev_offloads = orig_offloads; 4142 4143 /* check which option changed by application */ 4144 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 4145 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 4146 if (cur != org) { 4147 if (cur) 4148 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4149 else 4150 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 4151 mask |= RTE_ETH_VLAN_STRIP_MASK; 4152 } 4153 4154 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 4155 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 4156 if (cur != org) { 4157 if (cur) 4158 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4159 else 4160 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 4161 mask |= RTE_ETH_VLAN_FILTER_MASK; 4162 } 4163 4164 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 4165 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 4166 if (cur != org) { 4167 if (cur) 4168 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4169 else 4170 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 4171 mask |= RTE_ETH_VLAN_EXTEND_MASK; 4172 } 4173 4174 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 4175 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 4176 if (cur != org) { 4177 if (cur) 4178 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4179 else 4180 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 4181 mask |= RTE_ETH_QINQ_STRIP_MASK; 4182 } 4183 4184 /*no change*/ 4185 if (mask == 0) 4186 return ret; 4187 4188 ret = rte_eth_dev_info_get(port_id, &dev_info); 4189 if (ret != 0) 4190 return ret; 4191 4192 /* Rx VLAN offloading must be within its device capabilities */ 4193 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 4194 new_offloads = dev_offloads & ~orig_offloads; 4195 RTE_ETHDEV_LOG(ERR, 4196 "Ethdev port_id=%u requested new added VLAN offloads " 4197 "0x%" PRIx64 " must be within Rx offloads capabilities " 4198 "0x%" PRIx64 " in %s()\n", 4199 port_id, new_offloads, dev_info.rx_offload_capa, 4200 __func__); 4201 return -EINVAL; 4202 } 4203 4204 if (*dev->dev_ops->vlan_offload_set == NULL) 4205 return -ENOTSUP; 4206 dev->data->dev_conf.rxmode.offloads = dev_offloads; 4207 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 4208 if (ret) { 4209 /* hit an error restore original values */ 4210 dev->data->dev_conf.rxmode.offloads = orig_offloads; 4211 } 4212 4213 ret = eth_err(port_id, ret); 4214 4215 rte_ethdev_trace_set_vlan_offload(port_id, offload_mask, ret); 4216 4217 return ret; 4218 } 4219 4220 int 4221 rte_eth_dev_get_vlan_offload(uint16_t port_id) 4222 { 4223 struct rte_eth_dev *dev; 4224 uint64_t *dev_offloads; 4225 int ret = 0; 4226 4227 
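/*
 * The value returned below is a bitmask of RTE_ETH_VLAN_STRIP_OFFLOAD,
 * RTE_ETH_VLAN_FILTER_OFFLOAD, RTE_ETH_VLAN_EXTEND_OFFLOAD and
 * RTE_ETH_QINQ_STRIP_OFFLOAD (or -ENODEV for an unknown port). A
 * caller-side sketch, with port 0 as an assumed example:
 *
 *     int mask = rte_eth_dev_get_vlan_offload(0);
 *
 *     if (mask >= 0 && (mask & RTE_ETH_VLAN_STRIP_OFFLOAD) != 0)
 *             ; // VLAN stripping is currently enabled
 */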
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4228 dev = &rte_eth_devices[port_id]; 4229 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 4230 4231 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 4232 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 4233 4234 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 4235 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 4236 4237 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 4238 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 4239 4240 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 4241 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 4242 4243 rte_ethdev_trace_get_vlan_offload(port_id, ret); 4244 4245 return ret; 4246 } 4247 4248 int 4249 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 4250 { 4251 struct rte_eth_dev *dev; 4252 int ret; 4253 4254 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4255 dev = &rte_eth_devices[port_id]; 4256 4257 if (*dev->dev_ops->vlan_pvid_set == NULL) 4258 return -ENOTSUP; 4259 ret = eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 4260 4261 rte_ethdev_trace_set_vlan_pvid(port_id, pvid, on, ret); 4262 4263 return ret; 4264 } 4265 4266 int 4267 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4268 { 4269 struct rte_eth_dev *dev; 4270 int ret; 4271 4272 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4273 dev = &rte_eth_devices[port_id]; 4274 4275 if (fc_conf == NULL) { 4276 RTE_ETHDEV_LOG(ERR, 4277 "Cannot get ethdev port %u flow control config to NULL\n", 4278 port_id); 4279 return -EINVAL; 4280 } 4281 4282 if (*dev->dev_ops->flow_ctrl_get == NULL) 4283 return -ENOTSUP; 4284 memset(fc_conf, 0, sizeof(*fc_conf)); 4285 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 4286 4287 rte_ethdev_trace_flow_ctrl_get(port_id, fc_conf, ret); 4288 4289 return ret; 4290 } 4291 4292 int 4293 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 4294 { 4295 struct rte_eth_dev *dev; 4296 int ret; 4297 4298 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4299 dev = &rte_eth_devices[port_id]; 4300 4301 if (fc_conf == NULL) { 4302 RTE_ETHDEV_LOG(ERR, 4303 "Cannot set ethdev port %u flow control from NULL config\n", 4304 port_id); 4305 return -EINVAL; 4306 } 4307 4308 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 4309 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 4310 return -EINVAL; 4311 } 4312 4313 if (*dev->dev_ops->flow_ctrl_set == NULL) 4314 return -ENOTSUP; 4315 ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 4316 4317 rte_ethdev_trace_flow_ctrl_set(port_id, fc_conf, ret); 4318 4319 return ret; 4320 } 4321 4322 int 4323 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 4324 struct rte_eth_pfc_conf *pfc_conf) 4325 { 4326 struct rte_eth_dev *dev; 4327 int ret; 4328 4329 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4330 dev = &rte_eth_devices[port_id]; 4331 4332 if (pfc_conf == NULL) { 4333 RTE_ETHDEV_LOG(ERR, 4334 "Cannot set ethdev port %u priority flow control from NULL config\n", 4335 port_id); 4336 return -EINVAL; 4337 } 4338 4339 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 4340 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 4341 return -EINVAL; 4342 } 4343 4344 /* High water, low water validation are device specific */ 4345 if (*dev->dev_ops->priority_flow_ctrl_set == NULL) 4346 return -ENOTSUP; 4347 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 4348 (dev, pfc_conf)); 4349 4350 rte_ethdev_trace_priority_flow_ctrl_set(port_id, 
pfc_conf, ret); 4351 4352 return ret; 4353 } 4354 4355 static int 4356 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4357 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4358 { 4359 if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) || 4360 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4361 if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) { 4362 RTE_ETHDEV_LOG(ERR, 4363 "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n", 4364 pfc_queue_conf->rx_pause.tx_qid, 4365 dev_info->nb_tx_queues); 4366 return -EINVAL; 4367 } 4368 4369 if (pfc_queue_conf->rx_pause.tc >= tc_max) { 4370 RTE_ETHDEV_LOG(ERR, 4371 "PFC TC not in range for Rx pause requested:%d max:%d\n", 4372 pfc_queue_conf->rx_pause.tc, tc_max); 4373 return -EINVAL; 4374 } 4375 } 4376 4377 return 0; 4378 } 4379 4380 static int 4381 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 4382 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4383 { 4384 if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) || 4385 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 4386 if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) { 4387 RTE_ETHDEV_LOG(ERR, 4388 "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n", 4389 pfc_queue_conf->tx_pause.rx_qid, 4390 dev_info->nb_rx_queues); 4391 return -EINVAL; 4392 } 4393 4394 if (pfc_queue_conf->tx_pause.tc >= tc_max) { 4395 RTE_ETHDEV_LOG(ERR, 4396 "PFC TC not in range for Tx pause requested:%d max:%d\n", 4397 pfc_queue_conf->tx_pause.tc, tc_max); 4398 return -EINVAL; 4399 } 4400 } 4401 4402 return 0; 4403 } 4404 4405 int 4406 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, 4407 struct rte_eth_pfc_queue_info *pfc_queue_info) 4408 { 4409 struct rte_eth_dev *dev; 4410 int ret; 4411 4412 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4413 dev = &rte_eth_devices[port_id]; 4414 4415 if (pfc_queue_info == NULL) { 4416 RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n", 4417 port_id); 4418 return -EINVAL; 4419 } 4420 4421 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL) 4422 return -ENOTSUP; 4423 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 4424 (dev, pfc_queue_info)); 4425 4426 rte_ethdev_trace_priority_flow_ctrl_queue_info_get(port_id, 4427 pfc_queue_info, ret); 4428 4429 return ret; 4430 } 4431 4432 int 4433 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, 4434 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 4435 { 4436 struct rte_eth_pfc_queue_info pfc_info; 4437 struct rte_eth_dev_info dev_info; 4438 struct rte_eth_dev *dev; 4439 int ret; 4440 4441 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4442 dev = &rte_eth_devices[port_id]; 4443 4444 if (pfc_queue_conf == NULL) { 4445 RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n", 4446 port_id); 4447 return -EINVAL; 4448 } 4449 4450 ret = rte_eth_dev_info_get(port_id, &dev_info); 4451 if (ret != 0) 4452 return ret; 4453 4454 ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info); 4455 if (ret != 0) 4456 return ret; 4457 4458 if (pfc_info.tc_max == 0) { 4459 RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n", 4460 port_id); 4461 return -ENOTSUP; 4462 } 4463 4464 /* Check requested mode supported or not */ 4465 if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE && 4466 pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) { 4467 RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n", 4468 port_id); 4469 return -EINVAL; 4470 } 4471 4472 if 
(pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE && 4473 pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) { 4474 RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n", 4475 port_id); 4476 return -EINVAL; 4477 } 4478 4479 /* Validate Rx pause parameters */ 4480 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4481 pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) { 4482 ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max, 4483 pfc_queue_conf); 4484 if (ret != 0) 4485 return ret; 4486 } 4487 4488 /* Validate Tx pause parameters */ 4489 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 4490 pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) { 4491 ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max, 4492 pfc_queue_conf); 4493 if (ret != 0) 4494 return ret; 4495 } 4496 4497 if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL) 4498 return -ENOTSUP; 4499 ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_config) 4500 (dev, pfc_queue_conf)); 4501 4502 rte_ethdev_trace_priority_flow_ctrl_queue_configure(port_id, 4503 pfc_queue_conf, ret); 4504 4505 return ret; 4506 } 4507 4508 static int 4509 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 4510 uint16_t reta_size) 4511 { 4512 uint16_t i, num; 4513 4514 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 4515 for (i = 0; i < num; i++) { 4516 if (reta_conf[i].mask) 4517 return 0; 4518 } 4519 4520 return -EINVAL; 4521 } 4522 4523 static int 4524 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 4525 uint16_t reta_size, 4526 uint16_t max_rxq) 4527 { 4528 uint16_t i, idx, shift; 4529 4530 if (max_rxq == 0) { 4531 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 4532 return -EINVAL; 4533 } 4534 4535 for (i = 0; i < reta_size; i++) { 4536 idx = i / RTE_ETH_RETA_GROUP_SIZE; 4537 shift = i % RTE_ETH_RETA_GROUP_SIZE; 4538 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 4539 (reta_conf[idx].reta[shift] >= max_rxq)) { 4540 RTE_ETHDEV_LOG(ERR, 4541 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 4542 idx, shift, 4543 reta_conf[idx].reta[shift], max_rxq); 4544 return -EINVAL; 4545 } 4546 } 4547 4548 return 0; 4549 } 4550 4551 int 4552 rte_eth_dev_rss_reta_update(uint16_t port_id, 4553 struct rte_eth_rss_reta_entry64 *reta_conf, 4554 uint16_t reta_size) 4555 { 4556 enum rte_eth_rx_mq_mode mq_mode; 4557 struct rte_eth_dev *dev; 4558 int ret; 4559 4560 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4561 dev = &rte_eth_devices[port_id]; 4562 4563 if (reta_conf == NULL) { 4564 RTE_ETHDEV_LOG(ERR, 4565 "Cannot update ethdev port %u RSS RETA to NULL\n", 4566 port_id); 4567 return -EINVAL; 4568 } 4569 4570 if (reta_size == 0) { 4571 RTE_ETHDEV_LOG(ERR, 4572 "Cannot update ethdev port %u RSS RETA with zero size\n", 4573 port_id); 4574 return -EINVAL; 4575 } 4576 4577 /* Check mask bits */ 4578 ret = eth_check_reta_mask(reta_conf, reta_size); 4579 if (ret < 0) 4580 return ret; 4581 4582 /* Check entry value */ 4583 ret = eth_check_reta_entry(reta_conf, reta_size, 4584 dev->data->nb_rx_queues); 4585 if (ret < 0) 4586 return ret; 4587 4588 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4589 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4590 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4591 return -ENOTSUP; 4592 } 4593 4594 if (*dev->dev_ops->reta_update == NULL) 4595 return -ENOTSUP; 4596 ret = eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 4597 reta_size)); 4598 4599 rte_ethdev_trace_rss_reta_update(port_id, reta_conf, reta_size, ret); 4600 4601 return 
ret; 4602 } 4603 4604 int 4605 rte_eth_dev_rss_reta_query(uint16_t port_id, 4606 struct rte_eth_rss_reta_entry64 *reta_conf, 4607 uint16_t reta_size) 4608 { 4609 struct rte_eth_dev *dev; 4610 int ret; 4611 4612 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4613 dev = &rte_eth_devices[port_id]; 4614 4615 if (reta_conf == NULL) { 4616 RTE_ETHDEV_LOG(ERR, 4617 "Cannot query ethdev port %u RSS RETA from NULL config\n", 4618 port_id); 4619 return -EINVAL; 4620 } 4621 4622 /* Check mask bits */ 4623 ret = eth_check_reta_mask(reta_conf, reta_size); 4624 if (ret < 0) 4625 return ret; 4626 4627 if (*dev->dev_ops->reta_query == NULL) 4628 return -ENOTSUP; 4629 ret = eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 4630 reta_size)); 4631 4632 rte_ethdev_trace_rss_reta_query(port_id, reta_conf, reta_size, ret); 4633 4634 return ret; 4635 } 4636 4637 int 4638 rte_eth_dev_rss_hash_update(uint16_t port_id, 4639 struct rte_eth_rss_conf *rss_conf) 4640 { 4641 struct rte_eth_dev *dev; 4642 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 4643 enum rte_eth_rx_mq_mode mq_mode; 4644 int ret; 4645 4646 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4647 dev = &rte_eth_devices[port_id]; 4648 4649 if (rss_conf == NULL) { 4650 RTE_ETHDEV_LOG(ERR, 4651 "Cannot update ethdev port %u RSS hash from NULL config\n", 4652 port_id); 4653 return -EINVAL; 4654 } 4655 4656 ret = rte_eth_dev_info_get(port_id, &dev_info); 4657 if (ret != 0) 4658 return ret; 4659 4660 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 4661 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 4662 dev_info.flow_type_rss_offloads) { 4663 RTE_ETHDEV_LOG(ERR, 4664 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 4665 port_id, rss_conf->rss_hf, 4666 dev_info.flow_type_rss_offloads); 4667 return -EINVAL; 4668 } 4669 4670 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 4671 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 4672 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 4673 return -ENOTSUP; 4674 } 4675 4676 if (*dev->dev_ops->rss_hash_update == NULL) 4677 return -ENOTSUP; 4678 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 4679 rss_conf)); 4680 4681 rte_ethdev_trace_rss_hash_update(port_id, rss_conf, ret); 4682 4683 return ret; 4684 } 4685 4686 int 4687 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 4688 struct rte_eth_rss_conf *rss_conf) 4689 { 4690 struct rte_eth_dev *dev; 4691 int ret; 4692 4693 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4694 dev = &rte_eth_devices[port_id]; 4695 4696 if (rss_conf == NULL) { 4697 RTE_ETHDEV_LOG(ERR, 4698 "Cannot get ethdev port %u RSS hash config to NULL\n", 4699 port_id); 4700 return -EINVAL; 4701 } 4702 4703 if (*dev->dev_ops->rss_hash_conf_get == NULL) 4704 return -ENOTSUP; 4705 ret = eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4706 rss_conf)); 4707 4708 rte_ethdev_trace_rss_hash_conf_get(port_id, rss_conf, ret); 4709 4710 return ret; 4711 } 4712 4713 int 4714 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4715 struct rte_eth_udp_tunnel *udp_tunnel) 4716 { 4717 struct rte_eth_dev *dev; 4718 int ret; 4719 4720 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4721 dev = &rte_eth_devices[port_id]; 4722 4723 if (udp_tunnel == NULL) { 4724 RTE_ETHDEV_LOG(ERR, 4725 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4726 port_id); 4727 return -EINVAL; 4728 } 4729 4730 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4731 RTE_ETHDEV_LOG(ERR, "Invalid tunnel 
type\n"); 4732 return -EINVAL; 4733 } 4734 4735 if (*dev->dev_ops->udp_tunnel_port_add == NULL) 4736 return -ENOTSUP; 4737 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4738 udp_tunnel)); 4739 4740 rte_ethdev_trace_udp_tunnel_port_add(port_id, udp_tunnel, ret); 4741 4742 return ret; 4743 } 4744 4745 int 4746 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4747 struct rte_eth_udp_tunnel *udp_tunnel) 4748 { 4749 struct rte_eth_dev *dev; 4750 int ret; 4751 4752 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4753 dev = &rte_eth_devices[port_id]; 4754 4755 if (udp_tunnel == NULL) { 4756 RTE_ETHDEV_LOG(ERR, 4757 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4758 port_id); 4759 return -EINVAL; 4760 } 4761 4762 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4763 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4764 return -EINVAL; 4765 } 4766 4767 if (*dev->dev_ops->udp_tunnel_port_del == NULL) 4768 return -ENOTSUP; 4769 ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4770 udp_tunnel)); 4771 4772 rte_ethdev_trace_udp_tunnel_port_delete(port_id, udp_tunnel, ret); 4773 4774 return ret; 4775 } 4776 4777 int 4778 rte_eth_led_on(uint16_t port_id) 4779 { 4780 struct rte_eth_dev *dev; 4781 int ret; 4782 4783 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4784 dev = &rte_eth_devices[port_id]; 4785 4786 if (*dev->dev_ops->dev_led_on == NULL) 4787 return -ENOTSUP; 4788 ret = eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4789 4790 rte_eth_trace_led_on(port_id, ret); 4791 4792 return ret; 4793 } 4794 4795 int 4796 rte_eth_led_off(uint16_t port_id) 4797 { 4798 struct rte_eth_dev *dev; 4799 int ret; 4800 4801 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4802 dev = &rte_eth_devices[port_id]; 4803 4804 if (*dev->dev_ops->dev_led_off == NULL) 4805 return -ENOTSUP; 4806 ret = eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4807 4808 rte_eth_trace_led_off(port_id, ret); 4809 4810 return ret; 4811 } 4812 4813 int 4814 rte_eth_fec_get_capability(uint16_t port_id, 4815 struct rte_eth_fec_capa *speed_fec_capa, 4816 unsigned int num) 4817 { 4818 struct rte_eth_dev *dev; 4819 int ret; 4820 4821 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4822 dev = &rte_eth_devices[port_id]; 4823 4824 if (speed_fec_capa == NULL && num > 0) { 4825 RTE_ETHDEV_LOG(ERR, 4826 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4827 port_id); 4828 return -EINVAL; 4829 } 4830 4831 if (*dev->dev_ops->fec_get_capability == NULL) 4832 return -ENOTSUP; 4833 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4834 4835 rte_eth_trace_fec_get_capability(port_id, speed_fec_capa, num, ret); 4836 4837 return ret; 4838 } 4839 4840 int 4841 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4842 { 4843 struct rte_eth_dev *dev; 4844 int ret; 4845 4846 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4847 dev = &rte_eth_devices[port_id]; 4848 4849 if (fec_capa == NULL) { 4850 RTE_ETHDEV_LOG(ERR, 4851 "Cannot get ethdev port %u current FEC mode to NULL\n", 4852 port_id); 4853 return -EINVAL; 4854 } 4855 4856 if (*dev->dev_ops->fec_get == NULL) 4857 return -ENOTSUP; 4858 ret = eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4859 4860 rte_eth_trace_fec_get(port_id, fec_capa, ret); 4861 4862 return ret; 4863 } 4864 4865 int 4866 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4867 { 4868 struct rte_eth_dev *dev; 4869 int ret; 4870 4871 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4872 dev 
= &rte_eth_devices[port_id]; 4873 4874 if (fec_capa == 0) { 4875 RTE_ETHDEV_LOG(ERR, "At least one FEC mode should be specified\n"); 4876 return -EINVAL; 4877 } 4878 4879 if (*dev->dev_ops->fec_set == NULL) 4880 return -ENOTSUP; 4881 ret = eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4882 4883 rte_eth_trace_fec_set(port_id, fec_capa, ret); 4884 4885 return ret; 4886 } 4887 4888 /* 4889 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4890 * an empty spot. 4891 */ 4892 static int 4893 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4894 { 4895 struct rte_eth_dev_info dev_info; 4896 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4897 unsigned i; 4898 int ret; 4899 4900 ret = rte_eth_dev_info_get(port_id, &dev_info); 4901 if (ret != 0) 4902 return -1; 4903 4904 for (i = 0; i < dev_info.max_mac_addrs; i++) 4905 if (memcmp(addr, &dev->data->mac_addrs[i], 4906 RTE_ETHER_ADDR_LEN) == 0) 4907 return i; 4908 4909 return -1; 4910 } 4911 4912 static const struct rte_ether_addr null_mac_addr; 4913 4914 int 4915 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4916 uint32_t pool) 4917 { 4918 struct rte_eth_dev *dev; 4919 int index; 4920 uint64_t pool_mask; 4921 int ret; 4922 4923 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4924 dev = &rte_eth_devices[port_id]; 4925 4926 if (addr == NULL) { 4927 RTE_ETHDEV_LOG(ERR, 4928 "Cannot add ethdev port %u MAC address from NULL address\n", 4929 port_id); 4930 return -EINVAL; 4931 } 4932 4933 if (*dev->dev_ops->mac_addr_add == NULL) 4934 return -ENOTSUP; 4935 4936 if (rte_is_zero_ether_addr(addr)) { 4937 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4938 port_id); 4939 return -EINVAL; 4940 } 4941 if (pool >= RTE_ETH_64_POOLS) { 4942 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 4943 return -EINVAL; 4944 } 4945 4946 index = eth_dev_get_mac_addr_index(port_id, addr); 4947 if (index < 0) { 4948 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4949 if (index < 0) { 4950 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4951 port_id); 4952 return -ENOSPC; 4953 } 4954 } else { 4955 pool_mask = dev->data->mac_pool_sel[index]; 4956 4957 /* Check if both MAC address and pool is already there, and do nothing */ 4958 if (pool_mask & RTE_BIT64(pool)) 4959 return 0; 4960 } 4961 4962 /* Update NIC */ 4963 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4964 4965 if (ret == 0) { 4966 /* Update address in NIC data structure */ 4967 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4968 4969 /* Update pool bitmap in NIC data structure */ 4970 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4971 } 4972 4973 ret = eth_err(port_id, ret); 4974 4975 rte_ethdev_trace_mac_addr_add(port_id, addr, pool, ret); 4976 4977 return ret; 4978 } 4979 4980 int 4981 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4982 { 4983 struct rte_eth_dev *dev; 4984 int index; 4985 4986 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4987 dev = &rte_eth_devices[port_id]; 4988 4989 if (addr == NULL) { 4990 RTE_ETHDEV_LOG(ERR, 4991 "Cannot remove ethdev port %u MAC address from NULL address\n", 4992 port_id); 4993 return -EINVAL; 4994 } 4995 4996 if (*dev->dev_ops->mac_addr_remove == NULL) 4997 return -ENOTSUP; 4998 4999 index = eth_dev_get_mac_addr_index(port_id, addr); 5000 if (index == 0) { 5001 RTE_ETHDEV_LOG(ERR, 5002 "Port %u: Cannot remove default MAC address\n", 5003 port_id); 5004 
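/*
 * Note on this error path (illustrative sketch): slot 0 of
 * dev->data->mac_addrs always holds the port's default MAC address, so it
 * can only be replaced, never removed. A caller that hits -EADDRINUSE here
 * would normally switch to rte_eth_dev_default_mac_addr_set() instead,
 * e.g. with a made-up locally administered address:
 *
 *	struct rte_ether_addr new_mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *
 *	if (rte_eth_dev_default_mac_addr_set(port_id, &new_mac) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot replace default MAC address\n");
 */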
return -EADDRINUSE; 5005 } else if (index < 0) 5006 return 0; /* Do nothing if address wasn't found */ 5007 5008 /* Update NIC */ 5009 (*dev->dev_ops->mac_addr_remove)(dev, index); 5010 5011 /* Update address in NIC data structure */ 5012 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 5013 5014 /* reset pool bitmap */ 5015 dev->data->mac_pool_sel[index] = 0; 5016 5017 rte_ethdev_trace_mac_addr_remove(port_id, addr); 5018 5019 return 0; 5020 } 5021 5022 int 5023 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 5024 { 5025 struct rte_eth_dev *dev; 5026 int index; 5027 int ret; 5028 5029 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5030 dev = &rte_eth_devices[port_id]; 5031 5032 if (addr == NULL) { 5033 RTE_ETHDEV_LOG(ERR, 5034 "Cannot set ethdev port %u default MAC address from NULL address\n", 5035 port_id); 5036 return -EINVAL; 5037 } 5038 5039 if (!rte_is_valid_assigned_ether_addr(addr)) 5040 return -EINVAL; 5041 5042 if (*dev->dev_ops->mac_addr_set == NULL) 5043 return -ENOTSUP; 5044 5045 /* Keep address unique in dev->data->mac_addrs[]. */ 5046 index = eth_dev_get_mac_addr_index(port_id, addr); 5047 if (index > 0) { 5048 RTE_ETHDEV_LOG(ERR, 5049 "New default address for port %u was already in the address list. Please remove it first.\n", 5050 port_id); 5051 return -EEXIST; 5052 } 5053 5054 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 5055 if (ret < 0) 5056 return ret; 5057 5058 /* Update default address in NIC data structure */ 5059 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 5060 5061 rte_ethdev_trace_default_mac_addr_set(port_id, addr); 5062 5063 return 0; 5064 } 5065 5066 5067 /* 5068 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 5069 * an empty spot. 
5070 */ 5071 static int 5072 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 5073 const struct rte_ether_addr *addr) 5074 { 5075 struct rte_eth_dev_info dev_info; 5076 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5077 unsigned i; 5078 int ret; 5079 5080 ret = rte_eth_dev_info_get(port_id, &dev_info); 5081 if (ret != 0) 5082 return -1; 5083 5084 if (!dev->data->hash_mac_addrs) 5085 return -1; 5086 5087 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 5088 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 5089 RTE_ETHER_ADDR_LEN) == 0) 5090 return i; 5091 5092 return -1; 5093 } 5094 5095 int 5096 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 5097 uint8_t on) 5098 { 5099 int index; 5100 int ret; 5101 struct rte_eth_dev *dev; 5102 5103 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5104 dev = &rte_eth_devices[port_id]; 5105 5106 if (addr == NULL) { 5107 RTE_ETHDEV_LOG(ERR, 5108 "Cannot set ethdev port %u unicast hash table from NULL address\n", 5109 port_id); 5110 return -EINVAL; 5111 } 5112 5113 if (rte_is_zero_ether_addr(addr)) { 5114 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 5115 port_id); 5116 return -EINVAL; 5117 } 5118 5119 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 5120 /* Check if it's already there, and do nothing */ 5121 if ((index >= 0) && on) 5122 return 0; 5123 5124 if (index < 0) { 5125 if (!on) { 5126 RTE_ETHDEV_LOG(ERR, 5127 "Port %u: the MAC address was not set in UTA\n", 5128 port_id); 5129 return -EINVAL; 5130 } 5131 5132 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 5133 if (index < 0) { 5134 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 5135 port_id); 5136 return -ENOSPC; 5137 } 5138 } 5139 5140 if (*dev->dev_ops->uc_hash_table_set == NULL) 5141 return -ENOTSUP; 5142 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 5143 if (ret == 0) { 5144 /* Update address in NIC data structure */ 5145 if (on) 5146 rte_ether_addr_copy(addr, 5147 &dev->data->hash_mac_addrs[index]); 5148 else 5149 rte_ether_addr_copy(&null_mac_addr, 5150 &dev->data->hash_mac_addrs[index]); 5151 } 5152 5153 ret = eth_err(port_id, ret); 5154 5155 rte_ethdev_trace_uc_hash_table_set(port_id, on, ret); 5156 5157 return ret; 5158 } 5159 5160 int 5161 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 5162 { 5163 struct rte_eth_dev *dev; 5164 int ret; 5165 5166 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5167 dev = &rte_eth_devices[port_id]; 5168 5169 if (*dev->dev_ops->uc_all_hash_table_set == NULL) 5170 return -ENOTSUP; 5171 ret = eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, on)); 5172 5173 rte_ethdev_trace_uc_all_hash_table_set(port_id, on, ret); 5174 5175 return ret; 5176 } 5177 5178 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 5179 uint32_t tx_rate) 5180 { 5181 struct rte_eth_dev *dev; 5182 struct rte_eth_dev_info dev_info; 5183 struct rte_eth_link link; 5184 int ret; 5185 5186 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5187 dev = &rte_eth_devices[port_id]; 5188 5189 ret = rte_eth_dev_info_get(port_id, &dev_info); 5190 if (ret != 0) 5191 return ret; 5192 5193 link = dev->data->dev_link; 5194 5195 if (queue_idx > dev_info.max_tx_queues) { 5196 RTE_ETHDEV_LOG(ERR, 5197 "Set queue rate limit:port %u: invalid queue ID=%u\n", 5198 port_id, queue_idx); 5199 return -EINVAL; 5200 } 5201 5202 if (tx_rate > link.link_speed) { 5203 RTE_ETHDEV_LOG(ERR, 5204 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 5205 
tx_rate, link.link_speed); 5206 return -EINVAL; 5207 } 5208 5209 if (*dev->dev_ops->set_queue_rate_limit == NULL) 5210 return -ENOTSUP; 5211 ret = eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 5212 queue_idx, tx_rate)); 5213 5214 rte_eth_trace_set_queue_rate_limit(port_id, queue_idx, tx_rate, ret); 5215 5216 return ret; 5217 } 5218 5219 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, 5220 uint8_t avail_thresh) 5221 { 5222 struct rte_eth_dev *dev; 5223 int ret; 5224 5225 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5226 dev = &rte_eth_devices[port_id]; 5227 5228 if (queue_id > dev->data->nb_rx_queues) { 5229 RTE_ETHDEV_LOG(ERR, 5230 "Set queue avail thresh: port %u: invalid queue ID=%u.\n", 5231 port_id, queue_id); 5232 return -EINVAL; 5233 } 5234 5235 if (avail_thresh > 99) { 5236 RTE_ETHDEV_LOG(ERR, 5237 "Set queue avail thresh: port %u: threshold should be <= 99.\n", 5238 port_id); 5239 return -EINVAL; 5240 } 5241 if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL) 5242 return -ENOTSUP; 5243 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev, 5244 queue_id, avail_thresh)); 5245 5246 rte_eth_trace_rx_avail_thresh_set(port_id, queue_id, avail_thresh, ret); 5247 5248 return ret; 5249 } 5250 5251 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, 5252 uint8_t *avail_thresh) 5253 { 5254 struct rte_eth_dev *dev; 5255 int ret; 5256 5257 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5258 dev = &rte_eth_devices[port_id]; 5259 5260 if (queue_id == NULL) 5261 return -EINVAL; 5262 if (*queue_id >= dev->data->nb_rx_queues) 5263 *queue_id = 0; 5264 5265 if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL) 5266 return -ENOTSUP; 5267 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev, 5268 queue_id, avail_thresh)); 5269 5270 rte_eth_trace_rx_avail_thresh_query(port_id, *queue_id, ret); 5271 5272 return ret; 5273 } 5274 5275 RTE_INIT(eth_dev_init_fp_ops) 5276 { 5277 uint32_t i; 5278 5279 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 5280 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 5281 } 5282 5283 RTE_INIT(eth_dev_init_cb_lists) 5284 { 5285 uint16_t i; 5286 5287 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 5288 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 5289 } 5290 5291 int 5292 rte_eth_dev_callback_register(uint16_t port_id, 5293 enum rte_eth_event_type event, 5294 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5295 { 5296 struct rte_eth_dev *dev; 5297 struct rte_eth_dev_callback *user_cb; 5298 uint16_t next_port; 5299 uint16_t last_port; 5300 5301 if (cb_fn == NULL) { 5302 RTE_ETHDEV_LOG(ERR, 5303 "Cannot register ethdev port %u callback from NULL\n", 5304 port_id); 5305 return -EINVAL; 5306 } 5307 5308 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5309 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 5310 return -EINVAL; 5311 } 5312 5313 if (port_id == RTE_ETH_ALL) { 5314 next_port = 0; 5315 last_port = RTE_MAX_ETHPORTS - 1; 5316 } else { 5317 next_port = last_port = port_id; 5318 } 5319 5320 rte_spinlock_lock(&eth_dev_cb_lock); 5321 5322 do { 5323 dev = &rte_eth_devices[next_port]; 5324 5325 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 5326 if (user_cb->cb_fn == cb_fn && 5327 user_cb->cb_arg == cb_arg && 5328 user_cb->event == event) { 5329 break; 5330 } 5331 } 5332 5333 /* create a new callback. 
*/ 5334 if (user_cb == NULL) { 5335 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 5336 sizeof(struct rte_eth_dev_callback), 0); 5337 if (user_cb != NULL) { 5338 user_cb->cb_fn = cb_fn; 5339 user_cb->cb_arg = cb_arg; 5340 user_cb->event = event; 5341 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 5342 user_cb, next); 5343 } else { 5344 rte_spinlock_unlock(&eth_dev_cb_lock); 5345 rte_eth_dev_callback_unregister(port_id, event, 5346 cb_fn, cb_arg); 5347 return -ENOMEM; 5348 } 5349 5350 } 5351 } while (++next_port <= last_port); 5352 5353 rte_spinlock_unlock(&eth_dev_cb_lock); 5354 5355 rte_ethdev_trace_callback_register(port_id, event, cb_fn, cb_arg); 5356 5357 return 0; 5358 } 5359 5360 int 5361 rte_eth_dev_callback_unregister(uint16_t port_id, 5362 enum rte_eth_event_type event, 5363 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 5364 { 5365 int ret; 5366 struct rte_eth_dev *dev; 5367 struct rte_eth_dev_callback *cb, *next; 5368 uint16_t next_port; 5369 uint16_t last_port; 5370 5371 if (cb_fn == NULL) { 5372 RTE_ETHDEV_LOG(ERR, 5373 "Cannot unregister ethdev port %u callback from NULL\n", 5374 port_id); 5375 return -EINVAL; 5376 } 5377 5378 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 5379 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 5380 return -EINVAL; 5381 } 5382 5383 if (port_id == RTE_ETH_ALL) { 5384 next_port = 0; 5385 last_port = RTE_MAX_ETHPORTS - 1; 5386 } else { 5387 next_port = last_port = port_id; 5388 } 5389 5390 rte_spinlock_lock(&eth_dev_cb_lock); 5391 5392 do { 5393 dev = &rte_eth_devices[next_port]; 5394 ret = 0; 5395 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 5396 cb = next) { 5397 5398 next = TAILQ_NEXT(cb, next); 5399 5400 if (cb->cb_fn != cb_fn || cb->event != event || 5401 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 5402 continue; 5403 5404 /* 5405 * if this callback is not executing right now, 5406 * then remove it. 
5407 */ 5408 if (cb->active == 0) { 5409 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 5410 rte_free(cb); 5411 } else { 5412 ret = -EAGAIN; 5413 } 5414 } 5415 } while (++next_port <= last_port); 5416 5417 rte_spinlock_unlock(&eth_dev_cb_lock); 5418 5419 rte_ethdev_trace_callback_unregister(port_id, event, cb_fn, cb_arg, 5420 ret); 5421 5422 return ret; 5423 } 5424 5425 int 5426 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 5427 { 5428 uint32_t vec; 5429 struct rte_eth_dev *dev; 5430 struct rte_intr_handle *intr_handle; 5431 uint16_t qid; 5432 int rc; 5433 5434 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5435 dev = &rte_eth_devices[port_id]; 5436 5437 if (!dev->intr_handle) { 5438 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5439 return -ENOTSUP; 5440 } 5441 5442 intr_handle = dev->intr_handle; 5443 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5444 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5445 return -EPERM; 5446 } 5447 5448 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 5449 vec = rte_intr_vec_list_index_get(intr_handle, qid); 5450 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5451 5452 rte_ethdev_trace_rx_intr_ctl(port_id, qid, epfd, op, data, rc); 5453 5454 if (rc && rc != -EEXIST) { 5455 RTE_ETHDEV_LOG(ERR, 5456 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5457 port_id, qid, op, epfd, vec); 5458 } 5459 } 5460 5461 return 0; 5462 } 5463 5464 int 5465 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 5466 { 5467 struct rte_intr_handle *intr_handle; 5468 struct rte_eth_dev *dev; 5469 unsigned int efd_idx; 5470 uint32_t vec; 5471 int fd; 5472 5473 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 5474 dev = &rte_eth_devices[port_id]; 5475 5476 if (queue_id >= dev->data->nb_rx_queues) { 5477 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5478 return -1; 5479 } 5480 5481 if (!dev->intr_handle) { 5482 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5483 return -1; 5484 } 5485 5486 intr_handle = dev->intr_handle; 5487 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5488 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5489 return -1; 5490 } 5491 5492 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5493 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
5494 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 5495 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 5496 5497 rte_ethdev_trace_rx_intr_ctl_q_get_fd(port_id, queue_id, fd); 5498 5499 return fd; 5500 } 5501 5502 int 5503 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 5504 int epfd, int op, void *data) 5505 { 5506 uint32_t vec; 5507 struct rte_eth_dev *dev; 5508 struct rte_intr_handle *intr_handle; 5509 int rc; 5510 5511 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5512 dev = &rte_eth_devices[port_id]; 5513 5514 if (queue_id >= dev->data->nb_rx_queues) { 5515 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5516 return -EINVAL; 5517 } 5518 5519 if (!dev->intr_handle) { 5520 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 5521 return -ENOTSUP; 5522 } 5523 5524 intr_handle = dev->intr_handle; 5525 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 5526 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 5527 return -EPERM; 5528 } 5529 5530 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 5531 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 5532 5533 rte_ethdev_trace_rx_intr_ctl_q(port_id, queue_id, epfd, op, data, rc); 5534 5535 if (rc && rc != -EEXIST) { 5536 RTE_ETHDEV_LOG(ERR, 5537 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 5538 port_id, queue_id, op, epfd, vec); 5539 return rc; 5540 } 5541 5542 return 0; 5543 } 5544 5545 int 5546 rte_eth_dev_rx_intr_enable(uint16_t port_id, 5547 uint16_t queue_id) 5548 { 5549 struct rte_eth_dev *dev; 5550 int ret; 5551 5552 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5553 dev = &rte_eth_devices[port_id]; 5554 5555 ret = eth_dev_validate_rx_queue(dev, queue_id); 5556 if (ret != 0) 5557 return ret; 5558 5559 if (*dev->dev_ops->rx_queue_intr_enable == NULL) 5560 return -ENOTSUP; 5561 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 5562 5563 rte_ethdev_trace_rx_intr_enable(port_id, queue_id, ret); 5564 5565 return ret; 5566 } 5567 5568 int 5569 rte_eth_dev_rx_intr_disable(uint16_t port_id, 5570 uint16_t queue_id) 5571 { 5572 struct rte_eth_dev *dev; 5573 int ret; 5574 5575 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5576 dev = &rte_eth_devices[port_id]; 5577 5578 ret = eth_dev_validate_rx_queue(dev, queue_id); 5579 if (ret != 0) 5580 return ret; 5581 5582 if (*dev->dev_ops->rx_queue_intr_disable == NULL) 5583 return -ENOTSUP; 5584 ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 5585 5586 rte_ethdev_trace_rx_intr_disable(port_id, queue_id, ret); 5587 5588 return ret; 5589 } 5590 5591 5592 const struct rte_eth_rxtx_callback * 5593 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 5594 rte_rx_callback_fn fn, void *user_param) 5595 { 5596 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5597 rte_errno = ENOTSUP; 5598 return NULL; 5599 #endif 5600 struct rte_eth_dev *dev; 5601 5602 /* check input parameters */ 5603 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5604 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5605 rte_errno = EINVAL; 5606 return NULL; 5607 } 5608 dev = &rte_eth_devices[port_id]; 5609 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5610 rte_errno = EINVAL; 5611 return NULL; 5612 } 5613 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5614 5615 if (cb == NULL) { 5616 rte_errno = ENOMEM; 5617 return NULL; 5618 } 5619 5620 cb->fn.rx = fn; 5621 cb->param = user_param; 5622 5623 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5624 /* Add the callbacks in fifo order. 
*/ 5625 struct rte_eth_rxtx_callback *tail = 5626 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5627 5628 if (!tail) { 5629 /* Stores to cb->fn and cb->param should complete before 5630 * cb is visible to data plane. 5631 */ 5632 __atomic_store_n( 5633 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5634 cb, __ATOMIC_RELEASE); 5635 5636 } else { 5637 while (tail->next) 5638 tail = tail->next; 5639 /* Stores to cb->fn and cb->param should complete before 5640 * cb is visible to data plane. 5641 */ 5642 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5643 } 5644 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5645 5646 rte_eth_trace_add_rx_callback(port_id, queue_id, fn, user_param, cb); 5647 5648 return cb; 5649 } 5650 5651 const struct rte_eth_rxtx_callback * 5652 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 5653 rte_rx_callback_fn fn, void *user_param) 5654 { 5655 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5656 rte_errno = ENOTSUP; 5657 return NULL; 5658 #endif 5659 /* check input parameters */ 5660 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5661 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 5662 rte_errno = EINVAL; 5663 return NULL; 5664 } 5665 5666 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5667 5668 if (cb == NULL) { 5669 rte_errno = ENOMEM; 5670 return NULL; 5671 } 5672 5673 cb->fn.rx = fn; 5674 cb->param = user_param; 5675 5676 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5677 /* Add the callbacks at first position */ 5678 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 5679 /* Stores to cb->fn, cb->param and cb->next should complete before 5680 * cb is visible to data plane threads. 5681 */ 5682 __atomic_store_n( 5683 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 5684 cb, __ATOMIC_RELEASE); 5685 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5686 5687 rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param, 5688 cb); 5689 5690 return cb; 5691 } 5692 5693 const struct rte_eth_rxtx_callback * 5694 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 5695 rte_tx_callback_fn fn, void *user_param) 5696 { 5697 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5698 rte_errno = ENOTSUP; 5699 return NULL; 5700 #endif 5701 struct rte_eth_dev *dev; 5702 5703 /* check input parameters */ 5704 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 5705 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 5706 rte_errno = EINVAL; 5707 return NULL; 5708 } 5709 5710 dev = &rte_eth_devices[port_id]; 5711 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5712 rte_errno = EINVAL; 5713 return NULL; 5714 } 5715 5716 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 5717 5718 if (cb == NULL) { 5719 rte_errno = ENOMEM; 5720 return NULL; 5721 } 5722 5723 cb->fn.tx = fn; 5724 cb->param = user_param; 5725 5726 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5727 /* Add the callbacks in fifo order. */ 5728 struct rte_eth_rxtx_callback *tail = 5729 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 5730 5731 if (!tail) { 5732 /* Stores to cb->fn and cb->param should complete before 5733 * cb is visible to data plane. 5734 */ 5735 __atomic_store_n( 5736 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 5737 cb, __ATOMIC_RELEASE); 5738 5739 } else { 5740 while (tail->next) 5741 tail = tail->next; 5742 /* Stores to cb->fn and cb->param should complete before 5743 * cb is visible to data plane. 
5744 */ 5745 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 5746 } 5747 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5748 5749 rte_eth_trace_add_tx_callback(port_id, queue_id, fn, user_param, cb); 5750 5751 return cb; 5752 } 5753 5754 int 5755 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 5756 const struct rte_eth_rxtx_callback *user_cb) 5757 { 5758 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5759 return -ENOTSUP; 5760 #endif 5761 /* Check input parameters. */ 5762 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5763 if (user_cb == NULL || 5764 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 5765 return -EINVAL; 5766 5767 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5768 struct rte_eth_rxtx_callback *cb; 5769 struct rte_eth_rxtx_callback **prev_cb; 5770 int ret = -EINVAL; 5771 5772 rte_spinlock_lock(&eth_dev_rx_cb_lock); 5773 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 5774 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5775 cb = *prev_cb; 5776 if (cb == user_cb) { 5777 /* Remove the user cb from the callback list. */ 5778 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5779 ret = 0; 5780 break; 5781 } 5782 } 5783 rte_spinlock_unlock(&eth_dev_rx_cb_lock); 5784 5785 rte_eth_trace_remove_rx_callback(port_id, queue_id, user_cb, ret); 5786 5787 return ret; 5788 } 5789 5790 int 5791 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 5792 const struct rte_eth_rxtx_callback *user_cb) 5793 { 5794 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 5795 return -ENOTSUP; 5796 #endif 5797 /* Check input parameters. */ 5798 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5799 if (user_cb == NULL || 5800 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 5801 return -EINVAL; 5802 5803 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 5804 int ret = -EINVAL; 5805 struct rte_eth_rxtx_callback *cb; 5806 struct rte_eth_rxtx_callback **prev_cb; 5807 5808 rte_spinlock_lock(&eth_dev_tx_cb_lock); 5809 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 5810 for (; *prev_cb != NULL; prev_cb = &cb->next) { 5811 cb = *prev_cb; 5812 if (cb == user_cb) { 5813 /* Remove the user cb from the callback list. 
*/ 5814 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 5815 ret = 0; 5816 break; 5817 } 5818 } 5819 rte_spinlock_unlock(&eth_dev_tx_cb_lock); 5820 5821 rte_eth_trace_remove_tx_callback(port_id, queue_id, user_cb, ret); 5822 5823 return ret; 5824 } 5825 5826 int 5827 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5828 struct rte_eth_rxq_info *qinfo) 5829 { 5830 struct rte_eth_dev *dev; 5831 5832 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5833 dev = &rte_eth_devices[port_id]; 5834 5835 if (queue_id >= dev->data->nb_rx_queues) { 5836 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5837 return -EINVAL; 5838 } 5839 5840 if (qinfo == NULL) { 5841 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5842 port_id, queue_id); 5843 return -EINVAL; 5844 } 5845 5846 if (dev->data->rx_queues == NULL || 5847 dev->data->rx_queues[queue_id] == NULL) { 5848 RTE_ETHDEV_LOG(ERR, 5849 "Rx queue %"PRIu16" of device with port_id=%" 5850 PRIu16" has not been setup\n", 5851 queue_id, port_id); 5852 return -EINVAL; 5853 } 5854 5855 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5856 RTE_ETHDEV_LOG(INFO, 5857 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5858 queue_id, port_id); 5859 return -EINVAL; 5860 } 5861 5862 if (*dev->dev_ops->rxq_info_get == NULL) 5863 return -ENOTSUP; 5864 5865 memset(qinfo, 0, sizeof(*qinfo)); 5866 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5867 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5868 5869 rte_eth_trace_rx_queue_info_get(port_id, queue_id, qinfo); 5870 5871 return 0; 5872 } 5873 5874 int 5875 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5876 struct rte_eth_txq_info *qinfo) 5877 { 5878 struct rte_eth_dev *dev; 5879 5880 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5881 dev = &rte_eth_devices[port_id]; 5882 5883 if (queue_id >= dev->data->nb_tx_queues) { 5884 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5885 return -EINVAL; 5886 } 5887 5888 if (qinfo == NULL) { 5889 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n", 5890 port_id, queue_id); 5891 return -EINVAL; 5892 } 5893 5894 if (dev->data->tx_queues == NULL || 5895 dev->data->tx_queues[queue_id] == NULL) { 5896 RTE_ETHDEV_LOG(ERR, 5897 "Tx queue %"PRIu16" of device with port_id=%" 5898 PRIu16" has not been setup\n", 5899 queue_id, port_id); 5900 return -EINVAL; 5901 } 5902 5903 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5904 RTE_ETHDEV_LOG(INFO, 5905 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5906 queue_id, port_id); 5907 return -EINVAL; 5908 } 5909 5910 if (*dev->dev_ops->txq_info_get == NULL) 5911 return -ENOTSUP; 5912 5913 memset(qinfo, 0, sizeof(*qinfo)); 5914 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5915 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5916 5917 rte_eth_trace_tx_queue_info_get(port_id, queue_id, qinfo); 5918 5919 return 0; 5920 } 5921 5922 int 5923 rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5924 struct rte_eth_recycle_rxq_info *recycle_rxq_info) 5925 { 5926 struct rte_eth_dev *dev; 5927 int ret; 5928 5929 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5930 dev = &rte_eth_devices[port_id]; 5931 5932 ret = eth_dev_validate_rx_queue(dev, queue_id); 5933 if (unlikely(ret != 0)) 5934 return ret; 5935 5936 if (*dev->dev_ops->recycle_rxq_info_get == NULL) 5937 return -ENOTSUP; 5938 5939 dev->dev_ops->recycle_rxq_info_get(dev, 
queue_id, recycle_rxq_info); 5940 5941 return 0; 5942 } 5943 5944 int 5945 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5946 struct rte_eth_burst_mode *mode) 5947 { 5948 struct rte_eth_dev *dev; 5949 int ret; 5950 5951 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5952 dev = &rte_eth_devices[port_id]; 5953 5954 if (queue_id >= dev->data->nb_rx_queues) { 5955 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5956 return -EINVAL; 5957 } 5958 5959 if (mode == NULL) { 5960 RTE_ETHDEV_LOG(ERR, 5961 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5962 port_id, queue_id); 5963 return -EINVAL; 5964 } 5965 5966 if (*dev->dev_ops->rx_burst_mode_get == NULL) 5967 return -ENOTSUP; 5968 memset(mode, 0, sizeof(*mode)); 5969 ret = eth_err(port_id, 5970 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5971 5972 rte_eth_trace_rx_burst_mode_get(port_id, queue_id, mode, ret); 5973 5974 return ret; 5975 } 5976 5977 int 5978 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5979 struct rte_eth_burst_mode *mode) 5980 { 5981 struct rte_eth_dev *dev; 5982 int ret; 5983 5984 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5985 dev = &rte_eth_devices[port_id]; 5986 5987 if (queue_id >= dev->data->nb_tx_queues) { 5988 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5989 return -EINVAL; 5990 } 5991 5992 if (mode == NULL) { 5993 RTE_ETHDEV_LOG(ERR, 5994 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5995 port_id, queue_id); 5996 return -EINVAL; 5997 } 5998 5999 if (*dev->dev_ops->tx_burst_mode_get == NULL) 6000 return -ENOTSUP; 6001 memset(mode, 0, sizeof(*mode)); 6002 ret = eth_err(port_id, 6003 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 6004 6005 rte_eth_trace_tx_burst_mode_get(port_id, queue_id, mode, ret); 6006 6007 return ret; 6008 } 6009 6010 int 6011 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 6012 struct rte_power_monitor_cond *pmc) 6013 { 6014 struct rte_eth_dev *dev; 6015 int ret; 6016 6017 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6018 dev = &rte_eth_devices[port_id]; 6019 6020 if (queue_id >= dev->data->nb_rx_queues) { 6021 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 6022 return -EINVAL; 6023 } 6024 6025 if (pmc == NULL) { 6026 RTE_ETHDEV_LOG(ERR, 6027 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 6028 port_id, queue_id); 6029 return -EINVAL; 6030 } 6031 6032 if (*dev->dev_ops->get_monitor_addr == NULL) 6033 return -ENOTSUP; 6034 ret = eth_err(port_id, 6035 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 6036 6037 rte_eth_trace_get_monitor_addr(port_id, queue_id, pmc, ret); 6038 6039 return ret; 6040 } 6041 6042 int 6043 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 6044 struct rte_ether_addr *mc_addr_set, 6045 uint32_t nb_mc_addr) 6046 { 6047 struct rte_eth_dev *dev; 6048 int ret; 6049 6050 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6051 dev = &rte_eth_devices[port_id]; 6052 6053 if (*dev->dev_ops->set_mc_addr_list == NULL) 6054 return -ENOTSUP; 6055 ret = eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 6056 mc_addr_set, nb_mc_addr)); 6057 6058 rte_ethdev_trace_set_mc_addr_list(port_id, mc_addr_set, nb_mc_addr, 6059 ret); 6060 6061 return ret; 6062 } 6063 6064 int 6065 rte_eth_timesync_enable(uint16_t port_id) 6066 { 6067 struct rte_eth_dev *dev; 6068 int ret; 6069 6070 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6071 dev = &rte_eth_devices[port_id]; 6072 6073 if 
(*dev->dev_ops->timesync_enable == NULL) 6074 return -ENOTSUP; 6075 ret = eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 6076 6077 rte_eth_trace_timesync_enable(port_id, ret); 6078 6079 return ret; 6080 } 6081 6082 int 6083 rte_eth_timesync_disable(uint16_t port_id) 6084 { 6085 struct rte_eth_dev *dev; 6086 int ret; 6087 6088 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6089 dev = &rte_eth_devices[port_id]; 6090 6091 if (*dev->dev_ops->timesync_disable == NULL) 6092 return -ENOTSUP; 6093 ret = eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 6094 6095 rte_eth_trace_timesync_disable(port_id, ret); 6096 6097 return ret; 6098 } 6099 6100 int 6101 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 6102 uint32_t flags) 6103 { 6104 struct rte_eth_dev *dev; 6105 int ret; 6106 6107 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6108 dev = &rte_eth_devices[port_id]; 6109 6110 if (timestamp == NULL) { 6111 RTE_ETHDEV_LOG(ERR, 6112 "Cannot read ethdev port %u Rx timestamp to NULL\n", 6113 port_id); 6114 return -EINVAL; 6115 } 6116 6117 if (*dev->dev_ops->timesync_read_rx_timestamp == NULL) 6118 return -ENOTSUP; 6119 6120 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 6121 (dev, timestamp, flags)); 6122 6123 rte_eth_trace_timesync_read_rx_timestamp(port_id, timestamp, flags, 6124 ret); 6125 6126 return ret; 6127 } 6128 6129 int 6130 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 6131 struct timespec *timestamp) 6132 { 6133 struct rte_eth_dev *dev; 6134 int ret; 6135 6136 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6137 dev = &rte_eth_devices[port_id]; 6138 6139 if (timestamp == NULL) { 6140 RTE_ETHDEV_LOG(ERR, 6141 "Cannot read ethdev port %u Tx timestamp to NULL\n", 6142 port_id); 6143 return -EINVAL; 6144 } 6145 6146 if (*dev->dev_ops->timesync_read_tx_timestamp == NULL) 6147 return -ENOTSUP; 6148 6149 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 6150 (dev, timestamp)); 6151 6152 rte_eth_trace_timesync_read_tx_timestamp(port_id, timestamp, ret); 6153 6154 return ret; 6155 6156 } 6157 6158 int 6159 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 6160 { 6161 struct rte_eth_dev *dev; 6162 int ret; 6163 6164 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6165 dev = &rte_eth_devices[port_id]; 6166 6167 if (*dev->dev_ops->timesync_adjust_time == NULL) 6168 return -ENOTSUP; 6169 ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 6170 6171 rte_eth_trace_timesync_adjust_time(port_id, delta, ret); 6172 6173 return ret; 6174 } 6175 6176 int 6177 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 6178 { 6179 struct rte_eth_dev *dev; 6180 int ret; 6181 6182 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6183 dev = &rte_eth_devices[port_id]; 6184 6185 if (timestamp == NULL) { 6186 RTE_ETHDEV_LOG(ERR, 6187 "Cannot read ethdev port %u timesync time to NULL\n", 6188 port_id); 6189 return -EINVAL; 6190 } 6191 6192 if (*dev->dev_ops->timesync_read_time == NULL) 6193 return -ENOTSUP; 6194 ret = eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 6195 timestamp)); 6196 6197 rte_eth_trace_timesync_read_time(port_id, timestamp, ret); 6198 6199 return ret; 6200 } 6201 6202 int 6203 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 6204 { 6205 struct rte_eth_dev *dev; 6206 int ret; 6207 6208 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6209 dev = &rte_eth_devices[port_id]; 6210 6211 if (timestamp == 
NULL) { 6212 RTE_ETHDEV_LOG(ERR, 6213 "Cannot write ethdev port %u timesync from NULL time\n", 6214 port_id); 6215 return -EINVAL; 6216 } 6217 6218 if (*dev->dev_ops->timesync_write_time == NULL) 6219 return -ENOTSUP; 6220 ret = eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 6221 timestamp)); 6222 6223 rte_eth_trace_timesync_write_time(port_id, timestamp, ret); 6224 6225 return ret; 6226 } 6227 6228 int 6229 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 6230 { 6231 struct rte_eth_dev *dev; 6232 int ret; 6233 6234 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6235 dev = &rte_eth_devices[port_id]; 6236 6237 if (clock == NULL) { 6238 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 6239 port_id); 6240 return -EINVAL; 6241 } 6242 6243 if (*dev->dev_ops->read_clock == NULL) 6244 return -ENOTSUP; 6245 ret = eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 6246 6247 rte_eth_trace_read_clock(port_id, clock, ret); 6248 6249 return ret; 6250 } 6251 6252 int 6253 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 6254 { 6255 struct rte_eth_dev *dev; 6256 int ret; 6257 6258 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6259 dev = &rte_eth_devices[port_id]; 6260 6261 if (info == NULL) { 6262 RTE_ETHDEV_LOG(ERR, 6263 "Cannot get ethdev port %u register info to NULL\n", 6264 port_id); 6265 return -EINVAL; 6266 } 6267 6268 if (*dev->dev_ops->get_reg == NULL) 6269 return -ENOTSUP; 6270 ret = eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 6271 6272 rte_ethdev_trace_get_reg_info(port_id, info, ret); 6273 6274 return ret; 6275 } 6276 6277 int 6278 rte_eth_dev_get_eeprom_length(uint16_t port_id) 6279 { 6280 struct rte_eth_dev *dev; 6281 int ret; 6282 6283 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6284 dev = &rte_eth_devices[port_id]; 6285 6286 if (*dev->dev_ops->get_eeprom_length == NULL) 6287 return -ENOTSUP; 6288 ret = eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 6289 6290 rte_ethdev_trace_get_eeprom_length(port_id, ret); 6291 6292 return ret; 6293 } 6294 6295 int 6296 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 6297 { 6298 struct rte_eth_dev *dev; 6299 int ret; 6300 6301 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6302 dev = &rte_eth_devices[port_id]; 6303 6304 if (info == NULL) { 6305 RTE_ETHDEV_LOG(ERR, 6306 "Cannot get ethdev port %u EEPROM info to NULL\n", 6307 port_id); 6308 return -EINVAL; 6309 } 6310 6311 if (*dev->dev_ops->get_eeprom == NULL) 6312 return -ENOTSUP; 6313 ret = eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 6314 6315 rte_ethdev_trace_get_eeprom(port_id, info, ret); 6316 6317 return ret; 6318 } 6319 6320 int 6321 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 6322 { 6323 struct rte_eth_dev *dev; 6324 int ret; 6325 6326 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 6327 dev = &rte_eth_devices[port_id]; 6328 6329 if (info == NULL) { 6330 RTE_ETHDEV_LOG(ERR, 6331 "Cannot set ethdev port %u EEPROM from NULL info\n", 6332 port_id); 6333 return -EINVAL; 6334 } 6335 6336 if (*dev->dev_ops->set_eeprom == NULL) 6337 return -ENOTSUP; 6338 ret = eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 6339 6340 rte_ethdev_trace_set_eeprom(port_id, info, ret); 6341 6342 return ret; 6343 } 6344 6345 int 6346 rte_eth_dev_get_module_info(uint16_t port_id, 6347 struct rte_eth_dev_module_info *modinfo) 6348 { 6349 struct rte_eth_dev *dev; 6350 int ret; 6351 6352 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 
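/*
 * Illustrative usage sketch (not part of the implementation): module EEPROM
 * contents are normally dumped in two steps, first sizing the request from
 * the module info returned here, then reading the data with
 * rte_eth_dev_get_module_eeprom(). The 1024-byte buffer below is an
 * arbitrary size chosen only for the example.
 *
 *	struct rte_eth_dev_module_info minfo;
 *	struct rte_dev_eeprom_info einfo;
 *	uint8_t buf[1024];
 *
 *	if (rte_eth_dev_get_module_info(port_id, &minfo) == 0) {
 *		memset(&einfo, 0, sizeof(einfo));
 *		einfo.offset = 0;
 *		einfo.length = RTE_MIN(minfo.eeprom_len, (uint32_t)sizeof(buf));
 *		einfo.data = buf;
 *		rte_eth_dev_get_module_eeprom(port_id, &einfo);
 *	}
 */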
int
rte_eth_dev_get_module_info(uint16_t port_id,
			    struct rte_eth_dev_module_info *modinfo)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (modinfo == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u EEPROM module info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_module_info == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->get_module_info)(dev, modinfo);

	rte_ethdev_trace_get_module_info(port_id, modinfo, ret);

	return ret;
}

int
rte_eth_dev_get_module_eeprom(uint16_t port_id,
			      struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM info to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (info->data == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM data to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (info->length == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u module EEPROM to data with zero size\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->get_module_eeprom == NULL)
		return -ENOTSUP;
	ret = (*dev->dev_ops->get_module_eeprom)(dev, info);

	rte_ethdev_trace_get_module_eeprom(port_id, info, ret);

	return ret;
}

int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dcb_info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u DCB info to NULL\n",
			port_id);
		return -EINVAL;
	}

	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	if (*dev->dev_ops->get_dcb_info == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));

	rte_ethdev_trace_get_dcb_info(port_id, dcb_info, ret);

	return ret;
}

static void
eth_dev_adjust_nb_desc(uint16_t *nb_desc,
		       const struct rte_eth_desc_lim *desc_lim)
{
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}

int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (nb_rx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	rte_ethdev_trace_adjust_nb_rx_tx_desc(port_id);

	return 0;
}

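/*
 * Illustrative use of rte_eth_dev_adjust_nb_rx_tx_desc() above (a sketch,
 * not code from this file): an application typically clamps its preferred
 * ring sizes to the driver limits before setting up queues, e.g.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;	// application defaults
 *
 *	if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) == 0) {
 *		// nb_rxd/nb_txd now respect nb_min/nb_max/nb_align and can be
 *		// passed to rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup().
 *	}
 */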
int
rte_eth_dev_hairpin_capability_get(uint16_t port_id,
				   struct rte_eth_hairpin_cap *cap)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (cap == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u hairpin capability to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->hairpin_cap_get == NULL)
		return -ENOTSUP;
	memset(cap, 0, sizeof(*cap));
	ret = eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));

	rte_ethdev_trace_hairpin_capability_get(port_id, cap, ret);

	return ret;
}

int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (pool == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot test ethdev port %u mempool operation from NULL pool\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->pool_ops_supported == NULL)
		return 1; /* all pools are supported */

	ret = (*dev->dev_ops->pool_ops_supported)(dev, pool);

	rte_ethdev_trace_pool_ops_supported(port_id, pool, ret);

	return ret;
}

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->representor_info_get == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));

	rte_eth_trace_representor_info_get(port_id, info, ret);

	return ret;
}

int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (ID=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0 &&
			rte_flow_restore_info_dynflag_register() < 0)
		*features &= ~RTE_ETH_RX_METADATA_TUNNEL_ID;

	if (*dev->dev_ops->rx_metadata_negotiate == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		      (*dev->dev_ops->rx_metadata_negotiate)(dev, features));

	rte_eth_trace_rx_metadata_negotiate(port_id, *features, ret);

	return ret;
}

int
rte_eth_ip_reassembly_capability_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *reassembly_capa)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot get IP reassembly capability\n",
			port_id);
		return -EINVAL;
	}

	if (reassembly_capa == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
		return -ENOTSUP;
	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));

	ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
					(dev, reassembly_capa));

	rte_eth_trace_ip_reassembly_capability_get(port_id, reassembly_capa,
						   ret);

	return ret;
}

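/*
 * The conf_get()/conf_set() pair below follows the usual query-then-apply
 * pattern: read the current IP reassembly parameters, adjust them within
 * the limits reported by rte_eth_ip_reassembly_capability_get(), and write
 * them back while the port is configured but not yet started.
 */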
int
rte_eth_ip_reassembly_conf_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot get IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
		return -ENOTSUP;
	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
	ret = eth_err(port_id,
		      (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_get(port_id, conf, ret);

	return ret;
}

int
rte_eth_ip_reassembly_conf_set(uint16_t port_id,
		const struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured.\n"
			"Cannot set IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u started,\n"
			"cannot configure IP reassembly params.\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid IP reassembly configuration (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
		return -ENOTSUP;
	ret = eth_err(port_id,
		      (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));

	rte_eth_trace_ip_reassembly_conf_set(port_id, conf, ret);

	return ret;
}

int
rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_dev_priv_dump == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
}

int
rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset, uint16_t num, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_rx_descriptor_dump == NULL)
		return -ENOTSUP;

	return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev,
			queue_id, offset, num, file));
}

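/*
 * Tx counterpart of rte_eth_rx_descriptor_dump() above: same validation,
 * but the queue index is checked against nb_tx_queues and the dump is
 * delegated to the driver's eth_tx_descriptor_dump callback.
 */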
int
rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset, uint16_t num, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_tx_descriptor_dump == NULL)
		return -ENOTSUP;

	return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev,
			queue_id, offset, num, file));
}

int
rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_types;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (ptypes == NULL && num > 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u supported header protocol types to NULL when array size is non zero\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL)
		return -ENOTSUP;
	all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev);

	if (all_types == NULL)
		return 0;

	for (i = 0, j = 0; all_types[i] != RTE_PTYPE_UNKNOWN; ++i) {
		if (j < num) {
			ptypes[j] = all_types[i];

			rte_eth_trace_buffer_split_get_supported_hdr_ptypes(
							port_id, j, ptypes[j]);
		}
		j++;
	}

	return j;
}

int rte_eth_dev_count_aggr_ports(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->count_aggr_ports == NULL)
		return 0;
	ret = eth_err(port_id, (*dev->dev_ops->count_aggr_ports)(dev));

	rte_eth_trace_count_aggr_ports(port_id, ret);

	return ret;
}

int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
				     uint8_t affinity)
{
	struct rte_eth_dev *dev;
	int aggr_ports;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->map_aggr_tx_affinity == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be configured before Tx affinity mapping\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	aggr_ports = rte_eth_dev_count_aggr_ports(port_id);
	if (aggr_ports == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u has no aggregated port\n",
			port_id);
		return -ENOTSUP;
	}

	if (affinity > aggr_ports) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u: invalid affinity %u, exceeds the number of aggregated ports %u\n",
			port_id, affinity, aggr_ports);
		return -EINVAL;
	}

	ret = eth_err(port_id, (*dev->dev_ops->map_aggr_tx_affinity)(dev,
			tx_queue_id, affinity));

	rte_eth_trace_map_aggr_tx_affinity(port_id, tx_queue_id, affinity, ret);

	return ret;
}

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);