/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
RTE_RX_OFFLOAD_BIT2STR(SCATTER), 108 RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP), 109 RTE_RX_OFFLOAD_BIT2STR(SECURITY), 110 RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC), 111 RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM), 112 RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM), 113 RTE_RX_OFFLOAD_BIT2STR(RSS_HASH), 114 RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT), 115 }; 116 117 #undef RTE_RX_OFFLOAD_BIT2STR 118 #undef RTE_ETH_RX_OFFLOAD_BIT2STR 119 120 #define RTE_TX_OFFLOAD_BIT2STR(_name) \ 121 { RTE_ETH_TX_OFFLOAD_##_name, #_name } 122 123 static const struct { 124 uint64_t offload; 125 const char *name; 126 } eth_dev_tx_offload_names[] = { 127 RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT), 128 RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM), 129 RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM), 130 RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM), 131 RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM), 132 RTE_TX_OFFLOAD_BIT2STR(TCP_TSO), 133 RTE_TX_OFFLOAD_BIT2STR(UDP_TSO), 134 RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM), 135 RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT), 136 RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO), 137 RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO), 138 RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO), 139 RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO), 140 RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT), 141 RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE), 142 RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS), 143 RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE), 144 RTE_TX_OFFLOAD_BIT2STR(SECURITY), 145 RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO), 146 RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO), 147 RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM), 148 RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP), 149 }; 150 151 #undef RTE_TX_OFFLOAD_BIT2STR 152 153 static const struct { 154 uint64_t offload; 155 const char *name; 156 } rte_eth_dev_capa_names[] = { 157 {RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"}, 158 {RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"}, 159 {RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"}, 160 {RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"}, 161 {RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"}, 162 }; 163 164 enum { 165 STAT_QMAP_TX = 0, 166 STAT_QMAP_RX 167 }; 168 169 int 170 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str) 171 { 172 int ret; 173 struct rte_devargs devargs; 174 const char *bus_param_key; 175 char *bus_str = NULL; 176 char *cls_str = NULL; 177 int str_size; 178 179 if (iter == NULL) { 180 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n"); 181 return -EINVAL; 182 } 183 184 if (devargs_str == NULL) { 185 RTE_ETHDEV_LOG(ERR, 186 "Cannot initialize iterator from NULL device description string\n"); 187 return -EINVAL; 188 } 189 190 memset(iter, 0, sizeof(*iter)); 191 memset(&devargs, 0, sizeof(devargs)); 192 193 /* 194 * The devargs string may use various syntaxes: 195 * - 0000:08:00.0,representor=[1-3] 196 * - pci:0000:06:00.0,representor=[0,5] 197 * - class=eth,mac=00:11:22:33:44:55 198 * - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z 199 */ 200 201 /* 202 * Handle pure class filter (i.e. without any bus-level argument), 203 * from future new syntax. 204 * rte_devargs_parse() is not yet supporting the new syntax, 205 * that's why this simple case is temporarily parsed here. 206 */ 207 #define iter_anybus_str "class=eth," 208 if (strncmp(devargs_str, iter_anybus_str, 209 strlen(iter_anybus_str)) == 0) { 210 iter->cls_str = devargs_str + strlen(iter_anybus_str); 211 goto end; 212 } 213 214 /* Split bus, device and parameters. 
*/ 215 ret = rte_devargs_parse(&devargs, devargs_str); 216 if (ret != 0) 217 goto error; 218 219 /* 220 * Assume parameters of old syntax can match only at ethdev level. 221 * Extra parameters will be ignored, thanks to "+" prefix. 222 */ 223 str_size = strlen(devargs.args) + 2; 224 cls_str = malloc(str_size); 225 if (cls_str == NULL) { 226 ret = -ENOMEM; 227 goto error; 228 } 229 ret = snprintf(cls_str, str_size, "+%s", devargs.args); 230 if (ret != str_size - 1) { 231 ret = -EINVAL; 232 goto error; 233 } 234 iter->cls_str = cls_str; 235 236 iter->bus = devargs.bus; 237 if (iter->bus->dev_iterate == NULL) { 238 ret = -ENOTSUP; 239 goto error; 240 } 241 242 /* Convert bus args to new syntax for use with new API dev_iterate. */ 243 if ((strcmp(iter->bus->name, "vdev") == 0) || 244 (strcmp(iter->bus->name, "fslmc") == 0) || 245 (strcmp(iter->bus->name, "dpaa_bus") == 0)) { 246 bus_param_key = "name"; 247 } else if (strcmp(iter->bus->name, "pci") == 0) { 248 bus_param_key = "addr"; 249 } else { 250 ret = -ENOTSUP; 251 goto error; 252 } 253 str_size = strlen(bus_param_key) + strlen(devargs.name) + 2; 254 bus_str = malloc(str_size); 255 if (bus_str == NULL) { 256 ret = -ENOMEM; 257 goto error; 258 } 259 ret = snprintf(bus_str, str_size, "%s=%s", 260 bus_param_key, devargs.name); 261 if (ret != str_size - 1) { 262 ret = -EINVAL; 263 goto error; 264 } 265 iter->bus_str = bus_str; 266 267 end: 268 iter->cls = rte_class_find_by_name("eth"); 269 rte_devargs_reset(&devargs); 270 return 0; 271 272 error: 273 if (ret == -ENOTSUP) 274 RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n", 275 iter->bus->name); 276 rte_devargs_reset(&devargs); 277 free(bus_str); 278 free(cls_str); 279 return ret; 280 } 281 282 uint16_t 283 rte_eth_iterator_next(struct rte_dev_iterator *iter) 284 { 285 if (iter == NULL) { 286 RTE_ETHDEV_LOG(ERR, 287 "Cannot get next device from NULL iterator\n"); 288 return RTE_MAX_ETHPORTS; 289 } 290 291 if (iter->cls == NULL) /* invalid ethdev iterator */ 292 return RTE_MAX_ETHPORTS; 293 294 do { /* loop to try all matching rte_device */ 295 /* If not pure ethdev filter and */ 296 if (iter->bus != NULL && 297 /* not in middle of rte_eth_dev iteration, */ 298 iter->class_device == NULL) { 299 /* get next rte_device to try. */ 300 iter->device = iter->bus->dev_iterate( 301 iter->device, iter->bus_str, iter); 302 if (iter->device == NULL) 303 break; /* no more rte_device candidate */ 304 } 305 /* A device is matching bus part, need to check ethdev part. */ 306 iter->class_device = iter->cls->dev_iterate( 307 iter->class_device, iter->cls_str, iter); 308 if (iter->class_device != NULL) 309 return eth_dev_to_id(iter->class_device); /* match */ 310 } while (iter->bus != NULL); /* need to try next rte_device */ 311 312 /* No more ethdev port to iterate. 
 */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
					sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL); 580 return rte_eth_devices[port_id].security_ctx; 581 } 582 583 uint16_t 584 rte_eth_dev_count_avail(void) 585 { 586 uint16_t p; 587 uint16_t count; 588 589 count = 0; 590 591 RTE_ETH_FOREACH_DEV(p) 592 count++; 593 594 return count; 595 } 596 597 uint16_t 598 rte_eth_dev_count_total(void) 599 { 600 uint16_t port, count = 0; 601 602 RTE_ETH_FOREACH_VALID_DEV(port) 603 count++; 604 605 return count; 606 } 607 608 int 609 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name) 610 { 611 char *tmp; 612 613 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 614 615 if (name == NULL) { 616 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n", 617 port_id); 618 return -EINVAL; 619 } 620 621 /* shouldn't check 'rte_eth_devices[i].data', 622 * because it might be overwritten by VDEV PMD */ 623 tmp = eth_dev_shared_data->data[port_id].name; 624 strcpy(name, tmp); 625 return 0; 626 } 627 628 int 629 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) 630 { 631 uint16_t pid; 632 633 if (name == NULL) { 634 RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name"); 635 return -EINVAL; 636 } 637 638 if (port_id == NULL) { 639 RTE_ETHDEV_LOG(ERR, 640 "Cannot get port ID to NULL for %s\n", name); 641 return -EINVAL; 642 } 643 644 RTE_ETH_FOREACH_VALID_DEV(pid) 645 if (!strcmp(name, eth_dev_shared_data->data[pid].name)) { 646 *port_id = pid; 647 return 0; 648 } 649 650 return -ENODEV; 651 } 652 653 static int 654 eth_err(uint16_t port_id, int ret) 655 { 656 if (ret == 0) 657 return 0; 658 if (rte_eth_dev_is_removed(port_id)) 659 return -EIO; 660 return ret; 661 } 662 663 static int 664 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id) 665 { 666 uint16_t port_id; 667 668 if (rx_queue_id >= dev->data->nb_rx_queues) { 669 port_id = dev->data->port_id; 670 RTE_ETHDEV_LOG(ERR, 671 "Invalid Rx queue_id=%u of device with port_id=%u\n", 672 rx_queue_id, port_id); 673 return -EINVAL; 674 } 675 676 if (dev->data->rx_queues[rx_queue_id] == NULL) { 677 port_id = dev->data->port_id; 678 RTE_ETHDEV_LOG(ERR, 679 "Queue %u of device with port_id=%u has not been setup\n", 680 rx_queue_id, port_id); 681 return -EINVAL; 682 } 683 684 return 0; 685 } 686 687 static int 688 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id) 689 { 690 uint16_t port_id; 691 692 if (tx_queue_id >= dev->data->nb_tx_queues) { 693 port_id = dev->data->port_id; 694 RTE_ETHDEV_LOG(ERR, 695 "Invalid Tx queue_id=%u of device with port_id=%u\n", 696 tx_queue_id, port_id); 697 return -EINVAL; 698 } 699 700 if (dev->data->tx_queues[tx_queue_id] == NULL) { 701 port_id = dev->data->port_id; 702 RTE_ETHDEV_LOG(ERR, 703 "Queue %u of device with port_id=%u has not been setup\n", 704 tx_queue_id, port_id); 705 return -EINVAL; 706 } 707 708 return 0; 709 } 710 711 int 712 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id) 713 { 714 struct rte_eth_dev *dev; 715 int ret; 716 717 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 718 dev = &rte_eth_devices[port_id]; 719 720 if (!dev->data->dev_started) { 721 RTE_ETHDEV_LOG(ERR, 722 "Port %u must be started before start any queue\n", 723 port_id); 724 return -EINVAL; 725 } 726 727 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 728 if (ret != 0) 729 return ret; 730 731 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP); 732 733 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 734 RTE_ETHDEV_LOG(INFO, 735 "Can't start Rx hairpin queue %"PRIu16" of 
device with port_id=%"PRIu16"\n", 736 rx_queue_id, port_id); 737 return -EINVAL; 738 } 739 740 if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 741 RTE_ETHDEV_LOG(INFO, 742 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 743 rx_queue_id, port_id); 744 return 0; 745 } 746 747 return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id)); 748 } 749 750 int 751 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id) 752 { 753 struct rte_eth_dev *dev; 754 int ret; 755 756 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 757 dev = &rte_eth_devices[port_id]; 758 759 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); 760 if (ret != 0) 761 return ret; 762 763 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP); 764 765 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { 766 RTE_ETHDEV_LOG(INFO, 767 "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 768 rx_queue_id, port_id); 769 return -EINVAL; 770 } 771 772 if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 773 RTE_ETHDEV_LOG(INFO, 774 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 775 rx_queue_id, port_id); 776 return 0; 777 } 778 779 return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id)); 780 } 781 782 int 783 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id) 784 { 785 struct rte_eth_dev *dev; 786 int ret; 787 788 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 789 dev = &rte_eth_devices[port_id]; 790 791 if (!dev->data->dev_started) { 792 RTE_ETHDEV_LOG(ERR, 793 "Port %u must be started before start any queue\n", 794 port_id); 795 return -EINVAL; 796 } 797 798 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 799 if (ret != 0) 800 return ret; 801 802 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP); 803 804 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 805 RTE_ETHDEV_LOG(INFO, 806 "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 807 tx_queue_id, port_id); 808 return -EINVAL; 809 } 810 811 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { 812 RTE_ETHDEV_LOG(INFO, 813 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n", 814 tx_queue_id, port_id); 815 return 0; 816 } 817 818 return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id)); 819 } 820 821 int 822 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id) 823 { 824 struct rte_eth_dev *dev; 825 int ret; 826 827 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 828 dev = &rte_eth_devices[port_id]; 829 830 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); 831 if (ret != 0) 832 return ret; 833 834 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP); 835 836 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { 837 RTE_ETHDEV_LOG(INFO, 838 "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n", 839 tx_queue_id, port_id); 840 return -EINVAL; 841 } 842 843 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { 844 RTE_ETHDEV_LOG(INFO, 845 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n", 846 tx_queue_id, port_id); 847 return 0; 848 } 849 850 return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); 851 } 852 853 uint32_t 854 rte_eth_speed_bitflag(uint32_t speed, int duplex) 855 { 856 switch (speed) { 857 case RTE_ETH_SPEED_NUM_10M: 858 return duplex ? 
RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD; 859 case RTE_ETH_SPEED_NUM_100M: 860 return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD; 861 case RTE_ETH_SPEED_NUM_1G: 862 return RTE_ETH_LINK_SPEED_1G; 863 case RTE_ETH_SPEED_NUM_2_5G: 864 return RTE_ETH_LINK_SPEED_2_5G; 865 case RTE_ETH_SPEED_NUM_5G: 866 return RTE_ETH_LINK_SPEED_5G; 867 case RTE_ETH_SPEED_NUM_10G: 868 return RTE_ETH_LINK_SPEED_10G; 869 case RTE_ETH_SPEED_NUM_20G: 870 return RTE_ETH_LINK_SPEED_20G; 871 case RTE_ETH_SPEED_NUM_25G: 872 return RTE_ETH_LINK_SPEED_25G; 873 case RTE_ETH_SPEED_NUM_40G: 874 return RTE_ETH_LINK_SPEED_40G; 875 case RTE_ETH_SPEED_NUM_50G: 876 return RTE_ETH_LINK_SPEED_50G; 877 case RTE_ETH_SPEED_NUM_56G: 878 return RTE_ETH_LINK_SPEED_56G; 879 case RTE_ETH_SPEED_NUM_100G: 880 return RTE_ETH_LINK_SPEED_100G; 881 case RTE_ETH_SPEED_NUM_200G: 882 return RTE_ETH_LINK_SPEED_200G; 883 default: 884 return 0; 885 } 886 } 887 888 const char * 889 rte_eth_dev_rx_offload_name(uint64_t offload) 890 { 891 const char *name = "UNKNOWN"; 892 unsigned int i; 893 894 for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) { 895 if (offload == eth_dev_rx_offload_names[i].offload) { 896 name = eth_dev_rx_offload_names[i].name; 897 break; 898 } 899 } 900 901 return name; 902 } 903 904 const char * 905 rte_eth_dev_tx_offload_name(uint64_t offload) 906 { 907 const char *name = "UNKNOWN"; 908 unsigned int i; 909 910 for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) { 911 if (offload == eth_dev_tx_offload_names[i].offload) { 912 name = eth_dev_tx_offload_names[i].name; 913 break; 914 } 915 } 916 917 return name; 918 } 919 920 const char * 921 rte_eth_dev_capability_name(uint64_t capability) 922 { 923 const char *name = "UNKNOWN"; 924 unsigned int i; 925 926 for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) { 927 if (capability == rte_eth_dev_capa_names[i].offload) { 928 name = rte_eth_dev_capa_names[i].name; 929 break; 930 } 931 } 932 933 return name; 934 } 935 936 static inline int 937 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size, 938 uint32_t max_rx_pkt_len, uint32_t dev_info_size) 939 { 940 int ret = 0; 941 942 if (dev_info_size == 0) { 943 if (config_size != max_rx_pkt_len) { 944 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size" 945 " %u != %u is not allowed\n", 946 port_id, config_size, max_rx_pkt_len); 947 ret = -EINVAL; 948 } 949 } else if (config_size > dev_info_size) { 950 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 951 "> max allowed value %u\n", port_id, config_size, 952 dev_info_size); 953 ret = -EINVAL; 954 } else if (config_size < RTE_ETHER_MIN_LEN) { 955 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u " 956 "< min allowed value %u\n", port_id, config_size, 957 (unsigned int)RTE_ETHER_MIN_LEN); 958 ret = -EINVAL; 959 } 960 return ret; 961 } 962 963 /* 964 * Validate offloads that are requested through rte_eth_dev_configure against 965 * the offloads successfully set by the Ethernet device. 966 * 967 * @param port_id 968 * The port identifier of the Ethernet device. 969 * @param req_offloads 970 * The offloads that have been requested through `rte_eth_dev_configure`. 971 * @param set_offloads 972 * The offloads successfully set by the Ethernet device. 973 * @param offload_type 974 * The offload type i.e. Rx/Tx string. 975 * @param offload_name 976 * The function that prints the offload name. 977 * @return 978 * - (0) if validation successful. 979 * - (-EINVAL) if requested offload has been silently disabled. 
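 *
 * A worked example (illustrative bit values only, not taken from any
 * particular driver): req_offloads=0x6 and set_offloads=0x2 give
 * offloads_diff=0x4; bit 0x4 is set in req_offloads but not in
 * set_offloads, so that offload was requested but not enabled and the
 * function returns -EINVAL.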
980 * 981 */ 982 static int 983 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads, 984 uint64_t set_offloads, const char *offload_type, 985 const char *(*offload_name)(uint64_t)) 986 { 987 uint64_t offloads_diff = req_offloads ^ set_offloads; 988 uint64_t offload; 989 int ret = 0; 990 991 while (offloads_diff != 0) { 992 /* Check if any offload is requested but not enabled. */ 993 offload = RTE_BIT64(__builtin_ctzll(offloads_diff)); 994 if (offload & req_offloads) { 995 RTE_ETHDEV_LOG(ERR, 996 "Port %u failed to enable %s offload %s\n", 997 port_id, offload_type, offload_name(offload)); 998 ret = -EINVAL; 999 } 1000 1001 /* Check if offload couldn't be disabled. */ 1002 if (offload & set_offloads) { 1003 RTE_ETHDEV_LOG(DEBUG, 1004 "Port %u %s offload %s is not requested but enabled\n", 1005 port_id, offload_type, offload_name(offload)); 1006 } 1007 1008 offloads_diff &= ~offload; 1009 } 1010 1011 return ret; 1012 } 1013 1014 static uint32_t 1015 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) 1016 { 1017 uint32_t overhead_len; 1018 1019 if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu) 1020 overhead_len = max_rx_pktlen - max_mtu; 1021 else 1022 overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 1023 1024 return overhead_len; 1025 } 1026 1027 /* rte_eth_dev_info_get() should be called prior to this function */ 1028 static int 1029 eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info, 1030 uint16_t mtu) 1031 { 1032 uint32_t overhead_len; 1033 uint32_t frame_size; 1034 1035 if (mtu < dev_info->min_mtu) { 1036 RTE_ETHDEV_LOG(ERR, 1037 "MTU (%u) < device min MTU (%u) for port_id %u\n", 1038 mtu, dev_info->min_mtu, port_id); 1039 return -EINVAL; 1040 } 1041 if (mtu > dev_info->max_mtu) { 1042 RTE_ETHDEV_LOG(ERR, 1043 "MTU (%u) > device max MTU (%u) for port_id %u\n", 1044 mtu, dev_info->max_mtu, port_id); 1045 return -EINVAL; 1046 } 1047 1048 overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen, 1049 dev_info->max_mtu); 1050 frame_size = mtu + overhead_len; 1051 if (frame_size < RTE_ETHER_MIN_LEN) { 1052 RTE_ETHDEV_LOG(ERR, 1053 "Frame size (%u) < min frame size (%u) for port_id %u\n", 1054 frame_size, RTE_ETHER_MIN_LEN, port_id); 1055 return -EINVAL; 1056 } 1057 1058 if (frame_size > dev_info->max_rx_pktlen) { 1059 RTE_ETHDEV_LOG(ERR, 1060 "Frame size (%u) > device max frame size (%u) for port_id %u\n", 1061 frame_size, dev_info->max_rx_pktlen, port_id); 1062 return -EINVAL; 1063 } 1064 1065 return 0; 1066 } 1067 1068 int 1069 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, 1070 const struct rte_eth_conf *dev_conf) 1071 { 1072 struct rte_eth_dev *dev; 1073 struct rte_eth_dev_info dev_info; 1074 struct rte_eth_conf orig_conf; 1075 int diag; 1076 int ret; 1077 uint16_t old_mtu; 1078 1079 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1080 dev = &rte_eth_devices[port_id]; 1081 1082 if (dev_conf == NULL) { 1083 RTE_ETHDEV_LOG(ERR, 1084 "Cannot configure ethdev port %u from NULL config\n", 1085 port_id); 1086 return -EINVAL; 1087 } 1088 1089 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); 1090 1091 if (dev->data->dev_started) { 1092 RTE_ETHDEV_LOG(ERR, 1093 "Port %u must be stopped to allow configuration\n", 1094 port_id); 1095 return -EBUSY; 1096 } 1097 1098 /* 1099 * Ensure that "dev_configured" is always 0 each time prepare to do 1100 * dev_configure() to avoid any non-anticipated behaviour. 1101 * And set to 1 when dev_configure() is executed successfully. 
1102 */ 1103 dev->data->dev_configured = 0; 1104 1105 /* Store original config, as rollback required on failure */ 1106 memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf)); 1107 1108 /* 1109 * Copy the dev_conf parameter into the dev structure. 1110 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get 1111 */ 1112 if (dev_conf != &dev->data->dev_conf) 1113 memcpy(&dev->data->dev_conf, dev_conf, 1114 sizeof(dev->data->dev_conf)); 1115 1116 /* Backup mtu for rollback */ 1117 old_mtu = dev->data->mtu; 1118 1119 ret = rte_eth_dev_info_get(port_id, &dev_info); 1120 if (ret != 0) 1121 goto rollback; 1122 1123 /* If number of queues specified by application for both Rx and Tx is 1124 * zero, use driver preferred values. This cannot be done individually 1125 * as it is valid for either Tx or Rx (but not both) to be zero. 1126 * If driver does not provide any preferred valued, fall back on 1127 * EAL defaults. 1128 */ 1129 if (nb_rx_q == 0 && nb_tx_q == 0) { 1130 nb_rx_q = dev_info.default_rxportconf.nb_queues; 1131 if (nb_rx_q == 0) 1132 nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES; 1133 nb_tx_q = dev_info.default_txportconf.nb_queues; 1134 if (nb_tx_q == 0) 1135 nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES; 1136 } 1137 1138 if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) { 1139 RTE_ETHDEV_LOG(ERR, 1140 "Number of Rx queues requested (%u) is greater than max supported(%d)\n", 1141 nb_rx_q, RTE_MAX_QUEUES_PER_PORT); 1142 ret = -EINVAL; 1143 goto rollback; 1144 } 1145 1146 if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) { 1147 RTE_ETHDEV_LOG(ERR, 1148 "Number of Tx queues requested (%u) is greater than max supported(%d)\n", 1149 nb_tx_q, RTE_MAX_QUEUES_PER_PORT); 1150 ret = -EINVAL; 1151 goto rollback; 1152 } 1153 1154 /* 1155 * Check that the numbers of Rx and Tx queues are not greater 1156 * than the maximum number of Rx and Tx queues supported by the 1157 * configured device. 1158 */ 1159 if (nb_rx_q > dev_info.max_rx_queues) { 1160 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n", 1161 port_id, nb_rx_q, dev_info.max_rx_queues); 1162 ret = -EINVAL; 1163 goto rollback; 1164 } 1165 1166 if (nb_tx_q > dev_info.max_tx_queues) { 1167 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n", 1168 port_id, nb_tx_q, dev_info.max_tx_queues); 1169 ret = -EINVAL; 1170 goto rollback; 1171 } 1172 1173 /* Check that the device supports requested interrupts */ 1174 if ((dev_conf->intr_conf.lsc == 1) && 1175 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { 1176 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n", 1177 dev->device->driver->name); 1178 ret = -EINVAL; 1179 goto rollback; 1180 } 1181 if ((dev_conf->intr_conf.rmv == 1) && 1182 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { 1183 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n", 1184 dev->device->driver->name); 1185 ret = -EINVAL; 1186 goto rollback; 1187 } 1188 1189 if (dev_conf->rxmode.mtu == 0) 1190 dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU; 1191 1192 ret = eth_dev_validate_mtu(port_id, &dev_info, 1193 dev->data->dev_conf.rxmode.mtu); 1194 if (ret != 0) 1195 goto rollback; 1196 1197 dev->data->mtu = dev->data->dev_conf.rxmode.mtu; 1198 1199 /* 1200 * If LRO is enabled, check that the maximum aggregated packet 1201 * size is supported by the configured device. 
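 * For example (illustrative numbers only): with mtu=1500 and an
 * overhead_len of 18 bytes (Ethernet header plus CRC), max_rx_pktlen is
 * 1518; a requested max_lro_pkt_size of 0 then defaults to 1518, and any
 * value above dev_info.max_lro_pkt_size (when the device reports one) is
 * rejected by eth_dev_check_lro_pkt_size().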
1202 */ 1203 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 1204 uint32_t max_rx_pktlen; 1205 uint32_t overhead_len; 1206 1207 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1208 dev_info.max_mtu); 1209 max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len; 1210 if (dev_conf->rxmode.max_lro_pkt_size == 0) 1211 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 1212 ret = eth_dev_check_lro_pkt_size(port_id, 1213 dev->data->dev_conf.rxmode.max_lro_pkt_size, 1214 max_rx_pktlen, 1215 dev_info.max_lro_pkt_size); 1216 if (ret != 0) 1217 goto rollback; 1218 } 1219 1220 /* Any requested offloading must be within its device capabilities */ 1221 if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) != 1222 dev_conf->rxmode.offloads) { 1223 RTE_ETHDEV_LOG(ERR, 1224 "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads " 1225 "capabilities 0x%"PRIx64" in %s()\n", 1226 port_id, dev_conf->rxmode.offloads, 1227 dev_info.rx_offload_capa, 1228 __func__); 1229 ret = -EINVAL; 1230 goto rollback; 1231 } 1232 if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) != 1233 dev_conf->txmode.offloads) { 1234 RTE_ETHDEV_LOG(ERR, 1235 "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads " 1236 "capabilities 0x%"PRIx64" in %s()\n", 1237 port_id, dev_conf->txmode.offloads, 1238 dev_info.tx_offload_capa, 1239 __func__); 1240 ret = -EINVAL; 1241 goto rollback; 1242 } 1243 1244 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = 1245 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf); 1246 1247 /* Check that device supports requested rss hash functions. */ 1248 if ((dev_info.flow_type_rss_offloads | 1249 dev_conf->rx_adv_conf.rss_conf.rss_hf) != 1250 dev_info.flow_type_rss_offloads) { 1251 RTE_ETHDEV_LOG(ERR, 1252 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 1253 port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf, 1254 dev_info.flow_type_rss_offloads); 1255 ret = -EINVAL; 1256 goto rollback; 1257 } 1258 1259 /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */ 1260 if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) && 1261 (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) { 1262 RTE_ETHDEV_LOG(ERR, 1263 "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n", 1264 port_id, 1265 rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH)); 1266 ret = -EINVAL; 1267 goto rollback; 1268 } 1269 1270 /* 1271 * Setup new number of Rx/Tx queues and reconfigure device. 1272 */ 1273 diag = eth_dev_rx_queue_config(dev, nb_rx_q); 1274 if (diag != 0) { 1275 RTE_ETHDEV_LOG(ERR, 1276 "Port%u eth_dev_rx_queue_config = %d\n", 1277 port_id, diag); 1278 ret = diag; 1279 goto rollback; 1280 } 1281 1282 diag = eth_dev_tx_queue_config(dev, nb_tx_q); 1283 if (diag != 0) { 1284 RTE_ETHDEV_LOG(ERR, 1285 "Port%u eth_dev_tx_queue_config = %d\n", 1286 port_id, diag); 1287 eth_dev_rx_queue_config(dev, 0); 1288 ret = diag; 1289 goto rollback; 1290 } 1291 1292 diag = (*dev->dev_ops->dev_configure)(dev); 1293 if (diag != 0) { 1294 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n", 1295 port_id, diag); 1296 ret = eth_err(port_id, diag); 1297 goto reset_queues; 1298 } 1299 1300 /* Initialize Rx profiling if enabled at compilation time. 
*/ 1301 diag = __rte_eth_dev_profile_init(port_id, dev); 1302 if (diag != 0) { 1303 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n", 1304 port_id, diag); 1305 ret = eth_err(port_id, diag); 1306 goto reset_queues; 1307 } 1308 1309 /* Validate Rx offloads. */ 1310 diag = eth_dev_validate_offloads(port_id, 1311 dev_conf->rxmode.offloads, 1312 dev->data->dev_conf.rxmode.offloads, "Rx", 1313 rte_eth_dev_rx_offload_name); 1314 if (diag != 0) { 1315 ret = diag; 1316 goto reset_queues; 1317 } 1318 1319 /* Validate Tx offloads. */ 1320 diag = eth_dev_validate_offloads(port_id, 1321 dev_conf->txmode.offloads, 1322 dev->data->dev_conf.txmode.offloads, "Tx", 1323 rte_eth_dev_tx_offload_name); 1324 if (diag != 0) { 1325 ret = diag; 1326 goto reset_queues; 1327 } 1328 1329 dev->data->dev_configured = 1; 1330 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0); 1331 return 0; 1332 reset_queues: 1333 eth_dev_rx_queue_config(dev, 0); 1334 eth_dev_tx_queue_config(dev, 0); 1335 rollback: 1336 memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf)); 1337 if (old_mtu != dev->data->mtu) 1338 dev->data->mtu = old_mtu; 1339 1340 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret); 1341 return ret; 1342 } 1343 1344 static void 1345 eth_dev_mac_restore(struct rte_eth_dev *dev, 1346 struct rte_eth_dev_info *dev_info) 1347 { 1348 struct rte_ether_addr *addr; 1349 uint16_t i; 1350 uint32_t pool = 0; 1351 uint64_t pool_mask; 1352 1353 /* replay MAC address configuration including default MAC */ 1354 addr = &dev->data->mac_addrs[0]; 1355 if (*dev->dev_ops->mac_addr_set != NULL) 1356 (*dev->dev_ops->mac_addr_set)(dev, addr); 1357 else if (*dev->dev_ops->mac_addr_add != NULL) 1358 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); 1359 1360 if (*dev->dev_ops->mac_addr_add != NULL) { 1361 for (i = 1; i < dev_info->max_mac_addrs; i++) { 1362 addr = &dev->data->mac_addrs[i]; 1363 1364 /* skip zero address */ 1365 if (rte_is_zero_ether_addr(addr)) 1366 continue; 1367 1368 pool = 0; 1369 pool_mask = dev->data->mac_pool_sel[i]; 1370 1371 do { 1372 if (pool_mask & UINT64_C(1)) 1373 (*dev->dev_ops->mac_addr_add)(dev, 1374 addr, i, pool); 1375 pool_mask >>= 1; 1376 pool++; 1377 } while (pool_mask); 1378 } 1379 } 1380 } 1381 1382 static int 1383 eth_dev_config_restore(struct rte_eth_dev *dev, 1384 struct rte_eth_dev_info *dev_info, uint16_t port_id) 1385 { 1386 int ret; 1387 1388 if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)) 1389 eth_dev_mac_restore(dev, dev_info); 1390 1391 /* replay promiscuous configuration */ 1392 /* 1393 * use callbacks directly since we don't need port_id check and 1394 * would like to bypass the same value set 1395 */ 1396 if (rte_eth_promiscuous_get(port_id) == 1 && 1397 *dev->dev_ops->promiscuous_enable != NULL) { 1398 ret = eth_err(port_id, 1399 (*dev->dev_ops->promiscuous_enable)(dev)); 1400 if (ret != 0 && ret != -ENOTSUP) { 1401 RTE_ETHDEV_LOG(ERR, 1402 "Failed to enable promiscuous mode for device (port %u): %s\n", 1403 port_id, rte_strerror(-ret)); 1404 return ret; 1405 } 1406 } else if (rte_eth_promiscuous_get(port_id) == 0 && 1407 *dev->dev_ops->promiscuous_disable != NULL) { 1408 ret = eth_err(port_id, 1409 (*dev->dev_ops->promiscuous_disable)(dev)); 1410 if (ret != 0 && ret != -ENOTSUP) { 1411 RTE_ETHDEV_LOG(ERR, 1412 "Failed to disable promiscuous mode for device (port %u): %s\n", 1413 port_id, rte_strerror(-ret)); 1414 return ret; 1415 } 1416 } 1417 1418 /* replay all multicast configuration */ 1419 /* 1420 * use 
callbacks directly since we don't need port_id check and 1421 * would like to bypass the same value set 1422 */ 1423 if (rte_eth_allmulticast_get(port_id) == 1 && 1424 *dev->dev_ops->allmulticast_enable != NULL) { 1425 ret = eth_err(port_id, 1426 (*dev->dev_ops->allmulticast_enable)(dev)); 1427 if (ret != 0 && ret != -ENOTSUP) { 1428 RTE_ETHDEV_LOG(ERR, 1429 "Failed to enable allmulticast mode for device (port %u): %s\n", 1430 port_id, rte_strerror(-ret)); 1431 return ret; 1432 } 1433 } else if (rte_eth_allmulticast_get(port_id) == 0 && 1434 *dev->dev_ops->allmulticast_disable != NULL) { 1435 ret = eth_err(port_id, 1436 (*dev->dev_ops->allmulticast_disable)(dev)); 1437 if (ret != 0 && ret != -ENOTSUP) { 1438 RTE_ETHDEV_LOG(ERR, 1439 "Failed to disable allmulticast mode for device (port %u): %s\n", 1440 port_id, rte_strerror(-ret)); 1441 return ret; 1442 } 1443 } 1444 1445 return 0; 1446 } 1447 1448 int 1449 rte_eth_dev_start(uint16_t port_id) 1450 { 1451 struct rte_eth_dev *dev; 1452 struct rte_eth_dev_info dev_info; 1453 int diag; 1454 int ret, ret_stop; 1455 1456 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1457 dev = &rte_eth_devices[port_id]; 1458 1459 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); 1460 1461 if (dev->data->dev_configured == 0) { 1462 RTE_ETHDEV_LOG(INFO, 1463 "Device with port_id=%"PRIu16" is not configured.\n", 1464 port_id); 1465 return -EINVAL; 1466 } 1467 1468 if (dev->data->dev_started != 0) { 1469 RTE_ETHDEV_LOG(INFO, 1470 "Device with port_id=%"PRIu16" already started\n", 1471 port_id); 1472 return 0; 1473 } 1474 1475 ret = rte_eth_dev_info_get(port_id, &dev_info); 1476 if (ret != 0) 1477 return ret; 1478 1479 /* Lets restore MAC now if device does not support live change */ 1480 if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR) 1481 eth_dev_mac_restore(dev, &dev_info); 1482 1483 diag = (*dev->dev_ops->dev_start)(dev); 1484 if (diag == 0) 1485 dev->data->dev_started = 1; 1486 else 1487 return eth_err(port_id, diag); 1488 1489 ret = eth_dev_config_restore(dev, &dev_info, port_id); 1490 if (ret != 0) { 1491 RTE_ETHDEV_LOG(ERR, 1492 "Error during restoring configuration for device (port %u): %s\n", 1493 port_id, rte_strerror(-ret)); 1494 ret_stop = rte_eth_dev_stop(port_id); 1495 if (ret_stop != 0) { 1496 RTE_ETHDEV_LOG(ERR, 1497 "Failed to stop device (port %u): %s\n", 1498 port_id, rte_strerror(-ret_stop)); 1499 } 1500 1501 return ret; 1502 } 1503 1504 if (dev->data->dev_conf.intr_conf.lsc == 0) { 1505 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 1506 (*dev->dev_ops->link_update)(dev, 0); 1507 } 1508 1509 /* expose selection of PMD fast-path functions */ 1510 eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev); 1511 1512 rte_ethdev_trace_start(port_id); 1513 return 0; 1514 } 1515 1516 int 1517 rte_eth_dev_stop(uint16_t port_id) 1518 { 1519 struct rte_eth_dev *dev; 1520 int ret; 1521 1522 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1523 dev = &rte_eth_devices[port_id]; 1524 1525 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP); 1526 1527 if (dev->data->dev_started == 0) { 1528 RTE_ETHDEV_LOG(INFO, 1529 "Device with port_id=%"PRIu16" already stopped\n", 1530 port_id); 1531 return 0; 1532 } 1533 1534 /* point fast-path functions to dummy ones */ 1535 eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id); 1536 1537 ret = (*dev->dev_ops->dev_stop)(dev); 1538 if (ret == 0) 1539 dev->data->dev_started = 0; 1540 rte_ethdev_trace_stop(port_id, ret); 1541 1542 return ret; 1543 } 1544 1545 int 1546 
rte_eth_dev_set_link_up(uint16_t port_id) 1547 { 1548 struct rte_eth_dev *dev; 1549 1550 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1551 dev = &rte_eth_devices[port_id]; 1552 1553 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP); 1554 return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev)); 1555 } 1556 1557 int 1558 rte_eth_dev_set_link_down(uint16_t port_id) 1559 { 1560 struct rte_eth_dev *dev; 1561 1562 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1563 dev = &rte_eth_devices[port_id]; 1564 1565 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP); 1566 return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev)); 1567 } 1568 1569 int 1570 rte_eth_dev_close(uint16_t port_id) 1571 { 1572 struct rte_eth_dev *dev; 1573 int firsterr, binerr; 1574 int *lasterr = &firsterr; 1575 1576 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1577 dev = &rte_eth_devices[port_id]; 1578 1579 if (dev->data->dev_started) { 1580 RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n", 1581 port_id); 1582 return -EINVAL; 1583 } 1584 1585 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP); 1586 *lasterr = (*dev->dev_ops->dev_close)(dev); 1587 if (*lasterr != 0) 1588 lasterr = &binerr; 1589 1590 rte_ethdev_trace_close(port_id); 1591 *lasterr = rte_eth_dev_release_port(dev); 1592 1593 return firsterr; 1594 } 1595 1596 int 1597 rte_eth_dev_reset(uint16_t port_id) 1598 { 1599 struct rte_eth_dev *dev; 1600 int ret; 1601 1602 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1603 dev = &rte_eth_devices[port_id]; 1604 1605 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP); 1606 1607 ret = rte_eth_dev_stop(port_id); 1608 if (ret != 0) { 1609 RTE_ETHDEV_LOG(ERR, 1610 "Failed to stop device (port %u) before reset: %s - ignore\n", 1611 port_id, rte_strerror(-ret)); 1612 } 1613 ret = dev->dev_ops->dev_reset(dev); 1614 1615 return eth_err(port_id, ret); 1616 } 1617 1618 int 1619 rte_eth_dev_is_removed(uint16_t port_id) 1620 { 1621 struct rte_eth_dev *dev; 1622 int ret; 1623 1624 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); 1625 dev = &rte_eth_devices[port_id]; 1626 1627 if (dev->state == RTE_ETH_DEV_REMOVED) 1628 return 1; 1629 1630 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0); 1631 1632 ret = dev->dev_ops->is_removed(dev); 1633 if (ret != 0) 1634 /* Device is physically removed. */ 1635 dev->state = RTE_ETH_DEV_REMOVED; 1636 1637 return ret; 1638 } 1639 1640 static int 1641 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg, 1642 uint16_t n_seg, uint32_t *mbp_buf_size, 1643 const struct rte_eth_dev_info *dev_info) 1644 { 1645 const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; 1646 struct rte_mempool *mp_first; 1647 uint32_t offset_mask; 1648 uint16_t seg_idx; 1649 1650 if (n_seg > seg_capa->max_nseg) { 1651 RTE_ETHDEV_LOG(ERR, 1652 "Requested Rx segments %u exceed supported %u\n", 1653 n_seg, seg_capa->max_nseg); 1654 return -EINVAL; 1655 } 1656 /* 1657 * Check the sizes and offsets against buffer sizes 1658 * for each segment specified in extended configuration. 
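 *
 * A minimal sketch of what a caller might pass (pool names and sizes are
 * assumptions for illustration, not a reference configuration):
 *
 *   struct rte_eth_rxseg_split segs[2] = {
 *       { .mp = hdr_pool,  .length = 128, .offset = 0 },
 *       { .mp = data_pool, .length = 0,   .offset = 0 },
 *   };
 *
 * A zero length means "use the pool buffer size"; each segment below is
 * checked against its mempool's data room.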
1659 */ 1660 mp_first = rx_seg[0].mp; 1661 offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1; 1662 for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { 1663 struct rte_mempool *mpl = rx_seg[seg_idx].mp; 1664 uint32_t length = rx_seg[seg_idx].length; 1665 uint32_t offset = rx_seg[seg_idx].offset; 1666 1667 if (mpl == NULL) { 1668 RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); 1669 return -EINVAL; 1670 } 1671 if (seg_idx != 0 && mp_first != mpl && 1672 seg_capa->multi_pools == 0) { 1673 RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n"); 1674 return -ENOTSUP; 1675 } 1676 if (offset != 0) { 1677 if (seg_capa->offset_allowed == 0) { 1678 RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n"); 1679 return -ENOTSUP; 1680 } 1681 if (offset & offset_mask) { 1682 RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n", 1683 offset, 1684 seg_capa->offset_align_log2); 1685 return -EINVAL; 1686 } 1687 } 1688 if (mpl->private_data_size < 1689 sizeof(struct rte_pktmbuf_pool_private)) { 1690 RTE_ETHDEV_LOG(ERR, 1691 "%s private_data_size %u < %u\n", 1692 mpl->name, mpl->private_data_size, 1693 (unsigned int)sizeof 1694 (struct rte_pktmbuf_pool_private)); 1695 return -ENOSPC; 1696 } 1697 offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; 1698 *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); 1699 length = length != 0 ? length : *mbp_buf_size; 1700 if (*mbp_buf_size < length + offset) { 1701 RTE_ETHDEV_LOG(ERR, 1702 "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n", 1703 mpl->name, *mbp_buf_size, 1704 length + offset, length, offset); 1705 return -EINVAL; 1706 } 1707 } 1708 return 0; 1709 } 1710 1711 int 1712 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 1713 uint16_t nb_rx_desc, unsigned int socket_id, 1714 const struct rte_eth_rxconf *rx_conf, 1715 struct rte_mempool *mp) 1716 { 1717 int ret; 1718 uint32_t mbp_buf_size; 1719 struct rte_eth_dev *dev; 1720 struct rte_eth_dev_info dev_info; 1721 struct rte_eth_rxconf local_conf; 1722 1723 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1724 dev = &rte_eth_devices[port_id]; 1725 1726 if (rx_queue_id >= dev->data->nb_rx_queues) { 1727 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 1728 return -EINVAL; 1729 } 1730 1731 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); 1732 1733 ret = rte_eth_dev_info_get(port_id, &dev_info); 1734 if (ret != 0) 1735 return ret; 1736 1737 if (mp != NULL) { 1738 /* Single pool configuration check. */ 1739 if (rx_conf != NULL && rx_conf->rx_nseg != 0) { 1740 RTE_ETHDEV_LOG(ERR, 1741 "Ambiguous segment configuration\n"); 1742 return -EINVAL; 1743 } 1744 /* 1745 * Check the size of the mbuf data buffer, this value 1746 * must be provided in the private data of the memory pool. 1747 * First check that the memory pool(s) has a valid private data. 
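 * For example (sizes are illustrative): a pool created with
 * rte_pktmbuf_pool_create(..., RTE_MBUF_DEFAULT_BUF_SIZE, ...) carries a
 * struct rte_pktmbuf_pool_private, and rte_pktmbuf_data_room_size()
 * returns the configured data room, which must cover RTE_PKTMBUF_HEADROOM
 * plus the device's min_rx_bufsize.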
1748 */ 1749 if (mp->private_data_size < 1750 sizeof(struct rte_pktmbuf_pool_private)) { 1751 RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n", 1752 mp->name, mp->private_data_size, 1753 (unsigned int) 1754 sizeof(struct rte_pktmbuf_pool_private)); 1755 return -ENOSPC; 1756 } 1757 mbp_buf_size = rte_pktmbuf_data_room_size(mp); 1758 if (mbp_buf_size < dev_info.min_rx_bufsize + 1759 RTE_PKTMBUF_HEADROOM) { 1760 RTE_ETHDEV_LOG(ERR, 1761 "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", 1762 mp->name, mbp_buf_size, 1763 RTE_PKTMBUF_HEADROOM + 1764 dev_info.min_rx_bufsize, 1765 RTE_PKTMBUF_HEADROOM, 1766 dev_info.min_rx_bufsize); 1767 return -EINVAL; 1768 } 1769 } else { 1770 const struct rte_eth_rxseg_split *rx_seg; 1771 uint16_t n_seg; 1772 1773 /* Extended multi-segment configuration check. */ 1774 if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) { 1775 RTE_ETHDEV_LOG(ERR, 1776 "Memory pool is null and no extended configuration provided\n"); 1777 return -EINVAL; 1778 } 1779 1780 rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg; 1781 n_seg = rx_conf->rx_nseg; 1782 1783 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 1784 ret = rte_eth_rx_queue_check_split(rx_seg, n_seg, 1785 &mbp_buf_size, 1786 &dev_info); 1787 if (ret != 0) 1788 return ret; 1789 } else { 1790 RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n"); 1791 return -EINVAL; 1792 } 1793 } 1794 1795 /* Use default specified by driver, if nb_rx_desc is zero */ 1796 if (nb_rx_desc == 0) { 1797 nb_rx_desc = dev_info.default_rxportconf.ring_size; 1798 /* If driver default is also zero, fall back on EAL default */ 1799 if (nb_rx_desc == 0) 1800 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE; 1801 } 1802 1803 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || 1804 nb_rx_desc < dev_info.rx_desc_lim.nb_min || 1805 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { 1806 1807 RTE_ETHDEV_LOG(ERR, 1808 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 1809 nb_rx_desc, dev_info.rx_desc_lim.nb_max, 1810 dev_info.rx_desc_lim.nb_min, 1811 dev_info.rx_desc_lim.nb_align); 1812 return -EINVAL; 1813 } 1814 1815 if (dev->data->dev_started && 1816 !(dev_info.dev_capa & 1817 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)) 1818 return -EBUSY; 1819 1820 if (dev->data->dev_started && 1821 (dev->data->rx_queue_state[rx_queue_id] != 1822 RTE_ETH_QUEUE_STATE_STOPPED)) 1823 return -EBUSY; 1824 1825 eth_dev_rxq_release(dev, rx_queue_id); 1826 1827 if (rx_conf == NULL) 1828 rx_conf = &dev_info.default_rxconf; 1829 1830 local_conf = *rx_conf; 1831 1832 /* 1833 * If an offloading has already been enabled in 1834 * rte_eth_dev_configure(), it has been enabled on all queues, 1835 * so there is no need to enable it in this queue again. 1836 * The local_conf.offloads input to underlying PMD only carries 1837 * those offloadings which are only enabled on this queue and 1838 * not enabled on all queues. 1839 */ 1840 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; 1841 1842 /* 1843 * New added offloadings for this queue are those not enabled in 1844 * rte_eth_dev_configure() and they must be per-queue type. 1845 * A pure per-port offloading can't be enabled on a queue while 1846 * disabled on another queue. A pure per-port offloading can't 1847 * be enabled for any queue as new added one if it hasn't been 1848 * enabled in rte_eth_dev_configure(). 
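 *
 * For example (the offload choice is illustrative): if
 * rte_eth_dev_configure() already enabled RTE_ETH_RX_OFFLOAD_VLAN_STRIP
 * port-wide and this queue additionally requests
 * RTE_ETH_RX_OFFLOAD_SCATTER, only SCATTER is left in local_conf.offloads
 * here, and it must be present in dev_info.rx_queue_offload_capa for the
 * setup to be accepted.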
1849 */ 1850 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) != 1851 local_conf.offloads) { 1852 RTE_ETHDEV_LOG(ERR, 1853 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 1854 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 1855 port_id, rx_queue_id, local_conf.offloads, 1856 dev_info.rx_queue_offload_capa, 1857 __func__); 1858 return -EINVAL; 1859 } 1860 1861 if (local_conf.share_group > 0 && 1862 (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) { 1863 RTE_ETHDEV_LOG(ERR, 1864 "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n", 1865 port_id, rx_queue_id, local_conf.share_group); 1866 return -EINVAL; 1867 } 1868 1869 /* 1870 * If LRO is enabled, check that the maximum aggregated packet 1871 * size is supported by the configured device. 1872 */ 1873 /* Get the real Ethernet overhead length */ 1874 if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 1875 uint32_t overhead_len; 1876 uint32_t max_rx_pktlen; 1877 int ret; 1878 1879 overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen, 1880 dev_info.max_mtu); 1881 max_rx_pktlen = dev->data->mtu + overhead_len; 1882 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) 1883 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; 1884 ret = eth_dev_check_lro_pkt_size(port_id, 1885 dev->data->dev_conf.rxmode.max_lro_pkt_size, 1886 max_rx_pktlen, 1887 dev_info.max_lro_pkt_size); 1888 if (ret != 0) 1889 return ret; 1890 } 1891 1892 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, 1893 socket_id, &local_conf, mp); 1894 if (!ret) { 1895 if (!dev->data->min_rx_buf_size || 1896 dev->data->min_rx_buf_size > mbp_buf_size) 1897 dev->data->min_rx_buf_size = mbp_buf_size; 1898 } 1899 1900 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp, 1901 rx_conf, ret); 1902 return eth_err(port_id, ret); 1903 } 1904 1905 int 1906 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, 1907 uint16_t nb_rx_desc, 1908 const struct rte_eth_hairpin_conf *conf) 1909 { 1910 int ret; 1911 struct rte_eth_dev *dev; 1912 struct rte_eth_hairpin_cap cap; 1913 int i; 1914 int count; 1915 1916 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1917 dev = &rte_eth_devices[port_id]; 1918 1919 if (rx_queue_id >= dev->data->nb_rx_queues) { 1920 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id); 1921 return -EINVAL; 1922 } 1923 1924 if (conf == NULL) { 1925 RTE_ETHDEV_LOG(ERR, 1926 "Cannot setup ethdev port %u Rx hairpin queue from NULL config\n", 1927 port_id); 1928 return -EINVAL; 1929 } 1930 1931 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 1932 if (ret != 0) 1933 return ret; 1934 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, 1935 -ENOTSUP); 1936 /* if nb_rx_desc is zero use max number of desc from the driver. 
 */
1937	if (nb_rx_desc == 0)
1938		nb_rx_desc = cap.max_nb_desc;
1939	if (nb_rx_desc > cap.max_nb_desc) {
1940		RTE_ETHDEV_LOG(ERR,
1941			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
1942			nb_rx_desc, cap.max_nb_desc);
1943		return -EINVAL;
1944	}
1945	if (conf->peer_count > cap.max_rx_2_tx) {
1946		RTE_ETHDEV_LOG(ERR,
1947			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
1948			conf->peer_count, cap.max_rx_2_tx);
1949		return -EINVAL;
1950	}
1951	if (conf->peer_count == 0) {
1952		RTE_ETHDEV_LOG(ERR,
1953			"Invalid value for number of peers for Rx queue(=%u), should be: > 0",
1954			conf->peer_count);
1955		return -EINVAL;
1956	}
1957	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1958	     cap.max_nb_queues != UINT16_MAX; i++) {
1959		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1960			count++;
1961	}
1962	if (count > cap.max_nb_queues) {
1963		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d",
1964			cap.max_nb_queues);
1965		return -EINVAL;
1966	}
1967	if (dev->data->dev_started)
1968		return -EBUSY;
1969	eth_dev_rxq_release(dev, rx_queue_id);
1970	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1971		nb_rx_desc, conf);
1972	if (ret == 0)
1973		dev->data->rx_queue_state[rx_queue_id] =
1974			RTE_ETH_QUEUE_STATE_HAIRPIN;
1975	return eth_err(port_id, ret);
1976 }
1977
1978 int
1979 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1980		uint16_t nb_tx_desc, unsigned int socket_id,
1981		const struct rte_eth_txconf *tx_conf)
1982 {
1983	struct rte_eth_dev *dev;
1984	struct rte_eth_dev_info dev_info;
1985	struct rte_eth_txconf local_conf;
1986	int ret;
1987
1988	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1989	dev = &rte_eth_devices[port_id];
1990
1991	if (tx_queue_id >= dev->data->nb_tx_queues) {
1992		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
1993		return -EINVAL;
1994	}
1995
1996	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1997
1998	ret = rte_eth_dev_info_get(port_id, &dev_info);
1999	if (ret != 0)
2000		return ret;
2001
2002	/* Use default specified by driver, if nb_tx_desc is zero */
2003	if (nb_tx_desc == 0) {
2004		nb_tx_desc = dev_info.default_txportconf.ring_size;
2005		/* If driver default is zero, fall back on EAL default */
2006		if (nb_tx_desc == 0)
2007			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2008	}
2009	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2010	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2011	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2012		RTE_ETHDEV_LOG(ERR,
2013			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2014			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2015			dev_info.tx_desc_lim.nb_min,
2016			dev_info.tx_desc_lim.nb_align);
2017		return -EINVAL;
2018	}
2019
2020	if (dev->data->dev_started &&
2021	    !(dev_info.dev_capa &
2022	      RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2023		return -EBUSY;
2024
2025	if (dev->data->dev_started &&
2026	    (dev->data->tx_queue_state[tx_queue_id] !=
2027	     RTE_ETH_QUEUE_STATE_STOPPED))
2028		return -EBUSY;
2029
2030	eth_dev_txq_release(dev, tx_queue_id);
2031
2032	if (tx_conf == NULL)
2033		tx_conf = &dev_info.default_txconf;
2034
2035	local_conf = *tx_conf;
2036
2037	/*
2038	 * If an offloading has already been enabled in
2039	 * rte_eth_dev_configure(), it has been enabled on all queues,
2040	 * so there is no need to enable it in this queue again.
2041	 * The local_conf.offloads input to underlying PMD only carries
2042	 * those offloadings which are only enabled on this queue and
2043	 * not enabled on all queues.
2044	 */
2045	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2046
2047	/*
2048	 * New added offloadings for this queue are those not enabled in
2049	 * rte_eth_dev_configure() and they must be per-queue type.
2050	 * A pure per-port offloading can't be enabled on a queue while
2051	 * disabled on another queue. A pure per-port offloading can't
2052	 * be enabled for any queue as new added one if it hasn't been
2053	 * enabled in rte_eth_dev_configure().
2054	 */
2055	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2056	    local_conf.offloads) {
2057		RTE_ETHDEV_LOG(ERR,
2058			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2059			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2060			port_id, tx_queue_id, local_conf.offloads,
2061			dev_info.tx_queue_offload_capa,
2062			__func__);
2063		return -EINVAL;
2064	}
2065
2066	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2067	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2068		tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2069 }
2070
2071 int
2072 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2073		uint16_t nb_tx_desc,
2074		const struct rte_eth_hairpin_conf *conf)
2075 {
2076	struct rte_eth_dev *dev;
2077	struct rte_eth_hairpin_cap cap;
2078	int i;
2079	int count;
2080	int ret;
2081
2082	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2083	dev = &rte_eth_devices[port_id];
2084
2085	if (tx_queue_id >= dev->data->nb_tx_queues) {
2086		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2087		return -EINVAL;
2088	}
2089
2090	if (conf == NULL) {
2091		RTE_ETHDEV_LOG(ERR,
2092			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2093			port_id);
2094		return -EINVAL;
2095	}
2096
2097	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2098	if (ret != 0)
2099		return ret;
2100	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2101		-ENOTSUP);
2102	/* if nb_tx_desc is zero use max number of desc from the driver.
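	 * (cap.max_nb_desc, as reported by rte_eth_dev_hairpin_capability_get() above).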
	 */
2103	if (nb_tx_desc == 0)
2104		nb_tx_desc = cap.max_nb_desc;
2105	if (nb_tx_desc > cap.max_nb_desc) {
2106		RTE_ETHDEV_LOG(ERR,
2107			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2108			nb_tx_desc, cap.max_nb_desc);
2109		return -EINVAL;
2110	}
2111	if (conf->peer_count > cap.max_tx_2_rx) {
2112		RTE_ETHDEV_LOG(ERR,
2113			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2114			conf->peer_count, cap.max_tx_2_rx);
2115		return -EINVAL;
2116	}
2117	if (conf->peer_count == 0) {
2118		RTE_ETHDEV_LOG(ERR,
2119			"Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2120			conf->peer_count);
2121		return -EINVAL;
2122	}
2123	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2124	     cap.max_nb_queues != UINT16_MAX; i++) {
2125		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2126			count++;
2127	}
2128	if (count > cap.max_nb_queues) {
2129		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d",
2130			cap.max_nb_queues);
2131		return -EINVAL;
2132	}
2133	if (dev->data->dev_started)
2134		return -EBUSY;
2135	eth_dev_txq_release(dev, tx_queue_id);
2136	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2137		(dev, tx_queue_id, nb_tx_desc, conf);
2138	if (ret == 0)
2139		dev->data->tx_queue_state[tx_queue_id] =
2140			RTE_ETH_QUEUE_STATE_HAIRPIN;
2141	return eth_err(port_id, ret);
2142 }
2143
2144 int
2145 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2146 {
2147	struct rte_eth_dev *dev;
2148	int ret;
2149
2150	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2151	dev = &rte_eth_devices[tx_port];
2152
2153	if (dev->data->dev_started == 0) {
2154		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2155		return -EBUSY;
2156	}
2157
2158	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2159	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2160	if (ret != 0)
2161		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2162			" to Rx %d (%d - all ports)\n",
2163			tx_port, rx_port, RTE_MAX_ETHPORTS);
2164
2165	return ret;
2166 }
2167
2168 int
2169 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2170 {
2171	struct rte_eth_dev *dev;
2172	int ret;
2173
2174	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2175	dev = &rte_eth_devices[tx_port];
2176
2177	if (dev->data->dev_started == 0) {
2178		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2179		return -EBUSY;
2180	}
2181
2182	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2183	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2184	if (ret != 0)
2185		RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2186			" from Rx %d (%d - all ports)\n",
2187			tx_port, rx_port, RTE_MAX_ETHPORTS);
2188
2189	return ret;
2190 }
2191
2192 int
2193 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2194		size_t len, uint32_t direction)
2195 {
2196	struct rte_eth_dev *dev;
2197	int ret;
2198
2199	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2200	dev = &rte_eth_devices[port_id];
2201
2202	if (peer_ports == NULL) {
2203		RTE_ETHDEV_LOG(ERR,
2204			"Cannot get ethdev port %u hairpin peer ports to NULL\n",
2205			port_id);
2206		return -EINVAL;
2207	}
2208
2209	if (len == 0) {
2210		RTE_ETHDEV_LOG(ERR,
2211			"Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2212			port_id);
2213		return -EINVAL;
2214	}
2215
2216	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2217		-ENOTSUP);
2218
2219	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2220		len, direction);
2221	if (ret < 0) 2222
RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2223 port_id, direction ? "Rx" : "Tx"); 2224 2225 return ret; 2226 } 2227 2228 void 2229 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2230 void *userdata __rte_unused) 2231 { 2232 rte_pktmbuf_free_bulk(pkts, unsent); 2233 } 2234 2235 void 2236 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2237 void *userdata) 2238 { 2239 uint64_t *count = userdata; 2240 2241 rte_pktmbuf_free_bulk(pkts, unsent); 2242 *count += unsent; 2243 } 2244 2245 int 2246 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2247 buffer_tx_error_fn cbfn, void *userdata) 2248 { 2249 if (buffer == NULL) { 2250 RTE_ETHDEV_LOG(ERR, 2251 "Cannot set Tx buffer error callback to NULL buffer\n"); 2252 return -EINVAL; 2253 } 2254 2255 buffer->error_callback = cbfn; 2256 buffer->error_userdata = userdata; 2257 return 0; 2258 } 2259 2260 int 2261 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2262 { 2263 int ret = 0; 2264 2265 if (buffer == NULL) { 2266 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2267 return -EINVAL; 2268 } 2269 2270 buffer->size = size; 2271 if (buffer->error_callback == NULL) { 2272 ret = rte_eth_tx_buffer_set_err_callback( 2273 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2274 } 2275 2276 return ret; 2277 } 2278 2279 int 2280 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2281 { 2282 struct rte_eth_dev *dev; 2283 int ret; 2284 2285 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2286 dev = &rte_eth_devices[port_id]; 2287 2288 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2289 2290 /* Call driver to free pending mbufs. */ 2291 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2292 free_cnt); 2293 return eth_err(port_id, ret); 2294 } 2295 2296 int 2297 rte_eth_promiscuous_enable(uint16_t port_id) 2298 { 2299 struct rte_eth_dev *dev; 2300 int diag = 0; 2301 2302 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2303 dev = &rte_eth_devices[port_id]; 2304 2305 if (dev->data->promiscuous == 1) 2306 return 0; 2307 2308 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2309 2310 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2311 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2312 2313 return eth_err(port_id, diag); 2314 } 2315 2316 int 2317 rte_eth_promiscuous_disable(uint16_t port_id) 2318 { 2319 struct rte_eth_dev *dev; 2320 int diag = 0; 2321 2322 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2323 dev = &rte_eth_devices[port_id]; 2324 2325 if (dev->data->promiscuous == 0) 2326 return 0; 2327 2328 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2329 2330 dev->data->promiscuous = 0; 2331 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2332 if (diag != 0) 2333 dev->data->promiscuous = 1; 2334 2335 return eth_err(port_id, diag); 2336 } 2337 2338 int 2339 rte_eth_promiscuous_get(uint16_t port_id) 2340 { 2341 struct rte_eth_dev *dev; 2342 2343 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2344 dev = &rte_eth_devices[port_id]; 2345 2346 return dev->data->promiscuous; 2347 } 2348 2349 int 2350 rte_eth_allmulticast_enable(uint16_t port_id) 2351 { 2352 struct rte_eth_dev *dev; 2353 int diag; 2354 2355 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2356 dev = &rte_eth_devices[port_id]; 2357 2358 if (dev->data->all_multicast == 1) 2359 return 0; 2360 2361 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2362 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2363 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2364 2365 return eth_err(port_id, diag); 2366 } 2367 2368 int 2369 rte_eth_allmulticast_disable(uint16_t port_id) 2370 { 2371 struct rte_eth_dev *dev; 2372 int diag; 2373 2374 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2375 dev = &rte_eth_devices[port_id]; 2376 2377 if (dev->data->all_multicast == 0) 2378 return 0; 2379 2380 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2381 dev->data->all_multicast = 0; 2382 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2383 if (diag != 0) 2384 dev->data->all_multicast = 1; 2385 2386 return eth_err(port_id, diag); 2387 } 2388 2389 int 2390 rte_eth_allmulticast_get(uint16_t port_id) 2391 { 2392 struct rte_eth_dev *dev; 2393 2394 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2395 dev = &rte_eth_devices[port_id]; 2396 2397 return dev->data->all_multicast; 2398 } 2399 2400 int 2401 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2402 { 2403 struct rte_eth_dev *dev; 2404 2405 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2406 dev = &rte_eth_devices[port_id]; 2407 2408 if (eth_link == NULL) { 2409 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2410 port_id); 2411 return -EINVAL; 2412 } 2413 2414 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2415 rte_eth_linkstatus_get(dev, eth_link); 2416 else { 2417 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2418 (*dev->dev_ops->link_update)(dev, 1); 2419 *eth_link = dev->data->dev_link; 2420 } 2421 2422 return 0; 2423 } 2424 2425 int 2426 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2427 { 2428 struct rte_eth_dev *dev; 2429 2430 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2431 dev = &rte_eth_devices[port_id]; 2432 2433 if (eth_link == NULL) { 2434 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2435 port_id); 2436 return -EINVAL; 2437 } 2438 2439 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2440 rte_eth_linkstatus_get(dev, eth_link); 2441 else { 2442 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2443 (*dev->dev_ops->link_update)(dev, 0); 2444 *eth_link = dev->data->dev_link; 2445 } 2446 2447 return 0; 2448 } 2449 2450 const 
char * 2451 rte_eth_link_speed_to_str(uint32_t link_speed) 2452 { 2453 switch (link_speed) { 2454 case RTE_ETH_SPEED_NUM_NONE: return "None"; 2455 case RTE_ETH_SPEED_NUM_10M: return "10 Mbps"; 2456 case RTE_ETH_SPEED_NUM_100M: return "100 Mbps"; 2457 case RTE_ETH_SPEED_NUM_1G: return "1 Gbps"; 2458 case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2459 case RTE_ETH_SPEED_NUM_5G: return "5 Gbps"; 2460 case RTE_ETH_SPEED_NUM_10G: return "10 Gbps"; 2461 case RTE_ETH_SPEED_NUM_20G: return "20 Gbps"; 2462 case RTE_ETH_SPEED_NUM_25G: return "25 Gbps"; 2463 case RTE_ETH_SPEED_NUM_40G: return "40 Gbps"; 2464 case RTE_ETH_SPEED_NUM_50G: return "50 Gbps"; 2465 case RTE_ETH_SPEED_NUM_56G: return "56 Gbps"; 2466 case RTE_ETH_SPEED_NUM_100G: return "100 Gbps"; 2467 case RTE_ETH_SPEED_NUM_200G: return "200 Gbps"; 2468 case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2469 default: return "Invalid"; 2470 } 2471 } 2472 2473 int 2474 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2475 { 2476 if (str == NULL) { 2477 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2478 return -EINVAL; 2479 } 2480 2481 if (len == 0) { 2482 RTE_ETHDEV_LOG(ERR, 2483 "Cannot convert link to string with zero size\n"); 2484 return -EINVAL; 2485 } 2486 2487 if (eth_link == NULL) { 2488 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2489 return -EINVAL; 2490 } 2491 2492 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 2493 return snprintf(str, len, "Link down"); 2494 else 2495 return snprintf(str, len, "Link up at %s %s %s", 2496 rte_eth_link_speed_to_str(eth_link->link_speed), 2497 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 2498 "FDX" : "HDX", 2499 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 2500 "Autoneg" : "Fixed"); 2501 } 2502 2503 int 2504 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2505 { 2506 struct rte_eth_dev *dev; 2507 2508 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2509 dev = &rte_eth_devices[port_id]; 2510 2511 if (stats == NULL) { 2512 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2513 port_id); 2514 return -EINVAL; 2515 } 2516 2517 memset(stats, 0, sizeof(*stats)); 2518 2519 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2520 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2521 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2522 } 2523 2524 int 2525 rte_eth_stats_reset(uint16_t port_id) 2526 { 2527 struct rte_eth_dev *dev; 2528 int ret; 2529 2530 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2531 dev = &rte_eth_devices[port_id]; 2532 2533 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2534 ret = (*dev->dev_ops->stats_reset)(dev); 2535 if (ret != 0) 2536 return eth_err(port_id, ret); 2537 2538 dev->data->rx_mbuf_alloc_failed = 0; 2539 2540 return 0; 2541 } 2542 2543 static inline int 2544 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2545 { 2546 uint16_t nb_rxqs, nb_txqs; 2547 int count; 2548 2549 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2550 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2551 2552 count = RTE_NB_STATS; 2553 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2554 count += nb_rxqs * RTE_NB_RXQ_STATS; 2555 count += nb_txqs * RTE_NB_TXQ_STATS; 2556 } 2557 2558 return count; 2559 } 2560 2561 static int 2562 eth_dev_get_xstats_count(uint16_t port_id) 2563 { 2564 struct rte_eth_dev *dev; 2565 int count; 2566 2567 
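	/*
	 * Total xstats count is the number of driver-specific entries
	 * (if the PMD implements xstats_get_names) plus the generic basic
	 * stats, including per-queue stats when the AUTOFILL flag is set.
	 */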
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2568 dev = &rte_eth_devices[port_id]; 2569 if (dev->dev_ops->xstats_get_names != NULL) { 2570 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2571 if (count < 0) 2572 return eth_err(port_id, count); 2573 } else 2574 count = 0; 2575 2576 2577 count += eth_dev_get_xstats_basic_count(dev); 2578 2579 return count; 2580 } 2581 2582 int 2583 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2584 uint64_t *id) 2585 { 2586 int cnt_xstats, idx_xstat; 2587 2588 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2589 2590 if (xstat_name == NULL) { 2591 RTE_ETHDEV_LOG(ERR, 2592 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2593 port_id); 2594 return -ENOMEM; 2595 } 2596 2597 if (id == NULL) { 2598 RTE_ETHDEV_LOG(ERR, 2599 "Cannot get ethdev port %u xstats ID to NULL\n", 2600 port_id); 2601 return -ENOMEM; 2602 } 2603 2604 /* Get count */ 2605 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2606 if (cnt_xstats < 0) { 2607 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2608 return -ENODEV; 2609 } 2610 2611 /* Get id-name lookup table */ 2612 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2613 2614 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2615 port_id, xstats_names, cnt_xstats, NULL)) { 2616 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2617 return -1; 2618 } 2619 2620 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2621 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2622 *id = idx_xstat; 2623 return 0; 2624 }; 2625 } 2626 2627 return -EINVAL; 2628 } 2629 2630 /* retrieve basic stats names */ 2631 static int 2632 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2633 struct rte_eth_xstat_name *xstats_names) 2634 { 2635 int cnt_used_entries = 0; 2636 uint32_t idx, id_queue; 2637 uint16_t num_q; 2638 2639 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2640 strlcpy(xstats_names[cnt_used_entries].name, 2641 eth_dev_stats_strings[idx].name, 2642 sizeof(xstats_names[0].name)); 2643 cnt_used_entries++; 2644 } 2645 2646 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2647 return cnt_used_entries; 2648 2649 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2650 for (id_queue = 0; id_queue < num_q; id_queue++) { 2651 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2652 snprintf(xstats_names[cnt_used_entries].name, 2653 sizeof(xstats_names[0].name), 2654 "rx_q%u_%s", 2655 id_queue, eth_dev_rxq_stats_strings[idx].name); 2656 cnt_used_entries++; 2657 } 2658 2659 } 2660 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2661 for (id_queue = 0; id_queue < num_q; id_queue++) { 2662 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2663 snprintf(xstats_names[cnt_used_entries].name, 2664 sizeof(xstats_names[0].name), 2665 "tx_q%u_%s", 2666 id_queue, eth_dev_txq_stats_strings[idx].name); 2667 cnt_used_entries++; 2668 } 2669 } 2670 return cnt_used_entries; 2671 } 2672 2673 /* retrieve ethdev extended statistics names */ 2674 int 2675 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2676 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2677 uint64_t *ids) 2678 { 2679 struct rte_eth_xstat_name *xstats_names_copy; 2680 unsigned int no_basic_stat_requested = 1; 2681 unsigned int no_ext_stat_requested = 1; 2682 unsigned int expected_entries; 2683 unsigned int basic_count; 2684 struct rte_eth_dev *dev; 2685 unsigned int i; 2686 int ret; 2687 2688 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2689 
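	/*
	 * Typical calling pattern (illustrative sketch only; "n" and "names"
	 * are hypothetical caller-side variables): probe the required array
	 * size first, then retrieve every name.
	 *
	 *   int n = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
	 *   if (n > 0) {
	 *           struct rte_eth_xstat_name *names =
	 *                   calloc(n, sizeof(*names));
	 *           if (names != NULL)
	 *                   rte_eth_xstats_get_names_by_id(port_id, names,
	 *                                   n, NULL);
	 *   }
	 */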
dev = &rte_eth_devices[port_id]; 2690 2691 basic_count = eth_dev_get_xstats_basic_count(dev); 2692 ret = eth_dev_get_xstats_count(port_id); 2693 if (ret < 0) 2694 return ret; 2695 expected_entries = (unsigned int)ret; 2696 2697 /* Return max number of stats if no ids given */ 2698 if (!ids) { 2699 if (!xstats_names) 2700 return expected_entries; 2701 else if (xstats_names && size < expected_entries) 2702 return expected_entries; 2703 } 2704 2705 if (ids && !xstats_names) 2706 return -EINVAL; 2707 2708 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 2709 uint64_t ids_copy[size]; 2710 2711 for (i = 0; i < size; i++) { 2712 if (ids[i] < basic_count) { 2713 no_basic_stat_requested = 0; 2714 break; 2715 } 2716 2717 /* 2718 * Convert ids to xstats ids that PMD knows. 2719 * ids known by user are basic + extended stats. 2720 */ 2721 ids_copy[i] = ids[i] - basic_count; 2722 } 2723 2724 if (no_basic_stat_requested) 2725 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 2726 ids_copy, xstats_names, size); 2727 } 2728 2729 /* Retrieve all stats */ 2730 if (!ids) { 2731 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 2732 expected_entries); 2733 if (num_stats < 0 || num_stats > (int)expected_entries) 2734 return num_stats; 2735 else 2736 return expected_entries; 2737 } 2738 2739 xstats_names_copy = calloc(expected_entries, 2740 sizeof(struct rte_eth_xstat_name)); 2741 2742 if (!xstats_names_copy) { 2743 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 2744 return -ENOMEM; 2745 } 2746 2747 if (ids) { 2748 for (i = 0; i < size; i++) { 2749 if (ids[i] >= basic_count) { 2750 no_ext_stat_requested = 0; 2751 break; 2752 } 2753 } 2754 } 2755 2756 /* Fill xstats_names_copy structure */ 2757 if (ids && no_ext_stat_requested) { 2758 eth_basic_stats_get_names(dev, xstats_names_copy); 2759 } else { 2760 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 2761 expected_entries); 2762 if (ret < 0) { 2763 free(xstats_names_copy); 2764 return ret; 2765 } 2766 } 2767 2768 /* Filter stats */ 2769 for (i = 0; i < size; i++) { 2770 if (ids[i] >= expected_entries) { 2771 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 2772 free(xstats_names_copy); 2773 return -1; 2774 } 2775 xstats_names[i] = xstats_names_copy[ids[i]]; 2776 } 2777 2778 free(xstats_names_copy); 2779 return size; 2780 } 2781 2782 int 2783 rte_eth_xstats_get_names(uint16_t port_id, 2784 struct rte_eth_xstat_name *xstats_names, 2785 unsigned int size) 2786 { 2787 struct rte_eth_dev *dev; 2788 int cnt_used_entries; 2789 int cnt_expected_entries; 2790 int cnt_driver_entries; 2791 2792 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 2793 if (xstats_names == NULL || cnt_expected_entries < 0 || 2794 (int)size < cnt_expected_entries) 2795 return cnt_expected_entries; 2796 2797 /* port_id checked in eth_dev_get_xstats_count() */ 2798 dev = &rte_eth_devices[port_id]; 2799 2800 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 2801 2802 if (dev->dev_ops->xstats_get_names != NULL) { 2803 /* If there are any driver-specific xstats, append them 2804 * to end of list. 
2805	 */
2806		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2807			dev,
2808			xstats_names + cnt_used_entries,
2809			size - cnt_used_entries);
2810		if (cnt_driver_entries < 0)
2811			return eth_err(port_id, cnt_driver_entries);
2812		cnt_used_entries += cnt_driver_entries;
2813	}
2814
2815	return cnt_used_entries;
2816 }
2817
2818
2819 static int
2820 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2821 {
2822	struct rte_eth_dev *dev;
2823	struct rte_eth_stats eth_stats;
2824	unsigned int count = 0, i, q;
2825	uint64_t val, *stats_ptr;
2826	uint16_t nb_rxqs, nb_txqs;
2827	int ret;
2828
2829	ret = rte_eth_stats_get(port_id, &eth_stats);
2830	if (ret < 0)
2831		return ret;
2832
2833	dev = &rte_eth_devices[port_id];
2834
2835	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2836	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2837
2838	/* global stats */
2839	for (i = 0; i < RTE_NB_STATS; i++) {
2840		stats_ptr = RTE_PTR_ADD(&eth_stats,
2841			eth_dev_stats_strings[i].offset);
2842		val = *stats_ptr;
2843		xstats[count++].value = val;
2844	}
2845
2846	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2847		return count;
2848
2849	/* per-rxq stats */
2850	for (q = 0; q < nb_rxqs; q++) {
2851		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2852			stats_ptr = RTE_PTR_ADD(&eth_stats,
2853				eth_dev_rxq_stats_strings[i].offset +
2854				q * sizeof(uint64_t));
2855			val = *stats_ptr;
2856			xstats[count++].value = val;
2857		}
2858	}
2859
2860	/* per-txq stats */
2861	for (q = 0; q < nb_txqs; q++) {
2862		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2863			stats_ptr = RTE_PTR_ADD(&eth_stats,
2864				eth_dev_txq_stats_strings[i].offset +
2865				q * sizeof(uint64_t));
2866			val = *stats_ptr;
2867			xstats[count++].value = val;
2868		}
2869	}
2870	return count;
2871 }
2872
2873 /* retrieve ethdev extended statistics */
2874 int
2875 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2876		uint64_t *values, unsigned int size)
2877 {
2878	unsigned int no_basic_stat_requested = 1;
2879	unsigned int no_ext_stat_requested = 1;
2880	unsigned int num_xstats_filled;
2881	unsigned int basic_count;
2882	uint16_t expected_entries;
2883	struct rte_eth_dev *dev;
2884	unsigned int i;
2885	int ret;
2886
2887	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2888	dev = &rte_eth_devices[port_id];
2889
2890	ret = eth_dev_get_xstats_count(port_id);
2891	if (ret < 0)
2892		return ret;
2893	expected_entries = (uint16_t)ret;
2894	struct rte_eth_xstat xstats[expected_entries];
2895	basic_count = eth_dev_get_xstats_basic_count(dev);
2896
2897	/* Return max number of stats if no ids given */
2898	if (!ids) {
2899		if (!values)
2900			return expected_entries;
2901		else if (values && size < expected_entries)
2902			return expected_entries;
2903	}
2904
2905	if (ids && !values)
2906		return -EINVAL;
2907
2908	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2909		unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
2910		uint64_t ids_copy[size];
2911
2912		for (i = 0; i < size; i++) {
2913			if (ids[i] < basic_count) {
2914				no_basic_stat_requested = 0;
2915				break;
2916			}
2917
2918			/*
2919			 * Convert ids to xstats ids that PMD knows.
2920			 * ids known by user are basic + extended stats.
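			 * For example, with basic_count == 10, a user-visible
			 * id of 12 refers to driver xstat id 2.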
2921 */ 2922 ids_copy[i] = ids[i] - basic_count; 2923 } 2924 2925 if (no_basic_stat_requested) 2926 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 2927 values, size); 2928 } 2929 2930 if (ids) { 2931 for (i = 0; i < size; i++) { 2932 if (ids[i] >= basic_count) { 2933 no_ext_stat_requested = 0; 2934 break; 2935 } 2936 } 2937 } 2938 2939 /* Fill the xstats structure */ 2940 if (ids && no_ext_stat_requested) 2941 ret = eth_basic_stats_get(port_id, xstats); 2942 else 2943 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 2944 2945 if (ret < 0) 2946 return ret; 2947 num_xstats_filled = (unsigned int)ret; 2948 2949 /* Return all stats */ 2950 if (!ids) { 2951 for (i = 0; i < num_xstats_filled; i++) 2952 values[i] = xstats[i].value; 2953 return expected_entries; 2954 } 2955 2956 /* Filter stats */ 2957 for (i = 0; i < size; i++) { 2958 if (ids[i] >= expected_entries) { 2959 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 2960 return -1; 2961 } 2962 values[i] = xstats[ids[i]].value; 2963 } 2964 return size; 2965 } 2966 2967 int 2968 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 2969 unsigned int n) 2970 { 2971 struct rte_eth_dev *dev; 2972 unsigned int count, i; 2973 signed int xcount = 0; 2974 int ret; 2975 2976 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2977 if (xstats == NULL && n > 0) 2978 return -EINVAL; 2979 dev = &rte_eth_devices[port_id]; 2980 2981 count = eth_dev_get_xstats_basic_count(dev); 2982 2983 /* implemented by the driver */ 2984 if (dev->dev_ops->xstats_get != NULL) { 2985 /* Retrieve the xstats from the driver at the end of the 2986 * xstats struct. 2987 */ 2988 xcount = (*dev->dev_ops->xstats_get)(dev, 2989 (n > count) ? xstats + count : NULL, 2990 (n > count) ? n - count : 0); 2991 2992 if (xcount < 0) 2993 return eth_err(port_id, xcount); 2994 } 2995 2996 if (n < count + xcount || xstats == NULL) 2997 return count + xcount; 2998 2999 /* now fill the xstats structure */ 3000 ret = eth_basic_stats_get(port_id, xstats); 3001 if (ret < 0) 3002 return ret; 3003 count = ret; 3004 3005 for (i = 0; i < count; i++) 3006 xstats[i].id = i; 3007 /* add an offset to driver-specific stats */ 3008 for ( ; i < count + xcount; i++) 3009 xstats[i].id += count; 3010 3011 return count + xcount; 3012 } 3013 3014 /* reset ethdev extended statistics */ 3015 int 3016 rte_eth_xstats_reset(uint16_t port_id) 3017 { 3018 struct rte_eth_dev *dev; 3019 3020 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3021 dev = &rte_eth_devices[port_id]; 3022 3023 /* implemented by the driver */ 3024 if (dev->dev_ops->xstats_reset != NULL) 3025 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3026 3027 /* fallback to default */ 3028 return rte_eth_stats_reset(port_id); 3029 } 3030 3031 static int 3032 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3033 uint8_t stat_idx, uint8_t is_rx) 3034 { 3035 struct rte_eth_dev *dev; 3036 3037 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3038 dev = &rte_eth_devices[port_id]; 3039 3040 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3041 return -EINVAL; 3042 3043 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3044 return -EINVAL; 3045 3046 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3047 return -EINVAL; 3048 3049 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3050 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3051 } 3052 3053 int 3054 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3055 
uint8_t stat_idx) 3056 { 3057 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3058 tx_queue_id, 3059 stat_idx, STAT_QMAP_TX)); 3060 } 3061 3062 int 3063 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3064 uint8_t stat_idx) 3065 { 3066 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3067 rx_queue_id, 3068 stat_idx, STAT_QMAP_RX)); 3069 } 3070 3071 int 3072 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3073 { 3074 struct rte_eth_dev *dev; 3075 3076 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3077 dev = &rte_eth_devices[port_id]; 3078 3079 if (fw_version == NULL && fw_size > 0) { 3080 RTE_ETHDEV_LOG(ERR, 3081 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3082 port_id); 3083 return -EINVAL; 3084 } 3085 3086 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3087 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3088 fw_version, fw_size)); 3089 } 3090 3091 int 3092 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3093 { 3094 struct rte_eth_dev *dev; 3095 const struct rte_eth_desc_lim lim = { 3096 .nb_max = UINT16_MAX, 3097 .nb_min = 0, 3098 .nb_align = 1, 3099 .nb_seg_max = UINT16_MAX, 3100 .nb_mtu_seg_max = UINT16_MAX, 3101 }; 3102 int diag; 3103 3104 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3105 dev = &rte_eth_devices[port_id]; 3106 3107 if (dev_info == NULL) { 3108 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3109 port_id); 3110 return -EINVAL; 3111 } 3112 3113 /* 3114 * Init dev_info before port_id check since caller does not have 3115 * return status and does not know if get is successful or not. 3116 */ 3117 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3118 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3119 3120 dev_info->rx_desc_lim = lim; 3121 dev_info->tx_desc_lim = lim; 3122 dev_info->device = dev->device; 3123 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3124 RTE_ETHER_CRC_LEN; 3125 dev_info->max_mtu = UINT16_MAX; 3126 3127 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3128 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3129 if (diag != 0) { 3130 /* Cleanup already filled in device information */ 3131 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3132 return eth_err(port_id, diag); 3133 } 3134 3135 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3136 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3137 RTE_MAX_QUEUES_PER_PORT); 3138 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3139 RTE_MAX_QUEUES_PER_PORT); 3140 3141 dev_info->driver_name = dev->device->driver->name; 3142 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3143 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3144 3145 dev_info->dev_flags = &dev->data->dev_flags; 3146 3147 return 0; 3148 } 3149 3150 int 3151 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3152 { 3153 struct rte_eth_dev *dev; 3154 3155 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3156 dev = &rte_eth_devices[port_id]; 3157 3158 if (dev_conf == NULL) { 3159 RTE_ETHDEV_LOG(ERR, 3160 "Cannot get ethdev port %u configuration to NULL\n", 3161 port_id); 3162 return -EINVAL; 3163 } 3164 3165 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3166 3167 return 0; 3168 } 3169 3170 int 3171 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3172 uint32_t 
*ptypes, int num) 3173 { 3174 int i, j; 3175 struct rte_eth_dev *dev; 3176 const uint32_t *all_ptypes; 3177 3178 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3179 dev = &rte_eth_devices[port_id]; 3180 3181 if (ptypes == NULL && num > 0) { 3182 RTE_ETHDEV_LOG(ERR, 3183 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3184 port_id); 3185 return -EINVAL; 3186 } 3187 3188 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3189 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3190 3191 if (!all_ptypes) 3192 return 0; 3193 3194 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3195 if (all_ptypes[i] & ptype_mask) { 3196 if (j < num) 3197 ptypes[j] = all_ptypes[i]; 3198 j++; 3199 } 3200 3201 return j; 3202 } 3203 3204 int 3205 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3206 uint32_t *set_ptypes, unsigned int num) 3207 { 3208 const uint32_t valid_ptype_masks[] = { 3209 RTE_PTYPE_L2_MASK, 3210 RTE_PTYPE_L3_MASK, 3211 RTE_PTYPE_L4_MASK, 3212 RTE_PTYPE_TUNNEL_MASK, 3213 RTE_PTYPE_INNER_L2_MASK, 3214 RTE_PTYPE_INNER_L3_MASK, 3215 RTE_PTYPE_INNER_L4_MASK, 3216 }; 3217 const uint32_t *all_ptypes; 3218 struct rte_eth_dev *dev; 3219 uint32_t unused_mask; 3220 unsigned int i, j; 3221 int ret; 3222 3223 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3224 dev = &rte_eth_devices[port_id]; 3225 3226 if (num > 0 && set_ptypes == NULL) { 3227 RTE_ETHDEV_LOG(ERR, 3228 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3229 port_id); 3230 return -EINVAL; 3231 } 3232 3233 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3234 *dev->dev_ops->dev_ptypes_set == NULL) { 3235 ret = 0; 3236 goto ptype_unknown; 3237 } 3238 3239 if (ptype_mask == 0) { 3240 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3241 ptype_mask); 3242 goto ptype_unknown; 3243 } 3244 3245 unused_mask = ptype_mask; 3246 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3247 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3248 if (mask && mask != valid_ptype_masks[i]) { 3249 ret = -EINVAL; 3250 goto ptype_unknown; 3251 } 3252 unused_mask &= ~valid_ptype_masks[i]; 3253 } 3254 3255 if (unused_mask) { 3256 ret = -EINVAL; 3257 goto ptype_unknown; 3258 } 3259 3260 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3261 if (all_ptypes == NULL) { 3262 ret = 0; 3263 goto ptype_unknown; 3264 } 3265 3266 /* 3267 * Accommodate as many set_ptypes as possible. If the supplied 3268 * set_ptypes array is insufficient fill it partially. 
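 * The filled portion is terminated with RTE_PTYPE_UNKNOWN whenever the
 * array still has room for a terminator.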
3269 */ 3270 for (i = 0, j = 0; set_ptypes != NULL && 3271 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3272 if (ptype_mask & all_ptypes[i]) { 3273 if (j < num - 1) { 3274 set_ptypes[j] = all_ptypes[i]; 3275 j++; 3276 continue; 3277 } 3278 break; 3279 } 3280 } 3281 3282 if (set_ptypes != NULL && j < num) 3283 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3284 3285 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3286 3287 ptype_unknown: 3288 if (num > 0) 3289 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3290 3291 return ret; 3292 } 3293 3294 int 3295 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3296 unsigned int num) 3297 { 3298 int32_t ret; 3299 struct rte_eth_dev *dev; 3300 struct rte_eth_dev_info dev_info; 3301 3302 if (ma == NULL) { 3303 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3304 return -EINVAL; 3305 } 3306 3307 /* will check for us that port_id is a valid one */ 3308 ret = rte_eth_dev_info_get(port_id, &dev_info); 3309 if (ret != 0) 3310 return ret; 3311 3312 dev = &rte_eth_devices[port_id]; 3313 num = RTE_MIN(dev_info.max_mac_addrs, num); 3314 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3315 3316 return num; 3317 } 3318 3319 int 3320 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3321 { 3322 struct rte_eth_dev *dev; 3323 3324 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3325 dev = &rte_eth_devices[port_id]; 3326 3327 if (mac_addr == NULL) { 3328 RTE_ETHDEV_LOG(ERR, 3329 "Cannot get ethdev port %u MAC address to NULL\n", 3330 port_id); 3331 return -EINVAL; 3332 } 3333 3334 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3335 3336 return 0; 3337 } 3338 3339 int 3340 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3341 { 3342 struct rte_eth_dev *dev; 3343 3344 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3345 dev = &rte_eth_devices[port_id]; 3346 3347 if (mtu == NULL) { 3348 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3349 port_id); 3350 return -EINVAL; 3351 } 3352 3353 *mtu = dev->data->mtu; 3354 return 0; 3355 } 3356 3357 int 3358 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3359 { 3360 int ret; 3361 struct rte_eth_dev_info dev_info; 3362 struct rte_eth_dev *dev; 3363 3364 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3365 dev = &rte_eth_devices[port_id]; 3366 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3367 3368 /* 3369 * Check if the device supports dev_infos_get, if it does not 3370 * skip min_mtu/max_mtu validation here as this requires values 3371 * that are populated within the call to rte_eth_dev_info_get() 3372 * which relies on dev->dev_ops->dev_infos_get. 
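 * If dev_infos_get is not provided, the requested MTU is passed to the
 * driver's mtu_set callback without this range check.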
3373 */ 3374 if (*dev->dev_ops->dev_infos_get != NULL) { 3375 ret = rte_eth_dev_info_get(port_id, &dev_info); 3376 if (ret != 0) 3377 return ret; 3378 3379 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3380 if (ret != 0) 3381 return ret; 3382 } 3383 3384 if (dev->data->dev_configured == 0) { 3385 RTE_ETHDEV_LOG(ERR, 3386 "Port %u must be configured before MTU set\n", 3387 port_id); 3388 return -EINVAL; 3389 } 3390 3391 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3392 if (ret == 0) 3393 dev->data->mtu = mtu; 3394 3395 return eth_err(port_id, ret); 3396 } 3397 3398 int 3399 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3400 { 3401 struct rte_eth_dev *dev; 3402 int ret; 3403 3404 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3405 dev = &rte_eth_devices[port_id]; 3406 3407 if (!(dev->data->dev_conf.rxmode.offloads & 3408 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 3409 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 3410 port_id); 3411 return -ENOSYS; 3412 } 3413 3414 if (vlan_id > 4095) { 3415 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3416 port_id, vlan_id); 3417 return -EINVAL; 3418 } 3419 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3420 3421 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3422 if (ret == 0) { 3423 struct rte_vlan_filter_conf *vfc; 3424 int vidx; 3425 int vbit; 3426 3427 vfc = &dev->data->vlan_filter_conf; 3428 vidx = vlan_id / 64; 3429 vbit = vlan_id % 64; 3430 3431 if (on) 3432 vfc->ids[vidx] |= RTE_BIT64(vbit); 3433 else 3434 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 3435 } 3436 3437 return eth_err(port_id, ret); 3438 } 3439 3440 int 3441 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3442 int on) 3443 { 3444 struct rte_eth_dev *dev; 3445 3446 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3447 dev = &rte_eth_devices[port_id]; 3448 3449 if (rx_queue_id >= dev->data->nb_rx_queues) { 3450 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3451 return -EINVAL; 3452 } 3453 3454 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3455 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3456 3457 return 0; 3458 } 3459 3460 int 3461 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3462 enum rte_vlan_type vlan_type, 3463 uint16_t tpid) 3464 { 3465 struct rte_eth_dev *dev; 3466 3467 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3468 dev = &rte_eth_devices[port_id]; 3469 3470 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3471 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3472 tpid)); 3473 } 3474 3475 int 3476 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3477 { 3478 struct rte_eth_dev_info dev_info; 3479 struct rte_eth_dev *dev; 3480 int ret = 0; 3481 int mask = 0; 3482 int cur, org = 0; 3483 uint64_t orig_offloads; 3484 uint64_t dev_offloads; 3485 uint64_t new_offloads; 3486 3487 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3488 dev = &rte_eth_devices[port_id]; 3489 3490 /* save original values in case of failure */ 3491 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3492 dev_offloads = orig_offloads; 3493 3494 /* check which option changed by application */ 3495 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 3496 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 3497 if (cur != org) { 3498 if (cur) 3499 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3500 else 3501 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3502 mask |= 
RTE_ETH_VLAN_STRIP_MASK; 3503 } 3504 3505 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 3506 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 3507 if (cur != org) { 3508 if (cur) 3509 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3510 else 3511 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3512 mask |= RTE_ETH_VLAN_FILTER_MASK; 3513 } 3514 3515 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 3516 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 3517 if (cur != org) { 3518 if (cur) 3519 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3520 else 3521 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3522 mask |= RTE_ETH_VLAN_EXTEND_MASK; 3523 } 3524 3525 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 3526 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 3527 if (cur != org) { 3528 if (cur) 3529 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3530 else 3531 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3532 mask |= RTE_ETH_QINQ_STRIP_MASK; 3533 } 3534 3535 /*no change*/ 3536 if (mask == 0) 3537 return ret; 3538 3539 ret = rte_eth_dev_info_get(port_id, &dev_info); 3540 if (ret != 0) 3541 return ret; 3542 3543 /* Rx VLAN offloading must be within its device capabilities */ 3544 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3545 new_offloads = dev_offloads & ~orig_offloads; 3546 RTE_ETHDEV_LOG(ERR, 3547 "Ethdev port_id=%u requested new added VLAN offloads " 3548 "0x%" PRIx64 " must be within Rx offloads capabilities " 3549 "0x%" PRIx64 " in %s()\n", 3550 port_id, new_offloads, dev_info.rx_offload_capa, 3551 __func__); 3552 return -EINVAL; 3553 } 3554 3555 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3556 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3557 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3558 if (ret) { 3559 /* hit an error restore original values */ 3560 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3561 } 3562 3563 return eth_err(port_id, ret); 3564 } 3565 3566 int 3567 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3568 { 3569 struct rte_eth_dev *dev; 3570 uint64_t *dev_offloads; 3571 int ret = 0; 3572 3573 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3574 dev = &rte_eth_devices[port_id]; 3575 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3576 3577 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 3578 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 3579 3580 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 3581 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 3582 3583 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 3584 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 3585 3586 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 3587 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 3588 3589 return ret; 3590 } 3591 3592 int 3593 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3594 { 3595 struct rte_eth_dev *dev; 3596 3597 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3598 dev = &rte_eth_devices[port_id]; 3599 3600 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3601 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3602 } 3603 3604 int 3605 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3606 { 3607 struct rte_eth_dev *dev; 3608 3609 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3610 dev = &rte_eth_devices[port_id]; 3611 3612 if (fc_conf == NULL) { 3613 RTE_ETHDEV_LOG(ERR, 3614 "Cannot get ethdev port %u flow control config to NULL\n", 3615 port_id); 3616 return -EINVAL; 3617 } 3618 3619 
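	/* Clear the caller's structure, then let the driver report the current settings */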
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3620 memset(fc_conf, 0, sizeof(*fc_conf)); 3621 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3622 } 3623 3624 int 3625 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3626 { 3627 struct rte_eth_dev *dev; 3628 3629 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3630 dev = &rte_eth_devices[port_id]; 3631 3632 if (fc_conf == NULL) { 3633 RTE_ETHDEV_LOG(ERR, 3634 "Cannot set ethdev port %u flow control from NULL config\n", 3635 port_id); 3636 return -EINVAL; 3637 } 3638 3639 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3640 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3641 return -EINVAL; 3642 } 3643 3644 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 3645 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3646 } 3647 3648 int 3649 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3650 struct rte_eth_pfc_conf *pfc_conf) 3651 { 3652 struct rte_eth_dev *dev; 3653 3654 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3655 dev = &rte_eth_devices[port_id]; 3656 3657 if (pfc_conf == NULL) { 3658 RTE_ETHDEV_LOG(ERR, 3659 "Cannot set ethdev port %u priority flow control from NULL config\n", 3660 port_id); 3661 return -EINVAL; 3662 } 3663 3664 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 3665 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 3666 return -EINVAL; 3667 } 3668 3669 /* High water, low water validation are device specific */ 3670 if (*dev->dev_ops->priority_flow_ctrl_set) 3671 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 3672 (dev, pfc_conf)); 3673 return -ENOTSUP; 3674 } 3675 3676 static int 3677 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 3678 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 3679 { 3680 if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) || 3681 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 3682 if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) { 3683 RTE_ETHDEV_LOG(ERR, 3684 "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n", 3685 pfc_queue_conf->rx_pause.tx_qid, 3686 dev_info->nb_tx_queues); 3687 return -EINVAL; 3688 } 3689 3690 if (pfc_queue_conf->rx_pause.tc >= tc_max) { 3691 RTE_ETHDEV_LOG(ERR, 3692 "PFC TC not in range for Rx pause requested:%d max:%d\n", 3693 pfc_queue_conf->rx_pause.tc, tc_max); 3694 return -EINVAL; 3695 } 3696 } 3697 3698 return 0; 3699 } 3700 3701 static int 3702 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 3703 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 3704 { 3705 if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) || 3706 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 3707 if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) { 3708 RTE_ETHDEV_LOG(ERR, 3709 "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n", 3710 pfc_queue_conf->tx_pause.rx_qid, 3711 dev_info->nb_rx_queues); 3712 return -EINVAL; 3713 } 3714 3715 if (pfc_queue_conf->tx_pause.tc >= tc_max) { 3716 RTE_ETHDEV_LOG(ERR, 3717 "PFC TC not in range for Tx pause requested:%d max:%d\n", 3718 pfc_queue_conf->tx_pause.tc, tc_max); 3719 return -EINVAL; 3720 } 3721 } 3722 3723 return 0; 3724 } 3725 3726 int 3727 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, 3728 struct rte_eth_pfc_queue_info *pfc_queue_info) 3729 { 3730 struct rte_eth_dev *dev; 3731 3732 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3733 dev 
= &rte_eth_devices[port_id]; 3734 3735 if (pfc_queue_info == NULL) { 3736 RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n", 3737 port_id); 3738 return -EINVAL; 3739 } 3740 3741 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 3742 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 3743 (dev, pfc_queue_info)); 3744 return -ENOTSUP; 3745 } 3746 3747 int 3748 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, 3749 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 3750 { 3751 struct rte_eth_pfc_queue_info pfc_info; 3752 struct rte_eth_dev_info dev_info; 3753 struct rte_eth_dev *dev; 3754 int ret; 3755 3756 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3757 dev = &rte_eth_devices[port_id]; 3758 3759 if (pfc_queue_conf == NULL) { 3760 RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n", 3761 port_id); 3762 return -EINVAL; 3763 } 3764 3765 ret = rte_eth_dev_info_get(port_id, &dev_info); 3766 if (ret != 0) 3767 return ret; 3768 3769 ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info); 3770 if (ret != 0) 3771 return ret; 3772 3773 if (pfc_info.tc_max == 0) { 3774 RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n", 3775 port_id); 3776 return -ENOTSUP; 3777 } 3778 3779 /* Check requested mode supported or not */ 3780 if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE && 3781 pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) { 3782 RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n", 3783 port_id); 3784 return -EINVAL; 3785 } 3786 3787 if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE && 3788 pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) { 3789 RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n", 3790 port_id); 3791 return -EINVAL; 3792 } 3793 3794 /* Validate Rx pause parameters */ 3795 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 3796 pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) { 3797 ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max, 3798 pfc_queue_conf); 3799 if (ret != 0) 3800 return ret; 3801 } 3802 3803 /* Validate Tx pause parameters */ 3804 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 3805 pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) { 3806 ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max, 3807 pfc_queue_conf); 3808 if (ret != 0) 3809 return ret; 3810 } 3811 3812 if (*dev->dev_ops->priority_flow_ctrl_queue_config) 3813 return eth_err(port_id, 3814 (*dev->dev_ops->priority_flow_ctrl_queue_config)( 3815 dev, pfc_queue_conf)); 3816 return -ENOTSUP; 3817 } 3818 3819 static int 3820 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 3821 uint16_t reta_size) 3822 { 3823 uint16_t i, num; 3824 3825 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 3826 for (i = 0; i < num; i++) { 3827 if (reta_conf[i].mask) 3828 return 0; 3829 } 3830 3831 return -EINVAL; 3832 } 3833 3834 static int 3835 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 3836 uint16_t reta_size, 3837 uint16_t max_rxq) 3838 { 3839 uint16_t i, idx, shift; 3840 3841 if (max_rxq == 0) { 3842 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 3843 return -EINVAL; 3844 } 3845 3846 for (i = 0; i < reta_size; i++) { 3847 idx = i / RTE_ETH_RETA_GROUP_SIZE; 3848 shift = i % RTE_ETH_RETA_GROUP_SIZE; 3849 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 3850 (reta_conf[idx].reta[shift] >= max_rxq)) { 3851 RTE_ETHDEV_LOG(ERR, 3852 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 3853 idx, shift, 3854 reta_conf[idx].reta[shift], max_rxq); 3855 return 
-EINVAL; 3856 } 3857 } 3858 3859 return 0; 3860 } 3861 3862 int 3863 rte_eth_dev_rss_reta_update(uint16_t port_id, 3864 struct rte_eth_rss_reta_entry64 *reta_conf, 3865 uint16_t reta_size) 3866 { 3867 enum rte_eth_rx_mq_mode mq_mode; 3868 struct rte_eth_dev *dev; 3869 int ret; 3870 3871 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3872 dev = &rte_eth_devices[port_id]; 3873 3874 if (reta_conf == NULL) { 3875 RTE_ETHDEV_LOG(ERR, 3876 "Cannot update ethdev port %u RSS RETA to NULL\n", 3877 port_id); 3878 return -EINVAL; 3879 } 3880 3881 if (reta_size == 0) { 3882 RTE_ETHDEV_LOG(ERR, 3883 "Cannot update ethdev port %u RSS RETA with zero size\n", 3884 port_id); 3885 return -EINVAL; 3886 } 3887 3888 /* Check mask bits */ 3889 ret = eth_check_reta_mask(reta_conf, reta_size); 3890 if (ret < 0) 3891 return ret; 3892 3893 /* Check entry value */ 3894 ret = eth_check_reta_entry(reta_conf, reta_size, 3895 dev->data->nb_rx_queues); 3896 if (ret < 0) 3897 return ret; 3898 3899 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 3900 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 3901 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 3902 return -ENOTSUP; 3903 } 3904 3905 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 3906 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 3907 reta_size)); 3908 } 3909 3910 int 3911 rte_eth_dev_rss_reta_query(uint16_t port_id, 3912 struct rte_eth_rss_reta_entry64 *reta_conf, 3913 uint16_t reta_size) 3914 { 3915 struct rte_eth_dev *dev; 3916 int ret; 3917 3918 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3919 dev = &rte_eth_devices[port_id]; 3920 3921 if (reta_conf == NULL) { 3922 RTE_ETHDEV_LOG(ERR, 3923 "Cannot query ethdev port %u RSS RETA from NULL config\n", 3924 port_id); 3925 return -EINVAL; 3926 } 3927 3928 /* Check mask bits */ 3929 ret = eth_check_reta_mask(reta_conf, reta_size); 3930 if (ret < 0) 3931 return ret; 3932 3933 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 3934 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 3935 reta_size)); 3936 } 3937 3938 int 3939 rte_eth_dev_rss_hash_update(uint16_t port_id, 3940 struct rte_eth_rss_conf *rss_conf) 3941 { 3942 struct rte_eth_dev *dev; 3943 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 3944 enum rte_eth_rx_mq_mode mq_mode; 3945 int ret; 3946 3947 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3948 dev = &rte_eth_devices[port_id]; 3949 3950 if (rss_conf == NULL) { 3951 RTE_ETHDEV_LOG(ERR, 3952 "Cannot update ethdev port %u RSS hash from NULL config\n", 3953 port_id); 3954 return -EINVAL; 3955 } 3956 3957 ret = rte_eth_dev_info_get(port_id, &dev_info); 3958 if (ret != 0) 3959 return ret; 3960 3961 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 3962 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 3963 dev_info.flow_type_rss_offloads) { 3964 RTE_ETHDEV_LOG(ERR, 3965 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 3966 port_id, rss_conf->rss_hf, 3967 dev_info.flow_type_rss_offloads); 3968 return -EINVAL; 3969 } 3970 3971 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 3972 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 3973 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 3974 return -ENOTSUP; 3975 } 3976 3977 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 3978 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 3979 rss_conf)); 3980 } 3981 3982 int 3983 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 3984 struct 
rte_eth_rss_conf *rss_conf) 3985 { 3986 struct rte_eth_dev *dev; 3987 3988 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3989 dev = &rte_eth_devices[port_id]; 3990 3991 if (rss_conf == NULL) { 3992 RTE_ETHDEV_LOG(ERR, 3993 "Cannot get ethdev port %u RSS hash config to NULL\n", 3994 port_id); 3995 return -EINVAL; 3996 } 3997 3998 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 3999 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4000 rss_conf)); 4001 } 4002 4003 int 4004 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4005 struct rte_eth_udp_tunnel *udp_tunnel) 4006 { 4007 struct rte_eth_dev *dev; 4008 4009 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4010 dev = &rte_eth_devices[port_id]; 4011 4012 if (udp_tunnel == NULL) { 4013 RTE_ETHDEV_LOG(ERR, 4014 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4015 port_id); 4016 return -EINVAL; 4017 } 4018 4019 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4020 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4021 return -EINVAL; 4022 } 4023 4024 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4025 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4026 udp_tunnel)); 4027 } 4028 4029 int 4030 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4031 struct rte_eth_udp_tunnel *udp_tunnel) 4032 { 4033 struct rte_eth_dev *dev; 4034 4035 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4036 dev = &rte_eth_devices[port_id]; 4037 4038 if (udp_tunnel == NULL) { 4039 RTE_ETHDEV_LOG(ERR, 4040 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4041 port_id); 4042 return -EINVAL; 4043 } 4044 4045 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4046 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4047 return -EINVAL; 4048 } 4049 4050 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4051 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4052 udp_tunnel)); 4053 } 4054 4055 int 4056 rte_eth_led_on(uint16_t port_id) 4057 { 4058 struct rte_eth_dev *dev; 4059 4060 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4061 dev = &rte_eth_devices[port_id]; 4062 4063 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4064 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4065 } 4066 4067 int 4068 rte_eth_led_off(uint16_t port_id) 4069 { 4070 struct rte_eth_dev *dev; 4071 4072 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4073 dev = &rte_eth_devices[port_id]; 4074 4075 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4076 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4077 } 4078 4079 int 4080 rte_eth_fec_get_capability(uint16_t port_id, 4081 struct rte_eth_fec_capa *speed_fec_capa, 4082 unsigned int num) 4083 { 4084 struct rte_eth_dev *dev; 4085 int ret; 4086 4087 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4088 dev = &rte_eth_devices[port_id]; 4089 4090 if (speed_fec_capa == NULL && num > 0) { 4091 RTE_ETHDEV_LOG(ERR, 4092 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4093 port_id); 4094 return -EINVAL; 4095 } 4096 4097 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4098 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4099 4100 return ret; 4101 } 4102 4103 int 4104 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4105 { 4106 struct rte_eth_dev *dev; 4107 4108 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4109 dev = &rte_eth_devices[port_id]; 4110 
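	/*
	 * On success the driver writes the currently configured FEC mode(s),
	 * as a capability bitmask, into *fec_capa; a NULL pointer therefore
	 * cannot be accepted here.
	 */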
4111 if (fec_capa == NULL) { 4112 RTE_ETHDEV_LOG(ERR, 4113 "Cannot get ethdev port %u current FEC mode to NULL\n", 4114 port_id); 4115 return -EINVAL; 4116 } 4117 4118 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4119 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4120 } 4121 4122 int 4123 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4124 { 4125 struct rte_eth_dev *dev; 4126 4127 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4128 dev = &rte_eth_devices[port_id]; 4129 4130 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4131 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4132 } 4133 4134 /* 4135 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4136 * an empty spot. 4137 */ 4138 static int 4139 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4140 { 4141 struct rte_eth_dev_info dev_info; 4142 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4143 unsigned i; 4144 int ret; 4145 4146 ret = rte_eth_dev_info_get(port_id, &dev_info); 4147 if (ret != 0) 4148 return -1; 4149 4150 for (i = 0; i < dev_info.max_mac_addrs; i++) 4151 if (memcmp(addr, &dev->data->mac_addrs[i], 4152 RTE_ETHER_ADDR_LEN) == 0) 4153 return i; 4154 4155 return -1; 4156 } 4157 4158 static const struct rte_ether_addr null_mac_addr; 4159 4160 int 4161 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4162 uint32_t pool) 4163 { 4164 struct rte_eth_dev *dev; 4165 int index; 4166 uint64_t pool_mask; 4167 int ret; 4168 4169 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4170 dev = &rte_eth_devices[port_id]; 4171 4172 if (addr == NULL) { 4173 RTE_ETHDEV_LOG(ERR, 4174 "Cannot add ethdev port %u MAC address from NULL address\n", 4175 port_id); 4176 return -EINVAL; 4177 } 4178 4179 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4180 4181 if (rte_is_zero_ether_addr(addr)) { 4182 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4183 port_id); 4184 return -EINVAL; 4185 } 4186 if (pool >= RTE_ETH_64_POOLS) { 4187 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 4188 return -EINVAL; 4189 } 4190 4191 index = eth_dev_get_mac_addr_index(port_id, addr); 4192 if (index < 0) { 4193 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4194 if (index < 0) { 4195 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4196 port_id); 4197 return -ENOSPC; 4198 } 4199 } else { 4200 pool_mask = dev->data->mac_pool_sel[index]; 4201 4202 /* Check if both MAC address and pool is already there, and do nothing */ 4203 if (pool_mask & RTE_BIT64(pool)) 4204 return 0; 4205 } 4206 4207 /* Update NIC */ 4208 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4209 4210 if (ret == 0) { 4211 /* Update address in NIC data structure */ 4212 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4213 4214 /* Update pool bitmap in NIC data structure */ 4215 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4216 } 4217 4218 return eth_err(port_id, ret); 4219 } 4220 4221 int 4222 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4223 { 4224 struct rte_eth_dev *dev; 4225 int index; 4226 4227 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4228 dev = &rte_eth_devices[port_id]; 4229 4230 if (addr == NULL) { 4231 RTE_ETHDEV_LOG(ERR, 4232 "Cannot remove ethdev port %u MAC address from NULL address\n", 4233 port_id); 4234 return -EINVAL; 4235 } 4236 4237 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, 
-ENOTSUP); 4238 4239 index = eth_dev_get_mac_addr_index(port_id, addr); 4240 if (index == 0) { 4241 RTE_ETHDEV_LOG(ERR, 4242 "Port %u: Cannot remove default MAC address\n", 4243 port_id); 4244 return -EADDRINUSE; 4245 } else if (index < 0) 4246 return 0; /* Do nothing if address wasn't found */ 4247 4248 /* Update NIC */ 4249 (*dev->dev_ops->mac_addr_remove)(dev, index); 4250 4251 /* Update address in NIC data structure */ 4252 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4253 4254 /* reset pool bitmap */ 4255 dev->data->mac_pool_sel[index] = 0; 4256 4257 return 0; 4258 } 4259 4260 int 4261 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4262 { 4263 struct rte_eth_dev *dev; 4264 int ret; 4265 4266 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4267 dev = &rte_eth_devices[port_id]; 4268 4269 if (addr == NULL) { 4270 RTE_ETHDEV_LOG(ERR, 4271 "Cannot set ethdev port %u default MAC address from NULL address\n", 4272 port_id); 4273 return -EINVAL; 4274 } 4275 4276 if (!rte_is_valid_assigned_ether_addr(addr)) 4277 return -EINVAL; 4278 4279 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4280 4281 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4282 if (ret < 0) 4283 return ret; 4284 4285 /* Update default address in NIC data structure */ 4286 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4287 4288 return 0; 4289 } 4290 4291 4292 /* 4293 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4294 * an empty spot. 4295 */ 4296 static int 4297 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4298 const struct rte_ether_addr *addr) 4299 { 4300 struct rte_eth_dev_info dev_info; 4301 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4302 unsigned i; 4303 int ret; 4304 4305 ret = rte_eth_dev_info_get(port_id, &dev_info); 4306 if (ret != 0) 4307 return -1; 4308 4309 if (!dev->data->hash_mac_addrs) 4310 return -1; 4311 4312 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4313 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4314 RTE_ETHER_ADDR_LEN) == 0) 4315 return i; 4316 4317 return -1; 4318 } 4319 4320 int 4321 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4322 uint8_t on) 4323 { 4324 int index; 4325 int ret; 4326 struct rte_eth_dev *dev; 4327 4328 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4329 dev = &rte_eth_devices[port_id]; 4330 4331 if (addr == NULL) { 4332 RTE_ETHDEV_LOG(ERR, 4333 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4334 port_id); 4335 return -EINVAL; 4336 } 4337 4338 if (rte_is_zero_ether_addr(addr)) { 4339 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4340 port_id); 4341 return -EINVAL; 4342 } 4343 4344 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4345 /* Check if it's already there, and do nothing */ 4346 if ((index >= 0) && on) 4347 return 0; 4348 4349 if (index < 0) { 4350 if (!on) { 4351 RTE_ETHDEV_LOG(ERR, 4352 "Port %u: the MAC address was not set in UTA\n", 4353 port_id); 4354 return -EINVAL; 4355 } 4356 4357 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4358 if (index < 0) { 4359 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4360 port_id); 4361 return -ENOSPC; 4362 } 4363 } 4364 4365 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4366 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4367 if (ret == 0) { 4368 /* Update address in NIC data structure */ 4369 if (on) 4370 rte_ether_addr_copy(addr, 4371 
&dev->data->hash_mac_addrs[index]); 4372 else 4373 rte_ether_addr_copy(&null_mac_addr, 4374 &dev->data->hash_mac_addrs[index]); 4375 } 4376 4377 return eth_err(port_id, ret); 4378 } 4379 4380 int 4381 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4382 { 4383 struct rte_eth_dev *dev; 4384 4385 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4386 dev = &rte_eth_devices[port_id]; 4387 4388 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4389 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4390 on)); 4391 } 4392 4393 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4394 uint16_t tx_rate) 4395 { 4396 struct rte_eth_dev *dev; 4397 struct rte_eth_dev_info dev_info; 4398 struct rte_eth_link link; 4399 int ret; 4400 4401 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4402 dev = &rte_eth_devices[port_id]; 4403 4404 ret = rte_eth_dev_info_get(port_id, &dev_info); 4405 if (ret != 0) 4406 return ret; 4407 4408 link = dev->data->dev_link; 4409 4410 if (queue_idx > dev_info.max_tx_queues) { 4411 RTE_ETHDEV_LOG(ERR, 4412 "Set queue rate limit:port %u: invalid queue ID=%u\n", 4413 port_id, queue_idx); 4414 return -EINVAL; 4415 } 4416 4417 if (tx_rate > link.link_speed) { 4418 RTE_ETHDEV_LOG(ERR, 4419 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 4420 tx_rate, link.link_speed); 4421 return -EINVAL; 4422 } 4423 4424 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 4425 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 4426 queue_idx, tx_rate)); 4427 } 4428 4429 RTE_INIT(eth_dev_init_fp_ops) 4430 { 4431 uint32_t i; 4432 4433 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 4434 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 4435 } 4436 4437 RTE_INIT(eth_dev_init_cb_lists) 4438 { 4439 uint16_t i; 4440 4441 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 4442 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 4443 } 4444 4445 int 4446 rte_eth_dev_callback_register(uint16_t port_id, 4447 enum rte_eth_event_type event, 4448 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4449 { 4450 struct rte_eth_dev *dev; 4451 struct rte_eth_dev_callback *user_cb; 4452 uint16_t next_port; 4453 uint16_t last_port; 4454 4455 if (cb_fn == NULL) { 4456 RTE_ETHDEV_LOG(ERR, 4457 "Cannot register ethdev port %u callback from NULL\n", 4458 port_id); 4459 return -EINVAL; 4460 } 4461 4462 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4463 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4464 return -EINVAL; 4465 } 4466 4467 if (port_id == RTE_ETH_ALL) { 4468 next_port = 0; 4469 last_port = RTE_MAX_ETHPORTS - 1; 4470 } else { 4471 next_port = last_port = port_id; 4472 } 4473 4474 rte_spinlock_lock(ð_dev_cb_lock); 4475 4476 do { 4477 dev = &rte_eth_devices[next_port]; 4478 4479 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 4480 if (user_cb->cb_fn == cb_fn && 4481 user_cb->cb_arg == cb_arg && 4482 user_cb->event == event) { 4483 break; 4484 } 4485 } 4486 4487 /* create a new callback. 
*/ 4488 if (user_cb == NULL) { 4489 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 4490 sizeof(struct rte_eth_dev_callback), 0); 4491 if (user_cb != NULL) { 4492 user_cb->cb_fn = cb_fn; 4493 user_cb->cb_arg = cb_arg; 4494 user_cb->event = event; 4495 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 4496 user_cb, next); 4497 } else { 4498 rte_spinlock_unlock(&eth_dev_cb_lock); 4499 rte_eth_dev_callback_unregister(port_id, event, 4500 cb_fn, cb_arg); 4501 return -ENOMEM; 4502 } 4503 4504 } 4505 } while (++next_port <= last_port); 4506 4507 rte_spinlock_unlock(&eth_dev_cb_lock); 4508 return 0; 4509 } 4510 4511 int 4512 rte_eth_dev_callback_unregister(uint16_t port_id, 4513 enum rte_eth_event_type event, 4514 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4515 { 4516 int ret; 4517 struct rte_eth_dev *dev; 4518 struct rte_eth_dev_callback *cb, *next; 4519 uint16_t next_port; 4520 uint16_t last_port; 4521 4522 if (cb_fn == NULL) { 4523 RTE_ETHDEV_LOG(ERR, 4524 "Cannot unregister ethdev port %u callback from NULL\n", 4525 port_id); 4526 return -EINVAL; 4527 } 4528 4529 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4530 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4531 return -EINVAL; 4532 } 4533 4534 if (port_id == RTE_ETH_ALL) { 4535 next_port = 0; 4536 last_port = RTE_MAX_ETHPORTS - 1; 4537 } else { 4538 next_port = last_port = port_id; 4539 } 4540 4541 rte_spinlock_lock(&eth_dev_cb_lock); 4542 4543 do { 4544 dev = &rte_eth_devices[next_port]; 4545 ret = 0; 4546 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 4547 cb = next) { 4548 4549 next = TAILQ_NEXT(cb, next); 4550 4551 if (cb->cb_fn != cb_fn || cb->event != event || 4552 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 4553 continue; 4554 4555 /* 4556 * if this callback is not executing right now, 4557 * then remove it.
4558 */ 4559 if (cb->active == 0) { 4560 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 4561 rte_free(cb); 4562 } else { 4563 ret = -EAGAIN; 4564 } 4565 } 4566 } while (++next_port <= last_port); 4567 4568 rte_spinlock_unlock(ð_dev_cb_lock); 4569 return ret; 4570 } 4571 4572 int 4573 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4574 { 4575 uint32_t vec; 4576 struct rte_eth_dev *dev; 4577 struct rte_intr_handle *intr_handle; 4578 uint16_t qid; 4579 int rc; 4580 4581 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4582 dev = &rte_eth_devices[port_id]; 4583 4584 if (!dev->intr_handle) { 4585 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4586 return -ENOTSUP; 4587 } 4588 4589 intr_handle = dev->intr_handle; 4590 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4591 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4592 return -EPERM; 4593 } 4594 4595 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4596 vec = rte_intr_vec_list_index_get(intr_handle, qid); 4597 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4598 if (rc && rc != -EEXIST) { 4599 RTE_ETHDEV_LOG(ERR, 4600 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 4601 port_id, qid, op, epfd, vec); 4602 } 4603 } 4604 4605 return 0; 4606 } 4607 4608 int 4609 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4610 { 4611 struct rte_intr_handle *intr_handle; 4612 struct rte_eth_dev *dev; 4613 unsigned int efd_idx; 4614 uint32_t vec; 4615 int fd; 4616 4617 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4618 dev = &rte_eth_devices[port_id]; 4619 4620 if (queue_id >= dev->data->nb_rx_queues) { 4621 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4622 return -1; 4623 } 4624 4625 if (!dev->intr_handle) { 4626 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4627 return -1; 4628 } 4629 4630 intr_handle = dev->intr_handle; 4631 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4632 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4633 return -1; 4634 } 4635 4636 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 4637 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
4638 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4639 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 4640 4641 return fd; 4642 } 4643 4644 int 4645 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 4646 int epfd, int op, void *data) 4647 { 4648 uint32_t vec; 4649 struct rte_eth_dev *dev; 4650 struct rte_intr_handle *intr_handle; 4651 int rc; 4652 4653 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4654 dev = &rte_eth_devices[port_id]; 4655 4656 if (queue_id >= dev->data->nb_rx_queues) { 4657 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4658 return -EINVAL; 4659 } 4660 4661 if (!dev->intr_handle) { 4662 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4663 return -ENOTSUP; 4664 } 4665 4666 intr_handle = dev->intr_handle; 4667 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4668 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4669 return -EPERM; 4670 } 4671 4672 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 4673 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4674 if (rc && rc != -EEXIST) { 4675 RTE_ETHDEV_LOG(ERR, 4676 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 4677 port_id, queue_id, op, epfd, vec); 4678 return rc; 4679 } 4680 4681 return 0; 4682 } 4683 4684 int 4685 rte_eth_dev_rx_intr_enable(uint16_t port_id, 4686 uint16_t queue_id) 4687 { 4688 struct rte_eth_dev *dev; 4689 int ret; 4690 4691 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4692 dev = &rte_eth_devices[port_id]; 4693 4694 ret = eth_dev_validate_rx_queue(dev, queue_id); 4695 if (ret != 0) 4696 return ret; 4697 4698 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 4699 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 4700 } 4701 4702 int 4703 rte_eth_dev_rx_intr_disable(uint16_t port_id, 4704 uint16_t queue_id) 4705 { 4706 struct rte_eth_dev *dev; 4707 int ret; 4708 4709 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4710 dev = &rte_eth_devices[port_id]; 4711 4712 ret = eth_dev_validate_rx_queue(dev, queue_id); 4713 if (ret != 0) 4714 return ret; 4715 4716 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 4717 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 4718 } 4719 4720 4721 const struct rte_eth_rxtx_callback * 4722 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 4723 rte_rx_callback_fn fn, void *user_param) 4724 { 4725 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4726 rte_errno = ENOTSUP; 4727 return NULL; 4728 #endif 4729 struct rte_eth_dev *dev; 4730 4731 /* check input parameters */ 4732 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 4733 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 4734 rte_errno = EINVAL; 4735 return NULL; 4736 } 4737 dev = &rte_eth_devices[port_id]; 4738 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 4739 rte_errno = EINVAL; 4740 return NULL; 4741 } 4742 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 4743 4744 if (cb == NULL) { 4745 rte_errno = ENOMEM; 4746 return NULL; 4747 } 4748 4749 cb->fn.rx = fn; 4750 cb->param = user_param; 4751 4752 rte_spinlock_lock(ð_dev_rx_cb_lock); 4753 /* Add the callbacks in fifo order. */ 4754 struct rte_eth_rxtx_callback *tail = 4755 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 4756 4757 if (!tail) { 4758 /* Stores to cb->fn and cb->param should complete before 4759 * cb is visible to data plane. 
4760 */ 4761 __atomic_store_n( 4762 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 4763 cb, __ATOMIC_RELEASE); 4764 4765 } else { 4766 while (tail->next) 4767 tail = tail->next; 4768 /* Stores to cb->fn and cb->param should complete before 4769 * cb is visible to data plane. 4770 */ 4771 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 4772 } 4773 rte_spinlock_unlock(ð_dev_rx_cb_lock); 4774 4775 return cb; 4776 } 4777 4778 const struct rte_eth_rxtx_callback * 4779 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 4780 rte_rx_callback_fn fn, void *user_param) 4781 { 4782 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4783 rte_errno = ENOTSUP; 4784 return NULL; 4785 #endif 4786 /* check input parameters */ 4787 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 4788 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 4789 rte_errno = EINVAL; 4790 return NULL; 4791 } 4792 4793 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 4794 4795 if (cb == NULL) { 4796 rte_errno = ENOMEM; 4797 return NULL; 4798 } 4799 4800 cb->fn.rx = fn; 4801 cb->param = user_param; 4802 4803 rte_spinlock_lock(ð_dev_rx_cb_lock); 4804 /* Add the callbacks at first position */ 4805 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 4806 /* Stores to cb->fn, cb->param and cb->next should complete before 4807 * cb is visible to data plane threads. 4808 */ 4809 __atomic_store_n( 4810 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 4811 cb, __ATOMIC_RELEASE); 4812 rte_spinlock_unlock(ð_dev_rx_cb_lock); 4813 4814 return cb; 4815 } 4816 4817 const struct rte_eth_rxtx_callback * 4818 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 4819 rte_tx_callback_fn fn, void *user_param) 4820 { 4821 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4822 rte_errno = ENOTSUP; 4823 return NULL; 4824 #endif 4825 struct rte_eth_dev *dev; 4826 4827 /* check input parameters */ 4828 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 4829 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 4830 rte_errno = EINVAL; 4831 return NULL; 4832 } 4833 4834 dev = &rte_eth_devices[port_id]; 4835 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 4836 rte_errno = EINVAL; 4837 return NULL; 4838 } 4839 4840 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 4841 4842 if (cb == NULL) { 4843 rte_errno = ENOMEM; 4844 return NULL; 4845 } 4846 4847 cb->fn.tx = fn; 4848 cb->param = user_param; 4849 4850 rte_spinlock_lock(ð_dev_tx_cb_lock); 4851 /* Add the callbacks in fifo order. */ 4852 struct rte_eth_rxtx_callback *tail = 4853 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 4854 4855 if (!tail) { 4856 /* Stores to cb->fn and cb->param should complete before 4857 * cb is visible to data plane. 4858 */ 4859 __atomic_store_n( 4860 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 4861 cb, __ATOMIC_RELEASE); 4862 4863 } else { 4864 while (tail->next) 4865 tail = tail->next; 4866 /* Stores to cb->fn and cb->param should complete before 4867 * cb is visible to data plane. 4868 */ 4869 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 4870 } 4871 rte_spinlock_unlock(ð_dev_tx_cb_lock); 4872 4873 return cb; 4874 } 4875 4876 int 4877 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 4878 const struct rte_eth_rxtx_callback *user_cb) 4879 { 4880 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4881 return -ENOTSUP; 4882 #endif 4883 /* Check input parameters. 
*/ 4884 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4885 if (user_cb == NULL || 4886 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 4887 return -EINVAL; 4888 4889 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4890 struct rte_eth_rxtx_callback *cb; 4891 struct rte_eth_rxtx_callback **prev_cb; 4892 int ret = -EINVAL; 4893 4894 rte_spinlock_lock(ð_dev_rx_cb_lock); 4895 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 4896 for (; *prev_cb != NULL; prev_cb = &cb->next) { 4897 cb = *prev_cb; 4898 if (cb == user_cb) { 4899 /* Remove the user cb from the callback list. */ 4900 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 4901 ret = 0; 4902 break; 4903 } 4904 } 4905 rte_spinlock_unlock(ð_dev_rx_cb_lock); 4906 4907 return ret; 4908 } 4909 4910 int 4911 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 4912 const struct rte_eth_rxtx_callback *user_cb) 4913 { 4914 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4915 return -ENOTSUP; 4916 #endif 4917 /* Check input parameters. */ 4918 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4919 if (user_cb == NULL || 4920 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 4921 return -EINVAL; 4922 4923 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4924 int ret = -EINVAL; 4925 struct rte_eth_rxtx_callback *cb; 4926 struct rte_eth_rxtx_callback **prev_cb; 4927 4928 rte_spinlock_lock(ð_dev_tx_cb_lock); 4929 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 4930 for (; *prev_cb != NULL; prev_cb = &cb->next) { 4931 cb = *prev_cb; 4932 if (cb == user_cb) { 4933 /* Remove the user cb from the callback list. */ 4934 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 4935 ret = 0; 4936 break; 4937 } 4938 } 4939 rte_spinlock_unlock(ð_dev_tx_cb_lock); 4940 4941 return ret; 4942 } 4943 4944 int 4945 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 4946 struct rte_eth_rxq_info *qinfo) 4947 { 4948 struct rte_eth_dev *dev; 4949 4950 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4951 dev = &rte_eth_devices[port_id]; 4952 4953 if (queue_id >= dev->data->nb_rx_queues) { 4954 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4955 return -EINVAL; 4956 } 4957 4958 if (qinfo == NULL) { 4959 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 4960 port_id, queue_id); 4961 return -EINVAL; 4962 } 4963 4964 if (dev->data->rx_queues == NULL || 4965 dev->data->rx_queues[queue_id] == NULL) { 4966 RTE_ETHDEV_LOG(ERR, 4967 "Rx queue %"PRIu16" of device with port_id=%" 4968 PRIu16" has not been setup\n", 4969 queue_id, port_id); 4970 return -EINVAL; 4971 } 4972 4973 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 4974 RTE_ETHDEV_LOG(INFO, 4975 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 4976 queue_id, port_id); 4977 return -EINVAL; 4978 } 4979 4980 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 4981 4982 memset(qinfo, 0, sizeof(*qinfo)); 4983 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 4984 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 4985 4986 return 0; 4987 } 4988 4989 int 4990 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 4991 struct rte_eth_txq_info *qinfo) 4992 { 4993 struct rte_eth_dev *dev; 4994 4995 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4996 dev = &rte_eth_devices[port_id]; 4997 4998 if (queue_id >= dev->data->nb_tx_queues) { 4999 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5000 return -EINVAL; 5001 } 5002 5003 if (qinfo == NULL) { 5004 RTE_ETHDEV_LOG(ERR, "Cannot get 
ethdev port %u Tx queue %u info to NULL\n", 5005 port_id, queue_id); 5006 return -EINVAL; 5007 } 5008 5009 if (dev->data->tx_queues == NULL || 5010 dev->data->tx_queues[queue_id] == NULL) { 5011 RTE_ETHDEV_LOG(ERR, 5012 "Tx queue %"PRIu16" of device with port_id=%" 5013 PRIu16" has not been setup\n", 5014 queue_id, port_id); 5015 return -EINVAL; 5016 } 5017 5018 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5019 RTE_ETHDEV_LOG(INFO, 5020 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5021 queue_id, port_id); 5022 return -EINVAL; 5023 } 5024 5025 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 5026 5027 memset(qinfo, 0, sizeof(*qinfo)); 5028 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5029 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5030 5031 return 0; 5032 } 5033 5034 int 5035 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5036 struct rte_eth_burst_mode *mode) 5037 { 5038 struct rte_eth_dev *dev; 5039 5040 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5041 dev = &rte_eth_devices[port_id]; 5042 5043 if (queue_id >= dev->data->nb_rx_queues) { 5044 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5045 return -EINVAL; 5046 } 5047 5048 if (mode == NULL) { 5049 RTE_ETHDEV_LOG(ERR, 5050 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5051 port_id, queue_id); 5052 return -EINVAL; 5053 } 5054 5055 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); 5056 memset(mode, 0, sizeof(*mode)); 5057 return eth_err(port_id, 5058 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5059 } 5060 5061 int 5062 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5063 struct rte_eth_burst_mode *mode) 5064 { 5065 struct rte_eth_dev *dev; 5066 5067 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5068 dev = &rte_eth_devices[port_id]; 5069 5070 if (queue_id >= dev->data->nb_tx_queues) { 5071 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5072 return -EINVAL; 5073 } 5074 5075 if (mode == NULL) { 5076 RTE_ETHDEV_LOG(ERR, 5077 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5078 port_id, queue_id); 5079 return -EINVAL; 5080 } 5081 5082 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5083 memset(mode, 0, sizeof(*mode)); 5084 return eth_err(port_id, 5085 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5086 } 5087 5088 int 5089 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5090 struct rte_power_monitor_cond *pmc) 5091 { 5092 struct rte_eth_dev *dev; 5093 5094 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5095 dev = &rte_eth_devices[port_id]; 5096 5097 if (queue_id >= dev->data->nb_rx_queues) { 5098 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5099 return -EINVAL; 5100 } 5101 5102 if (pmc == NULL) { 5103 RTE_ETHDEV_LOG(ERR, 5104 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5105 port_id, queue_id); 5106 return -EINVAL; 5107 } 5108 5109 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5110 return eth_err(port_id, 5111 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5112 } 5113 5114 int 5115 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5116 struct rte_ether_addr *mc_addr_set, 5117 uint32_t nb_mc_addr) 5118 { 5119 struct rte_eth_dev *dev; 5120 5121 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5122 dev = &rte_eth_devices[port_id]; 5123 5124 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5125 
return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5126 mc_addr_set, nb_mc_addr)); 5127 } 5128 5129 int 5130 rte_eth_timesync_enable(uint16_t port_id) 5131 { 5132 struct rte_eth_dev *dev; 5133 5134 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5135 dev = &rte_eth_devices[port_id]; 5136 5137 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5138 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5139 } 5140 5141 int 5142 rte_eth_timesync_disable(uint16_t port_id) 5143 { 5144 struct rte_eth_dev *dev; 5145 5146 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5147 dev = &rte_eth_devices[port_id]; 5148 5149 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5150 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5151 } 5152 5153 int 5154 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5155 uint32_t flags) 5156 { 5157 struct rte_eth_dev *dev; 5158 5159 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5160 dev = &rte_eth_devices[port_id]; 5161 5162 if (timestamp == NULL) { 5163 RTE_ETHDEV_LOG(ERR, 5164 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5165 port_id); 5166 return -EINVAL; 5167 } 5168 5169 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5170 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5171 (dev, timestamp, flags)); 5172 } 5173 5174 int 5175 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5176 struct timespec *timestamp) 5177 { 5178 struct rte_eth_dev *dev; 5179 5180 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5181 dev = &rte_eth_devices[port_id]; 5182 5183 if (timestamp == NULL) { 5184 RTE_ETHDEV_LOG(ERR, 5185 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5186 port_id); 5187 return -EINVAL; 5188 } 5189 5190 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5191 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5192 (dev, timestamp)); 5193 } 5194 5195 int 5196 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5197 { 5198 struct rte_eth_dev *dev; 5199 5200 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5201 dev = &rte_eth_devices[port_id]; 5202 5203 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5204 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5205 } 5206 5207 int 5208 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5209 { 5210 struct rte_eth_dev *dev; 5211 5212 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5213 dev = &rte_eth_devices[port_id]; 5214 5215 if (timestamp == NULL) { 5216 RTE_ETHDEV_LOG(ERR, 5217 "Cannot read ethdev port %u timesync time to NULL\n", 5218 port_id); 5219 return -EINVAL; 5220 } 5221 5222 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5223 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5224 timestamp)); 5225 } 5226 5227 int 5228 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5229 { 5230 struct rte_eth_dev *dev; 5231 5232 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5233 dev = &rte_eth_devices[port_id]; 5234 5235 if (timestamp == NULL) { 5236 RTE_ETHDEV_LOG(ERR, 5237 "Cannot write ethdev port %u timesync from NULL time\n", 5238 port_id); 5239 return -EINVAL; 5240 } 5241 5242 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5243 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5244 timestamp)); 5245 } 5246 5247 
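/*
 * Illustrative sketch only (not part of the library): the timesync_*
 * wrappers above are typically driven by a PTP-style application in
 * roughly this order. Error handling is omitted; the port number, the
 * Rx timestamp flags value and the computed offset are placeholders.
 *
 *	struct timespec ts;
 *	int64_t delta_ns;
 *	uint16_t port_id = 0;
 *
 *	rte_eth_timesync_enable(port_id);
 *	(exchange PTP messages, then fetch the hardware timestamps)
 *	rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
 *	rte_eth_timesync_read_tx_timestamp(port_id, &ts);
 *	(compute delta_ns from the collected timestamps)
 *	rte_eth_timesync_adjust_time(port_id, delta_ns);
 *	rte_eth_timesync_disable(port_id);
 */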
int 5248 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5249 { 5250 struct rte_eth_dev *dev; 5251 5252 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5253 dev = &rte_eth_devices[port_id]; 5254 5255 if (clock == NULL) { 5256 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5257 port_id); 5258 return -EINVAL; 5259 } 5260 5261 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5262 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5263 } 5264 5265 int 5266 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5267 { 5268 struct rte_eth_dev *dev; 5269 5270 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5271 dev = &rte_eth_devices[port_id]; 5272 5273 if (info == NULL) { 5274 RTE_ETHDEV_LOG(ERR, 5275 "Cannot get ethdev port %u register info to NULL\n", 5276 port_id); 5277 return -EINVAL; 5278 } 5279 5280 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5281 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5282 } 5283 5284 int 5285 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5286 { 5287 struct rte_eth_dev *dev; 5288 5289 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5290 dev = &rte_eth_devices[port_id]; 5291 5292 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5293 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5294 } 5295 5296 int 5297 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5298 { 5299 struct rte_eth_dev *dev; 5300 5301 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5302 dev = &rte_eth_devices[port_id]; 5303 5304 if (info == NULL) { 5305 RTE_ETHDEV_LOG(ERR, 5306 "Cannot get ethdev port %u EEPROM info to NULL\n", 5307 port_id); 5308 return -EINVAL; 5309 } 5310 5311 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5312 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5313 } 5314 5315 int 5316 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5317 { 5318 struct rte_eth_dev *dev; 5319 5320 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5321 dev = &rte_eth_devices[port_id]; 5322 5323 if (info == NULL) { 5324 RTE_ETHDEV_LOG(ERR, 5325 "Cannot set ethdev port %u EEPROM from NULL info\n", 5326 port_id); 5327 return -EINVAL; 5328 } 5329 5330 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5331 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5332 } 5333 5334 int 5335 rte_eth_dev_get_module_info(uint16_t port_id, 5336 struct rte_eth_dev_module_info *modinfo) 5337 { 5338 struct rte_eth_dev *dev; 5339 5340 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5341 dev = &rte_eth_devices[port_id]; 5342 5343 if (modinfo == NULL) { 5344 RTE_ETHDEV_LOG(ERR, 5345 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5346 port_id); 5347 return -EINVAL; 5348 } 5349 5350 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5351 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5352 } 5353 5354 int 5355 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5356 struct rte_dev_eeprom_info *info) 5357 { 5358 struct rte_eth_dev *dev; 5359 5360 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5361 dev = &rte_eth_devices[port_id]; 5362 5363 if (info == NULL) { 5364 RTE_ETHDEV_LOG(ERR, 5365 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5366 port_id); 5367 return -EINVAL; 5368 } 5369 5370 if (info->data == NULL) { 5371 RTE_ETHDEV_LOG(ERR, 5372 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5373 port_id); 5374 
return -EINVAL; 5375 } 5376 5377 if (info->length == 0) { 5378 RTE_ETHDEV_LOG(ERR, 5379 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5380 port_id); 5381 return -EINVAL; 5382 } 5383 5384 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5385 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5386 } 5387 5388 int 5389 rte_eth_dev_get_dcb_info(uint16_t port_id, 5390 struct rte_eth_dcb_info *dcb_info) 5391 { 5392 struct rte_eth_dev *dev; 5393 5394 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5395 dev = &rte_eth_devices[port_id]; 5396 5397 if (dcb_info == NULL) { 5398 RTE_ETHDEV_LOG(ERR, 5399 "Cannot get ethdev port %u DCB info to NULL\n", 5400 port_id); 5401 return -EINVAL; 5402 } 5403 5404 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5405 5406 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5407 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5408 } 5409 5410 static void 5411 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5412 const struct rte_eth_desc_lim *desc_lim) 5413 { 5414 if (desc_lim->nb_align != 0) 5415 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5416 5417 if (desc_lim->nb_max != 0) 5418 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5419 5420 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5421 } 5422 5423 int 5424 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5425 uint16_t *nb_rx_desc, 5426 uint16_t *nb_tx_desc) 5427 { 5428 struct rte_eth_dev_info dev_info; 5429 int ret; 5430 5431 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5432 5433 ret = rte_eth_dev_info_get(port_id, &dev_info); 5434 if (ret != 0) 5435 return ret; 5436 5437 if (nb_rx_desc != NULL) 5438 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5439 5440 if (nb_tx_desc != NULL) 5441 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5442 5443 return 0; 5444 } 5445 5446 int 5447 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5448 struct rte_eth_hairpin_cap *cap) 5449 { 5450 struct rte_eth_dev *dev; 5451 5452 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5453 dev = &rte_eth_devices[port_id]; 5454 5455 if (cap == NULL) { 5456 RTE_ETHDEV_LOG(ERR, 5457 "Cannot get ethdev port %u hairpin capability to NULL\n", 5458 port_id); 5459 return -EINVAL; 5460 } 5461 5462 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5463 memset(cap, 0, sizeof(*cap)); 5464 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5465 } 5466 5467 int 5468 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5469 { 5470 struct rte_eth_dev *dev; 5471 5472 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5473 dev = &rte_eth_devices[port_id]; 5474 5475 if (pool == NULL) { 5476 RTE_ETHDEV_LOG(ERR, 5477 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5478 port_id); 5479 return -EINVAL; 5480 } 5481 5482 if (*dev->dev_ops->pool_ops_supported == NULL) 5483 return 1; /* all pools are supported */ 5484 5485 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5486 } 5487 5488 static int 5489 eth_dev_handle_port_list(const char *cmd __rte_unused, 5490 const char *params __rte_unused, 5491 struct rte_tel_data *d) 5492 { 5493 int port_id; 5494 5495 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 5496 RTE_ETH_FOREACH_DEV(port_id) 5497 rte_tel_data_add_array_int(d, port_id); 5498 return 0; 5499 } 5500 5501 static void 5502 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 5503 const char *stat_name) 5504 { 5505 int q; 5506 struct 
rte_tel_data *q_data = rte_tel_data_alloc(); 5507 if (q_data == NULL) 5508 return; 5509 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); 5510 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 5511 rte_tel_data_add_array_u64(q_data, q_stats[q]); 5512 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 5513 } 5514 5515 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s) 5516 5517 static int 5518 eth_dev_handle_port_stats(const char *cmd __rte_unused, 5519 const char *params, 5520 struct rte_tel_data *d) 5521 { 5522 struct rte_eth_stats stats; 5523 int port_id, ret; 5524 5525 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5526 return -1; 5527 5528 port_id = atoi(params); 5529 if (!rte_eth_dev_is_valid_port(port_id)) 5530 return -1; 5531 5532 ret = rte_eth_stats_get(port_id, &stats); 5533 if (ret < 0) 5534 return -1; 5535 5536 rte_tel_data_start_dict(d); 5537 ADD_DICT_STAT(stats, ipackets); 5538 ADD_DICT_STAT(stats, opackets); 5539 ADD_DICT_STAT(stats, ibytes); 5540 ADD_DICT_STAT(stats, obytes); 5541 ADD_DICT_STAT(stats, imissed); 5542 ADD_DICT_STAT(stats, ierrors); 5543 ADD_DICT_STAT(stats, oerrors); 5544 ADD_DICT_STAT(stats, rx_nombuf); 5545 eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets"); 5546 eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets"); 5547 eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes"); 5548 eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes"); 5549 eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors"); 5550 5551 return 0; 5552 } 5553 5554 static int 5555 eth_dev_handle_port_xstats(const char *cmd __rte_unused, 5556 const char *params, 5557 struct rte_tel_data *d) 5558 { 5559 struct rte_eth_xstat *eth_xstats; 5560 struct rte_eth_xstat_name *xstat_names; 5561 int port_id, num_xstats; 5562 int i, ret; 5563 char *end_param; 5564 5565 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5566 return -1; 5567 5568 port_id = strtoul(params, &end_param, 0); 5569 if (*end_param != '\0') 5570 RTE_ETHDEV_LOG(NOTICE, 5571 "Extra parameters passed to ethdev telemetry command, ignoring"); 5572 if (!rte_eth_dev_is_valid_port(port_id)) 5573 return -1; 5574 5575 num_xstats = rte_eth_xstats_get(port_id, NULL, 0); 5576 if (num_xstats < 0) 5577 return -1; 5578 5579 /* use one malloc for both names and stats */ 5580 eth_xstats = malloc((sizeof(struct rte_eth_xstat) + 5581 sizeof(struct rte_eth_xstat_name)) * num_xstats); 5582 if (eth_xstats == NULL) 5583 return -1; 5584 xstat_names = (void *)ð_xstats[num_xstats]; 5585 5586 ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats); 5587 if (ret < 0 || ret > num_xstats) { 5588 free(eth_xstats); 5589 return -1; 5590 } 5591 5592 ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats); 5593 if (ret < 0 || ret > num_xstats) { 5594 free(eth_xstats); 5595 return -1; 5596 } 5597 5598 rte_tel_data_start_dict(d); 5599 for (i = 0; i < num_xstats; i++) 5600 rte_tel_data_add_dict_u64(d, xstat_names[i].name, 5601 eth_xstats[i].value); 5602 free(eth_xstats); 5603 return 0; 5604 } 5605 5606 static int 5607 eth_dev_handle_port_link_status(const char *cmd __rte_unused, 5608 const char *params, 5609 struct rte_tel_data *d) 5610 { 5611 static const char *status_str = "status"; 5612 int ret, port_id; 5613 struct rte_eth_link link; 5614 char *end_param; 5615 5616 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5617 return -1; 5618 5619 port_id = strtoul(params, &end_param, 0); 5620 if (*end_param != '\0') 5621 RTE_ETHDEV_LOG(NOTICE, 5622 
"Extra parameters passed to ethdev telemetry command, ignoring"); 5623 if (!rte_eth_dev_is_valid_port(port_id)) 5624 return -1; 5625 5626 ret = rte_eth_link_get_nowait(port_id, &link); 5627 if (ret < 0) 5628 return -1; 5629 5630 rte_tel_data_start_dict(d); 5631 if (!link.link_status) { 5632 rte_tel_data_add_dict_string(d, status_str, "DOWN"); 5633 return 0; 5634 } 5635 rte_tel_data_add_dict_string(d, status_str, "UP"); 5636 rte_tel_data_add_dict_u64(d, "speed", link.link_speed); 5637 rte_tel_data_add_dict_string(d, "duplex", 5638 (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 5639 "full-duplex" : "half-duplex"); 5640 return 0; 5641 } 5642 5643 static int 5644 eth_dev_handle_port_info(const char *cmd __rte_unused, 5645 const char *params, 5646 struct rte_tel_data *d) 5647 { 5648 struct rte_tel_data *rxq_state, *txq_state; 5649 char mac_addr[RTE_ETHER_ADDR_FMT_SIZE]; 5650 struct rte_eth_dev *eth_dev; 5651 char *end_param; 5652 int port_id, i; 5653 5654 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5655 return -1; 5656 5657 port_id = strtoul(params, &end_param, 0); 5658 if (*end_param != '\0') 5659 RTE_ETHDEV_LOG(NOTICE, 5660 "Extra parameters passed to ethdev telemetry command, ignoring"); 5661 5662 if (!rte_eth_dev_is_valid_port(port_id)) 5663 return -EINVAL; 5664 5665 eth_dev = &rte_eth_devices[port_id]; 5666 5667 rxq_state = rte_tel_data_alloc(); 5668 if (!rxq_state) 5669 return -ENOMEM; 5670 5671 txq_state = rte_tel_data_alloc(); 5672 if (!txq_state) { 5673 rte_tel_data_free(rxq_state); 5674 return -ENOMEM; 5675 } 5676 5677 rte_tel_data_start_dict(d); 5678 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 5679 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 5680 rte_tel_data_add_dict_int(d, "nb_rx_queues", 5681 eth_dev->data->nb_rx_queues); 5682 rte_tel_data_add_dict_int(d, "nb_tx_queues", 5683 eth_dev->data->nb_tx_queues); 5684 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 5685 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 5686 rte_tel_data_add_dict_int(d, "rx_mbuf_size_min", 5687 eth_dev->data->min_rx_buf_size); 5688 rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", 5689 eth_dev->data->rx_mbuf_alloc_failed); 5690 rte_ether_format_addr(mac_addr, sizeof(mac_addr), 5691 eth_dev->data->mac_addrs); 5692 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 5693 rte_tel_data_add_dict_int(d, "promiscuous", 5694 eth_dev->data->promiscuous); 5695 rte_tel_data_add_dict_int(d, "scattered_rx", 5696 eth_dev->data->scattered_rx); 5697 rte_tel_data_add_dict_int(d, "all_multicast", 5698 eth_dev->data->all_multicast); 5699 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 5700 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 5701 rte_tel_data_add_dict_int(d, "dev_configured", 5702 eth_dev->data->dev_configured); 5703 5704 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 5705 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 5706 rte_tel_data_add_array_int(rxq_state, 5707 eth_dev->data->rx_queue_state[i]); 5708 5709 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 5710 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 5711 rte_tel_data_add_array_int(txq_state, 5712 eth_dev->data->tx_queue_state[i]); 5713 5714 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 5715 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 5716 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 5717 rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags); 5718 
rte_tel_data_add_dict_int(d, "rx_offloads", 5719 eth_dev->data->dev_conf.rxmode.offloads); 5720 rte_tel_data_add_dict_int(d, "tx_offloads", 5721 eth_dev->data->dev_conf.txmode.offloads); 5722 rte_tel_data_add_dict_int(d, "ethdev_rss_hf", 5723 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); 5724 5725 return 0; 5726 } 5727 5728 int 5729 rte_eth_representor_info_get(uint16_t port_id, 5730 struct rte_eth_representor_info *info) 5731 { 5732 struct rte_eth_dev *dev; 5733 5734 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5735 dev = &rte_eth_devices[port_id]; 5736 5737 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP); 5738 return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info)); 5739 } 5740 5741 int 5742 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features) 5743 { 5744 struct rte_eth_dev *dev; 5745 5746 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5747 dev = &rte_eth_devices[port_id]; 5748 5749 if (dev->data->dev_configured != 0) { 5750 RTE_ETHDEV_LOG(ERR, 5751 "The port (ID=%"PRIu16") is already configured\n", 5752 port_id); 5753 return -EBUSY; 5754 } 5755 5756 if (features == NULL) { 5757 RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n"); 5758 return -EINVAL; 5759 } 5760 5761 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP); 5762 return eth_err(port_id, 5763 (*dev->dev_ops->rx_metadata_negotiate)(dev, features)); 5764 } 5765 5766 int 5767 rte_eth_ip_reassembly_capability_get(uint16_t port_id, 5768 struct rte_eth_ip_reassembly_params *reassembly_capa) 5769 { 5770 struct rte_eth_dev *dev; 5771 5772 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5773 dev = &rte_eth_devices[port_id]; 5774 5775 if (dev->data->dev_configured == 0) { 5776 RTE_ETHDEV_LOG(ERR, 5777 "Device with port_id=%u is not configured.\n" 5778 "Cannot get IP reassembly capability\n", 5779 port_id); 5780 return -EINVAL; 5781 } 5782 5783 if (reassembly_capa == NULL) { 5784 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL"); 5785 return -EINVAL; 5786 } 5787 5788 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_capability_get, 5789 -ENOTSUP); 5790 memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params)); 5791 5792 return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get) 5793 (dev, reassembly_capa)); 5794 } 5795 5796 int 5797 rte_eth_ip_reassembly_conf_get(uint16_t port_id, 5798 struct rte_eth_ip_reassembly_params *conf) 5799 { 5800 struct rte_eth_dev *dev; 5801 5802 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5803 dev = &rte_eth_devices[port_id]; 5804 5805 if (dev->data->dev_configured == 0) { 5806 RTE_ETHDEV_LOG(ERR, 5807 "Device with port_id=%u is not configured.\n" 5808 "Cannot get IP reassembly configuration\n", 5809 port_id); 5810 return -EINVAL; 5811 } 5812 5813 if (conf == NULL) { 5814 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL"); 5815 return -EINVAL; 5816 } 5817 5818 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_get, 5819 -ENOTSUP); 5820 memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params)); 5821 return eth_err(port_id, 5822 (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf)); 5823 } 5824 5825 int 5826 rte_eth_ip_reassembly_conf_set(uint16_t port_id, 5827 const struct rte_eth_ip_reassembly_params *conf) 5828 { 5829 struct rte_eth_dev *dev; 5830 5831 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5832 dev = &rte_eth_devices[port_id]; 5833 5834 if (dev->data->dev_configured == 0) { 5835 RTE_ETHDEV_LOG(ERR, 5836 "Device with port_id=%u is not 
configured.\n" 5837 "Cannot set IP reassembly configuration", 5838 port_id); 5839 return -EINVAL; 5840 } 5841 5842 if (dev->data->dev_started != 0) { 5843 RTE_ETHDEV_LOG(ERR, 5844 "Device with port_id=%u started,\n" 5845 "cannot configure IP reassembly params.\n", 5846 port_id); 5847 return -EINVAL; 5848 } 5849 5850 if (conf == NULL) { 5851 RTE_ETHDEV_LOG(ERR, 5852 "Invalid IP reassembly configuration (NULL)\n"); 5853 return -EINVAL; 5854 } 5855 5856 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_set, 5857 -ENOTSUP); 5858 return eth_err(port_id, 5859 (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf)); 5860 } 5861 5862 int 5863 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file) 5864 { 5865 struct rte_eth_dev *dev; 5866 5867 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5868 dev = &rte_eth_devices[port_id]; 5869 5870 if (file == NULL) { 5871 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 5872 return -EINVAL; 5873 } 5874 5875 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_dev_priv_dump, -ENOTSUP); 5876 return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file)); 5877 } 5878 5879 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO); 5880 5881 RTE_INIT(ethdev_init_telemetry) 5882 { 5883 rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list, 5884 "Returns list of available ethdev ports. Takes no parameters"); 5885 rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats, 5886 "Returns the common stats for a port. Parameters: int port_id"); 5887 rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats, 5888 "Returns the extended stats for a port. Parameters: int port_id"); 5889 rte_telemetry_register_cmd("/ethdev/link_status", 5890 eth_dev_handle_port_link_status, 5891 "Returns the link status for a port. Parameters: int port_id"); 5892 rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info, 5893 "Returns the device info for a port. Parameters: int port_id"); 5894 rte_telemetry_register_cmd("/ethdev/module_eeprom", eth_dev_handle_port_module_eeprom, 5895 "Returns module EEPROM info with SFF specs. Parameters: int port_id"); 5896 } 5897
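/*
 * Illustrative usage sketch (not compiled into the library): two of the
 * facilities defined above, as an application might consume them. The
 * callback name "lsc_event_cb" and the queried port are examples only.
 *
 * 1. Reacting to link-state-change events through the callback API:
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		     void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 *
 * 2. Querying the telemetry commands registered in ethdev_init_telemetry()
 *    at runtime, e.g. with the telemetry client shipped in usertools/:
 *
 *	$ dpdk-telemetry.py
 *	--> /ethdev/stats,0
 */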