/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() is not yet supporting the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
			(strcmp(iter->bus->name, "fslmc") == 0) ||
			(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}
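
/*
 * Illustrative usage sketch of the iterator API above: walk every port
 * matching a devargs filter. Cleanup is only strictly needed when the loop
 * is left early, since exhausting the iterator already calls the cleanup.
 *
 *	struct rte_dev_iterator it;
 *	uint16_t pid;
 *
 *	if (rte_eth_iterator_init(&it, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (pid = rte_eth_iterator_next(&it);
 *				pid != RTE_MAX_ETHPORTS;
 *				pid = rte_eth_iterator_next(&it)) {
 *			... use pid ...
 *		}
 *		rte_eth_iterator_cleanup(&it); // needed only on early break
 *	}
 */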

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because filtering owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
					sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}
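
/*
 * Illustrative usage sketch of the ownership API: claim a port so that other
 * components skip it in RTE_ETH_FOREACH_DEV, then release it when done.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *			rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		... exclusive use of port_id ...
 *		rte_eth_dev_owner_unset(port_id, owner.id);
 *	}
 */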

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Rx queue_id=%u of device with port_id=%u\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Tx queue_id=%u of device with port_id=%u\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before start any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before start any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
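
/*
 * Illustrative example for the helper below:
 * rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G, 1) returns
 * RTE_ETH_LINK_SPEED_10G, while any unknown speed value maps to 0.
 */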
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
	case RTE_ETH_SPEED_NUM_100M:
		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
	case RTE_ETH_SPEED_NUM_1G:
		return RTE_ETH_LINK_SPEED_1G;
	case RTE_ETH_SPEED_NUM_2_5G:
		return RTE_ETH_LINK_SPEED_2_5G;
	case RTE_ETH_SPEED_NUM_5G:
		return RTE_ETH_LINK_SPEED_5G;
	case RTE_ETH_SPEED_NUM_10G:
		return RTE_ETH_LINK_SPEED_10G;
	case RTE_ETH_SPEED_NUM_20G:
		return RTE_ETH_LINK_SPEED_20G;
	case RTE_ETH_SPEED_NUM_25G:
		return RTE_ETH_LINK_SPEED_25G;
	case RTE_ETH_SPEED_NUM_40G:
		return RTE_ETH_LINK_SPEED_40G;
	case RTE_ETH_SPEED_NUM_50G:
		return RTE_ETH_LINK_SPEED_50G;
	case RTE_ETH_SPEED_NUM_56G:
		return RTE_ETH_LINK_SPEED_56G;
	case RTE_ETH_SPEED_NUM_100G:
		return RTE_ETH_LINK_SPEED_100G;
	case RTE_ETH_SPEED_NUM_200G:
		return RTE_ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				" %u != %u is not allowed\n",
				port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"> max allowed value %u\n", port_id, config_size,
			dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"< min allowed value %u\n", port_id, config_size,
			(unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 *
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		     uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure(), to avoid any non-anticipated behaviour.
	 * It is set to 1 only when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		    struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			(*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore MAC now if device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret == 0)
		dev->data->dev_started = 0;
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}
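
/*
 * Illustrative lifecycle sketch for a port using the calls defined in this
 * file (error handling omitted, queue counts and pools are placeholders):
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	for (q = 0; q < nb_rxq; q++)
 *		rte_eth_rx_queue_setup(port_id, q, nb_rxd, socket_id, NULL, mb_pool);
 *	for (q = 0; q < nb_txq; q++)
 *		rte_eth_tx_queue_setup(port_id, q, nb_txd, socket_id, NULL);
 *	rte_eth_dev_start(port_id);
 *	... datapath ...
 *	rte_eth_dev_stop(port_id);
 *	rte_eth_dev_close(port_id);
 */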

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*
	 * Secondary process needs to close device to release process private
	 * resources. But secondary process should not be obliged to wait
	 * for device stop before closing ethdev.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
	    dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	/* Keep the first error code, but still attempt to release the port */
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}
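
/*
 * Illustrative sketch of the buffer-split Rx configuration that the helper
 * below validates (pool names and lengths are placeholders):
 *
 *	union rte_eth_rxseg segs[2];
 *	segs[0].split.mp = hdr_pool;	// first segment: 128 bytes in a header pool
 *	segs[0].split.length = 128;
 *	segs[0].split.offset = 0;
 *	segs[1].split.mp = data_pool;	// remainder, 0 = use the pool buffer size
 *	segs[1].split.length = 0;
 *	segs[1].split.offset = 0;
 *
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	rxconf.rx_seg = segs;
 *	rxconf.rx_nseg = 2;
 *	rte_eth_rx_queue_setup(port_id, queue_id, nb_rxd, socket_id, &rxconf, NULL);
 */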

static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			"Requested Rx segments %u exceed supported %u\n",
			n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			return -EINVAL;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			return -ENOTSUP;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				return -ENOTSUP;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					offset,
					seg_capa->offset_align_log2);
				return -EINVAL;
			}
		}
		if (mpl->private_data_size <
		    sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR,
				"%s private_data_size %u < %u\n",
				mpl->name, mpl->private_data_size,
				(unsigned int)sizeof
					(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		length = length != 0 ? length : *mbp_buf_size;
		if (*mbp_buf_size < length + offset) {
			RTE_ETHDEV_LOG(ERR,
				"%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
				mpl->name, *mbp_buf_size,
				length + offset, length, offset);
			return -EINVAL;
		}
	}
	return 0;
}

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (mp != NULL) {
		/* Single pool configuration check. */
		if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Ambiguous segment configuration\n");
			return -EINVAL;
		}
		/*
		 * Check the size of the mbuf data buffer, this value
		 * must be provided in the private data of the memory pool.
		 * First check that the memory pool(s) have valid private data.
		 */
		if (mp->private_data_size <
				sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
				mp->name, mp->private_data_size,
				(unsigned int)
				sizeof(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
		if (mbp_buf_size < dev_info.min_rx_bufsize +
				   RTE_PKTMBUF_HEADROOM) {
			RTE_ETHDEV_LOG(ERR,
				"%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
				mp->name, mbp_buf_size,
				RTE_PKTMBUF_HEADROOM +
					dev_info.min_rx_bufsize,
				RTE_PKTMBUF_HEADROOM,
				dev_info.min_rx_bufsize);
			return -EINVAL;
		}
	} else {
		const struct rte_eth_rxseg_split *rx_seg;
		uint16_t n_seg;

		/* Extended multi-segment configuration check. */
		if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
			RTE_ETHDEV_LOG(ERR,
				"Memory pool is null and no extended configuration provided\n");
			return -EINVAL;
		}

		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
		n_seg = rx_conf->rx_nseg;

		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
			ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
							   &mbp_buf_size,
							   &dev_info);
			if (ret != 0)
				return ret;
		} else {
			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
			return -EINVAL;
		}
	}

	/* Use default specified by driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
	    nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
	    nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
		(dev->data->rx_queue_state[rx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_rxq_release(dev, rx_queue_id);

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, rx_queue_id, local_conf.offloads,
			dev_info.rx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	if (local_conf.share_group > 0 &&
	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
			port_id, rx_queue_id, local_conf.share_group);
		return -EINVAL;
	}

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	/* Get the real Ethernet overhead length */
	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t overhead_len;
		uint32_t max_rx_pktlen;
		int ret;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->mtu + overhead_len;
		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			return ret;
	}

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, &local_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
		rx_conf, ret);
	return eth_err(port_id, ret);
}
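
/*
 * Hairpin queues bind an Rx queue to a peer Tx queue so that matched traffic
 * can be looped back by the device without passing through the application;
 * the setup below only validates the request against the reported hairpin
 * capabilities and records the binding. Illustrative sketch (peer values are
 * placeholders):
 *
 *	struct rte_eth_hairpin_conf conf = {
 *		.peer_count = 1,
 *		.peers[0] = { .port = peer_port_id, .queue = peer_tx_queue },
 *	};
 *	rte_eth_rx_hairpin_queue_setup(port_id, queue_id, 0, &conf);
 */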
*/
1943 if (nb_rx_desc == 0)
1944 nb_rx_desc = cap.max_nb_desc;
1945 if (nb_rx_desc > cap.max_nb_desc) {
1946 RTE_ETHDEV_LOG(ERR,
1947 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
1948 nb_rx_desc, cap.max_nb_desc);
1949 return -EINVAL;
1950 }
1951 if (conf->peer_count > cap.max_rx_2_tx) {
1952 RTE_ETHDEV_LOG(ERR,
1953 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
1954 conf->peer_count, cap.max_rx_2_tx);
1955 return -EINVAL;
1956 }
1957 if (conf->peer_count == 0) {
1958 RTE_ETHDEV_LOG(ERR,
1959 "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
1960 conf->peer_count);
1961 return -EINVAL;
1962 }
1963 for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1964 cap.max_nb_queues != UINT16_MAX; i++) {
1965 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1966 count++;
1967 }
1968 if (count > cap.max_nb_queues) {
1969 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
1970 cap.max_nb_queues);
1971 return -EINVAL;
1972 }
1973 if (dev->data->dev_started)
1974 return -EBUSY;
1975 eth_dev_rxq_release(dev, rx_queue_id);
1976 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1977 nb_rx_desc, conf);
1978 if (ret == 0)
1979 dev->data->rx_queue_state[rx_queue_id] =
1980 RTE_ETH_QUEUE_STATE_HAIRPIN;
1981 return eth_err(port_id, ret);
1982 }
1983
1984 int
1985 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1986 uint16_t nb_tx_desc, unsigned int socket_id,
1987 const struct rte_eth_txconf *tx_conf)
1988 {
1989 struct rte_eth_dev *dev;
1990 struct rte_eth_dev_info dev_info;
1991 struct rte_eth_txconf local_conf;
1992 int ret;
1993
1994 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1995 dev = &rte_eth_devices[port_id];
1996
1997 if (tx_queue_id >= dev->data->nb_tx_queues) {
1998 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
1999 return -EINVAL;
2000 }
2001
2002 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2003
2004 ret = rte_eth_dev_info_get(port_id, &dev_info);
2005 if (ret != 0)
2006 return ret;
2007
2008 /* Use default specified by driver, if nb_tx_desc is zero */
2009 if (nb_tx_desc == 0) {
2010 nb_tx_desc = dev_info.default_txportconf.ring_size;
2011 /* If driver default is zero, fall back on EAL default */
2012 if (nb_tx_desc == 0)
2013 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2014 }
2015 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2016 nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2017 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2018 RTE_ETHDEV_LOG(ERR,
2019 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2020 nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2021 dev_info.tx_desc_lim.nb_min,
2022 dev_info.tx_desc_lim.nb_align);
2023 return -EINVAL;
2024 }
2025
2026 if (dev->data->dev_started &&
2027 !(dev_info.dev_capa &
2028 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2029 return -EBUSY;
2030
2031 if (dev->data->dev_started &&
2032 (dev->data->tx_queue_state[tx_queue_id] !=
2033 RTE_ETH_QUEUE_STATE_STOPPED))
2034 return -EBUSY;
2035
2036 eth_dev_txq_release(dev, tx_queue_id);
2037
2038 if (tx_conf == NULL)
2039 tx_conf = &dev_info.default_txconf;
2040
2041 local_conf = *tx_conf;
2042
2043 /*
2044 * If an offloading has already been enabled in
2045 * rte_eth_dev_configure(), it has been enabled on all queues,
2046 * so there is no need to enable it in this queue again.
2047 * The local_conf.offloads input to underlying PMD only carries
2048 * those offloadings which are only enabled on this queue and
2049 * not enabled on all queues.
2050 */
2051 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2052
2053 /*
2054 * New added offloadings for this queue are those not enabled in
2055 * rte_eth_dev_configure() and they must be per-queue type.
2056 * A pure per-port offloading can't be enabled on a queue while
2057 * disabled on another queue. A pure per-port offloading can't
2058 * be enabled for any queue as new added one if it hasn't been
2059 * enabled in rte_eth_dev_configure().
2060 */
2061 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2062 local_conf.offloads) {
2063 RTE_ETHDEV_LOG(ERR,
2064 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2065 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2066 port_id, tx_queue_id, local_conf.offloads,
2067 dev_info.tx_queue_offload_capa,
2068 __func__);
2069 return -EINVAL;
2070 }
2071
2072 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2073 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2074 tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2075 }
2076
2077 int
2078 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2079 uint16_t nb_tx_desc,
2080 const struct rte_eth_hairpin_conf *conf)
2081 {
2082 struct rte_eth_dev *dev;
2083 struct rte_eth_hairpin_cap cap;
2084 int i;
2085 int count;
2086 int ret;
2087
2088 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2089 dev = &rte_eth_devices[port_id];
2090
2091 if (tx_queue_id >= dev->data->nb_tx_queues) {
2092 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2093 return -EINVAL;
2094 }
2095
2096 if (conf == NULL) {
2097 RTE_ETHDEV_LOG(ERR,
2098 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2099 port_id);
2100 return -EINVAL;
2101 }
2102
2103 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2104 if (ret != 0)
2105 return ret;
2106 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2107 -ENOTSUP);
2108 /* if nb_tx_desc is zero use max number of desc from the driver.
*/
2109 if (nb_tx_desc == 0)
2110 nb_tx_desc = cap.max_nb_desc;
2111 if (nb_tx_desc > cap.max_nb_desc) {
2112 RTE_ETHDEV_LOG(ERR,
2113 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2114 nb_tx_desc, cap.max_nb_desc);
2115 return -EINVAL;
2116 }
2117 if (conf->peer_count > cap.max_tx_2_rx) {
2118 RTE_ETHDEV_LOG(ERR,
2119 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2120 conf->peer_count, cap.max_tx_2_rx);
2121 return -EINVAL;
2122 }
2123 if (conf->peer_count == 0) {
2124 RTE_ETHDEV_LOG(ERR,
2125 "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2126 conf->peer_count);
2127 return -EINVAL;
2128 }
2129 for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2130 cap.max_nb_queues != UINT16_MAX; i++) {
2131 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2132 count++;
2133 }
2134 if (count > cap.max_nb_queues) {
2135 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2136 cap.max_nb_queues);
2137 return -EINVAL;
2138 }
2139 if (dev->data->dev_started)
2140 return -EBUSY;
2141 eth_dev_txq_release(dev, tx_queue_id);
2142 ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2143 (dev, tx_queue_id, nb_tx_desc, conf);
2144 if (ret == 0)
2145 dev->data->tx_queue_state[tx_queue_id] =
2146 RTE_ETH_QUEUE_STATE_HAIRPIN;
2147 return eth_err(port_id, ret);
2148 }
2149
2150 int
2151 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2152 {
2153 struct rte_eth_dev *dev;
2154 int ret;
2155
2156 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2157 dev = &rte_eth_devices[tx_port];
2158
2159 if (dev->data->dev_started == 0) {
2160 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2161 return -EBUSY;
2162 }
2163
2164 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2165 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2166 if (ret != 0)
2167 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2168 " to Rx %d (%d - all ports)\n",
2169 tx_port, rx_port, RTE_MAX_ETHPORTS);
2170
2171 return ret;
2172 }
2173
2174 int
2175 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2176 {
2177 struct rte_eth_dev *dev;
2178 int ret;
2179
2180 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2181 dev = &rte_eth_devices[tx_port];
2182
2183 if (dev->data->dev_started == 0) {
2184 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2185 return -EBUSY;
2186 }
2187
2188 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2189 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2190 if (ret != 0)
2191 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2192 " from Rx %d (%d - all ports)\n",
2193 tx_port, rx_port, RTE_MAX_ETHPORTS);
2194
2195 return ret;
2196 }
2197
2198 int
2199 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2200 size_t len, uint32_t direction)
2201 {
2202 struct rte_eth_dev *dev;
2203 int ret;
2204
2205 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2206 dev = &rte_eth_devices[port_id];
2207
2208 if (peer_ports == NULL) {
2209 RTE_ETHDEV_LOG(ERR,
2210 "Cannot get ethdev port %u hairpin peer ports to NULL\n",
2211 port_id);
2212 return -EINVAL;
2213 }
2214
2215 if (len == 0) {
2216 RTE_ETHDEV_LOG(ERR,
2217 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2218 port_id);
2219 return -EINVAL;
2220 }
2221
2222 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2223 -ENOTSUP);
2224
2225 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2226 len, direction);
2227 if (ret < 0)
2228
RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2229 port_id, direction ? "Rx" : "Tx"); 2230 2231 return ret; 2232 } 2233 2234 void 2235 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2236 void *userdata __rte_unused) 2237 { 2238 rte_pktmbuf_free_bulk(pkts, unsent); 2239 } 2240 2241 void 2242 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2243 void *userdata) 2244 { 2245 uint64_t *count = userdata; 2246 2247 rte_pktmbuf_free_bulk(pkts, unsent); 2248 *count += unsent; 2249 } 2250 2251 int 2252 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2253 buffer_tx_error_fn cbfn, void *userdata) 2254 { 2255 if (buffer == NULL) { 2256 RTE_ETHDEV_LOG(ERR, 2257 "Cannot set Tx buffer error callback to NULL buffer\n"); 2258 return -EINVAL; 2259 } 2260 2261 buffer->error_callback = cbfn; 2262 buffer->error_userdata = userdata; 2263 return 0; 2264 } 2265 2266 int 2267 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2268 { 2269 int ret = 0; 2270 2271 if (buffer == NULL) { 2272 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2273 return -EINVAL; 2274 } 2275 2276 buffer->size = size; 2277 if (buffer->error_callback == NULL) { 2278 ret = rte_eth_tx_buffer_set_err_callback( 2279 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2280 } 2281 2282 return ret; 2283 } 2284 2285 int 2286 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2287 { 2288 struct rte_eth_dev *dev; 2289 int ret; 2290 2291 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2292 dev = &rte_eth_devices[port_id]; 2293 2294 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2295 2296 /* Call driver to free pending mbufs. */ 2297 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2298 free_cnt); 2299 return eth_err(port_id, ret); 2300 } 2301 2302 int 2303 rte_eth_promiscuous_enable(uint16_t port_id) 2304 { 2305 struct rte_eth_dev *dev; 2306 int diag = 0; 2307 2308 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2309 dev = &rte_eth_devices[port_id]; 2310 2311 if (dev->data->promiscuous == 1) 2312 return 0; 2313 2314 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2315 2316 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2317 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2318 2319 return eth_err(port_id, diag); 2320 } 2321 2322 int 2323 rte_eth_promiscuous_disable(uint16_t port_id) 2324 { 2325 struct rte_eth_dev *dev; 2326 int diag = 0; 2327 2328 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2329 dev = &rte_eth_devices[port_id]; 2330 2331 if (dev->data->promiscuous == 0) 2332 return 0; 2333 2334 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2335 2336 dev->data->promiscuous = 0; 2337 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2338 if (diag != 0) 2339 dev->data->promiscuous = 1; 2340 2341 return eth_err(port_id, diag); 2342 } 2343 2344 int 2345 rte_eth_promiscuous_get(uint16_t port_id) 2346 { 2347 struct rte_eth_dev *dev; 2348 2349 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2350 dev = &rte_eth_devices[port_id]; 2351 2352 return dev->data->promiscuous; 2353 } 2354 2355 int 2356 rte_eth_allmulticast_enable(uint16_t port_id) 2357 { 2358 struct rte_eth_dev *dev; 2359 int diag; 2360 2361 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2362 dev = &rte_eth_devices[port_id]; 2363 2364 if (dev->data->all_multicast == 1) 2365 return 0; 2366 2367 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2368 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2369 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2370 2371 return eth_err(port_id, diag); 2372 } 2373 2374 int 2375 rte_eth_allmulticast_disable(uint16_t port_id) 2376 { 2377 struct rte_eth_dev *dev; 2378 int diag; 2379 2380 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2381 dev = &rte_eth_devices[port_id]; 2382 2383 if (dev->data->all_multicast == 0) 2384 return 0; 2385 2386 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2387 dev->data->all_multicast = 0; 2388 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2389 if (diag != 0) 2390 dev->data->all_multicast = 1; 2391 2392 return eth_err(port_id, diag); 2393 } 2394 2395 int 2396 rte_eth_allmulticast_get(uint16_t port_id) 2397 { 2398 struct rte_eth_dev *dev; 2399 2400 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2401 dev = &rte_eth_devices[port_id]; 2402 2403 return dev->data->all_multicast; 2404 } 2405 2406 int 2407 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2408 { 2409 struct rte_eth_dev *dev; 2410 2411 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2412 dev = &rte_eth_devices[port_id]; 2413 2414 if (eth_link == NULL) { 2415 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2416 port_id); 2417 return -EINVAL; 2418 } 2419 2420 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2421 rte_eth_linkstatus_get(dev, eth_link); 2422 else { 2423 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2424 (*dev->dev_ops->link_update)(dev, 1); 2425 *eth_link = dev->data->dev_link; 2426 } 2427 2428 return 0; 2429 } 2430 2431 int 2432 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2433 { 2434 struct rte_eth_dev *dev; 2435 2436 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2437 dev = &rte_eth_devices[port_id]; 2438 2439 if (eth_link == NULL) { 2440 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2441 port_id); 2442 return -EINVAL; 2443 } 2444 2445 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2446 rte_eth_linkstatus_get(dev, eth_link); 2447 else { 2448 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2449 (*dev->dev_ops->link_update)(dev, 0); 2450 *eth_link = dev->data->dev_link; 2451 } 2452 2453 return 0; 2454 } 2455 2456 const 
char * 2457 rte_eth_link_speed_to_str(uint32_t link_speed) 2458 { 2459 switch (link_speed) { 2460 case RTE_ETH_SPEED_NUM_NONE: return "None"; 2461 case RTE_ETH_SPEED_NUM_10M: return "10 Mbps"; 2462 case RTE_ETH_SPEED_NUM_100M: return "100 Mbps"; 2463 case RTE_ETH_SPEED_NUM_1G: return "1 Gbps"; 2464 case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2465 case RTE_ETH_SPEED_NUM_5G: return "5 Gbps"; 2466 case RTE_ETH_SPEED_NUM_10G: return "10 Gbps"; 2467 case RTE_ETH_SPEED_NUM_20G: return "20 Gbps"; 2468 case RTE_ETH_SPEED_NUM_25G: return "25 Gbps"; 2469 case RTE_ETH_SPEED_NUM_40G: return "40 Gbps"; 2470 case RTE_ETH_SPEED_NUM_50G: return "50 Gbps"; 2471 case RTE_ETH_SPEED_NUM_56G: return "56 Gbps"; 2472 case RTE_ETH_SPEED_NUM_100G: return "100 Gbps"; 2473 case RTE_ETH_SPEED_NUM_200G: return "200 Gbps"; 2474 case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2475 default: return "Invalid"; 2476 } 2477 } 2478 2479 int 2480 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2481 { 2482 if (str == NULL) { 2483 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2484 return -EINVAL; 2485 } 2486 2487 if (len == 0) { 2488 RTE_ETHDEV_LOG(ERR, 2489 "Cannot convert link to string with zero size\n"); 2490 return -EINVAL; 2491 } 2492 2493 if (eth_link == NULL) { 2494 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2495 return -EINVAL; 2496 } 2497 2498 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 2499 return snprintf(str, len, "Link down"); 2500 else 2501 return snprintf(str, len, "Link up at %s %s %s", 2502 rte_eth_link_speed_to_str(eth_link->link_speed), 2503 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 2504 "FDX" : "HDX", 2505 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 2506 "Autoneg" : "Fixed"); 2507 } 2508 2509 int 2510 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2511 { 2512 struct rte_eth_dev *dev; 2513 2514 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2515 dev = &rte_eth_devices[port_id]; 2516 2517 if (stats == NULL) { 2518 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2519 port_id); 2520 return -EINVAL; 2521 } 2522 2523 memset(stats, 0, sizeof(*stats)); 2524 2525 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2526 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2527 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2528 } 2529 2530 int 2531 rte_eth_stats_reset(uint16_t port_id) 2532 { 2533 struct rte_eth_dev *dev; 2534 int ret; 2535 2536 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2537 dev = &rte_eth_devices[port_id]; 2538 2539 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2540 ret = (*dev->dev_ops->stats_reset)(dev); 2541 if (ret != 0) 2542 return eth_err(port_id, ret); 2543 2544 dev->data->rx_mbuf_alloc_failed = 0; 2545 2546 return 0; 2547 } 2548 2549 static inline int 2550 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2551 { 2552 uint16_t nb_rxqs, nb_txqs; 2553 int count; 2554 2555 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2556 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2557 2558 count = RTE_NB_STATS; 2559 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2560 count += nb_rxqs * RTE_NB_RXQ_STATS; 2561 count += nb_txqs * RTE_NB_TXQ_STATS; 2562 } 2563 2564 return count; 2565 } 2566 2567 static int 2568 eth_dev_get_xstats_count(uint16_t port_id) 2569 { 2570 struct rte_eth_dev *dev; 2571 int count; 2572 2573 
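/*
 * The total is the number of driver-specific names reported by
 * xstats_get_names(dev, NULL, 0) plus the generic stats counted by
 * eth_dev_get_xstats_basic_count() (which includes per-queue entries
 * only when RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS is set).
 */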
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2574 dev = &rte_eth_devices[port_id]; 2575 if (dev->dev_ops->xstats_get_names != NULL) { 2576 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2577 if (count < 0) 2578 return eth_err(port_id, count); 2579 } else 2580 count = 0; 2581 2582 2583 count += eth_dev_get_xstats_basic_count(dev); 2584 2585 return count; 2586 } 2587 2588 int 2589 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2590 uint64_t *id) 2591 { 2592 int cnt_xstats, idx_xstat; 2593 2594 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2595 2596 if (xstat_name == NULL) { 2597 RTE_ETHDEV_LOG(ERR, 2598 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2599 port_id); 2600 return -ENOMEM; 2601 } 2602 2603 if (id == NULL) { 2604 RTE_ETHDEV_LOG(ERR, 2605 "Cannot get ethdev port %u xstats ID to NULL\n", 2606 port_id); 2607 return -ENOMEM; 2608 } 2609 2610 /* Get count */ 2611 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2612 if (cnt_xstats < 0) { 2613 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2614 return -ENODEV; 2615 } 2616 2617 /* Get id-name lookup table */ 2618 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2619 2620 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2621 port_id, xstats_names, cnt_xstats, NULL)) { 2622 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2623 return -1; 2624 } 2625 2626 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2627 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2628 *id = idx_xstat; 2629 return 0; 2630 }; 2631 } 2632 2633 return -EINVAL; 2634 } 2635 2636 /* retrieve basic stats names */ 2637 static int 2638 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2639 struct rte_eth_xstat_name *xstats_names) 2640 { 2641 int cnt_used_entries = 0; 2642 uint32_t idx, id_queue; 2643 uint16_t num_q; 2644 2645 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2646 strlcpy(xstats_names[cnt_used_entries].name, 2647 eth_dev_stats_strings[idx].name, 2648 sizeof(xstats_names[0].name)); 2649 cnt_used_entries++; 2650 } 2651 2652 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2653 return cnt_used_entries; 2654 2655 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2656 for (id_queue = 0; id_queue < num_q; id_queue++) { 2657 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2658 snprintf(xstats_names[cnt_used_entries].name, 2659 sizeof(xstats_names[0].name), 2660 "rx_q%u_%s", 2661 id_queue, eth_dev_rxq_stats_strings[idx].name); 2662 cnt_used_entries++; 2663 } 2664 2665 } 2666 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2667 for (id_queue = 0; id_queue < num_q; id_queue++) { 2668 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2669 snprintf(xstats_names[cnt_used_entries].name, 2670 sizeof(xstats_names[0].name), 2671 "tx_q%u_%s", 2672 id_queue, eth_dev_txq_stats_strings[idx].name); 2673 cnt_used_entries++; 2674 } 2675 } 2676 return cnt_used_entries; 2677 } 2678 2679 /* retrieve ethdev extended statistics names */ 2680 int 2681 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2682 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2683 uint64_t *ids) 2684 { 2685 struct rte_eth_xstat_name *xstats_names_copy; 2686 unsigned int no_basic_stat_requested = 1; 2687 unsigned int no_ext_stat_requested = 1; 2688 unsigned int expected_entries; 2689 unsigned int basic_count; 2690 struct rte_eth_dev *dev; 2691 unsigned int i; 2692 int ret; 2693 2694 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2695 
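/*
 * When ids are given they index the combined list: entries below
 * basic_count refer to generic stats, while larger ids are translated
 * to driver xstat ids further down before calling the PMD.
 */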
dev = &rte_eth_devices[port_id]; 2696 2697 basic_count = eth_dev_get_xstats_basic_count(dev); 2698 ret = eth_dev_get_xstats_count(port_id); 2699 if (ret < 0) 2700 return ret; 2701 expected_entries = (unsigned int)ret; 2702 2703 /* Return max number of stats if no ids given */ 2704 if (!ids) { 2705 if (!xstats_names) 2706 return expected_entries; 2707 else if (xstats_names && size < expected_entries) 2708 return expected_entries; 2709 } 2710 2711 if (ids && !xstats_names) 2712 return -EINVAL; 2713 2714 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 2715 uint64_t ids_copy[size]; 2716 2717 for (i = 0; i < size; i++) { 2718 if (ids[i] < basic_count) { 2719 no_basic_stat_requested = 0; 2720 break; 2721 } 2722 2723 /* 2724 * Convert ids to xstats ids that PMD knows. 2725 * ids known by user are basic + extended stats. 2726 */ 2727 ids_copy[i] = ids[i] - basic_count; 2728 } 2729 2730 if (no_basic_stat_requested) 2731 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 2732 ids_copy, xstats_names, size); 2733 } 2734 2735 /* Retrieve all stats */ 2736 if (!ids) { 2737 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 2738 expected_entries); 2739 if (num_stats < 0 || num_stats > (int)expected_entries) 2740 return num_stats; 2741 else 2742 return expected_entries; 2743 } 2744 2745 xstats_names_copy = calloc(expected_entries, 2746 sizeof(struct rte_eth_xstat_name)); 2747 2748 if (!xstats_names_copy) { 2749 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 2750 return -ENOMEM; 2751 } 2752 2753 if (ids) { 2754 for (i = 0; i < size; i++) { 2755 if (ids[i] >= basic_count) { 2756 no_ext_stat_requested = 0; 2757 break; 2758 } 2759 } 2760 } 2761 2762 /* Fill xstats_names_copy structure */ 2763 if (ids && no_ext_stat_requested) { 2764 eth_basic_stats_get_names(dev, xstats_names_copy); 2765 } else { 2766 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 2767 expected_entries); 2768 if (ret < 0) { 2769 free(xstats_names_copy); 2770 return ret; 2771 } 2772 } 2773 2774 /* Filter stats */ 2775 for (i = 0; i < size; i++) { 2776 if (ids[i] >= expected_entries) { 2777 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 2778 free(xstats_names_copy); 2779 return -1; 2780 } 2781 xstats_names[i] = xstats_names_copy[ids[i]]; 2782 } 2783 2784 free(xstats_names_copy); 2785 return size; 2786 } 2787 2788 int 2789 rte_eth_xstats_get_names(uint16_t port_id, 2790 struct rte_eth_xstat_name *xstats_names, 2791 unsigned int size) 2792 { 2793 struct rte_eth_dev *dev; 2794 int cnt_used_entries; 2795 int cnt_expected_entries; 2796 int cnt_driver_entries; 2797 2798 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 2799 if (xstats_names == NULL || cnt_expected_entries < 0 || 2800 (int)size < cnt_expected_entries) 2801 return cnt_expected_entries; 2802 2803 /* port_id checked in eth_dev_get_xstats_count() */ 2804 dev = &rte_eth_devices[port_id]; 2805 2806 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 2807 2808 if (dev->dev_ops->xstats_get_names != NULL) { 2809 /* If there are any driver-specific xstats, append them 2810 * to end of list. 
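* Their names occupy indexes [cnt_used_entries,
* cnt_used_entries + cnt_driver_entries) of xstats_names.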
2811 */
2812 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2813 dev,
2814 xstats_names + cnt_used_entries,
2815 size - cnt_used_entries);
2816 if (cnt_driver_entries < 0)
2817 return eth_err(port_id, cnt_driver_entries);
2818 cnt_used_entries += cnt_driver_entries;
2819 }
2820
2821 return cnt_used_entries;
2822 }
2823
2824
2825 static int
2826 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2827 {
2828 struct rte_eth_dev *dev;
2829 struct rte_eth_stats eth_stats;
2830 unsigned int count = 0, i, q;
2831 uint64_t val, *stats_ptr;
2832 uint16_t nb_rxqs, nb_txqs;
2833 int ret;
2834
2835 ret = rte_eth_stats_get(port_id, &eth_stats);
2836 if (ret < 0)
2837 return ret;
2838
2839 dev = &rte_eth_devices[port_id];
2840
2841 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2842 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2843
2844 /* global stats */
2845 for (i = 0; i < RTE_NB_STATS; i++) {
2846 stats_ptr = RTE_PTR_ADD(&eth_stats,
2847 eth_dev_stats_strings[i].offset);
2848 val = *stats_ptr;
2849 xstats[count++].value = val;
2850 }
2851
2852 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2853 return count;
2854
2855 /* per-rxq stats */
2856 for (q = 0; q < nb_rxqs; q++) {
2857 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2858 stats_ptr = RTE_PTR_ADD(&eth_stats,
2859 eth_dev_rxq_stats_strings[i].offset +
2860 q * sizeof(uint64_t));
2861 val = *stats_ptr;
2862 xstats[count++].value = val;
2863 }
2864 }
2865
2866 /* per-txq stats */
2867 for (q = 0; q < nb_txqs; q++) {
2868 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2869 stats_ptr = RTE_PTR_ADD(&eth_stats,
2870 eth_dev_txq_stats_strings[i].offset +
2871 q * sizeof(uint64_t));
2872 val = *stats_ptr;
2873 xstats[count++].value = val;
2874 }
2875 }
2876 return count;
2877 }
2878
2879 /* retrieve ethdev extended statistics */
2880 int
2881 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2882 uint64_t *values, unsigned int size)
2883 {
2884 unsigned int no_basic_stat_requested = 1;
2885 unsigned int no_ext_stat_requested = 1;
2886 unsigned int num_xstats_filled;
2887 unsigned int basic_count;
2888 uint16_t expected_entries;
2889 struct rte_eth_dev *dev;
2890 unsigned int i;
2891 int ret;
2892
2893 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2894 dev = &rte_eth_devices[port_id];
2895
2896 ret = eth_dev_get_xstats_count(port_id);
2897 if (ret < 0)
2898 return ret;
2899 expected_entries = (uint16_t)ret;
2900 struct rte_eth_xstat xstats[expected_entries];
2901 basic_count = eth_dev_get_xstats_basic_count(dev);
2902
2903 /* Return max number of stats if no ids given */
2904 if (!ids) {
2905 if (!values)
2906 return expected_entries;
2907 else if (values && size < expected_entries)
2908 return expected_entries;
2909 }
2910
2911 if (ids && !values)
2912 return -EINVAL;
2913
2914 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2915 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
2916 uint64_t ids_copy[size];
2917
2918 for (i = 0; i < size; i++) {
2919 if (ids[i] < basic_count) {
2920 no_basic_stat_requested = 0;
2921 break;
2922 }
2923
2924 /*
2925 * Convert ids to xstats ids that PMD knows.
2926 * ids known by user are basic + extended stats.
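* As a worked example with a hypothetical basic_count of 10,
* user-visible id 12 becomes driver xstat id 2 below.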
2927 */ 2928 ids_copy[i] = ids[i] - basic_count; 2929 } 2930 2931 if (no_basic_stat_requested) 2932 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 2933 values, size); 2934 } 2935 2936 if (ids) { 2937 for (i = 0; i < size; i++) { 2938 if (ids[i] >= basic_count) { 2939 no_ext_stat_requested = 0; 2940 break; 2941 } 2942 } 2943 } 2944 2945 /* Fill the xstats structure */ 2946 if (ids && no_ext_stat_requested) 2947 ret = eth_basic_stats_get(port_id, xstats); 2948 else 2949 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 2950 2951 if (ret < 0) 2952 return ret; 2953 num_xstats_filled = (unsigned int)ret; 2954 2955 /* Return all stats */ 2956 if (!ids) { 2957 for (i = 0; i < num_xstats_filled; i++) 2958 values[i] = xstats[i].value; 2959 return expected_entries; 2960 } 2961 2962 /* Filter stats */ 2963 for (i = 0; i < size; i++) { 2964 if (ids[i] >= expected_entries) { 2965 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 2966 return -1; 2967 } 2968 values[i] = xstats[ids[i]].value; 2969 } 2970 return size; 2971 } 2972 2973 int 2974 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 2975 unsigned int n) 2976 { 2977 struct rte_eth_dev *dev; 2978 unsigned int count, i; 2979 signed int xcount = 0; 2980 int ret; 2981 2982 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2983 if (xstats == NULL && n > 0) 2984 return -EINVAL; 2985 dev = &rte_eth_devices[port_id]; 2986 2987 count = eth_dev_get_xstats_basic_count(dev); 2988 2989 /* implemented by the driver */ 2990 if (dev->dev_ops->xstats_get != NULL) { 2991 /* Retrieve the xstats from the driver at the end of the 2992 * xstats struct. 2993 */ 2994 xcount = (*dev->dev_ops->xstats_get)(dev, 2995 (n > count) ? xstats + count : NULL, 2996 (n > count) ? n - count : 0); 2997 2998 if (xcount < 0) 2999 return eth_err(port_id, xcount); 3000 } 3001 3002 if (n < count + xcount || xstats == NULL) 3003 return count + xcount; 3004 3005 /* now fill the xstats structure */ 3006 ret = eth_basic_stats_get(port_id, xstats); 3007 if (ret < 0) 3008 return ret; 3009 count = ret; 3010 3011 for (i = 0; i < count; i++) 3012 xstats[i].id = i; 3013 /* add an offset to driver-specific stats */ 3014 for ( ; i < count + xcount; i++) 3015 xstats[i].id += count; 3016 3017 return count + xcount; 3018 } 3019 3020 /* reset ethdev extended statistics */ 3021 int 3022 rte_eth_xstats_reset(uint16_t port_id) 3023 { 3024 struct rte_eth_dev *dev; 3025 3026 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3027 dev = &rte_eth_devices[port_id]; 3028 3029 /* implemented by the driver */ 3030 if (dev->dev_ops->xstats_reset != NULL) 3031 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3032 3033 /* fallback to default */ 3034 return rte_eth_stats_reset(port_id); 3035 } 3036 3037 static int 3038 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3039 uint8_t stat_idx, uint8_t is_rx) 3040 { 3041 struct rte_eth_dev *dev; 3042 3043 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3044 dev = &rte_eth_devices[port_id]; 3045 3046 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3047 return -EINVAL; 3048 3049 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3050 return -EINVAL; 3051 3052 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3053 return -EINVAL; 3054 3055 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3056 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3057 } 3058 3059 int 3060 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3061 
uint8_t stat_idx) 3062 { 3063 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3064 tx_queue_id, 3065 stat_idx, STAT_QMAP_TX)); 3066 } 3067 3068 int 3069 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3070 uint8_t stat_idx) 3071 { 3072 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3073 rx_queue_id, 3074 stat_idx, STAT_QMAP_RX)); 3075 } 3076 3077 int 3078 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3079 { 3080 struct rte_eth_dev *dev; 3081 3082 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3083 dev = &rte_eth_devices[port_id]; 3084 3085 if (fw_version == NULL && fw_size > 0) { 3086 RTE_ETHDEV_LOG(ERR, 3087 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3088 port_id); 3089 return -EINVAL; 3090 } 3091 3092 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3093 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3094 fw_version, fw_size)); 3095 } 3096 3097 int 3098 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3099 { 3100 struct rte_eth_dev *dev; 3101 const struct rte_eth_desc_lim lim = { 3102 .nb_max = UINT16_MAX, 3103 .nb_min = 0, 3104 .nb_align = 1, 3105 .nb_seg_max = UINT16_MAX, 3106 .nb_mtu_seg_max = UINT16_MAX, 3107 }; 3108 int diag; 3109 3110 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3111 dev = &rte_eth_devices[port_id]; 3112 3113 if (dev_info == NULL) { 3114 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3115 port_id); 3116 return -EINVAL; 3117 } 3118 3119 /* 3120 * Init dev_info before port_id check since caller does not have 3121 * return status and does not know if get is successful or not. 3122 */ 3123 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3124 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3125 3126 dev_info->rx_desc_lim = lim; 3127 dev_info->tx_desc_lim = lim; 3128 dev_info->device = dev->device; 3129 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3130 RTE_ETHER_CRC_LEN; 3131 dev_info->max_mtu = UINT16_MAX; 3132 3133 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3134 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3135 if (diag != 0) { 3136 /* Cleanup already filled in device information */ 3137 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3138 return eth_err(port_id, diag); 3139 } 3140 3141 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3142 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3143 RTE_MAX_QUEUES_PER_PORT); 3144 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3145 RTE_MAX_QUEUES_PER_PORT); 3146 3147 dev_info->driver_name = dev->device->driver->name; 3148 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3149 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3150 3151 dev_info->dev_flags = &dev->data->dev_flags; 3152 3153 return 0; 3154 } 3155 3156 int 3157 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3158 { 3159 struct rte_eth_dev *dev; 3160 3161 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3162 dev = &rte_eth_devices[port_id]; 3163 3164 if (dev_conf == NULL) { 3165 RTE_ETHDEV_LOG(ERR, 3166 "Cannot get ethdev port %u configuration to NULL\n", 3167 port_id); 3168 return -EINVAL; 3169 } 3170 3171 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3172 3173 return 0; 3174 } 3175 3176 int 3177 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3178 uint32_t 
*ptypes, int num) 3179 { 3180 int i, j; 3181 struct rte_eth_dev *dev; 3182 const uint32_t *all_ptypes; 3183 3184 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3185 dev = &rte_eth_devices[port_id]; 3186 3187 if (ptypes == NULL && num > 0) { 3188 RTE_ETHDEV_LOG(ERR, 3189 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3190 port_id); 3191 return -EINVAL; 3192 } 3193 3194 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3195 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3196 3197 if (!all_ptypes) 3198 return 0; 3199 3200 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3201 if (all_ptypes[i] & ptype_mask) { 3202 if (j < num) 3203 ptypes[j] = all_ptypes[i]; 3204 j++; 3205 } 3206 3207 return j; 3208 } 3209 3210 int 3211 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3212 uint32_t *set_ptypes, unsigned int num) 3213 { 3214 const uint32_t valid_ptype_masks[] = { 3215 RTE_PTYPE_L2_MASK, 3216 RTE_PTYPE_L3_MASK, 3217 RTE_PTYPE_L4_MASK, 3218 RTE_PTYPE_TUNNEL_MASK, 3219 RTE_PTYPE_INNER_L2_MASK, 3220 RTE_PTYPE_INNER_L3_MASK, 3221 RTE_PTYPE_INNER_L4_MASK, 3222 }; 3223 const uint32_t *all_ptypes; 3224 struct rte_eth_dev *dev; 3225 uint32_t unused_mask; 3226 unsigned int i, j; 3227 int ret; 3228 3229 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3230 dev = &rte_eth_devices[port_id]; 3231 3232 if (num > 0 && set_ptypes == NULL) { 3233 RTE_ETHDEV_LOG(ERR, 3234 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3235 port_id); 3236 return -EINVAL; 3237 } 3238 3239 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3240 *dev->dev_ops->dev_ptypes_set == NULL) { 3241 ret = 0; 3242 goto ptype_unknown; 3243 } 3244 3245 if (ptype_mask == 0) { 3246 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3247 ptype_mask); 3248 goto ptype_unknown; 3249 } 3250 3251 unused_mask = ptype_mask; 3252 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3253 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3254 if (mask && mask != valid_ptype_masks[i]) { 3255 ret = -EINVAL; 3256 goto ptype_unknown; 3257 } 3258 unused_mask &= ~valid_ptype_masks[i]; 3259 } 3260 3261 if (unused_mask) { 3262 ret = -EINVAL; 3263 goto ptype_unknown; 3264 } 3265 3266 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3267 if (all_ptypes == NULL) { 3268 ret = 0; 3269 goto ptype_unknown; 3270 } 3271 3272 /* 3273 * Accommodate as many set_ptypes as possible. If the supplied 3274 * set_ptypes array is insufficient fill it partially. 
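* When space remains after the loop the list is terminated with
* RTE_PTYPE_UNKNOWN (see the check below).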
3275 */ 3276 for (i = 0, j = 0; set_ptypes != NULL && 3277 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3278 if (ptype_mask & all_ptypes[i]) { 3279 if (j < num - 1) { 3280 set_ptypes[j] = all_ptypes[i]; 3281 j++; 3282 continue; 3283 } 3284 break; 3285 } 3286 } 3287 3288 if (set_ptypes != NULL && j < num) 3289 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3290 3291 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3292 3293 ptype_unknown: 3294 if (num > 0) 3295 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3296 3297 return ret; 3298 } 3299 3300 int 3301 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3302 unsigned int num) 3303 { 3304 int32_t ret; 3305 struct rte_eth_dev *dev; 3306 struct rte_eth_dev_info dev_info; 3307 3308 if (ma == NULL) { 3309 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3310 return -EINVAL; 3311 } 3312 3313 /* will check for us that port_id is a valid one */ 3314 ret = rte_eth_dev_info_get(port_id, &dev_info); 3315 if (ret != 0) 3316 return ret; 3317 3318 dev = &rte_eth_devices[port_id]; 3319 num = RTE_MIN(dev_info.max_mac_addrs, num); 3320 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3321 3322 return num; 3323 } 3324 3325 int 3326 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3327 { 3328 struct rte_eth_dev *dev; 3329 3330 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3331 dev = &rte_eth_devices[port_id]; 3332 3333 if (mac_addr == NULL) { 3334 RTE_ETHDEV_LOG(ERR, 3335 "Cannot get ethdev port %u MAC address to NULL\n", 3336 port_id); 3337 return -EINVAL; 3338 } 3339 3340 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3341 3342 return 0; 3343 } 3344 3345 int 3346 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3347 { 3348 struct rte_eth_dev *dev; 3349 3350 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3351 dev = &rte_eth_devices[port_id]; 3352 3353 if (mtu == NULL) { 3354 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3355 port_id); 3356 return -EINVAL; 3357 } 3358 3359 *mtu = dev->data->mtu; 3360 return 0; 3361 } 3362 3363 int 3364 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3365 { 3366 int ret; 3367 struct rte_eth_dev_info dev_info; 3368 struct rte_eth_dev *dev; 3369 3370 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3371 dev = &rte_eth_devices[port_id]; 3372 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3373 3374 /* 3375 * Check if the device supports dev_infos_get, if it does not 3376 * skip min_mtu/max_mtu validation here as this requires values 3377 * that are populated within the call to rte_eth_dev_info_get() 3378 * which relies on dev->dev_ops->dev_infos_get. 
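* As an illustrative example, a request of mtu = 9000 is rejected by
* eth_dev_validate_mtu() below whenever dev_info.max_mtu < 9000.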
3379 */ 3380 if (*dev->dev_ops->dev_infos_get != NULL) { 3381 ret = rte_eth_dev_info_get(port_id, &dev_info); 3382 if (ret != 0) 3383 return ret; 3384 3385 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3386 if (ret != 0) 3387 return ret; 3388 } 3389 3390 if (dev->data->dev_configured == 0) { 3391 RTE_ETHDEV_LOG(ERR, 3392 "Port %u must be configured before MTU set\n", 3393 port_id); 3394 return -EINVAL; 3395 } 3396 3397 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3398 if (ret == 0) 3399 dev->data->mtu = mtu; 3400 3401 return eth_err(port_id, ret); 3402 } 3403 3404 int 3405 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3406 { 3407 struct rte_eth_dev *dev; 3408 int ret; 3409 3410 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3411 dev = &rte_eth_devices[port_id]; 3412 3413 if (!(dev->data->dev_conf.rxmode.offloads & 3414 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 3415 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 3416 port_id); 3417 return -ENOSYS; 3418 } 3419 3420 if (vlan_id > 4095) { 3421 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3422 port_id, vlan_id); 3423 return -EINVAL; 3424 } 3425 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3426 3427 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3428 if (ret == 0) { 3429 struct rte_vlan_filter_conf *vfc; 3430 int vidx; 3431 int vbit; 3432 3433 vfc = &dev->data->vlan_filter_conf; 3434 vidx = vlan_id / 64; 3435 vbit = vlan_id % 64; 3436 3437 if (on) 3438 vfc->ids[vidx] |= RTE_BIT64(vbit); 3439 else 3440 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 3441 } 3442 3443 return eth_err(port_id, ret); 3444 } 3445 3446 int 3447 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3448 int on) 3449 { 3450 struct rte_eth_dev *dev; 3451 3452 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3453 dev = &rte_eth_devices[port_id]; 3454 3455 if (rx_queue_id >= dev->data->nb_rx_queues) { 3456 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3457 return -EINVAL; 3458 } 3459 3460 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3461 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3462 3463 return 0; 3464 } 3465 3466 int 3467 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3468 enum rte_vlan_type vlan_type, 3469 uint16_t tpid) 3470 { 3471 struct rte_eth_dev *dev; 3472 3473 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3474 dev = &rte_eth_devices[port_id]; 3475 3476 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3477 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3478 tpid)); 3479 } 3480 3481 int 3482 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3483 { 3484 struct rte_eth_dev_info dev_info; 3485 struct rte_eth_dev *dev; 3486 int ret = 0; 3487 int mask = 0; 3488 int cur, org = 0; 3489 uint64_t orig_offloads; 3490 uint64_t dev_offloads; 3491 uint64_t new_offloads; 3492 3493 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3494 dev = &rte_eth_devices[port_id]; 3495 3496 /* save original values in case of failure */ 3497 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3498 dev_offloads = orig_offloads; 3499 3500 /* check which option changed by application */ 3501 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 3502 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 3503 if (cur != org) { 3504 if (cur) 3505 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3506 else 3507 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3508 mask |= 
RTE_ETH_VLAN_STRIP_MASK; 3509 } 3510 3511 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 3512 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 3513 if (cur != org) { 3514 if (cur) 3515 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3516 else 3517 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3518 mask |= RTE_ETH_VLAN_FILTER_MASK; 3519 } 3520 3521 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 3522 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 3523 if (cur != org) { 3524 if (cur) 3525 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3526 else 3527 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3528 mask |= RTE_ETH_VLAN_EXTEND_MASK; 3529 } 3530 3531 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 3532 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 3533 if (cur != org) { 3534 if (cur) 3535 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3536 else 3537 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3538 mask |= RTE_ETH_QINQ_STRIP_MASK; 3539 } 3540 3541 /*no change*/ 3542 if (mask == 0) 3543 return ret; 3544 3545 ret = rte_eth_dev_info_get(port_id, &dev_info); 3546 if (ret != 0) 3547 return ret; 3548 3549 /* Rx VLAN offloading must be within its device capabilities */ 3550 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3551 new_offloads = dev_offloads & ~orig_offloads; 3552 RTE_ETHDEV_LOG(ERR, 3553 "Ethdev port_id=%u requested new added VLAN offloads " 3554 "0x%" PRIx64 " must be within Rx offloads capabilities " 3555 "0x%" PRIx64 " in %s()\n", 3556 port_id, new_offloads, dev_info.rx_offload_capa, 3557 __func__); 3558 return -EINVAL; 3559 } 3560 3561 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3562 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3563 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3564 if (ret) { 3565 /* hit an error restore original values */ 3566 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3567 } 3568 3569 return eth_err(port_id, ret); 3570 } 3571 3572 int 3573 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3574 { 3575 struct rte_eth_dev *dev; 3576 uint64_t *dev_offloads; 3577 int ret = 0; 3578 3579 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3580 dev = &rte_eth_devices[port_id]; 3581 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3582 3583 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 3584 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 3585 3586 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 3587 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 3588 3589 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 3590 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 3591 3592 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 3593 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 3594 3595 return ret; 3596 } 3597 3598 int 3599 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3600 { 3601 struct rte_eth_dev *dev; 3602 3603 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3604 dev = &rte_eth_devices[port_id]; 3605 3606 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3607 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3608 } 3609 3610 int 3611 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3612 { 3613 struct rte_eth_dev *dev; 3614 3615 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3616 dev = &rte_eth_devices[port_id]; 3617 3618 if (fc_conf == NULL) { 3619 RTE_ETHDEV_LOG(ERR, 3620 "Cannot get ethdev port %u flow control config to NULL\n", 3621 port_id); 3622 return -EINVAL; 3623 } 3624 3625 
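/* Ensure the driver op exists, then clear the output so drivers only fill the fields they support. */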
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3626 memset(fc_conf, 0, sizeof(*fc_conf)); 3627 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3628 } 3629 3630 int 3631 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3632 { 3633 struct rte_eth_dev *dev; 3634 3635 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3636 dev = &rte_eth_devices[port_id]; 3637 3638 if (fc_conf == NULL) { 3639 RTE_ETHDEV_LOG(ERR, 3640 "Cannot set ethdev port %u flow control from NULL config\n", 3641 port_id); 3642 return -EINVAL; 3643 } 3644 3645 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3646 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3647 return -EINVAL; 3648 } 3649 3650 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 3651 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3652 } 3653 3654 int 3655 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3656 struct rte_eth_pfc_conf *pfc_conf) 3657 { 3658 struct rte_eth_dev *dev; 3659 3660 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3661 dev = &rte_eth_devices[port_id]; 3662 3663 if (pfc_conf == NULL) { 3664 RTE_ETHDEV_LOG(ERR, 3665 "Cannot set ethdev port %u priority flow control from NULL config\n", 3666 port_id); 3667 return -EINVAL; 3668 } 3669 3670 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 3671 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 3672 return -EINVAL; 3673 } 3674 3675 /* High water, low water validation are device specific */ 3676 if (*dev->dev_ops->priority_flow_ctrl_set) 3677 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 3678 (dev, pfc_conf)); 3679 return -ENOTSUP; 3680 } 3681 3682 static int 3683 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 3684 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 3685 { 3686 if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) || 3687 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 3688 if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) { 3689 RTE_ETHDEV_LOG(ERR, 3690 "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n", 3691 pfc_queue_conf->rx_pause.tx_qid, 3692 dev_info->nb_tx_queues); 3693 return -EINVAL; 3694 } 3695 3696 if (pfc_queue_conf->rx_pause.tc >= tc_max) { 3697 RTE_ETHDEV_LOG(ERR, 3698 "PFC TC not in range for Rx pause requested:%d max:%d\n", 3699 pfc_queue_conf->rx_pause.tc, tc_max); 3700 return -EINVAL; 3701 } 3702 } 3703 3704 return 0; 3705 } 3706 3707 static int 3708 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 3709 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 3710 { 3711 if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) || 3712 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 3713 if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) { 3714 RTE_ETHDEV_LOG(ERR, 3715 "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n", 3716 pfc_queue_conf->tx_pause.rx_qid, 3717 dev_info->nb_rx_queues); 3718 return -EINVAL; 3719 } 3720 3721 if (pfc_queue_conf->tx_pause.tc >= tc_max) { 3722 RTE_ETHDEV_LOG(ERR, 3723 "PFC TC not in range for Tx pause requested:%d max:%d\n", 3724 pfc_queue_conf->tx_pause.tc, tc_max); 3725 return -EINVAL; 3726 } 3727 } 3728 3729 return 0; 3730 } 3731 3732 int 3733 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, 3734 struct rte_eth_pfc_queue_info *pfc_queue_info) 3735 { 3736 struct rte_eth_dev *dev; 3737 3738 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3739 dev 
= &rte_eth_devices[port_id]; 3740 3741 if (pfc_queue_info == NULL) { 3742 RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n", 3743 port_id); 3744 return -EINVAL; 3745 } 3746 3747 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 3748 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 3749 (dev, pfc_queue_info)); 3750 return -ENOTSUP; 3751 } 3752 3753 int 3754 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, 3755 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 3756 { 3757 struct rte_eth_pfc_queue_info pfc_info; 3758 struct rte_eth_dev_info dev_info; 3759 struct rte_eth_dev *dev; 3760 int ret; 3761 3762 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3763 dev = &rte_eth_devices[port_id]; 3764 3765 if (pfc_queue_conf == NULL) { 3766 RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n", 3767 port_id); 3768 return -EINVAL; 3769 } 3770 3771 ret = rte_eth_dev_info_get(port_id, &dev_info); 3772 if (ret != 0) 3773 return ret; 3774 3775 ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info); 3776 if (ret != 0) 3777 return ret; 3778 3779 if (pfc_info.tc_max == 0) { 3780 RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n", 3781 port_id); 3782 return -ENOTSUP; 3783 } 3784 3785 /* Check requested mode supported or not */ 3786 if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE && 3787 pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) { 3788 RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n", 3789 port_id); 3790 return -EINVAL; 3791 } 3792 3793 if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE && 3794 pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) { 3795 RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n", 3796 port_id); 3797 return -EINVAL; 3798 } 3799 3800 /* Validate Rx pause parameters */ 3801 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 3802 pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) { 3803 ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max, 3804 pfc_queue_conf); 3805 if (ret != 0) 3806 return ret; 3807 } 3808 3809 /* Validate Tx pause parameters */ 3810 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 3811 pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) { 3812 ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max, 3813 pfc_queue_conf); 3814 if (ret != 0) 3815 return ret; 3816 } 3817 3818 if (*dev->dev_ops->priority_flow_ctrl_queue_config) 3819 return eth_err(port_id, 3820 (*dev->dev_ops->priority_flow_ctrl_queue_config)( 3821 dev, pfc_queue_conf)); 3822 return -ENOTSUP; 3823 } 3824 3825 static int 3826 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 3827 uint16_t reta_size) 3828 { 3829 uint16_t i, num; 3830 3831 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 3832 for (i = 0; i < num; i++) { 3833 if (reta_conf[i].mask) 3834 return 0; 3835 } 3836 3837 return -EINVAL; 3838 } 3839 3840 static int 3841 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 3842 uint16_t reta_size, 3843 uint16_t max_rxq) 3844 { 3845 uint16_t i, idx, shift; 3846 3847 if (max_rxq == 0) { 3848 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 3849 return -EINVAL; 3850 } 3851 3852 for (i = 0; i < reta_size; i++) { 3853 idx = i / RTE_ETH_RETA_GROUP_SIZE; 3854 shift = i % RTE_ETH_RETA_GROUP_SIZE; 3855 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 3856 (reta_conf[idx].reta[shift] >= max_rxq)) { 3857 RTE_ETHDEV_LOG(ERR, 3858 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 3859 idx, shift, 3860 reta_conf[idx].reta[shift], max_rxq); 3861 return 
-EINVAL; 3862 } 3863 } 3864 3865 return 0; 3866 } 3867 3868 int 3869 rte_eth_dev_rss_reta_update(uint16_t port_id, 3870 struct rte_eth_rss_reta_entry64 *reta_conf, 3871 uint16_t reta_size) 3872 { 3873 enum rte_eth_rx_mq_mode mq_mode; 3874 struct rte_eth_dev *dev; 3875 int ret; 3876 3877 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3878 dev = &rte_eth_devices[port_id]; 3879 3880 if (reta_conf == NULL) { 3881 RTE_ETHDEV_LOG(ERR, 3882 "Cannot update ethdev port %u RSS RETA to NULL\n", 3883 port_id); 3884 return -EINVAL; 3885 } 3886 3887 if (reta_size == 0) { 3888 RTE_ETHDEV_LOG(ERR, 3889 "Cannot update ethdev port %u RSS RETA with zero size\n", 3890 port_id); 3891 return -EINVAL; 3892 } 3893 3894 /* Check mask bits */ 3895 ret = eth_check_reta_mask(reta_conf, reta_size); 3896 if (ret < 0) 3897 return ret; 3898 3899 /* Check entry value */ 3900 ret = eth_check_reta_entry(reta_conf, reta_size, 3901 dev->data->nb_rx_queues); 3902 if (ret < 0) 3903 return ret; 3904 3905 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 3906 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 3907 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 3908 return -ENOTSUP; 3909 } 3910 3911 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 3912 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 3913 reta_size)); 3914 } 3915 3916 int 3917 rte_eth_dev_rss_reta_query(uint16_t port_id, 3918 struct rte_eth_rss_reta_entry64 *reta_conf, 3919 uint16_t reta_size) 3920 { 3921 struct rte_eth_dev *dev; 3922 int ret; 3923 3924 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3925 dev = &rte_eth_devices[port_id]; 3926 3927 if (reta_conf == NULL) { 3928 RTE_ETHDEV_LOG(ERR, 3929 "Cannot query ethdev port %u RSS RETA from NULL config\n", 3930 port_id); 3931 return -EINVAL; 3932 } 3933 3934 /* Check mask bits */ 3935 ret = eth_check_reta_mask(reta_conf, reta_size); 3936 if (ret < 0) 3937 return ret; 3938 3939 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 3940 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 3941 reta_size)); 3942 } 3943 3944 int 3945 rte_eth_dev_rss_hash_update(uint16_t port_id, 3946 struct rte_eth_rss_conf *rss_conf) 3947 { 3948 struct rte_eth_dev *dev; 3949 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 3950 enum rte_eth_rx_mq_mode mq_mode; 3951 int ret; 3952 3953 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3954 dev = &rte_eth_devices[port_id]; 3955 3956 if (rss_conf == NULL) { 3957 RTE_ETHDEV_LOG(ERR, 3958 "Cannot update ethdev port %u RSS hash from NULL config\n", 3959 port_id); 3960 return -EINVAL; 3961 } 3962 3963 ret = rte_eth_dev_info_get(port_id, &dev_info); 3964 if (ret != 0) 3965 return ret; 3966 3967 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 3968 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 3969 dev_info.flow_type_rss_offloads) { 3970 RTE_ETHDEV_LOG(ERR, 3971 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 3972 port_id, rss_conf->rss_hf, 3973 dev_info.flow_type_rss_offloads); 3974 return -EINVAL; 3975 } 3976 3977 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 3978 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 3979 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 3980 return -ENOTSUP; 3981 } 3982 3983 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 3984 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 3985 rss_conf)); 3986 } 3987 3988 int 3989 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 3990 struct 
rte_eth_rss_conf *rss_conf) 3991 { 3992 struct rte_eth_dev *dev; 3993 3994 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3995 dev = &rte_eth_devices[port_id]; 3996 3997 if (rss_conf == NULL) { 3998 RTE_ETHDEV_LOG(ERR, 3999 "Cannot get ethdev port %u RSS hash config to NULL\n", 4000 port_id); 4001 return -EINVAL; 4002 } 4003 4004 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 4005 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4006 rss_conf)); 4007 } 4008 4009 int 4010 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4011 struct rte_eth_udp_tunnel *udp_tunnel) 4012 { 4013 struct rte_eth_dev *dev; 4014 4015 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4016 dev = &rte_eth_devices[port_id]; 4017 4018 if (udp_tunnel == NULL) { 4019 RTE_ETHDEV_LOG(ERR, 4020 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4021 port_id); 4022 return -EINVAL; 4023 } 4024 4025 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4026 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4027 return -EINVAL; 4028 } 4029 4030 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4031 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4032 udp_tunnel)); 4033 } 4034 4035 int 4036 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4037 struct rte_eth_udp_tunnel *udp_tunnel) 4038 { 4039 struct rte_eth_dev *dev; 4040 4041 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4042 dev = &rte_eth_devices[port_id]; 4043 4044 if (udp_tunnel == NULL) { 4045 RTE_ETHDEV_LOG(ERR, 4046 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4047 port_id); 4048 return -EINVAL; 4049 } 4050 4051 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4052 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4053 return -EINVAL; 4054 } 4055 4056 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4057 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4058 udp_tunnel)); 4059 } 4060 4061 int 4062 rte_eth_led_on(uint16_t port_id) 4063 { 4064 struct rte_eth_dev *dev; 4065 4066 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4067 dev = &rte_eth_devices[port_id]; 4068 4069 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4070 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4071 } 4072 4073 int 4074 rte_eth_led_off(uint16_t port_id) 4075 { 4076 struct rte_eth_dev *dev; 4077 4078 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4079 dev = &rte_eth_devices[port_id]; 4080 4081 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4082 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4083 } 4084 4085 int 4086 rte_eth_fec_get_capability(uint16_t port_id, 4087 struct rte_eth_fec_capa *speed_fec_capa, 4088 unsigned int num) 4089 { 4090 struct rte_eth_dev *dev; 4091 int ret; 4092 4093 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4094 dev = &rte_eth_devices[port_id]; 4095 4096 if (speed_fec_capa == NULL && num > 0) { 4097 RTE_ETHDEV_LOG(ERR, 4098 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4099 port_id); 4100 return -EINVAL; 4101 } 4102 4103 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4104 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4105 4106 return ret; 4107 } 4108 4109 int 4110 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4111 { 4112 struct rte_eth_dev *dev; 4113 4114 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4115 dev = &rte_eth_devices[port_id]; 4116 
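	/*
	 * Editor's note (illustrative sketch, not part of the upstream code):
	 * a typical caller pairs this getter with rte_eth_fec_set(), using the
	 * RTE_ETH_FEC_MODE_CAPA_MASK() helper from rte_ethdev.h, e.g.
	 *
	 *	uint32_t fec_capa;
	 *
	 *	if (rte_eth_fec_get(port_id, &fec_capa) == 0 &&
	 *	    (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS)) == 0)
	 *		(void)rte_eth_fec_set(port_id,
	 *				RTE_ETH_FEC_MODE_CAPA_MASK(AUTO));
	 */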
4117 if (fec_capa == NULL) { 4118 RTE_ETHDEV_LOG(ERR, 4119 "Cannot get ethdev port %u current FEC mode to NULL\n", 4120 port_id); 4121 return -EINVAL; 4122 } 4123 4124 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4125 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4126 } 4127 4128 int 4129 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4130 { 4131 struct rte_eth_dev *dev; 4132 4133 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4134 dev = &rte_eth_devices[port_id]; 4135 4136 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4137 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4138 } 4139 4140 /* 4141 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4142 * an empty spot. 4143 */ 4144 static int 4145 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4146 { 4147 struct rte_eth_dev_info dev_info; 4148 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4149 unsigned i; 4150 int ret; 4151 4152 ret = rte_eth_dev_info_get(port_id, &dev_info); 4153 if (ret != 0) 4154 return -1; 4155 4156 for (i = 0; i < dev_info.max_mac_addrs; i++) 4157 if (memcmp(addr, &dev->data->mac_addrs[i], 4158 RTE_ETHER_ADDR_LEN) == 0) 4159 return i; 4160 4161 return -1; 4162 } 4163 4164 static const struct rte_ether_addr null_mac_addr; 4165 4166 int 4167 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4168 uint32_t pool) 4169 { 4170 struct rte_eth_dev *dev; 4171 int index; 4172 uint64_t pool_mask; 4173 int ret; 4174 4175 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4176 dev = &rte_eth_devices[port_id]; 4177 4178 if (addr == NULL) { 4179 RTE_ETHDEV_LOG(ERR, 4180 "Cannot add ethdev port %u MAC address from NULL address\n", 4181 port_id); 4182 return -EINVAL; 4183 } 4184 4185 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4186 4187 if (rte_is_zero_ether_addr(addr)) { 4188 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4189 port_id); 4190 return -EINVAL; 4191 } 4192 if (pool >= RTE_ETH_64_POOLS) { 4193 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 4194 return -EINVAL; 4195 } 4196 4197 index = eth_dev_get_mac_addr_index(port_id, addr); 4198 if (index < 0) { 4199 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4200 if (index < 0) { 4201 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4202 port_id); 4203 return -ENOSPC; 4204 } 4205 } else { 4206 pool_mask = dev->data->mac_pool_sel[index]; 4207 4208 /* Check if both MAC address and pool is already there, and do nothing */ 4209 if (pool_mask & RTE_BIT64(pool)) 4210 return 0; 4211 } 4212 4213 /* Update NIC */ 4214 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4215 4216 if (ret == 0) { 4217 /* Update address in NIC data structure */ 4218 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4219 4220 /* Update pool bitmap in NIC data structure */ 4221 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4222 } 4223 4224 return eth_err(port_id, ret); 4225 } 4226 4227 int 4228 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4229 { 4230 struct rte_eth_dev *dev; 4231 int index; 4232 4233 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4234 dev = &rte_eth_devices[port_id]; 4235 4236 if (addr == NULL) { 4237 RTE_ETHDEV_LOG(ERR, 4238 "Cannot remove ethdev port %u MAC address from NULL address\n", 4239 port_id); 4240 return -EINVAL; 4241 } 4242 4243 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, 
-ENOTSUP); 4244 4245 index = eth_dev_get_mac_addr_index(port_id, addr); 4246 if (index == 0) { 4247 RTE_ETHDEV_LOG(ERR, 4248 "Port %u: Cannot remove default MAC address\n", 4249 port_id); 4250 return -EADDRINUSE; 4251 } else if (index < 0) 4252 return 0; /* Do nothing if address wasn't found */ 4253 4254 /* Update NIC */ 4255 (*dev->dev_ops->mac_addr_remove)(dev, index); 4256 4257 /* Update address in NIC data structure */ 4258 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4259 4260 /* reset pool bitmap */ 4261 dev->data->mac_pool_sel[index] = 0; 4262 4263 return 0; 4264 } 4265 4266 int 4267 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4268 { 4269 struct rte_eth_dev *dev; 4270 int ret; 4271 4272 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4273 dev = &rte_eth_devices[port_id]; 4274 4275 if (addr == NULL) { 4276 RTE_ETHDEV_LOG(ERR, 4277 "Cannot set ethdev port %u default MAC address from NULL address\n", 4278 port_id); 4279 return -EINVAL; 4280 } 4281 4282 if (!rte_is_valid_assigned_ether_addr(addr)) 4283 return -EINVAL; 4284 4285 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4286 4287 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4288 if (ret < 0) 4289 return ret; 4290 4291 /* Update default address in NIC data structure */ 4292 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4293 4294 return 0; 4295 } 4296 4297 4298 /* 4299 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4300 * an empty spot. 4301 */ 4302 static int 4303 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4304 const struct rte_ether_addr *addr) 4305 { 4306 struct rte_eth_dev_info dev_info; 4307 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4308 unsigned i; 4309 int ret; 4310 4311 ret = rte_eth_dev_info_get(port_id, &dev_info); 4312 if (ret != 0) 4313 return -1; 4314 4315 if (!dev->data->hash_mac_addrs) 4316 return -1; 4317 4318 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4319 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4320 RTE_ETHER_ADDR_LEN) == 0) 4321 return i; 4322 4323 return -1; 4324 } 4325 4326 int 4327 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4328 uint8_t on) 4329 { 4330 int index; 4331 int ret; 4332 struct rte_eth_dev *dev; 4333 4334 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4335 dev = &rte_eth_devices[port_id]; 4336 4337 if (addr == NULL) { 4338 RTE_ETHDEV_LOG(ERR, 4339 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4340 port_id); 4341 return -EINVAL; 4342 } 4343 4344 if (rte_is_zero_ether_addr(addr)) { 4345 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4346 port_id); 4347 return -EINVAL; 4348 } 4349 4350 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4351 /* Check if it's already there, and do nothing */ 4352 if ((index >= 0) && on) 4353 return 0; 4354 4355 if (index < 0) { 4356 if (!on) { 4357 RTE_ETHDEV_LOG(ERR, 4358 "Port %u: the MAC address was not set in UTA\n", 4359 port_id); 4360 return -EINVAL; 4361 } 4362 4363 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4364 if (index < 0) { 4365 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4366 port_id); 4367 return -ENOSPC; 4368 } 4369 } 4370 4371 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4372 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4373 if (ret == 0) { 4374 /* Update address in NIC data structure */ 4375 if (on) 4376 rte_ether_addr_copy(addr, 4377 
&dev->data->hash_mac_addrs[index]); 4378 else 4379 rte_ether_addr_copy(&null_mac_addr, 4380 &dev->data->hash_mac_addrs[index]); 4381 } 4382 4383 return eth_err(port_id, ret); 4384 } 4385 4386 int 4387 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4388 { 4389 struct rte_eth_dev *dev; 4390 4391 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4392 dev = &rte_eth_devices[port_id]; 4393 4394 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4395 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4396 on)); 4397 } 4398 4399 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4400 uint16_t tx_rate) 4401 { 4402 struct rte_eth_dev *dev; 4403 struct rte_eth_dev_info dev_info; 4404 struct rte_eth_link link; 4405 int ret; 4406 4407 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4408 dev = &rte_eth_devices[port_id]; 4409 4410 ret = rte_eth_dev_info_get(port_id, &dev_info); 4411 if (ret != 0) 4412 return ret; 4413 4414 link = dev->data->dev_link; 4415 4416 if (queue_idx > dev_info.max_tx_queues) { 4417 RTE_ETHDEV_LOG(ERR, 4418 "Set queue rate limit:port %u: invalid queue ID=%u\n", 4419 port_id, queue_idx); 4420 return -EINVAL; 4421 } 4422 4423 if (tx_rate > link.link_speed) { 4424 RTE_ETHDEV_LOG(ERR, 4425 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 4426 tx_rate, link.link_speed); 4427 return -EINVAL; 4428 } 4429 4430 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 4431 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 4432 queue_idx, tx_rate)); 4433 } 4434 4435 RTE_INIT(eth_dev_init_fp_ops) 4436 { 4437 uint32_t i; 4438 4439 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 4440 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 4441 } 4442 4443 RTE_INIT(eth_dev_init_cb_lists) 4444 { 4445 uint16_t i; 4446 4447 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 4448 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 4449 } 4450 4451 int 4452 rte_eth_dev_callback_register(uint16_t port_id, 4453 enum rte_eth_event_type event, 4454 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4455 { 4456 struct rte_eth_dev *dev; 4457 struct rte_eth_dev_callback *user_cb; 4458 uint16_t next_port; 4459 uint16_t last_port; 4460 4461 if (cb_fn == NULL) { 4462 RTE_ETHDEV_LOG(ERR, 4463 "Cannot register ethdev port %u callback from NULL\n", 4464 port_id); 4465 return -EINVAL; 4466 } 4467 4468 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4469 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4470 return -EINVAL; 4471 } 4472 4473 if (port_id == RTE_ETH_ALL) { 4474 next_port = 0; 4475 last_port = RTE_MAX_ETHPORTS - 1; 4476 } else { 4477 next_port = last_port = port_id; 4478 } 4479 4480 rte_spinlock_lock(ð_dev_cb_lock); 4481 4482 do { 4483 dev = &rte_eth_devices[next_port]; 4484 4485 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 4486 if (user_cb->cb_fn == cb_fn && 4487 user_cb->cb_arg == cb_arg && 4488 user_cb->event == event) { 4489 break; 4490 } 4491 } 4492 4493 /* create a new callback. 
*/ 4494 if (user_cb == NULL) { 4495 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 4496 sizeof(struct rte_eth_dev_callback), 0); 4497 if (user_cb != NULL) { 4498 user_cb->cb_fn = cb_fn; 4499 user_cb->cb_arg = cb_arg; 4500 user_cb->event = event; 4501 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 4502 user_cb, next); 4503 } else { 4504 rte_spinlock_unlock(ð_dev_cb_lock); 4505 rte_eth_dev_callback_unregister(port_id, event, 4506 cb_fn, cb_arg); 4507 return -ENOMEM; 4508 } 4509 4510 } 4511 } while (++next_port <= last_port); 4512 4513 rte_spinlock_unlock(ð_dev_cb_lock); 4514 return 0; 4515 } 4516 4517 int 4518 rte_eth_dev_callback_unregister(uint16_t port_id, 4519 enum rte_eth_event_type event, 4520 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4521 { 4522 int ret; 4523 struct rte_eth_dev *dev; 4524 struct rte_eth_dev_callback *cb, *next; 4525 uint16_t next_port; 4526 uint16_t last_port; 4527 4528 if (cb_fn == NULL) { 4529 RTE_ETHDEV_LOG(ERR, 4530 "Cannot unregister ethdev port %u callback from NULL\n", 4531 port_id); 4532 return -EINVAL; 4533 } 4534 4535 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4536 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4537 return -EINVAL; 4538 } 4539 4540 if (port_id == RTE_ETH_ALL) { 4541 next_port = 0; 4542 last_port = RTE_MAX_ETHPORTS - 1; 4543 } else { 4544 next_port = last_port = port_id; 4545 } 4546 4547 rte_spinlock_lock(ð_dev_cb_lock); 4548 4549 do { 4550 dev = &rte_eth_devices[next_port]; 4551 ret = 0; 4552 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 4553 cb = next) { 4554 4555 next = TAILQ_NEXT(cb, next); 4556 4557 if (cb->cb_fn != cb_fn || cb->event != event || 4558 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 4559 continue; 4560 4561 /* 4562 * if this callback is not executing right now, 4563 * then remove it. 
4564 */ 4565 if (cb->active == 0) { 4566 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 4567 rte_free(cb); 4568 } else { 4569 ret = -EAGAIN; 4570 } 4571 } 4572 } while (++next_port <= last_port); 4573 4574 rte_spinlock_unlock(ð_dev_cb_lock); 4575 return ret; 4576 } 4577 4578 int 4579 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4580 { 4581 uint32_t vec; 4582 struct rte_eth_dev *dev; 4583 struct rte_intr_handle *intr_handle; 4584 uint16_t qid; 4585 int rc; 4586 4587 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4588 dev = &rte_eth_devices[port_id]; 4589 4590 if (!dev->intr_handle) { 4591 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4592 return -ENOTSUP; 4593 } 4594 4595 intr_handle = dev->intr_handle; 4596 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4597 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4598 return -EPERM; 4599 } 4600 4601 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4602 vec = rte_intr_vec_list_index_get(intr_handle, qid); 4603 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4604 if (rc && rc != -EEXIST) { 4605 RTE_ETHDEV_LOG(ERR, 4606 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 4607 port_id, qid, op, epfd, vec); 4608 } 4609 } 4610 4611 return 0; 4612 } 4613 4614 int 4615 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4616 { 4617 struct rte_intr_handle *intr_handle; 4618 struct rte_eth_dev *dev; 4619 unsigned int efd_idx; 4620 uint32_t vec; 4621 int fd; 4622 4623 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4624 dev = &rte_eth_devices[port_id]; 4625 4626 if (queue_id >= dev->data->nb_rx_queues) { 4627 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4628 return -1; 4629 } 4630 4631 if (!dev->intr_handle) { 4632 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4633 return -1; 4634 } 4635 4636 intr_handle = dev->intr_handle; 4637 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4638 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4639 return -1; 4640 } 4641 4642 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 4643 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
4644 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4645 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 4646 4647 return fd; 4648 } 4649 4650 int 4651 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 4652 int epfd, int op, void *data) 4653 { 4654 uint32_t vec; 4655 struct rte_eth_dev *dev; 4656 struct rte_intr_handle *intr_handle; 4657 int rc; 4658 4659 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4660 dev = &rte_eth_devices[port_id]; 4661 4662 if (queue_id >= dev->data->nb_rx_queues) { 4663 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4664 return -EINVAL; 4665 } 4666 4667 if (!dev->intr_handle) { 4668 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4669 return -ENOTSUP; 4670 } 4671 4672 intr_handle = dev->intr_handle; 4673 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4674 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4675 return -EPERM; 4676 } 4677 4678 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 4679 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4680 if (rc && rc != -EEXIST) { 4681 RTE_ETHDEV_LOG(ERR, 4682 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 4683 port_id, queue_id, op, epfd, vec); 4684 return rc; 4685 } 4686 4687 return 0; 4688 } 4689 4690 int 4691 rte_eth_dev_rx_intr_enable(uint16_t port_id, 4692 uint16_t queue_id) 4693 { 4694 struct rte_eth_dev *dev; 4695 int ret; 4696 4697 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4698 dev = &rte_eth_devices[port_id]; 4699 4700 ret = eth_dev_validate_rx_queue(dev, queue_id); 4701 if (ret != 0) 4702 return ret; 4703 4704 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 4705 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 4706 } 4707 4708 int 4709 rte_eth_dev_rx_intr_disable(uint16_t port_id, 4710 uint16_t queue_id) 4711 { 4712 struct rte_eth_dev *dev; 4713 int ret; 4714 4715 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4716 dev = &rte_eth_devices[port_id]; 4717 4718 ret = eth_dev_validate_rx_queue(dev, queue_id); 4719 if (ret != 0) 4720 return ret; 4721 4722 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 4723 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 4724 } 4725 4726 4727 const struct rte_eth_rxtx_callback * 4728 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 4729 rte_rx_callback_fn fn, void *user_param) 4730 { 4731 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4732 rte_errno = ENOTSUP; 4733 return NULL; 4734 #endif 4735 struct rte_eth_dev *dev; 4736 4737 /* check input parameters */ 4738 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 4739 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 4740 rte_errno = EINVAL; 4741 return NULL; 4742 } 4743 dev = &rte_eth_devices[port_id]; 4744 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 4745 rte_errno = EINVAL; 4746 return NULL; 4747 } 4748 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 4749 4750 if (cb == NULL) { 4751 rte_errno = ENOMEM; 4752 return NULL; 4753 } 4754 4755 cb->fn.rx = fn; 4756 cb->param = user_param; 4757 4758 rte_spinlock_lock(ð_dev_rx_cb_lock); 4759 /* Add the callbacks in fifo order. */ 4760 struct rte_eth_rxtx_callback *tail = 4761 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 4762 4763 if (!tail) { 4764 /* Stores to cb->fn and cb->param should complete before 4765 * cb is visible to data plane. 
4766 */ 4767 __atomic_store_n( 4768 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 4769 cb, __ATOMIC_RELEASE); 4770 4771 } else { 4772 while (tail->next) 4773 tail = tail->next; 4774 /* Stores to cb->fn and cb->param should complete before 4775 * cb is visible to data plane. 4776 */ 4777 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 4778 } 4779 rte_spinlock_unlock(ð_dev_rx_cb_lock); 4780 4781 return cb; 4782 } 4783 4784 const struct rte_eth_rxtx_callback * 4785 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 4786 rte_rx_callback_fn fn, void *user_param) 4787 { 4788 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4789 rte_errno = ENOTSUP; 4790 return NULL; 4791 #endif 4792 /* check input parameters */ 4793 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 4794 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 4795 rte_errno = EINVAL; 4796 return NULL; 4797 } 4798 4799 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 4800 4801 if (cb == NULL) { 4802 rte_errno = ENOMEM; 4803 return NULL; 4804 } 4805 4806 cb->fn.rx = fn; 4807 cb->param = user_param; 4808 4809 rte_spinlock_lock(ð_dev_rx_cb_lock); 4810 /* Add the callbacks at first position */ 4811 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 4812 /* Stores to cb->fn, cb->param and cb->next should complete before 4813 * cb is visible to data plane threads. 4814 */ 4815 __atomic_store_n( 4816 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 4817 cb, __ATOMIC_RELEASE); 4818 rte_spinlock_unlock(ð_dev_rx_cb_lock); 4819 4820 return cb; 4821 } 4822 4823 const struct rte_eth_rxtx_callback * 4824 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 4825 rte_tx_callback_fn fn, void *user_param) 4826 { 4827 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4828 rte_errno = ENOTSUP; 4829 return NULL; 4830 #endif 4831 struct rte_eth_dev *dev; 4832 4833 /* check input parameters */ 4834 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 4835 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 4836 rte_errno = EINVAL; 4837 return NULL; 4838 } 4839 4840 dev = &rte_eth_devices[port_id]; 4841 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 4842 rte_errno = EINVAL; 4843 return NULL; 4844 } 4845 4846 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 4847 4848 if (cb == NULL) { 4849 rte_errno = ENOMEM; 4850 return NULL; 4851 } 4852 4853 cb->fn.tx = fn; 4854 cb->param = user_param; 4855 4856 rte_spinlock_lock(ð_dev_tx_cb_lock); 4857 /* Add the callbacks in fifo order. */ 4858 struct rte_eth_rxtx_callback *tail = 4859 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 4860 4861 if (!tail) { 4862 /* Stores to cb->fn and cb->param should complete before 4863 * cb is visible to data plane. 4864 */ 4865 __atomic_store_n( 4866 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 4867 cb, __ATOMIC_RELEASE); 4868 4869 } else { 4870 while (tail->next) 4871 tail = tail->next; 4872 /* Stores to cb->fn and cb->param should complete before 4873 * cb is visible to data plane. 4874 */ 4875 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 4876 } 4877 rte_spinlock_unlock(ð_dev_tx_cb_lock); 4878 4879 return cb; 4880 } 4881 4882 int 4883 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 4884 const struct rte_eth_rxtx_callback *user_cb) 4885 { 4886 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4887 return -ENOTSUP; 4888 #endif 4889 /* Check input parameters. 
*/ 4890 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4891 if (user_cb == NULL || 4892 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 4893 return -EINVAL; 4894 4895 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4896 struct rte_eth_rxtx_callback *cb; 4897 struct rte_eth_rxtx_callback **prev_cb; 4898 int ret = -EINVAL; 4899 4900 rte_spinlock_lock(ð_dev_rx_cb_lock); 4901 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 4902 for (; *prev_cb != NULL; prev_cb = &cb->next) { 4903 cb = *prev_cb; 4904 if (cb == user_cb) { 4905 /* Remove the user cb from the callback list. */ 4906 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 4907 ret = 0; 4908 break; 4909 } 4910 } 4911 rte_spinlock_unlock(ð_dev_rx_cb_lock); 4912 4913 return ret; 4914 } 4915 4916 int 4917 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 4918 const struct rte_eth_rxtx_callback *user_cb) 4919 { 4920 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4921 return -ENOTSUP; 4922 #endif 4923 /* Check input parameters. */ 4924 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4925 if (user_cb == NULL || 4926 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 4927 return -EINVAL; 4928 4929 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4930 int ret = -EINVAL; 4931 struct rte_eth_rxtx_callback *cb; 4932 struct rte_eth_rxtx_callback **prev_cb; 4933 4934 rte_spinlock_lock(ð_dev_tx_cb_lock); 4935 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 4936 for (; *prev_cb != NULL; prev_cb = &cb->next) { 4937 cb = *prev_cb; 4938 if (cb == user_cb) { 4939 /* Remove the user cb from the callback list. */ 4940 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 4941 ret = 0; 4942 break; 4943 } 4944 } 4945 rte_spinlock_unlock(ð_dev_tx_cb_lock); 4946 4947 return ret; 4948 } 4949 4950 int 4951 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 4952 struct rte_eth_rxq_info *qinfo) 4953 { 4954 struct rte_eth_dev *dev; 4955 4956 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4957 dev = &rte_eth_devices[port_id]; 4958 4959 if (queue_id >= dev->data->nb_rx_queues) { 4960 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4961 return -EINVAL; 4962 } 4963 4964 if (qinfo == NULL) { 4965 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 4966 port_id, queue_id); 4967 return -EINVAL; 4968 } 4969 4970 if (dev->data->rx_queues == NULL || 4971 dev->data->rx_queues[queue_id] == NULL) { 4972 RTE_ETHDEV_LOG(ERR, 4973 "Rx queue %"PRIu16" of device with port_id=%" 4974 PRIu16" has not been setup\n", 4975 queue_id, port_id); 4976 return -EINVAL; 4977 } 4978 4979 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 4980 RTE_ETHDEV_LOG(INFO, 4981 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 4982 queue_id, port_id); 4983 return -EINVAL; 4984 } 4985 4986 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 4987 4988 memset(qinfo, 0, sizeof(*qinfo)); 4989 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 4990 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 4991 4992 return 0; 4993 } 4994 4995 int 4996 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 4997 struct rte_eth_txq_info *qinfo) 4998 { 4999 struct rte_eth_dev *dev; 5000 5001 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5002 dev = &rte_eth_devices[port_id]; 5003 5004 if (queue_id >= dev->data->nb_tx_queues) { 5005 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5006 return -EINVAL; 5007 } 5008 5009 if (qinfo == NULL) { 5010 RTE_ETHDEV_LOG(ERR, "Cannot get 
ethdev port %u Tx queue %u info to NULL\n", 5011 port_id, queue_id); 5012 return -EINVAL; 5013 } 5014 5015 if (dev->data->tx_queues == NULL || 5016 dev->data->tx_queues[queue_id] == NULL) { 5017 RTE_ETHDEV_LOG(ERR, 5018 "Tx queue %"PRIu16" of device with port_id=%" 5019 PRIu16" has not been setup\n", 5020 queue_id, port_id); 5021 return -EINVAL; 5022 } 5023 5024 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5025 RTE_ETHDEV_LOG(INFO, 5026 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5027 queue_id, port_id); 5028 return -EINVAL; 5029 } 5030 5031 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 5032 5033 memset(qinfo, 0, sizeof(*qinfo)); 5034 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5035 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5036 5037 return 0; 5038 } 5039 5040 int 5041 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5042 struct rte_eth_burst_mode *mode) 5043 { 5044 struct rte_eth_dev *dev; 5045 5046 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5047 dev = &rte_eth_devices[port_id]; 5048 5049 if (queue_id >= dev->data->nb_rx_queues) { 5050 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5051 return -EINVAL; 5052 } 5053 5054 if (mode == NULL) { 5055 RTE_ETHDEV_LOG(ERR, 5056 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5057 port_id, queue_id); 5058 return -EINVAL; 5059 } 5060 5061 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); 5062 memset(mode, 0, sizeof(*mode)); 5063 return eth_err(port_id, 5064 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5065 } 5066 5067 int 5068 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5069 struct rte_eth_burst_mode *mode) 5070 { 5071 struct rte_eth_dev *dev; 5072 5073 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5074 dev = &rte_eth_devices[port_id]; 5075 5076 if (queue_id >= dev->data->nb_tx_queues) { 5077 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5078 return -EINVAL; 5079 } 5080 5081 if (mode == NULL) { 5082 RTE_ETHDEV_LOG(ERR, 5083 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5084 port_id, queue_id); 5085 return -EINVAL; 5086 } 5087 5088 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5089 memset(mode, 0, sizeof(*mode)); 5090 return eth_err(port_id, 5091 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5092 } 5093 5094 int 5095 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5096 struct rte_power_monitor_cond *pmc) 5097 { 5098 struct rte_eth_dev *dev; 5099 5100 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5101 dev = &rte_eth_devices[port_id]; 5102 5103 if (queue_id >= dev->data->nb_rx_queues) { 5104 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5105 return -EINVAL; 5106 } 5107 5108 if (pmc == NULL) { 5109 RTE_ETHDEV_LOG(ERR, 5110 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5111 port_id, queue_id); 5112 return -EINVAL; 5113 } 5114 5115 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5116 return eth_err(port_id, 5117 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5118 } 5119 5120 int 5121 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5122 struct rte_ether_addr *mc_addr_set, 5123 uint32_t nb_mc_addr) 5124 { 5125 struct rte_eth_dev *dev; 5126 5127 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5128 dev = &rte_eth_devices[port_id]; 5129 5130 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5131 
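	/*
	 * Editor's note: the dev_ops callback invoked below replaces the whole
	 * multicast filter set in a single call; passing nb_mc_addr == 0 is
	 * commonly used to clear it. Illustrative use only (not upstream code):
	 *
	 *	struct rte_ether_addr mc_list[2];
	 *
	 *	rte_ether_unformat_addr("01:00:5e:00:00:01", &mc_list[0]);
	 *	rte_ether_unformat_addr("01:00:5e:00:00:02", &mc_list[1]);
	 *	rte_eth_dev_set_mc_addr_list(port_id, mc_list, RTE_DIM(mc_list));
	 *	rte_eth_dev_set_mc_addr_list(port_id, NULL, 0);
	 */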
return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5132 mc_addr_set, nb_mc_addr)); 5133 } 5134 5135 int 5136 rte_eth_timesync_enable(uint16_t port_id) 5137 { 5138 struct rte_eth_dev *dev; 5139 5140 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5141 dev = &rte_eth_devices[port_id]; 5142 5143 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5144 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5145 } 5146 5147 int 5148 rte_eth_timesync_disable(uint16_t port_id) 5149 { 5150 struct rte_eth_dev *dev; 5151 5152 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5153 dev = &rte_eth_devices[port_id]; 5154 5155 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5156 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5157 } 5158 5159 int 5160 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5161 uint32_t flags) 5162 { 5163 struct rte_eth_dev *dev; 5164 5165 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5166 dev = &rte_eth_devices[port_id]; 5167 5168 if (timestamp == NULL) { 5169 RTE_ETHDEV_LOG(ERR, 5170 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5171 port_id); 5172 return -EINVAL; 5173 } 5174 5175 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5176 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5177 (dev, timestamp, flags)); 5178 } 5179 5180 int 5181 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5182 struct timespec *timestamp) 5183 { 5184 struct rte_eth_dev *dev; 5185 5186 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5187 dev = &rte_eth_devices[port_id]; 5188 5189 if (timestamp == NULL) { 5190 RTE_ETHDEV_LOG(ERR, 5191 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5192 port_id); 5193 return -EINVAL; 5194 } 5195 5196 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5197 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5198 (dev, timestamp)); 5199 } 5200 5201 int 5202 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5203 { 5204 struct rte_eth_dev *dev; 5205 5206 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5207 dev = &rte_eth_devices[port_id]; 5208 5209 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5210 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5211 } 5212 5213 int 5214 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5215 { 5216 struct rte_eth_dev *dev; 5217 5218 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5219 dev = &rte_eth_devices[port_id]; 5220 5221 if (timestamp == NULL) { 5222 RTE_ETHDEV_LOG(ERR, 5223 "Cannot read ethdev port %u timesync time to NULL\n", 5224 port_id); 5225 return -EINVAL; 5226 } 5227 5228 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5229 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5230 timestamp)); 5231 } 5232 5233 int 5234 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5235 { 5236 struct rte_eth_dev *dev; 5237 5238 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5239 dev = &rte_eth_devices[port_id]; 5240 5241 if (timestamp == NULL) { 5242 RTE_ETHDEV_LOG(ERR, 5243 "Cannot write ethdev port %u timesync from NULL time\n", 5244 port_id); 5245 return -EINVAL; 5246 } 5247 5248 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5249 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5250 timestamp)); 5251 } 5252 5253 
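/*
 * Editor's note: illustrative usage of the timesync/clock helpers implemented
 * above and below (a sketch; it assumes the PMD implements the corresponding
 * dev_ops, otherwise every call returns -ENOTSUP):
 *
 *	struct timespec ts;
 *	uint64_t cycles;
 *
 *	if (rte_eth_timesync_enable(port_id) == 0 &&
 *	    rte_eth_timesync_read_time(port_id, &ts) == 0)
 *		printf("PHC time: %jd.%09ld\n", (intmax_t)ts.tv_sec, ts.tv_nsec);
 *	if (rte_eth_read_clock(port_id, &cycles) == 0)
 *		printf("raw device clock: %" PRIu64 "\n", cycles);
 */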
int 5254 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5255 { 5256 struct rte_eth_dev *dev; 5257 5258 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5259 dev = &rte_eth_devices[port_id]; 5260 5261 if (clock == NULL) { 5262 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5263 port_id); 5264 return -EINVAL; 5265 } 5266 5267 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5268 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5269 } 5270 5271 int 5272 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5273 { 5274 struct rte_eth_dev *dev; 5275 5276 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5277 dev = &rte_eth_devices[port_id]; 5278 5279 if (info == NULL) { 5280 RTE_ETHDEV_LOG(ERR, 5281 "Cannot get ethdev port %u register info to NULL\n", 5282 port_id); 5283 return -EINVAL; 5284 } 5285 5286 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5287 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5288 } 5289 5290 int 5291 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5292 { 5293 struct rte_eth_dev *dev; 5294 5295 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5296 dev = &rte_eth_devices[port_id]; 5297 5298 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5299 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5300 } 5301 5302 int 5303 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5304 { 5305 struct rte_eth_dev *dev; 5306 5307 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5308 dev = &rte_eth_devices[port_id]; 5309 5310 if (info == NULL) { 5311 RTE_ETHDEV_LOG(ERR, 5312 "Cannot get ethdev port %u EEPROM info to NULL\n", 5313 port_id); 5314 return -EINVAL; 5315 } 5316 5317 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5318 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5319 } 5320 5321 int 5322 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5323 { 5324 struct rte_eth_dev *dev; 5325 5326 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5327 dev = &rte_eth_devices[port_id]; 5328 5329 if (info == NULL) { 5330 RTE_ETHDEV_LOG(ERR, 5331 "Cannot set ethdev port %u EEPROM from NULL info\n", 5332 port_id); 5333 return -EINVAL; 5334 } 5335 5336 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5337 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5338 } 5339 5340 int 5341 rte_eth_dev_get_module_info(uint16_t port_id, 5342 struct rte_eth_dev_module_info *modinfo) 5343 { 5344 struct rte_eth_dev *dev; 5345 5346 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5347 dev = &rte_eth_devices[port_id]; 5348 5349 if (modinfo == NULL) { 5350 RTE_ETHDEV_LOG(ERR, 5351 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5352 port_id); 5353 return -EINVAL; 5354 } 5355 5356 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5357 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5358 } 5359 5360 int 5361 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5362 struct rte_dev_eeprom_info *info) 5363 { 5364 struct rte_eth_dev *dev; 5365 5366 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5367 dev = &rte_eth_devices[port_id]; 5368 5369 if (info == NULL) { 5370 RTE_ETHDEV_LOG(ERR, 5371 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5372 port_id); 5373 return -EINVAL; 5374 } 5375 5376 if (info->data == NULL) { 5377 RTE_ETHDEV_LOG(ERR, 5378 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5379 port_id); 5380 
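	/*
	 * Editor's note: callers are expected to query the module type and
	 * eeprom_len via rte_eth_dev_get_module_info() first and size
	 * info->data (and info->length) from that, which is why a NULL data
	 * buffer is rejected here.
	 */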
return -EINVAL; 5381 } 5382 5383 if (info->length == 0) { 5384 RTE_ETHDEV_LOG(ERR, 5385 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5386 port_id); 5387 return -EINVAL; 5388 } 5389 5390 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5391 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5392 } 5393 5394 int 5395 rte_eth_dev_get_dcb_info(uint16_t port_id, 5396 struct rte_eth_dcb_info *dcb_info) 5397 { 5398 struct rte_eth_dev *dev; 5399 5400 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5401 dev = &rte_eth_devices[port_id]; 5402 5403 if (dcb_info == NULL) { 5404 RTE_ETHDEV_LOG(ERR, 5405 "Cannot get ethdev port %u DCB info to NULL\n", 5406 port_id); 5407 return -EINVAL; 5408 } 5409 5410 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5411 5412 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5413 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5414 } 5415 5416 static void 5417 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5418 const struct rte_eth_desc_lim *desc_lim) 5419 { 5420 if (desc_lim->nb_align != 0) 5421 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5422 5423 if (desc_lim->nb_max != 0) 5424 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5425 5426 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5427 } 5428 5429 int 5430 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5431 uint16_t *nb_rx_desc, 5432 uint16_t *nb_tx_desc) 5433 { 5434 struct rte_eth_dev_info dev_info; 5435 int ret; 5436 5437 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5438 5439 ret = rte_eth_dev_info_get(port_id, &dev_info); 5440 if (ret != 0) 5441 return ret; 5442 5443 if (nb_rx_desc != NULL) 5444 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5445 5446 if (nb_tx_desc != NULL) 5447 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5448 5449 return 0; 5450 } 5451 5452 int 5453 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5454 struct rte_eth_hairpin_cap *cap) 5455 { 5456 struct rte_eth_dev *dev; 5457 5458 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5459 dev = &rte_eth_devices[port_id]; 5460 5461 if (cap == NULL) { 5462 RTE_ETHDEV_LOG(ERR, 5463 "Cannot get ethdev port %u hairpin capability to NULL\n", 5464 port_id); 5465 return -EINVAL; 5466 } 5467 5468 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5469 memset(cap, 0, sizeof(*cap)); 5470 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5471 } 5472 5473 int 5474 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5475 { 5476 struct rte_eth_dev *dev; 5477 5478 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5479 dev = &rte_eth_devices[port_id]; 5480 5481 if (pool == NULL) { 5482 RTE_ETHDEV_LOG(ERR, 5483 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5484 port_id); 5485 return -EINVAL; 5486 } 5487 5488 if (*dev->dev_ops->pool_ops_supported == NULL) 5489 return 1; /* all pools are supported */ 5490 5491 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5492 } 5493 5494 static int 5495 eth_dev_handle_port_list(const char *cmd __rte_unused, 5496 const char *params __rte_unused, 5497 struct rte_tel_data *d) 5498 { 5499 int port_id; 5500 5501 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 5502 RTE_ETH_FOREACH_DEV(port_id) 5503 rte_tel_data_add_array_int(d, port_id); 5504 return 0; 5505 } 5506 5507 static void 5508 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 5509 const char *stat_name) 5510 { 5511 int q; 5512 struct 
rte_tel_data *q_data = rte_tel_data_alloc(); 5513 if (q_data == NULL) 5514 return; 5515 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); 5516 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 5517 rte_tel_data_add_array_u64(q_data, q_stats[q]); 5518 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 5519 } 5520 5521 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s) 5522 5523 static int 5524 eth_dev_handle_port_stats(const char *cmd __rte_unused, 5525 const char *params, 5526 struct rte_tel_data *d) 5527 { 5528 struct rte_eth_stats stats; 5529 int port_id, ret; 5530 5531 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5532 return -1; 5533 5534 port_id = atoi(params); 5535 if (!rte_eth_dev_is_valid_port(port_id)) 5536 return -1; 5537 5538 ret = rte_eth_stats_get(port_id, &stats); 5539 if (ret < 0) 5540 return -1; 5541 5542 rte_tel_data_start_dict(d); 5543 ADD_DICT_STAT(stats, ipackets); 5544 ADD_DICT_STAT(stats, opackets); 5545 ADD_DICT_STAT(stats, ibytes); 5546 ADD_DICT_STAT(stats, obytes); 5547 ADD_DICT_STAT(stats, imissed); 5548 ADD_DICT_STAT(stats, ierrors); 5549 ADD_DICT_STAT(stats, oerrors); 5550 ADD_DICT_STAT(stats, rx_nombuf); 5551 eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets"); 5552 eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets"); 5553 eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes"); 5554 eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes"); 5555 eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors"); 5556 5557 return 0; 5558 } 5559 5560 static int 5561 eth_dev_handle_port_xstats(const char *cmd __rte_unused, 5562 const char *params, 5563 struct rte_tel_data *d) 5564 { 5565 struct rte_eth_xstat *eth_xstats; 5566 struct rte_eth_xstat_name *xstat_names; 5567 int port_id, num_xstats; 5568 int i, ret; 5569 char *end_param; 5570 5571 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5572 return -1; 5573 5574 port_id = strtoul(params, &end_param, 0); 5575 if (*end_param != '\0') 5576 RTE_ETHDEV_LOG(NOTICE, 5577 "Extra parameters passed to ethdev telemetry command, ignoring"); 5578 if (!rte_eth_dev_is_valid_port(port_id)) 5579 return -1; 5580 5581 num_xstats = rte_eth_xstats_get(port_id, NULL, 0); 5582 if (num_xstats < 0) 5583 return -1; 5584 5585 /* use one malloc for both names and stats */ 5586 eth_xstats = malloc((sizeof(struct rte_eth_xstat) + 5587 sizeof(struct rte_eth_xstat_name)) * num_xstats); 5588 if (eth_xstats == NULL) 5589 return -1; 5590 xstat_names = (void *)ð_xstats[num_xstats]; 5591 5592 ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats); 5593 if (ret < 0 || ret > num_xstats) { 5594 free(eth_xstats); 5595 return -1; 5596 } 5597 5598 ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats); 5599 if (ret < 0 || ret > num_xstats) { 5600 free(eth_xstats); 5601 return -1; 5602 } 5603 5604 rte_tel_data_start_dict(d); 5605 for (i = 0; i < num_xstats; i++) 5606 rte_tel_data_add_dict_u64(d, xstat_names[i].name, 5607 eth_xstats[i].value); 5608 free(eth_xstats); 5609 return 0; 5610 } 5611 5612 static int 5613 eth_dev_handle_port_link_status(const char *cmd __rte_unused, 5614 const char *params, 5615 struct rte_tel_data *d) 5616 { 5617 static const char *status_str = "status"; 5618 int ret, port_id; 5619 struct rte_eth_link link; 5620 char *end_param; 5621 5622 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5623 return -1; 5624 5625 port_id = strtoul(params, &end_param, 0); 5626 if (*end_param != '\0') 5627 RTE_ETHDEV_LOG(NOTICE, 5628 
"Extra parameters passed to ethdev telemetry command, ignoring"); 5629 if (!rte_eth_dev_is_valid_port(port_id)) 5630 return -1; 5631 5632 ret = rte_eth_link_get_nowait(port_id, &link); 5633 if (ret < 0) 5634 return -1; 5635 5636 rte_tel_data_start_dict(d); 5637 if (!link.link_status) { 5638 rte_tel_data_add_dict_string(d, status_str, "DOWN"); 5639 return 0; 5640 } 5641 rte_tel_data_add_dict_string(d, status_str, "UP"); 5642 rte_tel_data_add_dict_u64(d, "speed", link.link_speed); 5643 rte_tel_data_add_dict_string(d, "duplex", 5644 (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 5645 "full-duplex" : "half-duplex"); 5646 return 0; 5647 } 5648 5649 static int 5650 eth_dev_handle_port_info(const char *cmd __rte_unused, 5651 const char *params, 5652 struct rte_tel_data *d) 5653 { 5654 struct rte_tel_data *rxq_state, *txq_state; 5655 char mac_addr[RTE_ETHER_ADDR_FMT_SIZE]; 5656 struct rte_eth_dev *eth_dev; 5657 char *end_param; 5658 int port_id, i; 5659 5660 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5661 return -1; 5662 5663 port_id = strtoul(params, &end_param, 0); 5664 if (*end_param != '\0') 5665 RTE_ETHDEV_LOG(NOTICE, 5666 "Extra parameters passed to ethdev telemetry command, ignoring"); 5667 5668 if (!rte_eth_dev_is_valid_port(port_id)) 5669 return -EINVAL; 5670 5671 eth_dev = &rte_eth_devices[port_id]; 5672 5673 rxq_state = rte_tel_data_alloc(); 5674 if (!rxq_state) 5675 return -ENOMEM; 5676 5677 txq_state = rte_tel_data_alloc(); 5678 if (!txq_state) { 5679 rte_tel_data_free(rxq_state); 5680 return -ENOMEM; 5681 } 5682 5683 rte_tel_data_start_dict(d); 5684 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 5685 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 5686 rte_tel_data_add_dict_int(d, "nb_rx_queues", 5687 eth_dev->data->nb_rx_queues); 5688 rte_tel_data_add_dict_int(d, "nb_tx_queues", 5689 eth_dev->data->nb_tx_queues); 5690 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 5691 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 5692 rte_tel_data_add_dict_int(d, "rx_mbuf_size_min", 5693 eth_dev->data->min_rx_buf_size); 5694 rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", 5695 eth_dev->data->rx_mbuf_alloc_failed); 5696 rte_ether_format_addr(mac_addr, sizeof(mac_addr), 5697 eth_dev->data->mac_addrs); 5698 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 5699 rte_tel_data_add_dict_int(d, "promiscuous", 5700 eth_dev->data->promiscuous); 5701 rte_tel_data_add_dict_int(d, "scattered_rx", 5702 eth_dev->data->scattered_rx); 5703 rte_tel_data_add_dict_int(d, "all_multicast", 5704 eth_dev->data->all_multicast); 5705 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 5706 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 5707 rte_tel_data_add_dict_int(d, "dev_configured", 5708 eth_dev->data->dev_configured); 5709 5710 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 5711 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 5712 rte_tel_data_add_array_int(rxq_state, 5713 eth_dev->data->rx_queue_state[i]); 5714 5715 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 5716 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 5717 rte_tel_data_add_array_int(txq_state, 5718 eth_dev->data->tx_queue_state[i]); 5719 5720 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 5721 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 5722 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 5723 rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags); 5724 
rte_tel_data_add_dict_int(d, "rx_offloads", 5725 eth_dev->data->dev_conf.rxmode.offloads); 5726 rte_tel_data_add_dict_int(d, "tx_offloads", 5727 eth_dev->data->dev_conf.txmode.offloads); 5728 rte_tel_data_add_dict_int(d, "ethdev_rss_hf", 5729 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); 5730 5731 return 0; 5732 } 5733 5734 int 5735 rte_eth_representor_info_get(uint16_t port_id, 5736 struct rte_eth_representor_info *info) 5737 { 5738 struct rte_eth_dev *dev; 5739 5740 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5741 dev = &rte_eth_devices[port_id]; 5742 5743 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP); 5744 return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info)); 5745 } 5746 5747 int 5748 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features) 5749 { 5750 struct rte_eth_dev *dev; 5751 5752 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5753 dev = &rte_eth_devices[port_id]; 5754 5755 if (dev->data->dev_configured != 0) { 5756 RTE_ETHDEV_LOG(ERR, 5757 "The port (ID=%"PRIu16") is already configured\n", 5758 port_id); 5759 return -EBUSY; 5760 } 5761 5762 if (features == NULL) { 5763 RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n"); 5764 return -EINVAL; 5765 } 5766 5767 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP); 5768 return eth_err(port_id, 5769 (*dev->dev_ops->rx_metadata_negotiate)(dev, features)); 5770 } 5771 5772 int 5773 rte_eth_ip_reassembly_capability_get(uint16_t port_id, 5774 struct rte_eth_ip_reassembly_params *reassembly_capa) 5775 { 5776 struct rte_eth_dev *dev; 5777 5778 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5779 dev = &rte_eth_devices[port_id]; 5780 5781 if (dev->data->dev_configured == 0) { 5782 RTE_ETHDEV_LOG(ERR, 5783 "Device with port_id=%u is not configured.\n" 5784 "Cannot get IP reassembly capability\n", 5785 port_id); 5786 return -EINVAL; 5787 } 5788 5789 if (reassembly_capa == NULL) { 5790 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL"); 5791 return -EINVAL; 5792 } 5793 5794 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_capability_get, 5795 -ENOTSUP); 5796 memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params)); 5797 5798 return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get) 5799 (dev, reassembly_capa)); 5800 } 5801 5802 int 5803 rte_eth_ip_reassembly_conf_get(uint16_t port_id, 5804 struct rte_eth_ip_reassembly_params *conf) 5805 { 5806 struct rte_eth_dev *dev; 5807 5808 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5809 dev = &rte_eth_devices[port_id]; 5810 5811 if (dev->data->dev_configured == 0) { 5812 RTE_ETHDEV_LOG(ERR, 5813 "Device with port_id=%u is not configured.\n" 5814 "Cannot get IP reassembly configuration\n", 5815 port_id); 5816 return -EINVAL; 5817 } 5818 5819 if (conf == NULL) { 5820 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL"); 5821 return -EINVAL; 5822 } 5823 5824 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_get, 5825 -ENOTSUP); 5826 memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params)); 5827 return eth_err(port_id, 5828 (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf)); 5829 } 5830 5831 int 5832 rte_eth_ip_reassembly_conf_set(uint16_t port_id, 5833 const struct rte_eth_ip_reassembly_params *conf) 5834 { 5835 struct rte_eth_dev *dev; 5836 5837 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5838 dev = &rte_eth_devices[port_id]; 5839 5840 if (dev->data->dev_configured == 0) { 5841 RTE_ETHDEV_LOG(ERR, 5842 "Device with port_id=%u is not 
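/*
 * Editor's note: an illustrative telemetry session exercising the handlers
 * registered above, using the in-tree client (output abridged and
 * hypothetical; actual values depend on the running application):
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /ethdev/list
 *	{"/ethdev/list": [0, 1]}
 *	--> /ethdev/stats,0
 *	{"/ethdev/stats": {"ipackets": 0, "opackets": 0, "ibytes": 0, ...}}
 *	--> /ethdev/link_status,0
 *	{"/ethdev/link_status": {"status": "UP", "speed": 10000, ...}}
 *
 * The integer after the comma is the port_id parameter parsed by the
 * handlers above.
 */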
configured.\n" 5843 "Cannot set IP reassembly configuration", 5844 port_id); 5845 return -EINVAL; 5846 } 5847 5848 if (dev->data->dev_started != 0) { 5849 RTE_ETHDEV_LOG(ERR, 5850 "Device with port_id=%u started,\n" 5851 "cannot configure IP reassembly params.\n", 5852 port_id); 5853 return -EINVAL; 5854 } 5855 5856 if (conf == NULL) { 5857 RTE_ETHDEV_LOG(ERR, 5858 "Invalid IP reassembly configuration (NULL)\n"); 5859 return -EINVAL; 5860 } 5861 5862 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_set, 5863 -ENOTSUP); 5864 return eth_err(port_id, 5865 (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf)); 5866 } 5867 5868 int 5869 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file) 5870 { 5871 struct rte_eth_dev *dev; 5872 5873 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5874 dev = &rte_eth_devices[port_id]; 5875 5876 if (file == NULL) { 5877 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 5878 return -EINVAL; 5879 } 5880 5881 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_dev_priv_dump, -ENOTSUP); 5882 return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file)); 5883 } 5884 5885 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO); 5886 5887 RTE_INIT(ethdev_init_telemetry) 5888 { 5889 rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list, 5890 "Returns list of available ethdev ports. Takes no parameters"); 5891 rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats, 5892 "Returns the common stats for a port. Parameters: int port_id"); 5893 rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats, 5894 "Returns the extended stats for a port. Parameters: int port_id"); 5895 rte_telemetry_register_cmd("/ethdev/link_status", 5896 eth_dev_handle_port_link_status, 5897 "Returns the link status for a port. Parameters: int port_id"); 5898 rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info, 5899 "Returns the device info for a port. Parameters: int port_id"); 5900 rte_telemetry_register_cmd("/ethdev/module_eeprom", eth_dev_handle_port_module_eeprom, 5901 "Returns module EEPROM info with SFF specs. Parameters: int port_id"); 5902 } 5903