/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and its offset in stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle pure class filter (i.e. without any bus-level argument),
	 * from future new syntax.
	 * rte_devargs_parse() is not yet supporting the new syntax,
	 * that's why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
	    (strcmp(iter->bus->name, "fslmc") == 0) ||
	    (strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}
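
/*
 * Editor's sketch (not part of the library): typical application-side use of
 * the iterator API above. The devargs string and the printf call are
 * illustrative only.
 *
 *	struct rte_dev_iterator iterator;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iterator, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (port_id = rte_eth_iterator_next(&iterator);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iterator))
 *			printf("matched port %u\n", port_id);
 *		rte_eth_iterator_cleanup(&iterator);
 *	}
 */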

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because filtering owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		  const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
			old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
					sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner ID=%016"PRIx64"\n",
			owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}
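
/*
 * Editor's sketch (not part of the library): rte_eth_dev_socket_id() is
 * commonly used to create the mbuf pool on the NUMA node of the port.
 * The pool name and sizes below are illustrative only.
 *
 *	int socket = rte_eth_dev_socket_id(port_id);
 *	struct rte_mempool *pool = rte_pktmbuf_pool_create("rx_pool",
 *		8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *		socket < 0 ? (int)rte_socket_id() : socket);
 */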

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Rx queue_id=%u of device with port_id=%u\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Invalid Tx queue_id=%u of device with port_id=%u\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			"Queue %u of device with port_id=%u has not been setup\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
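
/*
 * Editor's sketch (not part of the library): rte_eth_speed_bitflag() below
 * converts a numeric speed plus duplex into the bit used in
 * rte_eth_conf.link_speeds, e.g. to request a fixed 10G full-duplex link:
 *
 *	struct rte_eth_conf conf = {0};
 *
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *				      RTE_ETH_LINK_FULL_DUPLEX);
 */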

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
	case RTE_ETH_SPEED_NUM_100M:
		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
	case RTE_ETH_SPEED_NUM_1G:
		return RTE_ETH_LINK_SPEED_1G;
	case RTE_ETH_SPEED_NUM_2_5G:
		return RTE_ETH_LINK_SPEED_2_5G;
	case RTE_ETH_SPEED_NUM_5G:
		return RTE_ETH_LINK_SPEED_5G;
	case RTE_ETH_SPEED_NUM_10G:
		return RTE_ETH_LINK_SPEED_10G;
	case RTE_ETH_SPEED_NUM_20G:
		return RTE_ETH_LINK_SPEED_20G;
	case RTE_ETH_SPEED_NUM_25G:
		return RTE_ETH_LINK_SPEED_25G;
	case RTE_ETH_SPEED_NUM_40G:
		return RTE_ETH_LINK_SPEED_40G;
	case RTE_ETH_SPEED_NUM_50G:
		return RTE_ETH_LINK_SPEED_50G;
	case RTE_ETH_SPEED_NUM_56G:
		return RTE_ETH_LINK_SPEED_56G;
	case RTE_ETH_SPEED_NUM_100G:
		return RTE_ETH_LINK_SPEED_100G;
	case RTE_ETH_SPEED_NUM_200G:
		return RTE_ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
			   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				" %u != %u is not allowed\n",
				port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"> max allowed value %u\n", port_id, config_size,
			dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			"< min allowed value %u\n", port_id, config_size,
			(unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 *
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
			  uint64_t set_offloads, const char *offload_type,
			  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		     uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure() to avoid any non-anticipated behaviour.
	 * It is set to 1 when dev_configure() is executed successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
			sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
	    (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}
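
/*
 * Editor's sketch (not part of the library): the usual application bring-up
 * sequence built on rte_eth_dev_configure() and the setup/start functions in
 * this file. Queue counts, descriptor counts and the pre-created mbuf_pool
 * are illustrative only.
 *
 *	struct rte_eth_conf conf = {0};
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
 *		return -1;
 *	if (rte_eth_rx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool) != 0)
 *		return -1;
 *	if (rte_eth_tx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), NULL) != 0)
 *		return -1;
 *	if (rte_eth_dev_start(port_id) != 0)
 *		return -1;
 */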

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
		    struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore MAC now if device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret == 0)
		dev->data->dev_started = 0;
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}
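
/*
 * Editor's sketch (not part of the library): orderly application-side
 * shutdown using rte_eth_dev_stop() above and rte_eth_dev_close() below;
 * close requires a stopped port in the primary process.
 *
 *	int ret = rte_eth_dev_stop(port_id);
 *	if (ret != 0)
 *		printf("stop of port %u failed: %s\n", port_id, rte_strerror(-ret));
 *	ret = rte_eth_dev_close(port_id);
 *	if (ret != 0)
 *		printf("close of port %u failed: %s\n", port_id, rte_strerror(-ret));
 */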

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*
	 * Secondary process needs to close device to release process private
	 * resources. But secondary process should not be obliged to wait
	 * for device stop before closing ethdev.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
	    dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}
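
/*
 * Editor's sketch (not part of the library): the split-buffer Rx
 * configuration that the check helper below validates. The two mempools
 * (hdr_pool, data_pool), the segment sizes and socket_id are illustrative.
 *
 *	union rte_eth_rxseg segs[2] = {
 *		{ .split = { .mp = hdr_pool, .length = 64 } },
 *		{ .split = { .mp = data_pool } },
 *	};
 *	struct rte_eth_rxconf rxconf = {
 *		.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT,
 *		.rx_nseg = 2,
 *		.rx_seg = segs,
 *	};
 *
 *	rte_eth_rx_queue_setup(port_id, 0, 1024, socket_id, &rxconf, NULL);
 */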

static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			"Requested Rx segments %u exceed supported %u\n",
			n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			return -EINVAL;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			return -ENOTSUP;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				return -ENOTSUP;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					offset,
					seg_capa->offset_align_log2);
				return -EINVAL;
			}
		}
		if (mpl->private_data_size <
		    sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR,
				"%s private_data_size %u < %u\n",
				mpl->name, mpl->private_data_size,
				(unsigned int)sizeof
					(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		length = length != 0 ? length : *mbp_buf_size;
		if (*mbp_buf_size < length + offset) {
			RTE_ETHDEV_LOG(ERR,
				"%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
				mpl->name, *mbp_buf_size,
				length + offset, length, offset);
			return -EINVAL;
		}
	}
	return 0;
}

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (mp != NULL) {
		/* Single pool configuration check. */
		if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Ambiguous segment configuration\n");
			return -EINVAL;
		}
		/*
		 * Check the size of the mbuf data buffer, this value
		 * must be provided in the private data of the memory pool.
		 * First check that the memory pool(s) has a valid private data.
		 */
		if (mp->private_data_size <
		    sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
				mp->name, mp->private_data_size,
				(unsigned int)
					sizeof(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
		if (mbp_buf_size < dev_info.min_rx_bufsize +
				   RTE_PKTMBUF_HEADROOM) {
			RTE_ETHDEV_LOG(ERR,
				"%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
				mp->name, mbp_buf_size,
				RTE_PKTMBUF_HEADROOM +
					dev_info.min_rx_bufsize,
				RTE_PKTMBUF_HEADROOM,
				dev_info.min_rx_bufsize);
			return -EINVAL;
		}
	} else {
		const struct rte_eth_rxseg_split *rx_seg;
		uint16_t n_seg;

		/* Extended multi-segment configuration check. */
		if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
			RTE_ETHDEV_LOG(ERR,
				"Memory pool is null and no extended configuration provided\n");
			return -EINVAL;
		}

		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
		n_seg = rx_conf->rx_nseg;

		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
			ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
							   &mbp_buf_size,
							   &dev_info);
			if (ret != 0)
				return ret;
		} else {
			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
			return -EINVAL;
		}
	}

	/* Use default specified by driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
	    nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
	    nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
	    !(dev_info.dev_capa &
	      RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
	    (dev->data->rx_queue_state[rx_queue_id] !=
	     RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_rxq_release(dev, rx_queue_id);

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, rx_queue_id, local_conf.offloads,
			dev_info.rx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	if (local_conf.share_group > 0 &&
	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
			port_id, rx_queue_id, local_conf.share_group);
		return -EINVAL;
	}

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	/* Get the real Ethernet overhead length */
	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t overhead_len;
		uint32_t max_rx_pktlen;
		int ret;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->mtu + overhead_len;
		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			return ret;
	}

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, &local_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
		rx_conf, ret);
	return eth_err(port_id, ret);
}

int
rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
			       uint16_t nb_rx_desc,
			       const struct rte_eth_hairpin_conf *conf)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_hairpin_cap cap;
	int i;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
			port_id);
		return -EINVAL;
	}

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
				-ENOTSUP);
	/* if nb_rx_desc is zero use max number of desc from the driver. */
*/ 1943 if (nb_rx_desc == 0) 1944 nb_rx_desc = cap.max_nb_desc; 1945 if (nb_rx_desc > cap.max_nb_desc) { 1946 RTE_ETHDEV_LOG(ERR, 1947 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n", 1948 nb_rx_desc, cap.max_nb_desc); 1949 return -EINVAL; 1950 } 1951 if (conf->peer_count > cap.max_rx_2_tx) { 1952 RTE_ETHDEV_LOG(ERR, 1953 "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n", 1954 conf->peer_count, cap.max_rx_2_tx); 1955 return -EINVAL; 1956 } 1957 if (conf->peer_count == 0) { 1958 RTE_ETHDEV_LOG(ERR, 1959 "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n", 1960 conf->peer_count); 1961 return -EINVAL; 1962 } 1963 for (i = 0, count = 0; i < dev->data->nb_rx_queues && 1964 cap.max_nb_queues != UINT16_MAX; i++) { 1965 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) 1966 count++; 1967 } 1968 if (count > cap.max_nb_queues) { 1969 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n", 1970 cap.max_nb_queues); 1971 return -EINVAL; 1972 } 1973 if (dev->data->dev_started) 1974 return -EBUSY; 1975 eth_dev_rxq_release(dev, rx_queue_id); 1976 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, 1977 nb_rx_desc, conf); 1978 if (ret == 0) 1979 dev->data->rx_queue_state[rx_queue_id] = 1980 RTE_ETH_QUEUE_STATE_HAIRPIN; 1981 return eth_err(port_id, ret); 1982 } 1983 1984 int 1985 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 1986 uint16_t nb_tx_desc, unsigned int socket_id, 1987 const struct rte_eth_txconf *tx_conf) 1988 { 1989 struct rte_eth_dev *dev; 1990 struct rte_eth_dev_info dev_info; 1991 struct rte_eth_txconf local_conf; 1992 int ret; 1993 1994 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 1995 dev = &rte_eth_devices[port_id]; 1996 1997 if (tx_queue_id >= dev->data->nb_tx_queues) { 1998 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 1999 return -EINVAL; 2000 } 2001 2002 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); 2003 2004 ret = rte_eth_dev_info_get(port_id, &dev_info); 2005 if (ret != 0) 2006 return ret; 2007 2008 /* Use default specified by driver, if nb_tx_desc is zero */ 2009 if (nb_tx_desc == 0) { 2010 nb_tx_desc = dev_info.default_txportconf.ring_size; 2011 /* If driver default is zero, fall back on EAL default */ 2012 if (nb_tx_desc == 0) 2013 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE; 2014 } 2015 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max || 2016 nb_tx_desc < dev_info.tx_desc_lim.nb_min || 2017 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { 2018 RTE_ETHDEV_LOG(ERR, 2019 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", 2020 nb_tx_desc, dev_info.tx_desc_lim.nb_max, 2021 dev_info.tx_desc_lim.nb_min, 2022 dev_info.tx_desc_lim.nb_align); 2023 return -EINVAL; 2024 } 2025 2026 if (dev->data->dev_started && 2027 !(dev_info.dev_capa & 2028 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP)) 2029 return -EBUSY; 2030 2031 if (dev->data->dev_started && 2032 (dev->data->tx_queue_state[tx_queue_id] != 2033 RTE_ETH_QUEUE_STATE_STOPPED)) 2034 return -EBUSY; 2035 2036 eth_dev_txq_release(dev, tx_queue_id); 2037 2038 if (tx_conf == NULL) 2039 tx_conf = &dev_info.default_txconf; 2040 2041 local_conf = *tx_conf; 2042 2043 /* 2044 * If an offloading has already been enabled in 2045 * rte_eth_dev_configure(), it has been enabled on all queues, 2046 * so there is no need to enable it in this queue again.
2047 * The local_conf.offloads input to underlying PMD only carries 2048 * those offloadings which are only enabled on this queue and 2049 * not enabled on all queues. 2050 */ 2051 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; 2052 2053 /* 2054 * New added offloadings for this queue are those not enabled in 2055 * rte_eth_dev_configure() and they must be per-queue type. 2056 * A pure per-port offloading can't be enabled on a queue while 2057 * disabled on another queue. A pure per-port offloading can't 2058 * be enabled for any queue as new added one if it hasn't been 2059 * enabled in rte_eth_dev_configure(). 2060 */ 2061 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) != 2062 local_conf.offloads) { 2063 RTE_ETHDEV_LOG(ERR, 2064 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " 2065 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", 2066 port_id, tx_queue_id, local_conf.offloads, 2067 dev_info.tx_queue_offload_capa, 2068 __func__); 2069 return -EINVAL; 2070 } 2071 2072 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf); 2073 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, 2074 tx_queue_id, nb_tx_desc, socket_id, &local_conf)); 2075 } 2076 2077 int 2078 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, 2079 uint16_t nb_tx_desc, 2080 const struct rte_eth_hairpin_conf *conf) 2081 { 2082 struct rte_eth_dev *dev; 2083 struct rte_eth_hairpin_cap cap; 2084 int i; 2085 int count; 2086 int ret; 2087 2088 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2089 dev = &rte_eth_devices[port_id]; 2090 2091 if (tx_queue_id >= dev->data->nb_tx_queues) { 2092 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id); 2093 return -EINVAL; 2094 } 2095 2096 if (conf == NULL) { 2097 RTE_ETHDEV_LOG(ERR, 2098 "Cannot setup ethdev port %u Tx hairpin queue from NULL config\n", 2099 port_id); 2100 return -EINVAL; 2101 } 2102 2103 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap); 2104 if (ret != 0) 2105 return ret; 2106 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup, 2107 -ENOTSUP); 2108 /* if nb_tx_desc is zero use max number of desc from the driver.
*/ 2109 if (nb_tx_desc == 0) 2110 nb_tx_desc = cap.max_nb_desc; 2111 if (nb_tx_desc > cap.max_nb_desc) { 2112 RTE_ETHDEV_LOG(ERR, 2113 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n", 2114 nb_tx_desc, cap.max_nb_desc); 2115 return -EINVAL; 2116 } 2117 if (conf->peer_count > cap.max_tx_2_rx) { 2118 RTE_ETHDEV_LOG(ERR, 2119 "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n", 2120 conf->peer_count, cap.max_tx_2_rx); 2121 return -EINVAL; 2122 } 2123 if (conf->peer_count == 0) { 2124 RTE_ETHDEV_LOG(ERR, 2125 "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n", 2126 conf->peer_count); 2127 return -EINVAL; 2128 } 2129 for (i = 0, count = 0; i < dev->data->nb_tx_queues && 2130 cap.max_nb_queues != UINT16_MAX; i++) { 2131 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) 2132 count++; 2133 } 2134 if (count > cap.max_nb_queues) { 2135 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n", 2136 cap.max_nb_queues); 2137 return -EINVAL; 2138 } 2139 if (dev->data->dev_started) 2140 return -EBUSY; 2141 eth_dev_txq_release(dev, tx_queue_id); 2142 ret = (*dev->dev_ops->tx_hairpin_queue_setup) 2143 (dev, tx_queue_id, nb_tx_desc, conf); 2144 if (ret == 0) 2145 dev->data->tx_queue_state[tx_queue_id] = 2146 RTE_ETH_QUEUE_STATE_HAIRPIN; 2147 return eth_err(port_id, ret); 2148 } 2149 2150 int 2151 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port) 2152 { 2153 struct rte_eth_dev *dev; 2154 int ret; 2155 2156 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2157 dev = &rte_eth_devices[tx_port]; 2158 2159 if (dev->data->dev_started == 0) { 2160 RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port); 2161 return -EBUSY; 2162 } 2163 2164 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP); 2165 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); 2166 if (ret != 0) 2167 RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d" 2168 " to Rx %d (%d - all ports)\n", 2169 tx_port, rx_port, RTE_MAX_ETHPORTS); 2170 2171 return ret; 2172 } 2173 2174 int 2175 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port) 2176 { 2177 struct rte_eth_dev *dev; 2178 int ret; 2179 2180 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV); 2181 dev = &rte_eth_devices[tx_port]; 2182 2183 if (dev->data->dev_started == 0) { 2184 RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port); 2185 return -EBUSY; 2186 } 2187 2188 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP); 2189 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); 2190 if (ret != 0) 2191 RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d" 2192 " from Rx %d (%d - all ports)\n", 2193 tx_port, rx_port, RTE_MAX_ETHPORTS); 2194 2195 return ret; 2196 } 2197 2198 int 2199 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, 2200 size_t len, uint32_t direction) 2201 { 2202 struct rte_eth_dev *dev; 2203 int ret; 2204 2205 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2206 dev = &rte_eth_devices[port_id]; 2207 2208 if (peer_ports == NULL) { 2209 RTE_ETHDEV_LOG(ERR, 2210 "Cannot get ethdev port %u hairpin peer ports to NULL\n", 2211 port_id); 2212 return -EINVAL; 2213 } 2214 2215 if (len == 0) { 2216 RTE_ETHDEV_LOG(ERR, 2217 "Cannot get ethdev port %u hairpin peer ports to array with zero size\n", 2218 port_id); 2219 return -EINVAL; 2220 } 2221 2222 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports, 2223 -ENOTSUP); 2224 2225 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, 2226 len, direction); 2227 if (ret < 0) 2228
RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n", 2229 port_id, direction ? "Rx" : "Tx"); 2230 2231 return ret; 2232 } 2233 2234 void 2235 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, 2236 void *userdata __rte_unused) 2237 { 2238 rte_pktmbuf_free_bulk(pkts, unsent); 2239 } 2240 2241 void 2242 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, 2243 void *userdata) 2244 { 2245 uint64_t *count = userdata; 2246 2247 rte_pktmbuf_free_bulk(pkts, unsent); 2248 *count += unsent; 2249 } 2250 2251 int 2252 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, 2253 buffer_tx_error_fn cbfn, void *userdata) 2254 { 2255 if (buffer == NULL) { 2256 RTE_ETHDEV_LOG(ERR, 2257 "Cannot set Tx buffer error callback to NULL buffer\n"); 2258 return -EINVAL; 2259 } 2260 2261 buffer->error_callback = cbfn; 2262 buffer->error_userdata = userdata; 2263 return 0; 2264 } 2265 2266 int 2267 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size) 2268 { 2269 int ret = 0; 2270 2271 if (buffer == NULL) { 2272 RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n"); 2273 return -EINVAL; 2274 } 2275 2276 buffer->size = size; 2277 if (buffer->error_callback == NULL) { 2278 ret = rte_eth_tx_buffer_set_err_callback( 2279 buffer, rte_eth_tx_buffer_drop_callback, NULL); 2280 } 2281 2282 return ret; 2283 } 2284 2285 int 2286 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) 2287 { 2288 struct rte_eth_dev *dev; 2289 int ret; 2290 2291 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2292 dev = &rte_eth_devices[port_id]; 2293 2294 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); 2295 2296 /* Call driver to free pending mbufs. */ 2297 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], 2298 free_cnt); 2299 return eth_err(port_id, ret); 2300 } 2301 2302 int 2303 rte_eth_promiscuous_enable(uint16_t port_id) 2304 { 2305 struct rte_eth_dev *dev; 2306 int diag = 0; 2307 2308 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2309 dev = &rte_eth_devices[port_id]; 2310 2311 if (dev->data->promiscuous == 1) 2312 return 0; 2313 2314 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); 2315 2316 diag = (*dev->dev_ops->promiscuous_enable)(dev); 2317 dev->data->promiscuous = (diag == 0) ? 
1 : 0; 2318 2319 return eth_err(port_id, diag); 2320 } 2321 2322 int 2323 rte_eth_promiscuous_disable(uint16_t port_id) 2324 { 2325 struct rte_eth_dev *dev; 2326 int diag = 0; 2327 2328 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2329 dev = &rte_eth_devices[port_id]; 2330 2331 if (dev->data->promiscuous == 0) 2332 return 0; 2333 2334 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); 2335 2336 dev->data->promiscuous = 0; 2337 diag = (*dev->dev_ops->promiscuous_disable)(dev); 2338 if (diag != 0) 2339 dev->data->promiscuous = 1; 2340 2341 return eth_err(port_id, diag); 2342 } 2343 2344 int 2345 rte_eth_promiscuous_get(uint16_t port_id) 2346 { 2347 struct rte_eth_dev *dev; 2348 2349 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2350 dev = &rte_eth_devices[port_id]; 2351 2352 return dev->data->promiscuous; 2353 } 2354 2355 int 2356 rte_eth_allmulticast_enable(uint16_t port_id) 2357 { 2358 struct rte_eth_dev *dev; 2359 int diag; 2360 2361 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2362 dev = &rte_eth_devices[port_id]; 2363 2364 if (dev->data->all_multicast == 1) 2365 return 0; 2366 2367 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); 2368 diag = (*dev->dev_ops->allmulticast_enable)(dev); 2369 dev->data->all_multicast = (diag == 0) ? 1 : 0; 2370 2371 return eth_err(port_id, diag); 2372 } 2373 2374 int 2375 rte_eth_allmulticast_disable(uint16_t port_id) 2376 { 2377 struct rte_eth_dev *dev; 2378 int diag; 2379 2380 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2381 dev = &rte_eth_devices[port_id]; 2382 2383 if (dev->data->all_multicast == 0) 2384 return 0; 2385 2386 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); 2387 dev->data->all_multicast = 0; 2388 diag = (*dev->dev_ops->allmulticast_disable)(dev); 2389 if (diag != 0) 2390 dev->data->all_multicast = 1; 2391 2392 return eth_err(port_id, diag); 2393 } 2394 2395 int 2396 rte_eth_allmulticast_get(uint16_t port_id) 2397 { 2398 struct rte_eth_dev *dev; 2399 2400 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2401 dev = &rte_eth_devices[port_id]; 2402 2403 return dev->data->all_multicast; 2404 } 2405 2406 int 2407 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) 2408 { 2409 struct rte_eth_dev *dev; 2410 2411 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2412 dev = &rte_eth_devices[port_id]; 2413 2414 if (eth_link == NULL) { 2415 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2416 port_id); 2417 return -EINVAL; 2418 } 2419 2420 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2421 rte_eth_linkstatus_get(dev, eth_link); 2422 else { 2423 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2424 (*dev->dev_ops->link_update)(dev, 1); 2425 *eth_link = dev->data->dev_link; 2426 } 2427 2428 return 0; 2429 } 2430 2431 int 2432 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) 2433 { 2434 struct rte_eth_dev *dev; 2435 2436 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2437 dev = &rte_eth_devices[port_id]; 2438 2439 if (eth_link == NULL) { 2440 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n", 2441 port_id); 2442 return -EINVAL; 2443 } 2444 2445 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) 2446 rte_eth_linkstatus_get(dev, eth_link); 2447 else { 2448 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); 2449 (*dev->dev_ops->link_update)(dev, 0); 2450 *eth_link = dev->data->dev_link; 2451 } 2452 2453 return 0; 2454 } 2455 2456 const 
char * 2457 rte_eth_link_speed_to_str(uint32_t link_speed) 2458 { 2459 switch (link_speed) { 2460 case RTE_ETH_SPEED_NUM_NONE: return "None"; 2461 case RTE_ETH_SPEED_NUM_10M: return "10 Mbps"; 2462 case RTE_ETH_SPEED_NUM_100M: return "100 Mbps"; 2463 case RTE_ETH_SPEED_NUM_1G: return "1 Gbps"; 2464 case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps"; 2465 case RTE_ETH_SPEED_NUM_5G: return "5 Gbps"; 2466 case RTE_ETH_SPEED_NUM_10G: return "10 Gbps"; 2467 case RTE_ETH_SPEED_NUM_20G: return "20 Gbps"; 2468 case RTE_ETH_SPEED_NUM_25G: return "25 Gbps"; 2469 case RTE_ETH_SPEED_NUM_40G: return "40 Gbps"; 2470 case RTE_ETH_SPEED_NUM_50G: return "50 Gbps"; 2471 case RTE_ETH_SPEED_NUM_56G: return "56 Gbps"; 2472 case RTE_ETH_SPEED_NUM_100G: return "100 Gbps"; 2473 case RTE_ETH_SPEED_NUM_200G: return "200 Gbps"; 2474 case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown"; 2475 default: return "Invalid"; 2476 } 2477 } 2478 2479 int 2480 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link) 2481 { 2482 if (str == NULL) { 2483 RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n"); 2484 return -EINVAL; 2485 } 2486 2487 if (len == 0) { 2488 RTE_ETHDEV_LOG(ERR, 2489 "Cannot convert link to string with zero size\n"); 2490 return -EINVAL; 2491 } 2492 2493 if (eth_link == NULL) { 2494 RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n"); 2495 return -EINVAL; 2496 } 2497 2498 if (eth_link->link_status == RTE_ETH_LINK_DOWN) 2499 return snprintf(str, len, "Link down"); 2500 else 2501 return snprintf(str, len, "Link up at %s %s %s", 2502 rte_eth_link_speed_to_str(eth_link->link_speed), 2503 (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 2504 "FDX" : "HDX", 2505 (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ? 2506 "Autoneg" : "Fixed"); 2507 } 2508 2509 int 2510 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats) 2511 { 2512 struct rte_eth_dev *dev; 2513 2514 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2515 dev = &rte_eth_devices[port_id]; 2516 2517 if (stats == NULL) { 2518 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n", 2519 port_id); 2520 return -EINVAL; 2521 } 2522 2523 memset(stats, 0, sizeof(*stats)); 2524 2525 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); 2526 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; 2527 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); 2528 } 2529 2530 int 2531 rte_eth_stats_reset(uint16_t port_id) 2532 { 2533 struct rte_eth_dev *dev; 2534 int ret; 2535 2536 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2537 dev = &rte_eth_devices[port_id]; 2538 2539 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); 2540 ret = (*dev->dev_ops->stats_reset)(dev); 2541 if (ret != 0) 2542 return eth_err(port_id, ret); 2543 2544 dev->data->rx_mbuf_alloc_failed = 0; 2545 2546 return 0; 2547 } 2548 2549 static inline int 2550 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) 2551 { 2552 uint16_t nb_rxqs, nb_txqs; 2553 int count; 2554 2555 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2556 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2557 2558 count = RTE_NB_STATS; 2559 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { 2560 count += nb_rxqs * RTE_NB_RXQ_STATS; 2561 count += nb_txqs * RTE_NB_TXQ_STATS; 2562 } 2563 2564 return count; 2565 } 2566 2567 static int 2568 eth_dev_get_xstats_count(uint16_t port_id) 2569 { 2570 struct rte_eth_dev *dev; 2571 int count; 2572 2573 
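	/*
	 * Illustrative caller-side sketch (not part of this file): the count
	 * computed below (basic stats plus driver xstats) is what applications
	 * use to size their arrays before reading every extended statistic.
	 * "port_id" and the malloc()-based storage are hypothetical,
	 * application-chosen names.
	 *
	 *	int nb = rte_eth_xstats_get(port_id, NULL, 0);
	 *
	 *	if (nb > 0) {
	 *		struct rte_eth_xstat *xs = malloc(nb * sizeof(*xs));
	 *		struct rte_eth_xstat_name *xn = malloc(nb * sizeof(*xn));
	 *
	 *		if (xs != NULL && xn != NULL &&
	 *		    rte_eth_xstats_get_names(port_id, xn, nb) == nb &&
	 *		    rte_eth_xstats_get(port_id, xs, nb) == nb) {
	 *			int i;
	 *
	 *			for (i = 0; i < nb; i++)
	 *				printf("%s: %" PRIu64 "\n",
	 *				       xn[xs[i].id].name, xs[i].value);
	 *		}
	 *		free(xs);
	 *		free(xn);
	 *	}
	 */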
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2574 dev = &rte_eth_devices[port_id]; 2575 if (dev->dev_ops->xstats_get_names != NULL) { 2576 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); 2577 if (count < 0) 2578 return eth_err(port_id, count); 2579 } else 2580 count = 0; 2581 2582 2583 count += eth_dev_get_xstats_basic_count(dev); 2584 2585 return count; 2586 } 2587 2588 int 2589 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, 2590 uint64_t *id) 2591 { 2592 int cnt_xstats, idx_xstat; 2593 2594 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2595 2596 if (xstat_name == NULL) { 2597 RTE_ETHDEV_LOG(ERR, 2598 "Cannot get ethdev port %u xstats ID from NULL xstat name\n", 2599 port_id); 2600 return -ENOMEM; 2601 } 2602 2603 if (id == NULL) { 2604 RTE_ETHDEV_LOG(ERR, 2605 "Cannot get ethdev port %u xstats ID to NULL\n", 2606 port_id); 2607 return -ENOMEM; 2608 } 2609 2610 /* Get count */ 2611 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); 2612 if (cnt_xstats < 0) { 2613 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n"); 2614 return -ENODEV; 2615 } 2616 2617 /* Get id-name lookup table */ 2618 struct rte_eth_xstat_name xstats_names[cnt_xstats]; 2619 2620 if (cnt_xstats != rte_eth_xstats_get_names_by_id( 2621 port_id, xstats_names, cnt_xstats, NULL)) { 2622 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n"); 2623 return -1; 2624 } 2625 2626 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { 2627 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { 2628 *id = idx_xstat; 2629 return 0; 2630 }; 2631 } 2632 2633 return -EINVAL; 2634 } 2635 2636 /* retrieve basic stats names */ 2637 static int 2638 eth_basic_stats_get_names(struct rte_eth_dev *dev, 2639 struct rte_eth_xstat_name *xstats_names) 2640 { 2641 int cnt_used_entries = 0; 2642 uint32_t idx, id_queue; 2643 uint16_t num_q; 2644 2645 for (idx = 0; idx < RTE_NB_STATS; idx++) { 2646 strlcpy(xstats_names[cnt_used_entries].name, 2647 eth_dev_stats_strings[idx].name, 2648 sizeof(xstats_names[0].name)); 2649 cnt_used_entries++; 2650 } 2651 2652 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2653 return cnt_used_entries; 2654 2655 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2656 for (id_queue = 0; id_queue < num_q; id_queue++) { 2657 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { 2658 snprintf(xstats_names[cnt_used_entries].name, 2659 sizeof(xstats_names[0].name), 2660 "rx_q%u_%s", 2661 id_queue, eth_dev_rxq_stats_strings[idx].name); 2662 cnt_used_entries++; 2663 } 2664 2665 } 2666 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2667 for (id_queue = 0; id_queue < num_q; id_queue++) { 2668 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { 2669 snprintf(xstats_names[cnt_used_entries].name, 2670 sizeof(xstats_names[0].name), 2671 "tx_q%u_%s", 2672 id_queue, eth_dev_txq_stats_strings[idx].name); 2673 cnt_used_entries++; 2674 } 2675 } 2676 return cnt_used_entries; 2677 } 2678 2679 /* retrieve ethdev extended statistics names */ 2680 int 2681 rte_eth_xstats_get_names_by_id(uint16_t port_id, 2682 struct rte_eth_xstat_name *xstats_names, unsigned int size, 2683 uint64_t *ids) 2684 { 2685 struct rte_eth_xstat_name *xstats_names_copy; 2686 unsigned int no_basic_stat_requested = 1; 2687 unsigned int no_ext_stat_requested = 1; 2688 unsigned int expected_entries; 2689 unsigned int basic_count; 2690 struct rte_eth_dev *dev; 2691 unsigned int i; 2692 int ret; 2693 2694 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2695 
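	/*
	 * Illustrative caller-side sketch (not part of this file): resolving a
	 * single counter by name with rte_eth_xstats_get_id_by_name() and then
	 * polling it by id, which avoids re-reading the whole xstats table on
	 * every sample. The counter name and "port_id" are application-chosen
	 * examples.
	 *
	 *	uint64_t id, value;
	 *
	 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_errors", &id) == 0 &&
	 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
	 *		printf("rx_errors: %" PRIu64 "\n", value);
	 */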
dev = &rte_eth_devices[port_id]; 2696 2697 basic_count = eth_dev_get_xstats_basic_count(dev); 2698 ret = eth_dev_get_xstats_count(port_id); 2699 if (ret < 0) 2700 return ret; 2701 expected_entries = (unsigned int)ret; 2702 2703 /* Return max number of stats if no ids given */ 2704 if (!ids) { 2705 if (!xstats_names) 2706 return expected_entries; 2707 else if (xstats_names && size < expected_entries) 2708 return expected_entries; 2709 } 2710 2711 if (ids && !xstats_names) 2712 return -EINVAL; 2713 2714 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { 2715 uint64_t ids_copy[size]; 2716 2717 for (i = 0; i < size; i++) { 2718 if (ids[i] < basic_count) { 2719 no_basic_stat_requested = 0; 2720 break; 2721 } 2722 2723 /* 2724 * Convert ids to xstats ids that PMD knows. 2725 * ids known by user are basic + extended stats. 2726 */ 2727 ids_copy[i] = ids[i] - basic_count; 2728 } 2729 2730 if (no_basic_stat_requested) 2731 return (*dev->dev_ops->xstats_get_names_by_id)(dev, 2732 ids_copy, xstats_names, size); 2733 } 2734 2735 /* Retrieve all stats */ 2736 if (!ids) { 2737 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names, 2738 expected_entries); 2739 if (num_stats < 0 || num_stats > (int)expected_entries) 2740 return num_stats; 2741 else 2742 return expected_entries; 2743 } 2744 2745 xstats_names_copy = calloc(expected_entries, 2746 sizeof(struct rte_eth_xstat_name)); 2747 2748 if (!xstats_names_copy) { 2749 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n"); 2750 return -ENOMEM; 2751 } 2752 2753 if (ids) { 2754 for (i = 0; i < size; i++) { 2755 if (ids[i] >= basic_count) { 2756 no_ext_stat_requested = 0; 2757 break; 2758 } 2759 } 2760 } 2761 2762 /* Fill xstats_names_copy structure */ 2763 if (ids && no_ext_stat_requested) { 2764 eth_basic_stats_get_names(dev, xstats_names_copy); 2765 } else { 2766 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy, 2767 expected_entries); 2768 if (ret < 0) { 2769 free(xstats_names_copy); 2770 return ret; 2771 } 2772 } 2773 2774 /* Filter stats */ 2775 for (i = 0; i < size; i++) { 2776 if (ids[i] >= expected_entries) { 2777 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 2778 free(xstats_names_copy); 2779 return -1; 2780 } 2781 xstats_names[i] = xstats_names_copy[ids[i]]; 2782 } 2783 2784 free(xstats_names_copy); 2785 return size; 2786 } 2787 2788 int 2789 rte_eth_xstats_get_names(uint16_t port_id, 2790 struct rte_eth_xstat_name *xstats_names, 2791 unsigned int size) 2792 { 2793 struct rte_eth_dev *dev; 2794 int cnt_used_entries; 2795 int cnt_expected_entries; 2796 int cnt_driver_entries; 2797 2798 cnt_expected_entries = eth_dev_get_xstats_count(port_id); 2799 if (xstats_names == NULL || cnt_expected_entries < 0 || 2800 (int)size < cnt_expected_entries) 2801 return cnt_expected_entries; 2802 2803 /* port_id checked in eth_dev_get_xstats_count() */ 2804 dev = &rte_eth_devices[port_id]; 2805 2806 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); 2807 2808 if (dev->dev_ops->xstats_get_names != NULL) { 2809 /* If there are any driver-specific xstats, append them 2810 * to end of list. 
2811 */ 2812 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( 2813 dev, 2814 xstats_names + cnt_used_entries, 2815 size - cnt_used_entries); 2816 if (cnt_driver_entries < 0) 2817 return eth_err(port_id, cnt_driver_entries); 2818 cnt_used_entries += cnt_driver_entries; 2819 } 2820 2821 return cnt_used_entries; 2822 } 2823 2824 2825 static int 2826 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats) 2827 { 2828 struct rte_eth_dev *dev; 2829 struct rte_eth_stats eth_stats; 2830 unsigned int count = 0, i, q; 2831 uint64_t val, *stats_ptr; 2832 uint16_t nb_rxqs, nb_txqs; 2833 int ret; 2834 2835 ret = rte_eth_stats_get(port_id, ð_stats); 2836 if (ret < 0) 2837 return ret; 2838 2839 dev = &rte_eth_devices[port_id]; 2840 2841 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2842 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); 2843 2844 /* global stats */ 2845 for (i = 0; i < RTE_NB_STATS; i++) { 2846 stats_ptr = RTE_PTR_ADD(ð_stats, 2847 eth_dev_stats_strings[i].offset); 2848 val = *stats_ptr; 2849 xstats[count++].value = val; 2850 } 2851 2852 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) 2853 return count; 2854 2855 /* per-rxq stats */ 2856 for (q = 0; q < nb_rxqs; q++) { 2857 for (i = 0; i < RTE_NB_RXQ_STATS; i++) { 2858 stats_ptr = RTE_PTR_ADD(ð_stats, 2859 eth_dev_rxq_stats_strings[i].offset + 2860 q * sizeof(uint64_t)); 2861 val = *stats_ptr; 2862 xstats[count++].value = val; 2863 } 2864 } 2865 2866 /* per-txq stats */ 2867 for (q = 0; q < nb_txqs; q++) { 2868 for (i = 0; i < RTE_NB_TXQ_STATS; i++) { 2869 stats_ptr = RTE_PTR_ADD(ð_stats, 2870 eth_dev_txq_stats_strings[i].offset + 2871 q * sizeof(uint64_t)); 2872 val = *stats_ptr; 2873 xstats[count++].value = val; 2874 } 2875 } 2876 return count; 2877 } 2878 2879 /* retrieve ethdev extended statistics */ 2880 int 2881 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, 2882 uint64_t *values, unsigned int size) 2883 { 2884 unsigned int no_basic_stat_requested = 1; 2885 unsigned int no_ext_stat_requested = 1; 2886 unsigned int num_xstats_filled; 2887 unsigned int basic_count; 2888 uint16_t expected_entries; 2889 struct rte_eth_dev *dev; 2890 unsigned int i; 2891 int ret; 2892 2893 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2894 dev = &rte_eth_devices[port_id]; 2895 2896 ret = eth_dev_get_xstats_count(port_id); 2897 if (ret < 0) 2898 return ret; 2899 expected_entries = (uint16_t)ret; 2900 struct rte_eth_xstat xstats[expected_entries]; 2901 basic_count = eth_dev_get_xstats_basic_count(dev); 2902 2903 /* Return max number of stats if no ids given */ 2904 if (!ids) { 2905 if (!values) 2906 return expected_entries; 2907 else if (values && size < expected_entries) 2908 return expected_entries; 2909 } 2910 2911 if (ids && !values) 2912 return -EINVAL; 2913 2914 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { 2915 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); 2916 uint64_t ids_copy[size]; 2917 2918 for (i = 0; i < size; i++) { 2919 if (ids[i] < basic_count) { 2920 no_basic_stat_requested = 0; 2921 break; 2922 } 2923 2924 /* 2925 * Convert ids to xstats ids that PMD knows. 2926 * ids known by user are basic + extended stats. 
2927 */ 2928 ids_copy[i] = ids[i] - basic_count; 2929 } 2930 2931 if (no_basic_stat_requested) 2932 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, 2933 values, size); 2934 } 2935 2936 if (ids) { 2937 for (i = 0; i < size; i++) { 2938 if (ids[i] >= basic_count) { 2939 no_ext_stat_requested = 0; 2940 break; 2941 } 2942 } 2943 } 2944 2945 /* Fill the xstats structure */ 2946 if (ids && no_ext_stat_requested) 2947 ret = eth_basic_stats_get(port_id, xstats); 2948 else 2949 ret = rte_eth_xstats_get(port_id, xstats, expected_entries); 2950 2951 if (ret < 0) 2952 return ret; 2953 num_xstats_filled = (unsigned int)ret; 2954 2955 /* Return all stats */ 2956 if (!ids) { 2957 for (i = 0; i < num_xstats_filled; i++) 2958 values[i] = xstats[i].value; 2959 return expected_entries; 2960 } 2961 2962 /* Filter stats */ 2963 for (i = 0; i < size; i++) { 2964 if (ids[i] >= expected_entries) { 2965 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n"); 2966 return -1; 2967 } 2968 values[i] = xstats[ids[i]].value; 2969 } 2970 return size; 2971 } 2972 2973 int 2974 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, 2975 unsigned int n) 2976 { 2977 struct rte_eth_dev *dev; 2978 unsigned int count, i; 2979 signed int xcount = 0; 2980 int ret; 2981 2982 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 2983 if (xstats == NULL && n > 0) 2984 return -EINVAL; 2985 dev = &rte_eth_devices[port_id]; 2986 2987 count = eth_dev_get_xstats_basic_count(dev); 2988 2989 /* implemented by the driver */ 2990 if (dev->dev_ops->xstats_get != NULL) { 2991 /* Retrieve the xstats from the driver at the end of the 2992 * xstats struct. 2993 */ 2994 xcount = (*dev->dev_ops->xstats_get)(dev, 2995 (n > count) ? xstats + count : NULL, 2996 (n > count) ? n - count : 0); 2997 2998 if (xcount < 0) 2999 return eth_err(port_id, xcount); 3000 } 3001 3002 if (n < count + xcount || xstats == NULL) 3003 return count + xcount; 3004 3005 /* now fill the xstats structure */ 3006 ret = eth_basic_stats_get(port_id, xstats); 3007 if (ret < 0) 3008 return ret; 3009 count = ret; 3010 3011 for (i = 0; i < count; i++) 3012 xstats[i].id = i; 3013 /* add an offset to driver-specific stats */ 3014 for ( ; i < count + xcount; i++) 3015 xstats[i].id += count; 3016 3017 return count + xcount; 3018 } 3019 3020 /* reset ethdev extended statistics */ 3021 int 3022 rte_eth_xstats_reset(uint16_t port_id) 3023 { 3024 struct rte_eth_dev *dev; 3025 3026 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3027 dev = &rte_eth_devices[port_id]; 3028 3029 /* implemented by the driver */ 3030 if (dev->dev_ops->xstats_reset != NULL) 3031 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); 3032 3033 /* fallback to default */ 3034 return rte_eth_stats_reset(port_id); 3035 } 3036 3037 static int 3038 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, 3039 uint8_t stat_idx, uint8_t is_rx) 3040 { 3041 struct rte_eth_dev *dev; 3042 3043 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3044 dev = &rte_eth_devices[port_id]; 3045 3046 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) 3047 return -EINVAL; 3048 3049 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) 3050 return -EINVAL; 3051 3052 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS) 3053 return -EINVAL; 3054 3055 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); 3056 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); 3057 } 3058 3059 int 3060 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, 3061 
uint8_t stat_idx) 3062 { 3063 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3064 tx_queue_id, 3065 stat_idx, STAT_QMAP_TX)); 3066 } 3067 3068 int 3069 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, 3070 uint8_t stat_idx) 3071 { 3072 return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id, 3073 rx_queue_id, 3074 stat_idx, STAT_QMAP_RX)); 3075 } 3076 3077 int 3078 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) 3079 { 3080 struct rte_eth_dev *dev; 3081 3082 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3083 dev = &rte_eth_devices[port_id]; 3084 3085 if (fw_version == NULL && fw_size > 0) { 3086 RTE_ETHDEV_LOG(ERR, 3087 "Cannot get ethdev port %u FW version to NULL when string size is non zero\n", 3088 port_id); 3089 return -EINVAL; 3090 } 3091 3092 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); 3093 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, 3094 fw_version, fw_size)); 3095 } 3096 3097 int 3098 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) 3099 { 3100 struct rte_eth_dev *dev; 3101 const struct rte_eth_desc_lim lim = { 3102 .nb_max = UINT16_MAX, 3103 .nb_min = 0, 3104 .nb_align = 1, 3105 .nb_seg_max = UINT16_MAX, 3106 .nb_mtu_seg_max = UINT16_MAX, 3107 }; 3108 int diag; 3109 3110 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3111 dev = &rte_eth_devices[port_id]; 3112 3113 if (dev_info == NULL) { 3114 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n", 3115 port_id); 3116 return -EINVAL; 3117 } 3118 3119 /* 3120 * Init dev_info before port_id check since caller does not have 3121 * return status and does not know if get is successful or not. 3122 */ 3123 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3124 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 3125 3126 dev_info->rx_desc_lim = lim; 3127 dev_info->tx_desc_lim = lim; 3128 dev_info->device = dev->device; 3129 dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - 3130 RTE_ETHER_CRC_LEN; 3131 dev_info->max_mtu = UINT16_MAX; 3132 3133 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); 3134 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); 3135 if (diag != 0) { 3136 /* Cleanup already filled in device information */ 3137 memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); 3138 return eth_err(port_id, diag); 3139 } 3140 3141 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */ 3142 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, 3143 RTE_MAX_QUEUES_PER_PORT); 3144 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, 3145 RTE_MAX_QUEUES_PER_PORT); 3146 3147 dev_info->driver_name = dev->device->driver->name; 3148 dev_info->nb_rx_queues = dev->data->nb_rx_queues; 3149 dev_info->nb_tx_queues = dev->data->nb_tx_queues; 3150 3151 dev_info->dev_flags = &dev->data->dev_flags; 3152 3153 return 0; 3154 } 3155 3156 int 3157 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) 3158 { 3159 struct rte_eth_dev *dev; 3160 3161 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3162 dev = &rte_eth_devices[port_id]; 3163 3164 if (dev_conf == NULL) { 3165 RTE_ETHDEV_LOG(ERR, 3166 "Cannot get ethdev port %u configuration to NULL\n", 3167 port_id); 3168 return -EINVAL; 3169 } 3170 3171 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); 3172 3173 return 0; 3174 } 3175 3176 int 3177 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, 3178 uint32_t 
*ptypes, int num) 3179 { 3180 int i, j; 3181 struct rte_eth_dev *dev; 3182 const uint32_t *all_ptypes; 3183 3184 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3185 dev = &rte_eth_devices[port_id]; 3186 3187 if (ptypes == NULL && num > 0) { 3188 RTE_ETHDEV_LOG(ERR, 3189 "Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n", 3190 port_id); 3191 return -EINVAL; 3192 } 3193 3194 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); 3195 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3196 3197 if (!all_ptypes) 3198 return 0; 3199 3200 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i) 3201 if (all_ptypes[i] & ptype_mask) { 3202 if (j < num) 3203 ptypes[j] = all_ptypes[i]; 3204 j++; 3205 } 3206 3207 return j; 3208 } 3209 3210 int 3211 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, 3212 uint32_t *set_ptypes, unsigned int num) 3213 { 3214 const uint32_t valid_ptype_masks[] = { 3215 RTE_PTYPE_L2_MASK, 3216 RTE_PTYPE_L3_MASK, 3217 RTE_PTYPE_L4_MASK, 3218 RTE_PTYPE_TUNNEL_MASK, 3219 RTE_PTYPE_INNER_L2_MASK, 3220 RTE_PTYPE_INNER_L3_MASK, 3221 RTE_PTYPE_INNER_L4_MASK, 3222 }; 3223 const uint32_t *all_ptypes; 3224 struct rte_eth_dev *dev; 3225 uint32_t unused_mask; 3226 unsigned int i, j; 3227 int ret; 3228 3229 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3230 dev = &rte_eth_devices[port_id]; 3231 3232 if (num > 0 && set_ptypes == NULL) { 3233 RTE_ETHDEV_LOG(ERR, 3234 "Cannot get ethdev port %u set packet types to NULL when array size is non zero\n", 3235 port_id); 3236 return -EINVAL; 3237 } 3238 3239 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || 3240 *dev->dev_ops->dev_ptypes_set == NULL) { 3241 ret = 0; 3242 goto ptype_unknown; 3243 } 3244 3245 if (ptype_mask == 0) { 3246 ret = (*dev->dev_ops->dev_ptypes_set)(dev, 3247 ptype_mask); 3248 goto ptype_unknown; 3249 } 3250 3251 unused_mask = ptype_mask; 3252 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) { 3253 uint32_t mask = ptype_mask & valid_ptype_masks[i]; 3254 if (mask && mask != valid_ptype_masks[i]) { 3255 ret = -EINVAL; 3256 goto ptype_unknown; 3257 } 3258 unused_mask &= ~valid_ptype_masks[i]; 3259 } 3260 3261 if (unused_mask) { 3262 ret = -EINVAL; 3263 goto ptype_unknown; 3264 } 3265 3266 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); 3267 if (all_ptypes == NULL) { 3268 ret = 0; 3269 goto ptype_unknown; 3270 } 3271 3272 /* 3273 * Accommodate as many set_ptypes as possible. If the supplied 3274 * set_ptypes array is insufficient fill it partially. 
3275 */ 3276 for (i = 0, j = 0; set_ptypes != NULL && 3277 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) { 3278 if (ptype_mask & all_ptypes[i]) { 3279 if (j < num - 1) { 3280 set_ptypes[j] = all_ptypes[i]; 3281 j++; 3282 continue; 3283 } 3284 break; 3285 } 3286 } 3287 3288 if (set_ptypes != NULL && j < num) 3289 set_ptypes[j] = RTE_PTYPE_UNKNOWN; 3290 3291 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); 3292 3293 ptype_unknown: 3294 if (num > 0) 3295 set_ptypes[0] = RTE_PTYPE_UNKNOWN; 3296 3297 return ret; 3298 } 3299 3300 int 3301 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, 3302 unsigned int num) 3303 { 3304 int32_t ret; 3305 struct rte_eth_dev *dev; 3306 struct rte_eth_dev_info dev_info; 3307 3308 if (ma == NULL) { 3309 RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__); 3310 return -EINVAL; 3311 } 3312 3313 /* will check for us that port_id is a valid one */ 3314 ret = rte_eth_dev_info_get(port_id, &dev_info); 3315 if (ret != 0) 3316 return ret; 3317 3318 dev = &rte_eth_devices[port_id]; 3319 num = RTE_MIN(dev_info.max_mac_addrs, num); 3320 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); 3321 3322 return num; 3323 } 3324 3325 int 3326 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) 3327 { 3328 struct rte_eth_dev *dev; 3329 3330 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3331 dev = &rte_eth_devices[port_id]; 3332 3333 if (mac_addr == NULL) { 3334 RTE_ETHDEV_LOG(ERR, 3335 "Cannot get ethdev port %u MAC address to NULL\n", 3336 port_id); 3337 return -EINVAL; 3338 } 3339 3340 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); 3341 3342 return 0; 3343 } 3344 3345 int 3346 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu) 3347 { 3348 struct rte_eth_dev *dev; 3349 3350 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3351 dev = &rte_eth_devices[port_id]; 3352 3353 if (mtu == NULL) { 3354 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n", 3355 port_id); 3356 return -EINVAL; 3357 } 3358 3359 *mtu = dev->data->mtu; 3360 return 0; 3361 } 3362 3363 int 3364 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) 3365 { 3366 int ret; 3367 struct rte_eth_dev_info dev_info; 3368 struct rte_eth_dev *dev; 3369 3370 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3371 dev = &rte_eth_devices[port_id]; 3372 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); 3373 3374 /* 3375 * Check if the device supports dev_infos_get, if it does not 3376 * skip min_mtu/max_mtu validation here as this requires values 3377 * that are populated within the call to rte_eth_dev_info_get() 3378 * which relies on dev->dev_ops->dev_infos_get. 
3379 */ 3380 if (*dev->dev_ops->dev_infos_get != NULL) { 3381 ret = rte_eth_dev_info_get(port_id, &dev_info); 3382 if (ret != 0) 3383 return ret; 3384 3385 ret = eth_dev_validate_mtu(port_id, &dev_info, mtu); 3386 if (ret != 0) 3387 return ret; 3388 } 3389 3390 if (dev->data->dev_configured == 0) { 3391 RTE_ETHDEV_LOG(ERR, 3392 "Port %u must be configured before MTU set\n", 3393 port_id); 3394 return -EINVAL; 3395 } 3396 3397 ret = (*dev->dev_ops->mtu_set)(dev, mtu); 3398 if (ret == 0) 3399 dev->data->mtu = mtu; 3400 3401 return eth_err(port_id, ret); 3402 } 3403 3404 int 3405 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on) 3406 { 3407 struct rte_eth_dev *dev; 3408 int ret; 3409 3410 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3411 dev = &rte_eth_devices[port_id]; 3412 3413 if (!(dev->data->dev_conf.rxmode.offloads & 3414 RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 3415 RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n", 3416 port_id); 3417 return -ENOSYS; 3418 } 3419 3420 if (vlan_id > 4095) { 3421 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n", 3422 port_id, vlan_id); 3423 return -EINVAL; 3424 } 3425 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); 3426 3427 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); 3428 if (ret == 0) { 3429 struct rte_vlan_filter_conf *vfc; 3430 int vidx; 3431 int vbit; 3432 3433 vfc = &dev->data->vlan_filter_conf; 3434 vidx = vlan_id / 64; 3435 vbit = vlan_id % 64; 3436 3437 if (on) 3438 vfc->ids[vidx] |= RTE_BIT64(vbit); 3439 else 3440 vfc->ids[vidx] &= ~RTE_BIT64(vbit); 3441 } 3442 3443 return eth_err(port_id, ret); 3444 } 3445 3446 int 3447 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, 3448 int on) 3449 { 3450 struct rte_eth_dev *dev; 3451 3452 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3453 dev = &rte_eth_devices[port_id]; 3454 3455 if (rx_queue_id >= dev->data->nb_rx_queues) { 3456 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id); 3457 return -EINVAL; 3458 } 3459 3460 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); 3461 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); 3462 3463 return 0; 3464 } 3465 3466 int 3467 rte_eth_dev_set_vlan_ether_type(uint16_t port_id, 3468 enum rte_vlan_type vlan_type, 3469 uint16_t tpid) 3470 { 3471 struct rte_eth_dev *dev; 3472 3473 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3474 dev = &rte_eth_devices[port_id]; 3475 3476 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); 3477 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, 3478 tpid)); 3479 } 3480 3481 int 3482 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) 3483 { 3484 struct rte_eth_dev_info dev_info; 3485 struct rte_eth_dev *dev; 3486 int ret = 0; 3487 int mask = 0; 3488 int cur, org = 0; 3489 uint64_t orig_offloads; 3490 uint64_t dev_offloads; 3491 uint64_t new_offloads; 3492 3493 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3494 dev = &rte_eth_devices[port_id]; 3495 3496 /* save original values in case of failure */ 3497 orig_offloads = dev->data->dev_conf.rxmode.offloads; 3498 dev_offloads = orig_offloads; 3499 3500 /* check which option changed by application */ 3501 cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD); 3502 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 3503 if (cur != org) { 3504 if (cur) 3505 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3506 else 3507 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 3508 mask |= 
RTE_ETH_VLAN_STRIP_MASK; 3509 } 3510 3511 cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD); 3512 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER); 3513 if (cur != org) { 3514 if (cur) 3515 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3516 else 3517 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 3518 mask |= RTE_ETH_VLAN_FILTER_MASK; 3519 } 3520 3521 cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD); 3522 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND); 3523 if (cur != org) { 3524 if (cur) 3525 dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3526 else 3527 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 3528 mask |= RTE_ETH_VLAN_EXTEND_MASK; 3529 } 3530 3531 cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD); 3532 org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP); 3533 if (cur != org) { 3534 if (cur) 3535 dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3536 else 3537 dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP; 3538 mask |= RTE_ETH_QINQ_STRIP_MASK; 3539 } 3540 3541 /*no change*/ 3542 if (mask == 0) 3543 return ret; 3544 3545 ret = rte_eth_dev_info_get(port_id, &dev_info); 3546 if (ret != 0) 3547 return ret; 3548 3549 /* Rx VLAN offloading must be within its device capabilities */ 3550 if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) { 3551 new_offloads = dev_offloads & ~orig_offloads; 3552 RTE_ETHDEV_LOG(ERR, 3553 "Ethdev port_id=%u requested new added VLAN offloads " 3554 "0x%" PRIx64 " must be within Rx offloads capabilities " 3555 "0x%" PRIx64 " in %s()\n", 3556 port_id, new_offloads, dev_info.rx_offload_capa, 3557 __func__); 3558 return -EINVAL; 3559 } 3560 3561 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); 3562 dev->data->dev_conf.rxmode.offloads = dev_offloads; 3563 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); 3564 if (ret) { 3565 /* hit an error restore original values */ 3566 dev->data->dev_conf.rxmode.offloads = orig_offloads; 3567 } 3568 3569 return eth_err(port_id, ret); 3570 } 3571 3572 int 3573 rte_eth_dev_get_vlan_offload(uint16_t port_id) 3574 { 3575 struct rte_eth_dev *dev; 3576 uint64_t *dev_offloads; 3577 int ret = 0; 3578 3579 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3580 dev = &rte_eth_devices[port_id]; 3581 dev_offloads = &dev->data->dev_conf.rxmode.offloads; 3582 3583 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 3584 ret |= RTE_ETH_VLAN_STRIP_OFFLOAD; 3585 3586 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) 3587 ret |= RTE_ETH_VLAN_FILTER_OFFLOAD; 3588 3589 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 3590 ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD; 3591 3592 if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) 3593 ret |= RTE_ETH_QINQ_STRIP_OFFLOAD; 3594 3595 return ret; 3596 } 3597 3598 int 3599 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on) 3600 { 3601 struct rte_eth_dev *dev; 3602 3603 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3604 dev = &rte_eth_devices[port_id]; 3605 3606 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); 3607 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); 3608 } 3609 3610 int 3611 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3612 { 3613 struct rte_eth_dev *dev; 3614 3615 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3616 dev = &rte_eth_devices[port_id]; 3617 3618 if (fc_conf == NULL) { 3619 RTE_ETHDEV_LOG(ERR, 3620 "Cannot get ethdev port %u flow control config to NULL\n", 3621 port_id); 3622 return -EINVAL; 3623 } 3624 3625 
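	/*
	 * Illustrative caller-side sketch (not part of this file): the usual
	 * read-modify-write sequence for link flow control, here switching a
	 * port to full (Rx and Tx) pause. "port_id" is an application-chosen
	 * example; unmodified fields keep whatever the driver reported.
	 *
	 *	struct rte_eth_fc_conf fc;
	 *	int rc;
	 *
	 *	rc = rte_eth_dev_flow_ctrl_get(port_id, &fc);
	 *	if (rc == 0) {
	 *		fc.mode = RTE_ETH_FC_FULL;
	 *		rc = rte_eth_dev_flow_ctrl_set(port_id, &fc);
	 *	}
	 */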
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); 3626 memset(fc_conf, 0, sizeof(*fc_conf)); 3627 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); 3628 } 3629 3630 int 3631 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf) 3632 { 3633 struct rte_eth_dev *dev; 3634 3635 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3636 dev = &rte_eth_devices[port_id]; 3637 3638 if (fc_conf == NULL) { 3639 RTE_ETHDEV_LOG(ERR, 3640 "Cannot set ethdev port %u flow control from NULL config\n", 3641 port_id); 3642 return -EINVAL; 3643 } 3644 3645 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { 3646 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n"); 3647 return -EINVAL; 3648 } 3649 3650 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); 3651 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); 3652 } 3653 3654 int 3655 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, 3656 struct rte_eth_pfc_conf *pfc_conf) 3657 { 3658 struct rte_eth_dev *dev; 3659 3660 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3661 dev = &rte_eth_devices[port_id]; 3662 3663 if (pfc_conf == NULL) { 3664 RTE_ETHDEV_LOG(ERR, 3665 "Cannot set ethdev port %u priority flow control from NULL config\n", 3666 port_id); 3667 return -EINVAL; 3668 } 3669 3670 if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) { 3671 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n"); 3672 return -EINVAL; 3673 } 3674 3675 /* High water, low water validation are device specific */ 3676 if (*dev->dev_ops->priority_flow_ctrl_set) 3677 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) 3678 (dev, pfc_conf)); 3679 return -ENOTSUP; 3680 } 3681 3682 static int 3683 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 3684 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 3685 { 3686 if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) || 3687 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 3688 if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) { 3689 RTE_ETHDEV_LOG(ERR, 3690 "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n", 3691 pfc_queue_conf->rx_pause.tx_qid, 3692 dev_info->nb_tx_queues); 3693 return -EINVAL; 3694 } 3695 3696 if (pfc_queue_conf->rx_pause.tc >= tc_max) { 3697 RTE_ETHDEV_LOG(ERR, 3698 "PFC TC not in range for Rx pause requested:%d max:%d\n", 3699 pfc_queue_conf->rx_pause.tc, tc_max); 3700 return -EINVAL; 3701 } 3702 } 3703 3704 return 0; 3705 } 3706 3707 static int 3708 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max, 3709 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 3710 { 3711 if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) || 3712 (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) { 3713 if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) { 3714 RTE_ETHDEV_LOG(ERR, 3715 "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n", 3716 pfc_queue_conf->tx_pause.rx_qid, 3717 dev_info->nb_rx_queues); 3718 return -EINVAL; 3719 } 3720 3721 if (pfc_queue_conf->tx_pause.tc >= tc_max) { 3722 RTE_ETHDEV_LOG(ERR, 3723 "PFC TC not in range for Tx pause requested:%d max:%d\n", 3724 pfc_queue_conf->tx_pause.tc, tc_max); 3725 return -EINVAL; 3726 } 3727 } 3728 3729 return 0; 3730 } 3731 3732 int 3733 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, 3734 struct rte_eth_pfc_queue_info *pfc_queue_info) 3735 { 3736 struct rte_eth_dev *dev; 3737 3738 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3739 dev 
= &rte_eth_devices[port_id]; 3740 3741 if (pfc_queue_info == NULL) { 3742 RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n", 3743 port_id); 3744 return -EINVAL; 3745 } 3746 3747 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 3748 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get) 3749 (dev, pfc_queue_info)); 3750 return -ENOTSUP; 3751 } 3752 3753 int 3754 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, 3755 struct rte_eth_pfc_queue_conf *pfc_queue_conf) 3756 { 3757 struct rte_eth_pfc_queue_info pfc_info; 3758 struct rte_eth_dev_info dev_info; 3759 struct rte_eth_dev *dev; 3760 int ret; 3761 3762 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3763 dev = &rte_eth_devices[port_id]; 3764 3765 if (pfc_queue_conf == NULL) { 3766 RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n", 3767 port_id); 3768 return -EINVAL; 3769 } 3770 3771 ret = rte_eth_dev_info_get(port_id, &dev_info); 3772 if (ret != 0) 3773 return ret; 3774 3775 ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info); 3776 if (ret != 0) 3777 return ret; 3778 3779 if (pfc_info.tc_max == 0) { 3780 RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n", 3781 port_id); 3782 return -ENOTSUP; 3783 } 3784 3785 /* Check requested mode supported or not */ 3786 if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE && 3787 pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) { 3788 RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n", 3789 port_id); 3790 return -EINVAL; 3791 } 3792 3793 if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE && 3794 pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) { 3795 RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n", 3796 port_id); 3797 return -EINVAL; 3798 } 3799 3800 /* Validate Rx pause parameters */ 3801 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 3802 pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) { 3803 ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max, 3804 pfc_queue_conf); 3805 if (ret != 0) 3806 return ret; 3807 } 3808 3809 /* Validate Tx pause parameters */ 3810 if (pfc_info.mode_capa == RTE_ETH_FC_FULL || 3811 pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) { 3812 ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max, 3813 pfc_queue_conf); 3814 if (ret != 0) 3815 return ret; 3816 } 3817 3818 if (*dev->dev_ops->priority_flow_ctrl_queue_config) 3819 return eth_err(port_id, 3820 (*dev->dev_ops->priority_flow_ctrl_queue_config)( 3821 dev, pfc_queue_conf)); 3822 return -ENOTSUP; 3823 } 3824 3825 static int 3826 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf, 3827 uint16_t reta_size) 3828 { 3829 uint16_t i, num; 3830 3831 num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE; 3832 for (i = 0; i < num; i++) { 3833 if (reta_conf[i].mask) 3834 return 0; 3835 } 3836 3837 return -EINVAL; 3838 } 3839 3840 static int 3841 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf, 3842 uint16_t reta_size, 3843 uint16_t max_rxq) 3844 { 3845 uint16_t i, idx, shift; 3846 3847 if (max_rxq == 0) { 3848 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n"); 3849 return -EINVAL; 3850 } 3851 3852 for (i = 0; i < reta_size; i++) { 3853 idx = i / RTE_ETH_RETA_GROUP_SIZE; 3854 shift = i % RTE_ETH_RETA_GROUP_SIZE; 3855 if ((reta_conf[idx].mask & RTE_BIT64(shift)) && 3856 (reta_conf[idx].reta[shift] >= max_rxq)) { 3857 RTE_ETHDEV_LOG(ERR, 3858 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n", 3859 idx, shift, 3860 reta_conf[idx].reta[shift], max_rxq); 3861 return 
-EINVAL; 3862 } 3863 } 3864 3865 return 0; 3866 } 3867 3868 int 3869 rte_eth_dev_rss_reta_update(uint16_t port_id, 3870 struct rte_eth_rss_reta_entry64 *reta_conf, 3871 uint16_t reta_size) 3872 { 3873 enum rte_eth_rx_mq_mode mq_mode; 3874 struct rte_eth_dev *dev; 3875 int ret; 3876 3877 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3878 dev = &rte_eth_devices[port_id]; 3879 3880 if (reta_conf == NULL) { 3881 RTE_ETHDEV_LOG(ERR, 3882 "Cannot update ethdev port %u RSS RETA to NULL\n", 3883 port_id); 3884 return -EINVAL; 3885 } 3886 3887 if (reta_size == 0) { 3888 RTE_ETHDEV_LOG(ERR, 3889 "Cannot update ethdev port %u RSS RETA with zero size\n", 3890 port_id); 3891 return -EINVAL; 3892 } 3893 3894 /* Check mask bits */ 3895 ret = eth_check_reta_mask(reta_conf, reta_size); 3896 if (ret < 0) 3897 return ret; 3898 3899 /* Check entry value */ 3900 ret = eth_check_reta_entry(reta_conf, reta_size, 3901 dev->data->nb_rx_queues); 3902 if (ret < 0) 3903 return ret; 3904 3905 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 3906 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 3907 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 3908 return -ENOTSUP; 3909 } 3910 3911 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); 3912 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, 3913 reta_size)); 3914 } 3915 3916 int 3917 rte_eth_dev_rss_reta_query(uint16_t port_id, 3918 struct rte_eth_rss_reta_entry64 *reta_conf, 3919 uint16_t reta_size) 3920 { 3921 struct rte_eth_dev *dev; 3922 int ret; 3923 3924 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3925 dev = &rte_eth_devices[port_id]; 3926 3927 if (reta_conf == NULL) { 3928 RTE_ETHDEV_LOG(ERR, 3929 "Cannot query ethdev port %u RSS RETA from NULL config\n", 3930 port_id); 3931 return -EINVAL; 3932 } 3933 3934 /* Check mask bits */ 3935 ret = eth_check_reta_mask(reta_conf, reta_size); 3936 if (ret < 0) 3937 return ret; 3938 3939 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); 3940 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, 3941 reta_size)); 3942 } 3943 3944 int 3945 rte_eth_dev_rss_hash_update(uint16_t port_id, 3946 struct rte_eth_rss_conf *rss_conf) 3947 { 3948 struct rte_eth_dev *dev; 3949 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; 3950 enum rte_eth_rx_mq_mode mq_mode; 3951 int ret; 3952 3953 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3954 dev = &rte_eth_devices[port_id]; 3955 3956 if (rss_conf == NULL) { 3957 RTE_ETHDEV_LOG(ERR, 3958 "Cannot update ethdev port %u RSS hash from NULL config\n", 3959 port_id); 3960 return -EINVAL; 3961 } 3962 3963 ret = rte_eth_dev_info_get(port_id, &dev_info); 3964 if (ret != 0) 3965 return ret; 3966 3967 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf); 3968 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != 3969 dev_info.flow_type_rss_offloads) { 3970 RTE_ETHDEV_LOG(ERR, 3971 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n", 3972 port_id, rss_conf->rss_hf, 3973 dev_info.flow_type_rss_offloads); 3974 return -EINVAL; 3975 } 3976 3977 mq_mode = dev->data->dev_conf.rxmode.mq_mode; 3978 if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { 3979 RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n"); 3980 return -ENOTSUP; 3981 } 3982 3983 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); 3984 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, 3985 rss_conf)); 3986 } 3987 3988 int 3989 rte_eth_dev_rss_hash_conf_get(uint16_t port_id, 3990 struct 
rte_eth_rss_conf *rss_conf) 3991 { 3992 struct rte_eth_dev *dev; 3993 3994 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 3995 dev = &rte_eth_devices[port_id]; 3996 3997 if (rss_conf == NULL) { 3998 RTE_ETHDEV_LOG(ERR, 3999 "Cannot get ethdev port %u RSS hash config to NULL\n", 4000 port_id); 4001 return -EINVAL; 4002 } 4003 4004 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); 4005 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, 4006 rss_conf)); 4007 } 4008 4009 int 4010 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, 4011 struct rte_eth_udp_tunnel *udp_tunnel) 4012 { 4013 struct rte_eth_dev *dev; 4014 4015 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4016 dev = &rte_eth_devices[port_id]; 4017 4018 if (udp_tunnel == NULL) { 4019 RTE_ETHDEV_LOG(ERR, 4020 "Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4021 port_id); 4022 return -EINVAL; 4023 } 4024 4025 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4026 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4027 return -EINVAL; 4028 } 4029 4030 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); 4031 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, 4032 udp_tunnel)); 4033 } 4034 4035 int 4036 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, 4037 struct rte_eth_udp_tunnel *udp_tunnel) 4038 { 4039 struct rte_eth_dev *dev; 4040 4041 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4042 dev = &rte_eth_devices[port_id]; 4043 4044 if (udp_tunnel == NULL) { 4045 RTE_ETHDEV_LOG(ERR, 4046 "Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n", 4047 port_id); 4048 return -EINVAL; 4049 } 4050 4051 if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) { 4052 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n"); 4053 return -EINVAL; 4054 } 4055 4056 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); 4057 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, 4058 udp_tunnel)); 4059 } 4060 4061 int 4062 rte_eth_led_on(uint16_t port_id) 4063 { 4064 struct rte_eth_dev *dev; 4065 4066 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4067 dev = &rte_eth_devices[port_id]; 4068 4069 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); 4070 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); 4071 } 4072 4073 int 4074 rte_eth_led_off(uint16_t port_id) 4075 { 4076 struct rte_eth_dev *dev; 4077 4078 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4079 dev = &rte_eth_devices[port_id]; 4080 4081 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); 4082 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); 4083 } 4084 4085 int 4086 rte_eth_fec_get_capability(uint16_t port_id, 4087 struct rte_eth_fec_capa *speed_fec_capa, 4088 unsigned int num) 4089 { 4090 struct rte_eth_dev *dev; 4091 int ret; 4092 4093 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4094 dev = &rte_eth_devices[port_id]; 4095 4096 if (speed_fec_capa == NULL && num > 0) { 4097 RTE_ETHDEV_LOG(ERR, 4098 "Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n", 4099 port_id); 4100 return -EINVAL; 4101 } 4102 4103 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); 4104 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); 4105 4106 return ret; 4107 } 4108 4109 int 4110 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa) 4111 { 4112 struct rte_eth_dev *dev; 4113 4114 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4115 dev = &rte_eth_devices[port_id]; 4116 
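	/*
	 * On success, *fec_capa reports the FEC mode(s) currently applied,
	 * expressed as RTE_ETH_FEC_MODE_CAPA_MASK() bits.
	 */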
4117 if (fec_capa == NULL) { 4118 RTE_ETHDEV_LOG(ERR, 4119 "Cannot get ethdev port %u current FEC mode to NULL\n", 4120 port_id); 4121 return -EINVAL; 4122 } 4123 4124 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); 4125 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); 4126 } 4127 4128 int 4129 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) 4130 { 4131 struct rte_eth_dev *dev; 4132 4133 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4134 dev = &rte_eth_devices[port_id]; 4135 4136 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); 4137 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); 4138 } 4139 4140 /* 4141 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4142 * an empty spot. 4143 */ 4144 static int 4145 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) 4146 { 4147 struct rte_eth_dev_info dev_info; 4148 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4149 unsigned i; 4150 int ret; 4151 4152 ret = rte_eth_dev_info_get(port_id, &dev_info); 4153 if (ret != 0) 4154 return -1; 4155 4156 for (i = 0; i < dev_info.max_mac_addrs; i++) 4157 if (memcmp(addr, &dev->data->mac_addrs[i], 4158 RTE_ETHER_ADDR_LEN) == 0) 4159 return i; 4160 4161 return -1; 4162 } 4163 4164 static const struct rte_ether_addr null_mac_addr; 4165 4166 int 4167 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, 4168 uint32_t pool) 4169 { 4170 struct rte_eth_dev *dev; 4171 int index; 4172 uint64_t pool_mask; 4173 int ret; 4174 4175 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4176 dev = &rte_eth_devices[port_id]; 4177 4178 if (addr == NULL) { 4179 RTE_ETHDEV_LOG(ERR, 4180 "Cannot add ethdev port %u MAC address from NULL address\n", 4181 port_id); 4182 return -EINVAL; 4183 } 4184 4185 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); 4186 4187 if (rte_is_zero_ether_addr(addr)) { 4188 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4189 port_id); 4190 return -EINVAL; 4191 } 4192 if (pool >= RTE_ETH_64_POOLS) { 4193 RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1); 4194 return -EINVAL; 4195 } 4196 4197 index = eth_dev_get_mac_addr_index(port_id, addr); 4198 if (index < 0) { 4199 index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr); 4200 if (index < 0) { 4201 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4202 port_id); 4203 return -ENOSPC; 4204 } 4205 } else { 4206 pool_mask = dev->data->mac_pool_sel[index]; 4207 4208 /* Check if both MAC address and pool is already there, and do nothing */ 4209 if (pool_mask & RTE_BIT64(pool)) 4210 return 0; 4211 } 4212 4213 /* Update NIC */ 4214 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); 4215 4216 if (ret == 0) { 4217 /* Update address in NIC data structure */ 4218 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); 4219 4220 /* Update pool bitmap in NIC data structure */ 4221 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); 4222 } 4223 4224 return eth_err(port_id, ret); 4225 } 4226 4227 int 4228 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) 4229 { 4230 struct rte_eth_dev *dev; 4231 int index; 4232 4233 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4234 dev = &rte_eth_devices[port_id]; 4235 4236 if (addr == NULL) { 4237 RTE_ETHDEV_LOG(ERR, 4238 "Cannot remove ethdev port %u MAC address from NULL address\n", 4239 port_id); 4240 return -EINVAL; 4241 } 4242 4243 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, 
-ENOTSUP); 4244 4245 index = eth_dev_get_mac_addr_index(port_id, addr); 4246 if (index == 0) { 4247 RTE_ETHDEV_LOG(ERR, 4248 "Port %u: Cannot remove default MAC address\n", 4249 port_id); 4250 return -EADDRINUSE; 4251 } else if (index < 0) 4252 return 0; /* Do nothing if address wasn't found */ 4253 4254 /* Update NIC */ 4255 (*dev->dev_ops->mac_addr_remove)(dev, index); 4256 4257 /* Update address in NIC data structure */ 4258 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); 4259 4260 /* reset pool bitmap */ 4261 dev->data->mac_pool_sel[index] = 0; 4262 4263 return 0; 4264 } 4265 4266 int 4267 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) 4268 { 4269 struct rte_eth_dev *dev; 4270 int ret; 4271 4272 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4273 dev = &rte_eth_devices[port_id]; 4274 4275 if (addr == NULL) { 4276 RTE_ETHDEV_LOG(ERR, 4277 "Cannot set ethdev port %u default MAC address from NULL address\n", 4278 port_id); 4279 return -EINVAL; 4280 } 4281 4282 if (!rte_is_valid_assigned_ether_addr(addr)) 4283 return -EINVAL; 4284 4285 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); 4286 4287 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); 4288 if (ret < 0) 4289 return ret; 4290 4291 /* Update default address in NIC data structure */ 4292 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 4293 4294 return 0; 4295 } 4296 4297 4298 /* 4299 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find 4300 * an empty spot. 4301 */ 4302 static int 4303 eth_dev_get_hash_mac_addr_index(uint16_t port_id, 4304 const struct rte_ether_addr *addr) 4305 { 4306 struct rte_eth_dev_info dev_info; 4307 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4308 unsigned i; 4309 int ret; 4310 4311 ret = rte_eth_dev_info_get(port_id, &dev_info); 4312 if (ret != 0) 4313 return -1; 4314 4315 if (!dev->data->hash_mac_addrs) 4316 return -1; 4317 4318 for (i = 0; i < dev_info.max_hash_mac_addrs; i++) 4319 if (memcmp(addr, &dev->data->hash_mac_addrs[i], 4320 RTE_ETHER_ADDR_LEN) == 0) 4321 return i; 4322 4323 return -1; 4324 } 4325 4326 int 4327 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, 4328 uint8_t on) 4329 { 4330 int index; 4331 int ret; 4332 struct rte_eth_dev *dev; 4333 4334 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4335 dev = &rte_eth_devices[port_id]; 4336 4337 if (addr == NULL) { 4338 RTE_ETHDEV_LOG(ERR, 4339 "Cannot set ethdev port %u unicast hash table from NULL address\n", 4340 port_id); 4341 return -EINVAL; 4342 } 4343 4344 if (rte_is_zero_ether_addr(addr)) { 4345 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", 4346 port_id); 4347 return -EINVAL; 4348 } 4349 4350 index = eth_dev_get_hash_mac_addr_index(port_id, addr); 4351 /* Check if it's already there, and do nothing */ 4352 if ((index >= 0) && on) 4353 return 0; 4354 4355 if (index < 0) { 4356 if (!on) { 4357 RTE_ETHDEV_LOG(ERR, 4358 "Port %u: the MAC address was not set in UTA\n", 4359 port_id); 4360 return -EINVAL; 4361 } 4362 4363 index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr); 4364 if (index < 0) { 4365 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n", 4366 port_id); 4367 return -ENOSPC; 4368 } 4369 } 4370 4371 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); 4372 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); 4373 if (ret == 0) { 4374 /* Update address in NIC data structure */ 4375 if (on) 4376 rte_ether_addr_copy(addr, 4377 
&dev->data->hash_mac_addrs[index]); 4378 else 4379 rte_ether_addr_copy(&null_mac_addr, 4380 &dev->data->hash_mac_addrs[index]); 4381 } 4382 4383 return eth_err(port_id, ret); 4384 } 4385 4386 int 4387 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on) 4388 { 4389 struct rte_eth_dev *dev; 4390 4391 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4392 dev = &rte_eth_devices[port_id]; 4393 4394 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); 4395 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, 4396 on)); 4397 } 4398 4399 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, 4400 uint16_t tx_rate) 4401 { 4402 struct rte_eth_dev *dev; 4403 struct rte_eth_dev_info dev_info; 4404 struct rte_eth_link link; 4405 int ret; 4406 4407 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4408 dev = &rte_eth_devices[port_id]; 4409 4410 ret = rte_eth_dev_info_get(port_id, &dev_info); 4411 if (ret != 0) 4412 return ret; 4413 4414 link = dev->data->dev_link; 4415 4416 if (queue_idx > dev_info.max_tx_queues) { 4417 RTE_ETHDEV_LOG(ERR, 4418 "Set queue rate limit:port %u: invalid queue ID=%u\n", 4419 port_id, queue_idx); 4420 return -EINVAL; 4421 } 4422 4423 if (tx_rate > link.link_speed) { 4424 RTE_ETHDEV_LOG(ERR, 4425 "Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n", 4426 tx_rate, link.link_speed); 4427 return -EINVAL; 4428 } 4429 4430 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); 4431 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, 4432 queue_idx, tx_rate)); 4433 } 4434 4435 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, 4436 uint8_t avail_thresh) 4437 { 4438 struct rte_eth_dev *dev; 4439 4440 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4441 dev = &rte_eth_devices[port_id]; 4442 4443 if (queue_id > dev->data->nb_rx_queues) { 4444 RTE_ETHDEV_LOG(ERR, 4445 "Set queue avail thresh: port %u: invalid queue ID=%u.\n", 4446 port_id, queue_id); 4447 return -EINVAL; 4448 } 4449 4450 if (avail_thresh > 99) { 4451 RTE_ETHDEV_LOG(ERR, 4452 "Set queue avail thresh: port %u: threshold should be <= 99.\n", 4453 port_id); 4454 return -EINVAL; 4455 } 4456 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_avail_thresh_set, -ENOTSUP); 4457 return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev, 4458 queue_id, avail_thresh)); 4459 } 4460 4461 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, 4462 uint8_t *avail_thresh) 4463 { 4464 struct rte_eth_dev *dev; 4465 4466 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4467 dev = &rte_eth_devices[port_id]; 4468 4469 if (queue_id == NULL) 4470 return -EINVAL; 4471 if (*queue_id >= dev->data->nb_rx_queues) 4472 *queue_id = 0; 4473 4474 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_avail_thresh_query, -ENOTSUP); 4475 return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev, 4476 queue_id, avail_thresh)); 4477 } 4478 4479 RTE_INIT(eth_dev_init_fp_ops) 4480 { 4481 uint32_t i; 4482 4483 for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++) 4484 eth_dev_fp_ops_reset(rte_eth_fp_ops + i); 4485 } 4486 4487 RTE_INIT(eth_dev_init_cb_lists) 4488 { 4489 uint16_t i; 4490 4491 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 4492 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs); 4493 } 4494 4495 int 4496 rte_eth_dev_callback_register(uint16_t port_id, 4497 enum rte_eth_event_type event, 4498 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4499 { 4500 struct rte_eth_dev *dev; 4501 struct 
rte_eth_dev_callback *user_cb; 4502 uint16_t next_port; 4503 uint16_t last_port; 4504 4505 if (cb_fn == NULL) { 4506 RTE_ETHDEV_LOG(ERR, 4507 "Cannot register ethdev port %u callback from NULL\n", 4508 port_id); 4509 return -EINVAL; 4510 } 4511 4512 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4513 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4514 return -EINVAL; 4515 } 4516 4517 if (port_id == RTE_ETH_ALL) { 4518 next_port = 0; 4519 last_port = RTE_MAX_ETHPORTS - 1; 4520 } else { 4521 next_port = last_port = port_id; 4522 } 4523 4524 rte_spinlock_lock(ð_dev_cb_lock); 4525 4526 do { 4527 dev = &rte_eth_devices[next_port]; 4528 4529 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { 4530 if (user_cb->cb_fn == cb_fn && 4531 user_cb->cb_arg == cb_arg && 4532 user_cb->event == event) { 4533 break; 4534 } 4535 } 4536 4537 /* create a new callback. */ 4538 if (user_cb == NULL) { 4539 user_cb = rte_zmalloc("INTR_USER_CALLBACK", 4540 sizeof(struct rte_eth_dev_callback), 0); 4541 if (user_cb != NULL) { 4542 user_cb->cb_fn = cb_fn; 4543 user_cb->cb_arg = cb_arg; 4544 user_cb->event = event; 4545 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), 4546 user_cb, next); 4547 } else { 4548 rte_spinlock_unlock(ð_dev_cb_lock); 4549 rte_eth_dev_callback_unregister(port_id, event, 4550 cb_fn, cb_arg); 4551 return -ENOMEM; 4552 } 4553 4554 } 4555 } while (++next_port <= last_port); 4556 4557 rte_spinlock_unlock(ð_dev_cb_lock); 4558 return 0; 4559 } 4560 4561 int 4562 rte_eth_dev_callback_unregister(uint16_t port_id, 4563 enum rte_eth_event_type event, 4564 rte_eth_dev_cb_fn cb_fn, void *cb_arg) 4565 { 4566 int ret; 4567 struct rte_eth_dev *dev; 4568 struct rte_eth_dev_callback *cb, *next; 4569 uint16_t next_port; 4570 uint16_t last_port; 4571 4572 if (cb_fn == NULL) { 4573 RTE_ETHDEV_LOG(ERR, 4574 "Cannot unregister ethdev port %u callback from NULL\n", 4575 port_id); 4576 return -EINVAL; 4577 } 4578 4579 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) { 4580 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id); 4581 return -EINVAL; 4582 } 4583 4584 if (port_id == RTE_ETH_ALL) { 4585 next_port = 0; 4586 last_port = RTE_MAX_ETHPORTS - 1; 4587 } else { 4588 next_port = last_port = port_id; 4589 } 4590 4591 rte_spinlock_lock(ð_dev_cb_lock); 4592 4593 do { 4594 dev = &rte_eth_devices[next_port]; 4595 ret = 0; 4596 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; 4597 cb = next) { 4598 4599 next = TAILQ_NEXT(cb, next); 4600 4601 if (cb->cb_fn != cb_fn || cb->event != event || 4602 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg)) 4603 continue; 4604 4605 /* 4606 * if this callback is not executing right now, 4607 * then remove it. 
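			 * Otherwise leave it on the list and report -EAGAIN
			 * so the caller can retry once the callback has
			 * finished running.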
4608 */ 4609 if (cb->active == 0) { 4610 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); 4611 rte_free(cb); 4612 } else { 4613 ret = -EAGAIN; 4614 } 4615 } 4616 } while (++next_port <= last_port); 4617 4618 rte_spinlock_unlock(ð_dev_cb_lock); 4619 return ret; 4620 } 4621 4622 int 4623 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) 4624 { 4625 uint32_t vec; 4626 struct rte_eth_dev *dev; 4627 struct rte_intr_handle *intr_handle; 4628 uint16_t qid; 4629 int rc; 4630 4631 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4632 dev = &rte_eth_devices[port_id]; 4633 4634 if (!dev->intr_handle) { 4635 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4636 return -ENOTSUP; 4637 } 4638 4639 intr_handle = dev->intr_handle; 4640 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4641 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4642 return -EPERM; 4643 } 4644 4645 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 4646 vec = rte_intr_vec_list_index_get(intr_handle, qid); 4647 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4648 if (rc && rc != -EEXIST) { 4649 RTE_ETHDEV_LOG(ERR, 4650 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 4651 port_id, qid, op, epfd, vec); 4652 } 4653 } 4654 4655 return 0; 4656 } 4657 4658 int 4659 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) 4660 { 4661 struct rte_intr_handle *intr_handle; 4662 struct rte_eth_dev *dev; 4663 unsigned int efd_idx; 4664 uint32_t vec; 4665 int fd; 4666 4667 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); 4668 dev = &rte_eth_devices[port_id]; 4669 4670 if (queue_id >= dev->data->nb_rx_queues) { 4671 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4672 return -1; 4673 } 4674 4675 if (!dev->intr_handle) { 4676 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4677 return -1; 4678 } 4679 4680 intr_handle = dev->intr_handle; 4681 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4682 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4683 return -1; 4684 } 4685 4686 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 4687 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ? 
4688 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec; 4689 fd = rte_intr_efds_index_get(intr_handle, efd_idx); 4690 4691 return fd; 4692 } 4693 4694 int 4695 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, 4696 int epfd, int op, void *data) 4697 { 4698 uint32_t vec; 4699 struct rte_eth_dev *dev; 4700 struct rte_intr_handle *intr_handle; 4701 int rc; 4702 4703 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4704 dev = &rte_eth_devices[port_id]; 4705 4706 if (queue_id >= dev->data->nb_rx_queues) { 4707 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 4708 return -EINVAL; 4709 } 4710 4711 if (!dev->intr_handle) { 4712 RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n"); 4713 return -ENOTSUP; 4714 } 4715 4716 intr_handle = dev->intr_handle; 4717 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) { 4718 RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n"); 4719 return -EPERM; 4720 } 4721 4722 vec = rte_intr_vec_list_index_get(intr_handle, queue_id); 4723 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); 4724 if (rc && rc != -EEXIST) { 4725 RTE_ETHDEV_LOG(ERR, 4726 "p %u q %u Rx ctl error op %d epfd %d vec %u\n", 4727 port_id, queue_id, op, epfd, vec); 4728 return rc; 4729 } 4730 4731 return 0; 4732 } 4733 4734 int 4735 rte_eth_dev_rx_intr_enable(uint16_t port_id, 4736 uint16_t queue_id) 4737 { 4738 struct rte_eth_dev *dev; 4739 int ret; 4740 4741 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4742 dev = &rte_eth_devices[port_id]; 4743 4744 ret = eth_dev_validate_rx_queue(dev, queue_id); 4745 if (ret != 0) 4746 return ret; 4747 4748 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); 4749 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); 4750 } 4751 4752 int 4753 rte_eth_dev_rx_intr_disable(uint16_t port_id, 4754 uint16_t queue_id) 4755 { 4756 struct rte_eth_dev *dev; 4757 int ret; 4758 4759 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4760 dev = &rte_eth_devices[port_id]; 4761 4762 ret = eth_dev_validate_rx_queue(dev, queue_id); 4763 if (ret != 0) 4764 return ret; 4765 4766 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); 4767 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); 4768 } 4769 4770 4771 const struct rte_eth_rxtx_callback * 4772 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, 4773 rte_rx_callback_fn fn, void *user_param) 4774 { 4775 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4776 rte_errno = ENOTSUP; 4777 return NULL; 4778 #endif 4779 struct rte_eth_dev *dev; 4780 4781 /* check input parameters */ 4782 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 4783 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 4784 rte_errno = EINVAL; 4785 return NULL; 4786 } 4787 dev = &rte_eth_devices[port_id]; 4788 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 4789 rte_errno = EINVAL; 4790 return NULL; 4791 } 4792 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 4793 4794 if (cb == NULL) { 4795 rte_errno = ENOMEM; 4796 return NULL; 4797 } 4798 4799 cb->fn.rx = fn; 4800 cb->param = user_param; 4801 4802 rte_spinlock_lock(ð_dev_rx_cb_lock); 4803 /* Add the callbacks in fifo order. */ 4804 struct rte_eth_rxtx_callback *tail = 4805 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 4806 4807 if (!tail) { 4808 /* Stores to cb->fn and cb->param should complete before 4809 * cb is visible to data plane. 
4810 */ 4811 __atomic_store_n( 4812 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 4813 cb, __ATOMIC_RELEASE); 4814 4815 } else { 4816 while (tail->next) 4817 tail = tail->next; 4818 /* Stores to cb->fn and cb->param should complete before 4819 * cb is visible to data plane. 4820 */ 4821 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 4822 } 4823 rte_spinlock_unlock(ð_dev_rx_cb_lock); 4824 4825 return cb; 4826 } 4827 4828 const struct rte_eth_rxtx_callback * 4829 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, 4830 rte_rx_callback_fn fn, void *user_param) 4831 { 4832 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4833 rte_errno = ENOTSUP; 4834 return NULL; 4835 #endif 4836 /* check input parameters */ 4837 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 4838 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) { 4839 rte_errno = EINVAL; 4840 return NULL; 4841 } 4842 4843 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 4844 4845 if (cb == NULL) { 4846 rte_errno = ENOMEM; 4847 return NULL; 4848 } 4849 4850 cb->fn.rx = fn; 4851 cb->param = user_param; 4852 4853 rte_spinlock_lock(ð_dev_rx_cb_lock); 4854 /* Add the callbacks at first position */ 4855 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id]; 4856 /* Stores to cb->fn, cb->param and cb->next should complete before 4857 * cb is visible to data plane threads. 4858 */ 4859 __atomic_store_n( 4860 &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id], 4861 cb, __ATOMIC_RELEASE); 4862 rte_spinlock_unlock(ð_dev_rx_cb_lock); 4863 4864 return cb; 4865 } 4866 4867 const struct rte_eth_rxtx_callback * 4868 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, 4869 rte_tx_callback_fn fn, void *user_param) 4870 { 4871 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4872 rte_errno = ENOTSUP; 4873 return NULL; 4874 #endif 4875 struct rte_eth_dev *dev; 4876 4877 /* check input parameters */ 4878 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL || 4879 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) { 4880 rte_errno = EINVAL; 4881 return NULL; 4882 } 4883 4884 dev = &rte_eth_devices[port_id]; 4885 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 4886 rte_errno = EINVAL; 4887 return NULL; 4888 } 4889 4890 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0); 4891 4892 if (cb == NULL) { 4893 rte_errno = ENOMEM; 4894 return NULL; 4895 } 4896 4897 cb->fn.tx = fn; 4898 cb->param = user_param; 4899 4900 rte_spinlock_lock(ð_dev_tx_cb_lock); 4901 /* Add the callbacks in fifo order. */ 4902 struct rte_eth_rxtx_callback *tail = 4903 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id]; 4904 4905 if (!tail) { 4906 /* Stores to cb->fn and cb->param should complete before 4907 * cb is visible to data plane. 4908 */ 4909 __atomic_store_n( 4910 &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id], 4911 cb, __ATOMIC_RELEASE); 4912 4913 } else { 4914 while (tail->next) 4915 tail = tail->next; 4916 /* Stores to cb->fn and cb->param should complete before 4917 * cb is visible to data plane. 4918 */ 4919 __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE); 4920 } 4921 rte_spinlock_unlock(ð_dev_tx_cb_lock); 4922 4923 return cb; 4924 } 4925 4926 int 4927 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, 4928 const struct rte_eth_rxtx_callback *user_cb) 4929 { 4930 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4931 return -ENOTSUP; 4932 #endif 4933 /* Check input parameters. 
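	 * The port must be valid, the callback non-NULL and the queue index
	 * within the number of configured Rx queues.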
*/ 4934 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4935 if (user_cb == NULL || 4936 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) 4937 return -EINVAL; 4938 4939 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4940 struct rte_eth_rxtx_callback *cb; 4941 struct rte_eth_rxtx_callback **prev_cb; 4942 int ret = -EINVAL; 4943 4944 rte_spinlock_lock(ð_dev_rx_cb_lock); 4945 prev_cb = &dev->post_rx_burst_cbs[queue_id]; 4946 for (; *prev_cb != NULL; prev_cb = &cb->next) { 4947 cb = *prev_cb; 4948 if (cb == user_cb) { 4949 /* Remove the user cb from the callback list. */ 4950 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 4951 ret = 0; 4952 break; 4953 } 4954 } 4955 rte_spinlock_unlock(ð_dev_rx_cb_lock); 4956 4957 return ret; 4958 } 4959 4960 int 4961 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, 4962 const struct rte_eth_rxtx_callback *user_cb) 4963 { 4964 #ifndef RTE_ETHDEV_RXTX_CALLBACKS 4965 return -ENOTSUP; 4966 #endif 4967 /* Check input parameters. */ 4968 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 4969 if (user_cb == NULL || 4970 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) 4971 return -EINVAL; 4972 4973 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 4974 int ret = -EINVAL; 4975 struct rte_eth_rxtx_callback *cb; 4976 struct rte_eth_rxtx_callback **prev_cb; 4977 4978 rte_spinlock_lock(ð_dev_tx_cb_lock); 4979 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; 4980 for (; *prev_cb != NULL; prev_cb = &cb->next) { 4981 cb = *prev_cb; 4982 if (cb == user_cb) { 4983 /* Remove the user cb from the callback list. */ 4984 __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED); 4985 ret = 0; 4986 break; 4987 } 4988 } 4989 rte_spinlock_unlock(ð_dev_tx_cb_lock); 4990 4991 return ret; 4992 } 4993 4994 int 4995 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, 4996 struct rte_eth_rxq_info *qinfo) 4997 { 4998 struct rte_eth_dev *dev; 4999 5000 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5001 dev = &rte_eth_devices[port_id]; 5002 5003 if (queue_id >= dev->data->nb_rx_queues) { 5004 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5005 return -EINVAL; 5006 } 5007 5008 if (qinfo == NULL) { 5009 RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n", 5010 port_id, queue_id); 5011 return -EINVAL; 5012 } 5013 5014 if (dev->data->rx_queues == NULL || 5015 dev->data->rx_queues[queue_id] == NULL) { 5016 RTE_ETHDEV_LOG(ERR, 5017 "Rx queue %"PRIu16" of device with port_id=%" 5018 PRIu16" has not been setup\n", 5019 queue_id, port_id); 5020 return -EINVAL; 5021 } 5022 5023 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { 5024 RTE_ETHDEV_LOG(INFO, 5025 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5026 queue_id, port_id); 5027 return -EINVAL; 5028 } 5029 5030 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); 5031 5032 memset(qinfo, 0, sizeof(*qinfo)); 5033 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); 5034 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; 5035 5036 return 0; 5037 } 5038 5039 int 5040 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, 5041 struct rte_eth_txq_info *qinfo) 5042 { 5043 struct rte_eth_dev *dev; 5044 5045 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5046 dev = &rte_eth_devices[port_id]; 5047 5048 if (queue_id >= dev->data->nb_tx_queues) { 5049 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5050 return -EINVAL; 5051 } 5052 5053 if (qinfo == NULL) { 5054 RTE_ETHDEV_LOG(ERR, "Cannot get 
ethdev port %u Tx queue %u info to NULL\n", 5055 port_id, queue_id); 5056 return -EINVAL; 5057 } 5058 5059 if (dev->data->tx_queues == NULL || 5060 dev->data->tx_queues[queue_id] == NULL) { 5061 RTE_ETHDEV_LOG(ERR, 5062 "Tx queue %"PRIu16" of device with port_id=%" 5063 PRIu16" has not been setup\n", 5064 queue_id, port_id); 5065 return -EINVAL; 5066 } 5067 5068 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { 5069 RTE_ETHDEV_LOG(INFO, 5070 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n", 5071 queue_id, port_id); 5072 return -EINVAL; 5073 } 5074 5075 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); 5076 5077 memset(qinfo, 0, sizeof(*qinfo)); 5078 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); 5079 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; 5080 5081 return 0; 5082 } 5083 5084 int 5085 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5086 struct rte_eth_burst_mode *mode) 5087 { 5088 struct rte_eth_dev *dev; 5089 5090 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5091 dev = &rte_eth_devices[port_id]; 5092 5093 if (queue_id >= dev->data->nb_rx_queues) { 5094 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5095 return -EINVAL; 5096 } 5097 5098 if (mode == NULL) { 5099 RTE_ETHDEV_LOG(ERR, 5100 "Cannot get ethdev port %u Rx queue %u burst mode to NULL\n", 5101 port_id, queue_id); 5102 return -EINVAL; 5103 } 5104 5105 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); 5106 memset(mode, 0, sizeof(*mode)); 5107 return eth_err(port_id, 5108 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); 5109 } 5110 5111 int 5112 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, 5113 struct rte_eth_burst_mode *mode) 5114 { 5115 struct rte_eth_dev *dev; 5116 5117 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5118 dev = &rte_eth_devices[port_id]; 5119 5120 if (queue_id >= dev->data->nb_tx_queues) { 5121 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id); 5122 return -EINVAL; 5123 } 5124 5125 if (mode == NULL) { 5126 RTE_ETHDEV_LOG(ERR, 5127 "Cannot get ethdev port %u Tx queue %u burst mode to NULL\n", 5128 port_id, queue_id); 5129 return -EINVAL; 5130 } 5131 5132 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); 5133 memset(mode, 0, sizeof(*mode)); 5134 return eth_err(port_id, 5135 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); 5136 } 5137 5138 int 5139 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, 5140 struct rte_power_monitor_cond *pmc) 5141 { 5142 struct rte_eth_dev *dev; 5143 5144 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5145 dev = &rte_eth_devices[port_id]; 5146 5147 if (queue_id >= dev->data->nb_rx_queues) { 5148 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id); 5149 return -EINVAL; 5150 } 5151 5152 if (pmc == NULL) { 5153 RTE_ETHDEV_LOG(ERR, 5154 "Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n", 5155 port_id, queue_id); 5156 return -EINVAL; 5157 } 5158 5159 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); 5160 return eth_err(port_id, 5161 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); 5162 } 5163 5164 int 5165 rte_eth_dev_set_mc_addr_list(uint16_t port_id, 5166 struct rte_ether_addr *mc_addr_set, 5167 uint32_t nb_mc_addr) 5168 { 5169 struct rte_eth_dev *dev; 5170 5171 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5172 dev = &rte_eth_devices[port_id]; 5173 5174 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); 5175 
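	/* The driver receives the complete list in one call; it replaces any
	 * previously configured multicast filter set.
	 */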
return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, 5176 mc_addr_set, nb_mc_addr)); 5177 } 5178 5179 int 5180 rte_eth_timesync_enable(uint16_t port_id) 5181 { 5182 struct rte_eth_dev *dev; 5183 5184 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5185 dev = &rte_eth_devices[port_id]; 5186 5187 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); 5188 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); 5189 } 5190 5191 int 5192 rte_eth_timesync_disable(uint16_t port_id) 5193 { 5194 struct rte_eth_dev *dev; 5195 5196 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5197 dev = &rte_eth_devices[port_id]; 5198 5199 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); 5200 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); 5201 } 5202 5203 int 5204 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, 5205 uint32_t flags) 5206 { 5207 struct rte_eth_dev *dev; 5208 5209 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5210 dev = &rte_eth_devices[port_id]; 5211 5212 if (timestamp == NULL) { 5213 RTE_ETHDEV_LOG(ERR, 5214 "Cannot read ethdev port %u Rx timestamp to NULL\n", 5215 port_id); 5216 return -EINVAL; 5217 } 5218 5219 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); 5220 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) 5221 (dev, timestamp, flags)); 5222 } 5223 5224 int 5225 rte_eth_timesync_read_tx_timestamp(uint16_t port_id, 5226 struct timespec *timestamp) 5227 { 5228 struct rte_eth_dev *dev; 5229 5230 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5231 dev = &rte_eth_devices[port_id]; 5232 5233 if (timestamp == NULL) { 5234 RTE_ETHDEV_LOG(ERR, 5235 "Cannot read ethdev port %u Tx timestamp to NULL\n", 5236 port_id); 5237 return -EINVAL; 5238 } 5239 5240 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); 5241 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) 5242 (dev, timestamp)); 5243 } 5244 5245 int 5246 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta) 5247 { 5248 struct rte_eth_dev *dev; 5249 5250 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5251 dev = &rte_eth_devices[port_id]; 5252 5253 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); 5254 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); 5255 } 5256 5257 int 5258 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp) 5259 { 5260 struct rte_eth_dev *dev; 5261 5262 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5263 dev = &rte_eth_devices[port_id]; 5264 5265 if (timestamp == NULL) { 5266 RTE_ETHDEV_LOG(ERR, 5267 "Cannot read ethdev port %u timesync time to NULL\n", 5268 port_id); 5269 return -EINVAL; 5270 } 5271 5272 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); 5273 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, 5274 timestamp)); 5275 } 5276 5277 int 5278 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) 5279 { 5280 struct rte_eth_dev *dev; 5281 5282 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5283 dev = &rte_eth_devices[port_id]; 5284 5285 if (timestamp == NULL) { 5286 RTE_ETHDEV_LOG(ERR, 5287 "Cannot write ethdev port %u timesync from NULL time\n", 5288 port_id); 5289 return -EINVAL; 5290 } 5291 5292 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); 5293 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, 5294 timestamp)); 5295 } 5296 5297 
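/*
 * Illustrative sketch (not part of the library): how an application might
 * drive the timesync API above to timestamp IEEE 1588/PTP frames. It assumes
 * the PMD implements the timesync_* dev_ops, the port is started, and that
 * "idx" is whatever device-specific value the PMD expects in the flags
 * argument of the Rx timestamp read (several PMDs use the timesync index
 * reported in the received mbuf). compute_offset() is an application-defined
 * placeholder and error handling is reduced to the minimum.
 *
 *	struct timespec rx_ts, tx_ts;
 *	int64_t offset_ns;
 *
 *	if (rte_eth_timesync_enable(port_id) != 0)
 *		return;			// PMD has no PTP support
 *
 *	// After receiving a frame with RTE_MBUF_F_RX_IEEE1588_TMST set:
 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &rx_ts, idx) == 0)
 *		offset_ns = compute_offset(&rx_ts);
 *
 *	// After sending a frame with RTE_MBUF_F_TX_IEEE1588_TMST set, the
 *	// Tx timestamp may need polling until the NIC latches it:
 *	while (rte_eth_timesync_read_tx_timestamp(port_id, &tx_ts) != 0)
 *		rte_delay_us(10);
 *
 *	rte_eth_timesync_adjust_time(port_id, offset_ns);
 *	rte_eth_timesync_disable(port_id);
 */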
int 5298 rte_eth_read_clock(uint16_t port_id, uint64_t *clock) 5299 { 5300 struct rte_eth_dev *dev; 5301 5302 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5303 dev = &rte_eth_devices[port_id]; 5304 5305 if (clock == NULL) { 5306 RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n", 5307 port_id); 5308 return -EINVAL; 5309 } 5310 5311 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); 5312 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); 5313 } 5314 5315 int 5316 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) 5317 { 5318 struct rte_eth_dev *dev; 5319 5320 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5321 dev = &rte_eth_devices[port_id]; 5322 5323 if (info == NULL) { 5324 RTE_ETHDEV_LOG(ERR, 5325 "Cannot get ethdev port %u register info to NULL\n", 5326 port_id); 5327 return -EINVAL; 5328 } 5329 5330 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); 5331 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); 5332 } 5333 5334 int 5335 rte_eth_dev_get_eeprom_length(uint16_t port_id) 5336 { 5337 struct rte_eth_dev *dev; 5338 5339 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5340 dev = &rte_eth_devices[port_id]; 5341 5342 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); 5343 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); 5344 } 5345 5346 int 5347 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5348 { 5349 struct rte_eth_dev *dev; 5350 5351 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5352 dev = &rte_eth_devices[port_id]; 5353 5354 if (info == NULL) { 5355 RTE_ETHDEV_LOG(ERR, 5356 "Cannot get ethdev port %u EEPROM info to NULL\n", 5357 port_id); 5358 return -EINVAL; 5359 } 5360 5361 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); 5362 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); 5363 } 5364 5365 int 5366 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) 5367 { 5368 struct rte_eth_dev *dev; 5369 5370 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5371 dev = &rte_eth_devices[port_id]; 5372 5373 if (info == NULL) { 5374 RTE_ETHDEV_LOG(ERR, 5375 "Cannot set ethdev port %u EEPROM from NULL info\n", 5376 port_id); 5377 return -EINVAL; 5378 } 5379 5380 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); 5381 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); 5382 } 5383 5384 int 5385 rte_eth_dev_get_module_info(uint16_t port_id, 5386 struct rte_eth_dev_module_info *modinfo) 5387 { 5388 struct rte_eth_dev *dev; 5389 5390 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5391 dev = &rte_eth_devices[port_id]; 5392 5393 if (modinfo == NULL) { 5394 RTE_ETHDEV_LOG(ERR, 5395 "Cannot get ethdev port %u EEPROM module info to NULL\n", 5396 port_id); 5397 return -EINVAL; 5398 } 5399 5400 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); 5401 return (*dev->dev_ops->get_module_info)(dev, modinfo); 5402 } 5403 5404 int 5405 rte_eth_dev_get_module_eeprom(uint16_t port_id, 5406 struct rte_dev_eeprom_info *info) 5407 { 5408 struct rte_eth_dev *dev; 5409 5410 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5411 dev = &rte_eth_devices[port_id]; 5412 5413 if (info == NULL) { 5414 RTE_ETHDEV_LOG(ERR, 5415 "Cannot get ethdev port %u module EEPROM info to NULL\n", 5416 port_id); 5417 return -EINVAL; 5418 } 5419 5420 if (info->data == NULL) { 5421 RTE_ETHDEV_LOG(ERR, 5422 "Cannot get ethdev port %u module EEPROM data to NULL\n", 5423 port_id); 5424 
return -EINVAL; 5425 } 5426 5427 if (info->length == 0) { 5428 RTE_ETHDEV_LOG(ERR, 5429 "Cannot get ethdev port %u module EEPROM to data with zero size\n", 5430 port_id); 5431 return -EINVAL; 5432 } 5433 5434 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); 5435 return (*dev->dev_ops->get_module_eeprom)(dev, info); 5436 } 5437 5438 int 5439 rte_eth_dev_get_dcb_info(uint16_t port_id, 5440 struct rte_eth_dcb_info *dcb_info) 5441 { 5442 struct rte_eth_dev *dev; 5443 5444 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5445 dev = &rte_eth_devices[port_id]; 5446 5447 if (dcb_info == NULL) { 5448 RTE_ETHDEV_LOG(ERR, 5449 "Cannot get ethdev port %u DCB info to NULL\n", 5450 port_id); 5451 return -EINVAL; 5452 } 5453 5454 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); 5455 5456 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); 5457 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); 5458 } 5459 5460 static void 5461 eth_dev_adjust_nb_desc(uint16_t *nb_desc, 5462 const struct rte_eth_desc_lim *desc_lim) 5463 { 5464 if (desc_lim->nb_align != 0) 5465 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); 5466 5467 if (desc_lim->nb_max != 0) 5468 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); 5469 5470 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); 5471 } 5472 5473 int 5474 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, 5475 uint16_t *nb_rx_desc, 5476 uint16_t *nb_tx_desc) 5477 { 5478 struct rte_eth_dev_info dev_info; 5479 int ret; 5480 5481 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5482 5483 ret = rte_eth_dev_info_get(port_id, &dev_info); 5484 if (ret != 0) 5485 return ret; 5486 5487 if (nb_rx_desc != NULL) 5488 eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); 5489 5490 if (nb_tx_desc != NULL) 5491 eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim); 5492 5493 return 0; 5494 } 5495 5496 int 5497 rte_eth_dev_hairpin_capability_get(uint16_t port_id, 5498 struct rte_eth_hairpin_cap *cap) 5499 { 5500 struct rte_eth_dev *dev; 5501 5502 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5503 dev = &rte_eth_devices[port_id]; 5504 5505 if (cap == NULL) { 5506 RTE_ETHDEV_LOG(ERR, 5507 "Cannot get ethdev port %u hairpin capability to NULL\n", 5508 port_id); 5509 return -EINVAL; 5510 } 5511 5512 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); 5513 memset(cap, 0, sizeof(*cap)); 5514 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); 5515 } 5516 5517 int 5518 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool) 5519 { 5520 struct rte_eth_dev *dev; 5521 5522 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5523 dev = &rte_eth_devices[port_id]; 5524 5525 if (pool == NULL) { 5526 RTE_ETHDEV_LOG(ERR, 5527 "Cannot test ethdev port %u mempool operation from NULL pool\n", 5528 port_id); 5529 return -EINVAL; 5530 } 5531 5532 if (*dev->dev_ops->pool_ops_supported == NULL) 5533 return 1; /* all pools are supported */ 5534 5535 return (*dev->dev_ops->pool_ops_supported)(dev, pool); 5536 } 5537 5538 static int 5539 eth_dev_handle_port_list(const char *cmd __rte_unused, 5540 const char *params __rte_unused, 5541 struct rte_tel_data *d) 5542 { 5543 int port_id; 5544 5545 rte_tel_data_start_array(d, RTE_TEL_INT_VAL); 5546 RTE_ETH_FOREACH_DEV(port_id) 5547 rte_tel_data_add_array_int(d, port_id); 5548 return 0; 5549 } 5550 5551 static void 5552 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, 5553 const char *stat_name) 5554 { 5555 int q; 5556 struct 
rte_tel_data *q_data = rte_tel_data_alloc(); 5557 if (q_data == NULL) 5558 return; 5559 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); 5560 for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) 5561 rte_tel_data_add_array_u64(q_data, q_stats[q]); 5562 rte_tel_data_add_dict_container(d, stat_name, q_data, 0); 5563 } 5564 5565 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s) 5566 5567 static int 5568 eth_dev_handle_port_stats(const char *cmd __rte_unused, 5569 const char *params, 5570 struct rte_tel_data *d) 5571 { 5572 struct rte_eth_stats stats; 5573 int port_id, ret; 5574 5575 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5576 return -1; 5577 5578 port_id = atoi(params); 5579 if (!rte_eth_dev_is_valid_port(port_id)) 5580 return -1; 5581 5582 ret = rte_eth_stats_get(port_id, &stats); 5583 if (ret < 0) 5584 return -1; 5585 5586 rte_tel_data_start_dict(d); 5587 ADD_DICT_STAT(stats, ipackets); 5588 ADD_DICT_STAT(stats, opackets); 5589 ADD_DICT_STAT(stats, ibytes); 5590 ADD_DICT_STAT(stats, obytes); 5591 ADD_DICT_STAT(stats, imissed); 5592 ADD_DICT_STAT(stats, ierrors); 5593 ADD_DICT_STAT(stats, oerrors); 5594 ADD_DICT_STAT(stats, rx_nombuf); 5595 eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets"); 5596 eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets"); 5597 eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes"); 5598 eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes"); 5599 eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors"); 5600 5601 return 0; 5602 } 5603 5604 static int 5605 eth_dev_handle_port_xstats(const char *cmd __rte_unused, 5606 const char *params, 5607 struct rte_tel_data *d) 5608 { 5609 struct rte_eth_xstat *eth_xstats; 5610 struct rte_eth_xstat_name *xstat_names; 5611 int port_id, num_xstats; 5612 int i, ret; 5613 char *end_param; 5614 5615 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5616 return -1; 5617 5618 port_id = strtoul(params, &end_param, 0); 5619 if (*end_param != '\0') 5620 RTE_ETHDEV_LOG(NOTICE, 5621 "Extra parameters passed to ethdev telemetry command, ignoring"); 5622 if (!rte_eth_dev_is_valid_port(port_id)) 5623 return -1; 5624 5625 num_xstats = rte_eth_xstats_get(port_id, NULL, 0); 5626 if (num_xstats < 0) 5627 return -1; 5628 5629 /* use one malloc for both names and stats */ 5630 eth_xstats = malloc((sizeof(struct rte_eth_xstat) + 5631 sizeof(struct rte_eth_xstat_name)) * num_xstats); 5632 if (eth_xstats == NULL) 5633 return -1; 5634 xstat_names = (void *)ð_xstats[num_xstats]; 5635 5636 ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats); 5637 if (ret < 0 || ret > num_xstats) { 5638 free(eth_xstats); 5639 return -1; 5640 } 5641 5642 ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats); 5643 if (ret < 0 || ret > num_xstats) { 5644 free(eth_xstats); 5645 return -1; 5646 } 5647 5648 rte_tel_data_start_dict(d); 5649 for (i = 0; i < num_xstats; i++) 5650 rte_tel_data_add_dict_u64(d, xstat_names[i].name, 5651 eth_xstats[i].value); 5652 free(eth_xstats); 5653 return 0; 5654 } 5655 5656 static int 5657 eth_dev_handle_port_link_status(const char *cmd __rte_unused, 5658 const char *params, 5659 struct rte_tel_data *d) 5660 { 5661 static const char *status_str = "status"; 5662 int ret, port_id; 5663 struct rte_eth_link link; 5664 char *end_param; 5665 5666 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5667 return -1; 5668 5669 port_id = strtoul(params, &end_param, 0); 5670 if (*end_param != '\0') 5671 RTE_ETHDEV_LOG(NOTICE, 5672 
"Extra parameters passed to ethdev telemetry command, ignoring"); 5673 if (!rte_eth_dev_is_valid_port(port_id)) 5674 return -1; 5675 5676 ret = rte_eth_link_get_nowait(port_id, &link); 5677 if (ret < 0) 5678 return -1; 5679 5680 rte_tel_data_start_dict(d); 5681 if (!link.link_status) { 5682 rte_tel_data_add_dict_string(d, status_str, "DOWN"); 5683 return 0; 5684 } 5685 rte_tel_data_add_dict_string(d, status_str, "UP"); 5686 rte_tel_data_add_dict_u64(d, "speed", link.link_speed); 5687 rte_tel_data_add_dict_string(d, "duplex", 5688 (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ? 5689 "full-duplex" : "half-duplex"); 5690 return 0; 5691 } 5692 5693 static int 5694 eth_dev_handle_port_info(const char *cmd __rte_unused, 5695 const char *params, 5696 struct rte_tel_data *d) 5697 { 5698 struct rte_tel_data *rxq_state, *txq_state; 5699 char mac_addr[RTE_ETHER_ADDR_FMT_SIZE]; 5700 struct rte_eth_dev *eth_dev; 5701 char *end_param; 5702 int port_id, i; 5703 5704 if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 5705 return -1; 5706 5707 port_id = strtoul(params, &end_param, 0); 5708 if (*end_param != '\0') 5709 RTE_ETHDEV_LOG(NOTICE, 5710 "Extra parameters passed to ethdev telemetry command, ignoring"); 5711 5712 if (!rte_eth_dev_is_valid_port(port_id)) 5713 return -EINVAL; 5714 5715 eth_dev = &rte_eth_devices[port_id]; 5716 5717 rxq_state = rte_tel_data_alloc(); 5718 if (!rxq_state) 5719 return -ENOMEM; 5720 5721 txq_state = rte_tel_data_alloc(); 5722 if (!txq_state) { 5723 rte_tel_data_free(rxq_state); 5724 return -ENOMEM; 5725 } 5726 5727 rte_tel_data_start_dict(d); 5728 rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); 5729 rte_tel_data_add_dict_int(d, "state", eth_dev->state); 5730 rte_tel_data_add_dict_int(d, "nb_rx_queues", 5731 eth_dev->data->nb_rx_queues); 5732 rte_tel_data_add_dict_int(d, "nb_tx_queues", 5733 eth_dev->data->nb_tx_queues); 5734 rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); 5735 rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); 5736 rte_tel_data_add_dict_int(d, "rx_mbuf_size_min", 5737 eth_dev->data->min_rx_buf_size); 5738 rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", 5739 eth_dev->data->rx_mbuf_alloc_failed); 5740 rte_ether_format_addr(mac_addr, sizeof(mac_addr), 5741 eth_dev->data->mac_addrs); 5742 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); 5743 rte_tel_data_add_dict_int(d, "promiscuous", 5744 eth_dev->data->promiscuous); 5745 rte_tel_data_add_dict_int(d, "scattered_rx", 5746 eth_dev->data->scattered_rx); 5747 rte_tel_data_add_dict_int(d, "all_multicast", 5748 eth_dev->data->all_multicast); 5749 rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started); 5750 rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro); 5751 rte_tel_data_add_dict_int(d, "dev_configured", 5752 eth_dev->data->dev_configured); 5753 5754 rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL); 5755 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) 5756 rte_tel_data_add_array_int(rxq_state, 5757 eth_dev->data->rx_queue_state[i]); 5758 5759 rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL); 5760 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) 5761 rte_tel_data_add_array_int(txq_state, 5762 eth_dev->data->tx_queue_state[i]); 5763 5764 rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); 5765 rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); 5766 rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); 5767 rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags); 5768 
rte_tel_data_add_dict_int(d, "rx_offloads", 5769 eth_dev->data->dev_conf.rxmode.offloads); 5770 rte_tel_data_add_dict_int(d, "tx_offloads", 5771 eth_dev->data->dev_conf.txmode.offloads); 5772 rte_tel_data_add_dict_int(d, "ethdev_rss_hf", 5773 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); 5774 5775 return 0; 5776 } 5777 5778 int 5779 rte_eth_representor_info_get(uint16_t port_id, 5780 struct rte_eth_representor_info *info) 5781 { 5782 struct rte_eth_dev *dev; 5783 5784 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5785 dev = &rte_eth_devices[port_id]; 5786 5787 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP); 5788 return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info)); 5789 } 5790 5791 int 5792 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features) 5793 { 5794 struct rte_eth_dev *dev; 5795 5796 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5797 dev = &rte_eth_devices[port_id]; 5798 5799 if (dev->data->dev_configured != 0) { 5800 RTE_ETHDEV_LOG(ERR, 5801 "The port (ID=%"PRIu16") is already configured\n", 5802 port_id); 5803 return -EBUSY; 5804 } 5805 5806 if (features == NULL) { 5807 RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n"); 5808 return -EINVAL; 5809 } 5810 5811 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP); 5812 return eth_err(port_id, 5813 (*dev->dev_ops->rx_metadata_negotiate)(dev, features)); 5814 } 5815 5816 int 5817 rte_eth_ip_reassembly_capability_get(uint16_t port_id, 5818 struct rte_eth_ip_reassembly_params *reassembly_capa) 5819 { 5820 struct rte_eth_dev *dev; 5821 5822 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5823 dev = &rte_eth_devices[port_id]; 5824 5825 if (dev->data->dev_configured == 0) { 5826 RTE_ETHDEV_LOG(ERR, 5827 "Device with port_id=%u is not configured.\n" 5828 "Cannot get IP reassembly capability\n", 5829 port_id); 5830 return -EINVAL; 5831 } 5832 5833 if (reassembly_capa == NULL) { 5834 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL"); 5835 return -EINVAL; 5836 } 5837 5838 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_capability_get, 5839 -ENOTSUP); 5840 memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params)); 5841 5842 return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get) 5843 (dev, reassembly_capa)); 5844 } 5845 5846 int 5847 rte_eth_ip_reassembly_conf_get(uint16_t port_id, 5848 struct rte_eth_ip_reassembly_params *conf) 5849 { 5850 struct rte_eth_dev *dev; 5851 5852 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5853 dev = &rte_eth_devices[port_id]; 5854 5855 if (dev->data->dev_configured == 0) { 5856 RTE_ETHDEV_LOG(ERR, 5857 "Device with port_id=%u is not configured.\n" 5858 "Cannot get IP reassembly configuration\n", 5859 port_id); 5860 return -EINVAL; 5861 } 5862 5863 if (conf == NULL) { 5864 RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL"); 5865 return -EINVAL; 5866 } 5867 5868 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_get, 5869 -ENOTSUP); 5870 memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params)); 5871 return eth_err(port_id, 5872 (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf)); 5873 } 5874 5875 int 5876 rte_eth_ip_reassembly_conf_set(uint16_t port_id, 5877 const struct rte_eth_ip_reassembly_params *conf) 5878 { 5879 struct rte_eth_dev *dev; 5880 5881 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5882 dev = &rte_eth_devices[port_id]; 5883 5884 if (dev->data->dev_configured == 0) { 5885 RTE_ETHDEV_LOG(ERR, 5886 "Device with port_id=%u is not 
configured.\n" 5887 "Cannot set IP reassembly configuration", 5888 port_id); 5889 return -EINVAL; 5890 } 5891 5892 if (dev->data->dev_started != 0) { 5893 RTE_ETHDEV_LOG(ERR, 5894 "Device with port_id=%u started,\n" 5895 "cannot configure IP reassembly params.\n", 5896 port_id); 5897 return -EINVAL; 5898 } 5899 5900 if (conf == NULL) { 5901 RTE_ETHDEV_LOG(ERR, 5902 "Invalid IP reassembly configuration (NULL)\n"); 5903 return -EINVAL; 5904 } 5905 5906 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_set, 5907 -ENOTSUP); 5908 return eth_err(port_id, 5909 (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf)); 5910 } 5911 5912 int 5913 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file) 5914 { 5915 struct rte_eth_dev *dev; 5916 5917 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); 5918 dev = &rte_eth_devices[port_id]; 5919 5920 if (file == NULL) { 5921 RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n"); 5922 return -EINVAL; 5923 } 5924 5925 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_dev_priv_dump, -ENOTSUP); 5926 return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file)); 5927 } 5928 5929 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO); 5930 5931 RTE_INIT(ethdev_init_telemetry) 5932 { 5933 rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list, 5934 "Returns list of available ethdev ports. Takes no parameters"); 5935 rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats, 5936 "Returns the common stats for a port. Parameters: int port_id"); 5937 rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats, 5938 "Returns the extended stats for a port. Parameters: int port_id"); 5939 rte_telemetry_register_cmd("/ethdev/link_status", 5940 eth_dev_handle_port_link_status, 5941 "Returns the link status for a port. Parameters: int port_id"); 5942 rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info, 5943 "Returns the device info for a port. Parameters: int port_id"); 5944 rte_telemetry_register_cmd("/ethdev/module_eeprom", eth_dev_handle_port_module_eeprom, 5945 "Returns module EEPROM info with SFF specs. Parameters: int port_id"); 5946 } 5947