/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_net.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	0
#define DRV_MODULE_VER_SUBMINOR	3

#define ENA_IO_TXQ_IDX(q)	(2 * (q))
#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
/* Reverse version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)	(((q) - 1) / 2)

/* While processing submitted and completed descriptors (rx and tx path
 * respectively) in a loop it is desired to:
 *  - perform batch submissions while populating the submission queue
 *  - avoid blocking transmission of other packets during cleanup phase
 * Hence the utilization ratio of 1/8 of a queue size.
 */
#define ENA_RING_DESCS_RATIO(ring_size)	(ring_size / 8)

#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))

#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)

#define ENA_RX_RSS_TABLE_LOG_SIZE	7
#define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)
#define ENA_HASH_KEY_SIZE	40
#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

#define ENA_MIN_RING_DESC	128

enum ethtool_stringset {
	ETH_SS_TEST = 0,
	ETH_SS_STATS,
};

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

#define ENA_MAX_RING_SIZE_RX 8192
#define ENA_MAX_RING_SIZE_TX 1024

/*
 * Each rte_memzone should have a unique name.
 * To satisfy this, count the number of allocations and add it to the name.
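 *
 * A minimal sketch of the idea (illustrative only; the exact name format and
 * increment are defined by the allocation helpers in ena_ethdev.h):
 *
 *   snprintf(name, sizeof(name), "ena_alloc_%d",
 *            rte_atomic32_add_return(&ena_alloc_cnt, 1));
 *   mz = rte_memzone_reserve(name, len, socket_id, 0);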
91 */ 92 rte_atomic32_t ena_alloc_cnt; 93 94 static const struct ena_stats ena_stats_global_strings[] = { 95 ENA_STAT_GLOBAL_ENTRY(wd_expired), 96 ENA_STAT_GLOBAL_ENTRY(dev_start), 97 ENA_STAT_GLOBAL_ENTRY(dev_stop), 98 }; 99 100 static const struct ena_stats ena_stats_tx_strings[] = { 101 ENA_STAT_TX_ENTRY(cnt), 102 ENA_STAT_TX_ENTRY(bytes), 103 ENA_STAT_TX_ENTRY(prepare_ctx_err), 104 ENA_STAT_TX_ENTRY(linearize), 105 ENA_STAT_TX_ENTRY(linearize_failed), 106 ENA_STAT_TX_ENTRY(tx_poll), 107 ENA_STAT_TX_ENTRY(doorbells), 108 ENA_STAT_TX_ENTRY(bad_req_id), 109 ENA_STAT_TX_ENTRY(available_desc), 110 }; 111 112 static const struct ena_stats ena_stats_rx_strings[] = { 113 ENA_STAT_RX_ENTRY(cnt), 114 ENA_STAT_RX_ENTRY(bytes), 115 ENA_STAT_RX_ENTRY(refill_partial), 116 ENA_STAT_RX_ENTRY(bad_csum), 117 ENA_STAT_RX_ENTRY(mbuf_alloc_fail), 118 ENA_STAT_RX_ENTRY(bad_desc_num), 119 ENA_STAT_RX_ENTRY(bad_req_id), 120 }; 121 122 #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) 123 #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) 124 #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) 125 126 #define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\ 127 DEV_TX_OFFLOAD_UDP_CKSUM |\ 128 DEV_TX_OFFLOAD_IPV4_CKSUM |\ 129 DEV_TX_OFFLOAD_TCP_TSO) 130 #define MBUF_OFFLOADS (PKT_TX_L4_MASK |\ 131 PKT_TX_IP_CKSUM |\ 132 PKT_TX_TCP_SEG) 133 134 /** Vendor ID used by Amazon devices */ 135 #define PCI_VENDOR_ID_AMAZON 0x1D0F 136 /** Amazon devices */ 137 #define PCI_DEVICE_ID_ENA_VF 0xEC20 138 #define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21 139 140 #define ENA_TX_OFFLOAD_MASK (\ 141 PKT_TX_L4_MASK | \ 142 PKT_TX_IPV6 | \ 143 PKT_TX_IPV4 | \ 144 PKT_TX_IP_CKSUM | \ 145 PKT_TX_TCP_SEG) 146 147 #define ENA_TX_OFFLOAD_NOTSUP_MASK \ 148 (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK) 149 150 int ena_logtype_init; 151 int ena_logtype_driver; 152 153 #ifdef RTE_LIBRTE_ENA_DEBUG_RX 154 int ena_logtype_rx; 155 #endif 156 #ifdef RTE_LIBRTE_ENA_DEBUG_TX 157 int ena_logtype_tx; 158 #endif 159 #ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE 160 int ena_logtype_tx_free; 161 #endif 162 #ifdef RTE_LIBRTE_ENA_COM_DEBUG 163 int ena_logtype_com; 164 #endif 165 166 static const struct rte_pci_id pci_id_ena_map[] = { 167 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) }, 168 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) }, 169 { .device_id = 0 }, 170 }; 171 172 static struct ena_aenq_handlers aenq_handlers; 173 174 static int ena_device_init(struct ena_com_dev *ena_dev, 175 struct ena_com_dev_get_features_ctx *get_feat_ctx, 176 bool *wd_state); 177 static int ena_dev_configure(struct rte_eth_dev *dev); 178 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 179 uint16_t nb_pkts); 180 static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 181 uint16_t nb_pkts); 182 static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 183 uint16_t nb_desc, unsigned int socket_id, 184 const struct rte_eth_txconf *tx_conf); 185 static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 186 uint16_t nb_desc, unsigned int socket_id, 187 const struct rte_eth_rxconf *rx_conf, 188 struct rte_mempool *mp); 189 static uint16_t eth_ena_recv_pkts(void *rx_queue, 190 struct rte_mbuf **rx_pkts, uint16_t nb_pkts); 191 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count); 192 static void ena_init_rings(struct ena_adapter *adapter); 193 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 194 static int 
ena_start(struct rte_eth_dev *dev); 195 static void ena_stop(struct rte_eth_dev *dev); 196 static void ena_close(struct rte_eth_dev *dev); 197 static int ena_dev_reset(struct rte_eth_dev *dev); 198 static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); 199 static void ena_rx_queue_release_all(struct rte_eth_dev *dev); 200 static void ena_tx_queue_release_all(struct rte_eth_dev *dev); 201 static void ena_rx_queue_release(void *queue); 202 static void ena_tx_queue_release(void *queue); 203 static void ena_rx_queue_release_bufs(struct ena_ring *ring); 204 static void ena_tx_queue_release_bufs(struct ena_ring *ring); 205 static int ena_link_update(struct rte_eth_dev *dev, 206 int wait_to_complete); 207 static int ena_create_io_queue(struct ena_ring *ring); 208 static void ena_queue_stop(struct ena_ring *ring); 209 static void ena_queue_stop_all(struct rte_eth_dev *dev, 210 enum ena_ring_type ring_type); 211 static int ena_queue_start(struct ena_ring *ring); 212 static int ena_queue_start_all(struct rte_eth_dev *dev, 213 enum ena_ring_type ring_type); 214 static void ena_stats_restart(struct rte_eth_dev *dev); 215 static int ena_infos_get(struct rte_eth_dev *dev, 216 struct rte_eth_dev_info *dev_info); 217 static int ena_rss_reta_update(struct rte_eth_dev *dev, 218 struct rte_eth_rss_reta_entry64 *reta_conf, 219 uint16_t reta_size); 220 static int ena_rss_reta_query(struct rte_eth_dev *dev, 221 struct rte_eth_rss_reta_entry64 *reta_conf, 222 uint16_t reta_size); 223 static void ena_interrupt_handler_rte(void *cb_arg); 224 static void ena_timer_wd_callback(struct rte_timer *timer, void *arg); 225 static void ena_destroy_device(struct rte_eth_dev *eth_dev); 226 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev); 227 static int ena_xstats_get_names(struct rte_eth_dev *dev, 228 struct rte_eth_xstat_name *xstats_names, 229 unsigned int n); 230 static int ena_xstats_get(struct rte_eth_dev *dev, 231 struct rte_eth_xstat *stats, 232 unsigned int n); 233 static int ena_xstats_get_by_id(struct rte_eth_dev *dev, 234 const uint64_t *ids, 235 uint64_t *values, 236 unsigned int n); 237 238 static const struct eth_dev_ops ena_dev_ops = { 239 .dev_configure = ena_dev_configure, 240 .dev_infos_get = ena_infos_get, 241 .rx_queue_setup = ena_rx_queue_setup, 242 .tx_queue_setup = ena_tx_queue_setup, 243 .dev_start = ena_start, 244 .dev_stop = ena_stop, 245 .link_update = ena_link_update, 246 .stats_get = ena_stats_get, 247 .xstats_get_names = ena_xstats_get_names, 248 .xstats_get = ena_xstats_get, 249 .xstats_get_by_id = ena_xstats_get_by_id, 250 .mtu_set = ena_mtu_set, 251 .rx_queue_release = ena_rx_queue_release, 252 .tx_queue_release = ena_tx_queue_release, 253 .dev_close = ena_close, 254 .dev_reset = ena_dev_reset, 255 .reta_update = ena_rss_reta_update, 256 .reta_query = ena_rss_reta_query, 257 }; 258 259 void ena_rss_key_fill(void *key, size_t size) 260 { 261 static bool key_generated; 262 static uint8_t default_key[ENA_HASH_KEY_SIZE]; 263 size_t i; 264 265 RTE_ASSERT(size <= ENA_HASH_KEY_SIZE); 266 267 if (!key_generated) { 268 for (i = 0; i < ENA_HASH_KEY_SIZE; ++i) 269 default_key[i] = rte_rand() & 0xff; 270 key_generated = true; 271 } 272 273 rte_memcpy(key, default_key, size); 274 } 275 276 static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, 277 struct ena_com_rx_ctx *ena_rx_ctx) 278 { 279 uint64_t ol_flags = 0; 280 uint32_t packet_type = 0; 281 282 if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) 283 packet_type |= RTE_PTYPE_L4_TCP; 284 else if 
(ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP) 285 packet_type |= RTE_PTYPE_L4_UDP; 286 287 if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) 288 packet_type |= RTE_PTYPE_L3_IPV4; 289 else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) 290 packet_type |= RTE_PTYPE_L3_IPV6; 291 292 if (!ena_rx_ctx->l4_csum_checked) 293 ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; 294 else 295 if (unlikely(ena_rx_ctx->l4_csum_err) && !ena_rx_ctx->frag) 296 ol_flags |= PKT_RX_L4_CKSUM_BAD; 297 else 298 ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; 299 300 if (unlikely(ena_rx_ctx->l3_csum_err)) 301 ol_flags |= PKT_RX_IP_CKSUM_BAD; 302 303 mbuf->ol_flags = ol_flags; 304 mbuf->packet_type = packet_type; 305 } 306 307 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, 308 struct ena_com_tx_ctx *ena_tx_ctx, 309 uint64_t queue_offloads) 310 { 311 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; 312 313 if ((mbuf->ol_flags & MBUF_OFFLOADS) && 314 (queue_offloads & QUEUE_OFFLOADS)) { 315 /* check if TSO is required */ 316 if ((mbuf->ol_flags & PKT_TX_TCP_SEG) && 317 (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) { 318 ena_tx_ctx->tso_enable = true; 319 320 ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf); 321 } 322 323 /* check if L3 checksum is needed */ 324 if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) && 325 (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) 326 ena_tx_ctx->l3_csum_enable = true; 327 328 if (mbuf->ol_flags & PKT_TX_IPV6) { 329 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 330 } else { 331 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 332 333 /* set don't fragment (DF) flag */ 334 if (mbuf->packet_type & 335 (RTE_PTYPE_L4_NONFRAG 336 | RTE_PTYPE_INNER_L4_NONFRAG)) 337 ena_tx_ctx->df = true; 338 } 339 340 /* check if L4 checksum is needed */ 341 if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) && 342 (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) { 343 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 344 ena_tx_ctx->l4_csum_enable = true; 345 } else if (((mbuf->ol_flags & PKT_TX_L4_MASK) == 346 PKT_TX_UDP_CKSUM) && 347 (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) { 348 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 349 ena_tx_ctx->l4_csum_enable = true; 350 } else { 351 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; 352 ena_tx_ctx->l4_csum_enable = false; 353 } 354 355 ena_meta->mss = mbuf->tso_segsz; 356 ena_meta->l3_hdr_len = mbuf->l3_len; 357 ena_meta->l3_hdr_offset = mbuf->l2_len; 358 359 ena_tx_ctx->meta_valid = true; 360 } else { 361 ena_tx_ctx->meta_valid = false; 362 } 363 } 364 365 static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) 366 { 367 if (likely(req_id < rx_ring->ring_size)) 368 return 0; 369 370 PMD_DRV_LOG(ERR, "Invalid rx req_id: %hu\n", req_id); 371 372 rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; 373 rx_ring->adapter->trigger_reset = true; 374 ++rx_ring->rx_stats.bad_req_id; 375 376 return -EFAULT; 377 } 378 379 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) 380 { 381 struct ena_tx_buffer *tx_info = NULL; 382 383 if (likely(req_id < tx_ring->ring_size)) { 384 tx_info = &tx_ring->tx_buffer_info[req_id]; 385 if (likely(tx_info->mbuf)) 386 return 0; 387 } 388 389 if (tx_info) 390 PMD_DRV_LOG(ERR, "tx_info doesn't have valid mbuf\n"); 391 else 392 PMD_DRV_LOG(ERR, "Invalid req_id: %hu\n", req_id); 393 394 /* Trigger device reset */ 395 ++tx_ring->tx_stats.bad_req_id; 396 tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; 397 tx_ring->adapter->trigger_reset = true; 398 return -EFAULT; 399 
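	/*
	 * Note: the reset is not performed here. The watchdog timer callback
	 * (ena_timer_wd_callback) notices trigger_reset and raises
	 * RTE_ETH_EVENT_INTR_RESET so that the application can recover the
	 * port, typically by calling rte_eth_dev_reset().
	 */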
} 400 401 static void ena_config_host_info(struct ena_com_dev *ena_dev) 402 { 403 struct ena_admin_host_info *host_info; 404 int rc; 405 406 /* Allocate only the host info */ 407 rc = ena_com_allocate_host_info(ena_dev); 408 if (rc) { 409 PMD_DRV_LOG(ERR, "Cannot allocate host info\n"); 410 return; 411 } 412 413 host_info = ena_dev->host_attr.host_info; 414 415 host_info->os_type = ENA_ADMIN_OS_DPDK; 416 host_info->kernel_ver = RTE_VERSION; 417 strlcpy((char *)host_info->kernel_ver_str, rte_version(), 418 sizeof(host_info->kernel_ver_str)); 419 host_info->os_dist = RTE_VERSION; 420 strlcpy((char *)host_info->os_dist_str, rte_version(), 421 sizeof(host_info->os_dist_str)); 422 host_info->driver_version = 423 (DRV_MODULE_VER_MAJOR) | 424 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 425 (DRV_MODULE_VER_SUBMINOR << 426 ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 427 host_info->num_cpus = rte_lcore_count(); 428 429 host_info->driver_supported_features = 430 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK; 431 432 rc = ena_com_set_host_attributes(ena_dev); 433 if (rc) { 434 if (rc == -ENA_COM_UNSUPPORTED) 435 PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); 436 else 437 PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); 438 439 goto err; 440 } 441 442 return; 443 444 err: 445 ena_com_delete_host_info(ena_dev); 446 } 447 448 /* This function calculates the number of xstats based on the current config */ 449 static unsigned int ena_xstats_calc_num(struct rte_eth_dev *dev) 450 { 451 return ENA_STATS_ARRAY_GLOBAL + 452 (dev->data->nb_tx_queues * ENA_STATS_ARRAY_TX) + 453 (dev->data->nb_rx_queues * ENA_STATS_ARRAY_RX); 454 } 455 456 static void ena_config_debug_area(struct ena_adapter *adapter) 457 { 458 u32 debug_area_size; 459 int rc, ss_count; 460 461 ss_count = ena_xstats_calc_num(adapter->rte_dev); 462 463 /* allocate 32 bytes for each string and 64bit for the value */ 464 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 465 466 rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); 467 if (rc) { 468 PMD_DRV_LOG(ERR, "Cannot allocate debug area\n"); 469 return; 470 } 471 472 rc = ena_com_set_host_attributes(&adapter->ena_dev); 473 if (rc) { 474 if (rc == -ENA_COM_UNSUPPORTED) 475 PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); 476 else 477 PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); 478 479 goto err; 480 } 481 482 return; 483 err: 484 ena_com_delete_debug_area(&adapter->ena_dev); 485 } 486 487 static void ena_close(struct rte_eth_dev *dev) 488 { 489 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 490 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 491 struct ena_adapter *adapter = dev->data->dev_private; 492 493 if (adapter->state == ENA_ADAPTER_STATE_RUNNING) 494 ena_stop(dev); 495 adapter->state = ENA_ADAPTER_STATE_CLOSED; 496 497 ena_rx_queue_release_all(dev); 498 ena_tx_queue_release_all(dev); 499 500 rte_free(adapter->drv_stats); 501 adapter->drv_stats = NULL; 502 503 rte_intr_disable(intr_handle); 504 rte_intr_callback_unregister(intr_handle, 505 ena_interrupt_handler_rte, 506 adapter); 507 508 /* 509 * MAC is not allocated dynamically. Setting NULL should prevent from 510 * release of the resource in the rte_eth_dev_release_port(). 
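 * (mac_addrs was pointed at adapter->mac_addr, which is part of the adapter
 * private data, so it must not be passed to rte_free().)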
511 */ 512 dev->data->mac_addrs = NULL; 513 } 514 515 static int 516 ena_dev_reset(struct rte_eth_dev *dev) 517 { 518 int rc = 0; 519 520 ena_destroy_device(dev); 521 rc = eth_ena_dev_init(dev); 522 if (rc) 523 PMD_INIT_LOG(CRIT, "Cannot initialize device"); 524 525 return rc; 526 } 527 528 static int ena_rss_reta_update(struct rte_eth_dev *dev, 529 struct rte_eth_rss_reta_entry64 *reta_conf, 530 uint16_t reta_size) 531 { 532 struct ena_adapter *adapter = dev->data->dev_private; 533 struct ena_com_dev *ena_dev = &adapter->ena_dev; 534 int rc, i; 535 u16 entry_value; 536 int conf_idx; 537 int idx; 538 539 if ((reta_size == 0) || (reta_conf == NULL)) 540 return -EINVAL; 541 542 if (reta_size > ENA_RX_RSS_TABLE_SIZE) { 543 PMD_DRV_LOG(WARNING, 544 "indirection table %d is bigger than supported (%d)\n", 545 reta_size, ENA_RX_RSS_TABLE_SIZE); 546 return -EINVAL; 547 } 548 549 for (i = 0 ; i < reta_size ; i++) { 550 /* each reta_conf is for 64 entries. 551 * to support 128 we use 2 conf of 64 552 */ 553 conf_idx = i / RTE_RETA_GROUP_SIZE; 554 idx = i % RTE_RETA_GROUP_SIZE; 555 if (TEST_BIT(reta_conf[conf_idx].mask, idx)) { 556 entry_value = 557 ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]); 558 559 rc = ena_com_indirect_table_fill_entry(ena_dev, 560 i, 561 entry_value); 562 if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 563 PMD_DRV_LOG(ERR, 564 "Cannot fill indirect table\n"); 565 return rc; 566 } 567 } 568 } 569 570 rc = ena_com_indirect_table_set(ena_dev); 571 if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 572 PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n"); 573 return rc; 574 } 575 576 PMD_DRV_LOG(DEBUG, "%s(): RSS configured %d entries for port %d\n", 577 __func__, reta_size, adapter->rte_dev->data->port_id); 578 579 return 0; 580 } 581 582 /* Query redirection table. 
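 *
 * A minimal sketch of the caller's view through the generic ethdev API
 * (illustrative; port_id is an assumption):
 *
 *   struct rte_eth_rss_reta_entry64 conf[ENA_RX_RSS_TABLE_SIZE /
 *                                        RTE_RETA_GROUP_SIZE];
 *   unsigned int i;
 *
 *   memset(conf, 0, sizeof(conf));
 *   for (i = 0; i < RTE_DIM(conf); i++)
 *       conf[i].mask = UINT64_MAX;
 *   rte_eth_dev_rss_reta_query(port_id, conf, ENA_RX_RSS_TABLE_SIZE);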
*/ 583 static int ena_rss_reta_query(struct rte_eth_dev *dev, 584 struct rte_eth_rss_reta_entry64 *reta_conf, 585 uint16_t reta_size) 586 { 587 struct ena_adapter *adapter = dev->data->dev_private; 588 struct ena_com_dev *ena_dev = &adapter->ena_dev; 589 int rc; 590 int i; 591 u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0}; 592 int reta_conf_idx; 593 int reta_idx; 594 595 if (reta_size == 0 || reta_conf == NULL || 596 (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL))) 597 return -EINVAL; 598 599 rc = ena_com_indirect_table_get(ena_dev, indirect_table); 600 if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 601 PMD_DRV_LOG(ERR, "cannot get indirect table\n"); 602 return -ENOTSUP; 603 } 604 605 for (i = 0 ; i < reta_size ; i++) { 606 reta_conf_idx = i / RTE_RETA_GROUP_SIZE; 607 reta_idx = i % RTE_RETA_GROUP_SIZE; 608 if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx)) 609 reta_conf[reta_conf_idx].reta[reta_idx] = 610 ENA_IO_RXQ_IDX_REV(indirect_table[i]); 611 } 612 613 return 0; 614 } 615 616 static int ena_rss_init_default(struct ena_adapter *adapter) 617 { 618 struct ena_com_dev *ena_dev = &adapter->ena_dev; 619 uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues; 620 int rc, i; 621 u32 val; 622 623 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 624 if (unlikely(rc)) { 625 PMD_DRV_LOG(ERR, "Cannot init indirect table\n"); 626 goto err_rss_init; 627 } 628 629 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 630 val = i % nb_rx_queues; 631 rc = ena_com_indirect_table_fill_entry(ena_dev, i, 632 ENA_IO_RXQ_IDX(val)); 633 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 634 PMD_DRV_LOG(ERR, "Cannot fill indirect table\n"); 635 goto err_fill_indir; 636 } 637 } 638 639 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 640 ENA_HASH_KEY_SIZE, 0xFFFFFFFF); 641 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 642 PMD_DRV_LOG(INFO, "Cannot fill hash function\n"); 643 goto err_fill_indir; 644 } 645 646 rc = ena_com_set_default_hash_ctrl(ena_dev); 647 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 648 PMD_DRV_LOG(INFO, "Cannot fill hash control\n"); 649 goto err_fill_indir; 650 } 651 652 rc = ena_com_indirect_table_set(ena_dev); 653 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 654 PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n"); 655 goto err_fill_indir; 656 } 657 PMD_DRV_LOG(DEBUG, "RSS configured for port %d\n", 658 adapter->rte_dev->data->port_id); 659 660 return 0; 661 662 err_fill_indir: 663 ena_com_rss_destroy(ena_dev); 664 err_rss_init: 665 666 return rc; 667 } 668 669 static void ena_rx_queue_release_all(struct rte_eth_dev *dev) 670 { 671 struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues; 672 int nb_queues = dev->data->nb_rx_queues; 673 int i; 674 675 for (i = 0; i < nb_queues; i++) 676 ena_rx_queue_release(queues[i]); 677 } 678 679 static void ena_tx_queue_release_all(struct rte_eth_dev *dev) 680 { 681 struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues; 682 int nb_queues = dev->data->nb_tx_queues; 683 int i; 684 685 for (i = 0; i < nb_queues; i++) 686 ena_tx_queue_release(queues[i]); 687 } 688 689 static void ena_rx_queue_release(void *queue) 690 { 691 struct ena_ring *ring = (struct ena_ring *)queue; 692 693 /* Free ring resources */ 694 if (ring->rx_buffer_info) 695 rte_free(ring->rx_buffer_info); 696 ring->rx_buffer_info = NULL; 697 698 if (ring->rx_refill_buffer) 699 rte_free(ring->rx_refill_buffer); 700 ring->rx_refill_buffer = NULL; 701 702 if (ring->empty_rx_reqs) 703 
rte_free(ring->empty_rx_reqs); 704 ring->empty_rx_reqs = NULL; 705 706 ring->configured = 0; 707 708 PMD_DRV_LOG(NOTICE, "RX Queue %d:%d released\n", 709 ring->port_id, ring->id); 710 } 711 712 static void ena_tx_queue_release(void *queue) 713 { 714 struct ena_ring *ring = (struct ena_ring *)queue; 715 716 /* Free ring resources */ 717 if (ring->push_buf_intermediate_buf) 718 rte_free(ring->push_buf_intermediate_buf); 719 720 if (ring->tx_buffer_info) 721 rte_free(ring->tx_buffer_info); 722 723 if (ring->empty_tx_reqs) 724 rte_free(ring->empty_tx_reqs); 725 726 ring->empty_tx_reqs = NULL; 727 ring->tx_buffer_info = NULL; 728 ring->push_buf_intermediate_buf = NULL; 729 730 ring->configured = 0; 731 732 PMD_DRV_LOG(NOTICE, "TX Queue %d:%d released\n", 733 ring->port_id, ring->id); 734 } 735 736 static void ena_rx_queue_release_bufs(struct ena_ring *ring) 737 { 738 unsigned int i; 739 740 for (i = 0; i < ring->ring_size; ++i) 741 if (ring->rx_buffer_info[i]) { 742 rte_mbuf_raw_free(ring->rx_buffer_info[i]); 743 ring->rx_buffer_info[i] = NULL; 744 } 745 } 746 747 static void ena_tx_queue_release_bufs(struct ena_ring *ring) 748 { 749 unsigned int i; 750 751 for (i = 0; i < ring->ring_size; ++i) { 752 struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i]; 753 754 if (tx_buf->mbuf) 755 rte_pktmbuf_free(tx_buf->mbuf); 756 } 757 } 758 759 static int ena_link_update(struct rte_eth_dev *dev, 760 __rte_unused int wait_to_complete) 761 { 762 struct rte_eth_link *link = &dev->data->dev_link; 763 struct ena_adapter *adapter = dev->data->dev_private; 764 765 link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; 766 link->link_speed = ETH_SPEED_NUM_NONE; 767 link->link_duplex = ETH_LINK_FULL_DUPLEX; 768 769 return 0; 770 } 771 772 static int ena_queue_start_all(struct rte_eth_dev *dev, 773 enum ena_ring_type ring_type) 774 { 775 struct ena_adapter *adapter = dev->data->dev_private; 776 struct ena_ring *queues = NULL; 777 int nb_queues; 778 int i = 0; 779 int rc = 0; 780 781 if (ring_type == ENA_RING_TYPE_RX) { 782 queues = adapter->rx_ring; 783 nb_queues = dev->data->nb_rx_queues; 784 } else { 785 queues = adapter->tx_ring; 786 nb_queues = dev->data->nb_tx_queues; 787 } 788 for (i = 0; i < nb_queues; i++) { 789 if (queues[i].configured) { 790 if (ring_type == ENA_RING_TYPE_RX) { 791 ena_assert_msg( 792 dev->data->rx_queues[i] == &queues[i], 793 "Inconsistent state of rx queues\n"); 794 } else { 795 ena_assert_msg( 796 dev->data->tx_queues[i] == &queues[i], 797 "Inconsistent state of tx queues\n"); 798 } 799 800 rc = ena_queue_start(&queues[i]); 801 802 if (rc) { 803 PMD_INIT_LOG(ERR, 804 "failed to start queue %d type(%d)", 805 i, ring_type); 806 goto err; 807 } 808 } 809 } 810 811 return 0; 812 813 err: 814 while (i--) 815 if (queues[i].configured) 816 ena_queue_stop(&queues[i]); 817 818 return rc; 819 } 820 821 static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter) 822 { 823 uint32_t max_frame_len = adapter->max_mtu; 824 825 if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads & 826 DEV_RX_OFFLOAD_JUMBO_FRAME) 827 max_frame_len = 828 adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len; 829 830 return max_frame_len; 831 } 832 833 static int ena_check_valid_conf(struct ena_adapter *adapter) 834 { 835 uint32_t max_frame_len = ena_get_mtu_conf(adapter); 836 837 if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) { 838 PMD_INIT_LOG(ERR, "Unsupported MTU of %d. 
" 839 "max mtu: %d, min mtu: %d", 840 max_frame_len, adapter->max_mtu, ENA_MIN_MTU); 841 return ENA_COM_UNSUPPORTED; 842 } 843 844 return 0; 845 } 846 847 static int 848 ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx) 849 { 850 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 851 struct ena_com_dev *ena_dev = ctx->ena_dev; 852 uint32_t tx_queue_size = ENA_MAX_RING_SIZE_TX; 853 uint32_t rx_queue_size = ENA_MAX_RING_SIZE_RX; 854 855 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 856 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 857 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 858 rx_queue_size = RTE_MIN(rx_queue_size, 859 max_queue_ext->max_rx_cq_depth); 860 rx_queue_size = RTE_MIN(rx_queue_size, 861 max_queue_ext->max_rx_sq_depth); 862 tx_queue_size = RTE_MIN(tx_queue_size, 863 max_queue_ext->max_tx_cq_depth); 864 865 if (ena_dev->tx_mem_queue_type == 866 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 867 tx_queue_size = RTE_MIN(tx_queue_size, 868 llq->max_llq_depth); 869 } else { 870 tx_queue_size = RTE_MIN(tx_queue_size, 871 max_queue_ext->max_tx_sq_depth); 872 } 873 874 ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 875 max_queue_ext->max_per_packet_rx_descs); 876 ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 877 max_queue_ext->max_per_packet_tx_descs); 878 } else { 879 struct ena_admin_queue_feature_desc *max_queues = 880 &ctx->get_feat_ctx->max_queues; 881 rx_queue_size = RTE_MIN(rx_queue_size, 882 max_queues->max_cq_depth); 883 rx_queue_size = RTE_MIN(rx_queue_size, 884 max_queues->max_sq_depth); 885 tx_queue_size = RTE_MIN(tx_queue_size, 886 max_queues->max_cq_depth); 887 888 if (ena_dev->tx_mem_queue_type == 889 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 890 tx_queue_size = RTE_MIN(tx_queue_size, 891 llq->max_llq_depth); 892 } else { 893 tx_queue_size = RTE_MIN(tx_queue_size, 894 max_queues->max_sq_depth); 895 } 896 897 ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 898 max_queues->max_packet_tx_descs); 899 ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 900 max_queues->max_packet_rx_descs); 901 } 902 903 /* Round down to the nearest power of 2 */ 904 rx_queue_size = rte_align32prevpow2(rx_queue_size); 905 tx_queue_size = rte_align32prevpow2(tx_queue_size); 906 907 if (unlikely(rx_queue_size == 0 || tx_queue_size == 0)) { 908 PMD_INIT_LOG(ERR, "Invalid queue size"); 909 return -EFAULT; 910 } 911 912 ctx->rx_queue_size = rx_queue_size; 913 ctx->tx_queue_size = tx_queue_size; 914 915 return 0; 916 } 917 918 static void ena_stats_restart(struct rte_eth_dev *dev) 919 { 920 struct ena_adapter *adapter = dev->data->dev_private; 921 922 rte_atomic64_init(&adapter->drv_stats->ierrors); 923 rte_atomic64_init(&adapter->drv_stats->oerrors); 924 rte_atomic64_init(&adapter->drv_stats->rx_nombuf); 925 rte_atomic64_init(&adapter->drv_stats->rx_drops); 926 } 927 928 static int ena_stats_get(struct rte_eth_dev *dev, 929 struct rte_eth_stats *stats) 930 { 931 struct ena_admin_basic_stats ena_stats; 932 struct ena_adapter *adapter = dev->data->dev_private; 933 struct ena_com_dev *ena_dev = &adapter->ena_dev; 934 int rc; 935 int i; 936 int max_rings_stats; 937 938 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 939 return -ENOTSUP; 940 941 memset(&ena_stats, 0, sizeof(ena_stats)); 942 rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats); 943 if (unlikely(rc)) { 944 PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n"); 945 return rc; 946 } 947 948 /* Set of basic statistics from ENA */ 949 stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, 
950 ena_stats.rx_pkts_low); 951 stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, 952 ena_stats.tx_pkts_low); 953 stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, 954 ena_stats.rx_bytes_low); 955 stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, 956 ena_stats.tx_bytes_low); 957 958 /* Driver related stats */ 959 stats->imissed = rte_atomic64_read(&adapter->drv_stats->rx_drops); 960 stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); 961 stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); 962 stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); 963 964 max_rings_stats = RTE_MIN(dev->data->nb_rx_queues, 965 RTE_ETHDEV_QUEUE_STAT_CNTRS); 966 for (i = 0; i < max_rings_stats; ++i) { 967 struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats; 968 969 stats->q_ibytes[i] = rx_stats->bytes; 970 stats->q_ipackets[i] = rx_stats->cnt; 971 stats->q_errors[i] = rx_stats->bad_desc_num + 972 rx_stats->bad_req_id; 973 } 974 975 max_rings_stats = RTE_MIN(dev->data->nb_tx_queues, 976 RTE_ETHDEV_QUEUE_STAT_CNTRS); 977 for (i = 0; i < max_rings_stats; ++i) { 978 struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats; 979 980 stats->q_obytes[i] = tx_stats->bytes; 981 stats->q_opackets[i] = tx_stats->cnt; 982 } 983 984 return 0; 985 } 986 987 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 988 { 989 struct ena_adapter *adapter; 990 struct ena_com_dev *ena_dev; 991 int rc = 0; 992 993 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 994 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 995 adapter = dev->data->dev_private; 996 997 ena_dev = &adapter->ena_dev; 998 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 999 1000 if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) { 1001 PMD_DRV_LOG(ERR, 1002 "Invalid MTU setting. 
new_mtu: %d " 1003 "max mtu: %d min mtu: %d\n", 1004 mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU); 1005 return -EINVAL; 1006 } 1007 1008 rc = ena_com_set_dev_mtu(ena_dev, mtu); 1009 if (rc) 1010 PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu); 1011 else 1012 PMD_DRV_LOG(NOTICE, "Set MTU: %d\n", mtu); 1013 1014 return rc; 1015 } 1016 1017 static int ena_start(struct rte_eth_dev *dev) 1018 { 1019 struct ena_adapter *adapter = dev->data->dev_private; 1020 uint64_t ticks; 1021 int rc = 0; 1022 1023 rc = ena_check_valid_conf(adapter); 1024 if (rc) 1025 return rc; 1026 1027 rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX); 1028 if (rc) 1029 return rc; 1030 1031 rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX); 1032 if (rc) 1033 goto err_start_tx; 1034 1035 if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode & 1036 ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) { 1037 rc = ena_rss_init_default(adapter); 1038 if (rc) 1039 goto err_rss_init; 1040 } 1041 1042 ena_stats_restart(dev); 1043 1044 adapter->timestamp_wd = rte_get_timer_cycles(); 1045 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 1046 1047 ticks = rte_get_timer_hz(); 1048 rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), 1049 ena_timer_wd_callback, adapter); 1050 1051 ++adapter->dev_stats.dev_start; 1052 adapter->state = ENA_ADAPTER_STATE_RUNNING; 1053 1054 return 0; 1055 1056 err_rss_init: 1057 ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 1058 err_start_tx: 1059 ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 1060 return rc; 1061 } 1062 1063 static void ena_stop(struct rte_eth_dev *dev) 1064 { 1065 struct ena_adapter *adapter = dev->data->dev_private; 1066 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1067 int rc; 1068 1069 rte_timer_stop_sync(&adapter->timer_wd); 1070 ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 1071 ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 1072 1073 if (adapter->trigger_reset) { 1074 rc = ena_com_dev_reset(ena_dev, adapter->reset_reason); 1075 if (rc) 1076 PMD_DRV_LOG(ERR, "Device reset failed rc=%d\n", rc); 1077 } 1078 1079 ++adapter->dev_stats.dev_stop; 1080 adapter->state = ENA_ADAPTER_STATE_STOPPED; 1081 } 1082 1083 static int ena_create_io_queue(struct ena_ring *ring) 1084 { 1085 struct ena_adapter *adapter; 1086 struct ena_com_dev *ena_dev; 1087 struct ena_com_create_io_ctx ctx = 1088 /* policy set to _HOST just to satisfy icc compiler */ 1089 { ENA_ADMIN_PLACEMENT_POLICY_HOST, 1090 0, 0, 0, 0, 0 }; 1091 uint16_t ena_qid; 1092 unsigned int i; 1093 int rc; 1094 1095 adapter = ring->adapter; 1096 ena_dev = &adapter->ena_dev; 1097 1098 if (ring->type == ENA_RING_TYPE_TX) { 1099 ena_qid = ENA_IO_TXQ_IDX(ring->id); 1100 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1101 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1102 for (i = 0; i < ring->ring_size; i++) 1103 ring->empty_tx_reqs[i] = i; 1104 } else { 1105 ena_qid = ENA_IO_RXQ_IDX(ring->id); 1106 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 1107 for (i = 0; i < ring->ring_size; i++) 1108 ring->empty_rx_reqs[i] = i; 1109 } 1110 ctx.queue_size = ring->ring_size; 1111 ctx.qid = ena_qid; 1112 ctx.msix_vector = -1; /* interrupts not used */ 1113 ctx.numa_node = ring->numa_socket_id; 1114 1115 rc = ena_com_create_io_queue(ena_dev, &ctx); 1116 if (rc) { 1117 PMD_DRV_LOG(ERR, 1118 "failed to create io queue #%d (qid:%d) rc: %d\n", 1119 ring->id, ena_qid, rc); 1120 return rc; 1121 } 1122 1123 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1124 &ring->ena_com_io_sq, 1125 &ring->ena_com_io_cq); 1126 if (rc) { 1127 
PMD_DRV_LOG(ERR, 1128 "Failed to get io queue handlers. queue num %d rc: %d\n", 1129 ring->id, rc); 1130 ena_com_destroy_io_queue(ena_dev, ena_qid); 1131 return rc; 1132 } 1133 1134 if (ring->type == ENA_RING_TYPE_TX) 1135 ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); 1136 1137 return 0; 1138 } 1139 1140 static void ena_queue_stop(struct ena_ring *ring) 1141 { 1142 struct ena_com_dev *ena_dev = &ring->adapter->ena_dev; 1143 1144 if (ring->type == ENA_RING_TYPE_RX) { 1145 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id)); 1146 ena_rx_queue_release_bufs(ring); 1147 } else { 1148 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id)); 1149 ena_tx_queue_release_bufs(ring); 1150 } 1151 } 1152 1153 static void ena_queue_stop_all(struct rte_eth_dev *dev, 1154 enum ena_ring_type ring_type) 1155 { 1156 struct ena_adapter *adapter = dev->data->dev_private; 1157 struct ena_ring *queues = NULL; 1158 uint16_t nb_queues, i; 1159 1160 if (ring_type == ENA_RING_TYPE_RX) { 1161 queues = adapter->rx_ring; 1162 nb_queues = dev->data->nb_rx_queues; 1163 } else { 1164 queues = adapter->tx_ring; 1165 nb_queues = dev->data->nb_tx_queues; 1166 } 1167 1168 for (i = 0; i < nb_queues; ++i) 1169 if (queues[i].configured) 1170 ena_queue_stop(&queues[i]); 1171 } 1172 1173 static int ena_queue_start(struct ena_ring *ring) 1174 { 1175 int rc, bufs_num; 1176 1177 ena_assert_msg(ring->configured == 1, 1178 "Trying to start unconfigured queue\n"); 1179 1180 rc = ena_create_io_queue(ring); 1181 if (rc) { 1182 PMD_INIT_LOG(ERR, "Failed to create IO queue!"); 1183 return rc; 1184 } 1185 1186 ring->next_to_clean = 0; 1187 ring->next_to_use = 0; 1188 1189 if (ring->type == ENA_RING_TYPE_TX) { 1190 ring->tx_stats.available_desc = 1191 ena_com_free_q_entries(ring->ena_com_io_sq); 1192 return 0; 1193 } 1194 1195 bufs_num = ring->ring_size - 1; 1196 rc = ena_populate_rx_queue(ring, bufs_num); 1197 if (rc != bufs_num) { 1198 ena_com_destroy_io_queue(&ring->adapter->ena_dev, 1199 ENA_IO_RXQ_IDX(ring->id)); 1200 PMD_INIT_LOG(ERR, "Failed to populate rx ring !"); 1201 return ENA_COM_FAULT; 1202 } 1203 1204 return 0; 1205 } 1206 1207 static int ena_tx_queue_setup(struct rte_eth_dev *dev, 1208 uint16_t queue_idx, 1209 uint16_t nb_desc, 1210 unsigned int socket_id, 1211 const struct rte_eth_txconf *tx_conf) 1212 { 1213 struct ena_ring *txq = NULL; 1214 struct ena_adapter *adapter = dev->data->dev_private; 1215 unsigned int i; 1216 1217 txq = &adapter->tx_ring[queue_idx]; 1218 1219 if (txq->configured) { 1220 PMD_DRV_LOG(CRIT, 1221 "API violation. 
Queue %d is already configured\n", 1222 queue_idx); 1223 return ENA_COM_FAULT; 1224 } 1225 1226 if (!rte_is_power_of_2(nb_desc)) { 1227 PMD_DRV_LOG(ERR, 1228 "Unsupported size of TX queue: %d is not a power of 2.\n", 1229 nb_desc); 1230 return -EINVAL; 1231 } 1232 1233 if (nb_desc > adapter->tx_ring_size) { 1234 PMD_DRV_LOG(ERR, 1235 "Unsupported size of TX queue (max size: %d)\n", 1236 adapter->tx_ring_size); 1237 return -EINVAL; 1238 } 1239 1240 if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE) 1241 nb_desc = adapter->tx_ring_size; 1242 1243 txq->port_id = dev->data->port_id; 1244 txq->next_to_clean = 0; 1245 txq->next_to_use = 0; 1246 txq->ring_size = nb_desc; 1247 txq->numa_socket_id = socket_id; 1248 1249 txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info", 1250 sizeof(struct ena_tx_buffer) * 1251 txq->ring_size, 1252 RTE_CACHE_LINE_SIZE); 1253 if (!txq->tx_buffer_info) { 1254 PMD_DRV_LOG(ERR, "failed to alloc mem for tx buffer info\n"); 1255 return -ENOMEM; 1256 } 1257 1258 txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs", 1259 sizeof(u16) * txq->ring_size, 1260 RTE_CACHE_LINE_SIZE); 1261 if (!txq->empty_tx_reqs) { 1262 PMD_DRV_LOG(ERR, "failed to alloc mem for tx reqs\n"); 1263 rte_free(txq->tx_buffer_info); 1264 return -ENOMEM; 1265 } 1266 1267 txq->push_buf_intermediate_buf = 1268 rte_zmalloc("txq->push_buf_intermediate_buf", 1269 txq->tx_max_header_size, 1270 RTE_CACHE_LINE_SIZE); 1271 if (!txq->push_buf_intermediate_buf) { 1272 PMD_DRV_LOG(ERR, "failed to alloc push buff for LLQ\n"); 1273 rte_free(txq->tx_buffer_info); 1274 rte_free(txq->empty_tx_reqs); 1275 return -ENOMEM; 1276 } 1277 1278 for (i = 0; i < txq->ring_size; i++) 1279 txq->empty_tx_reqs[i] = i; 1280 1281 if (tx_conf != NULL) { 1282 txq->offloads = 1283 tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 1284 } 1285 /* Store pointer to this queue in upper layer */ 1286 txq->configured = 1; 1287 dev->data->tx_queues[queue_idx] = txq; 1288 1289 return 0; 1290 } 1291 1292 static int ena_rx_queue_setup(struct rte_eth_dev *dev, 1293 uint16_t queue_idx, 1294 uint16_t nb_desc, 1295 unsigned int socket_id, 1296 __rte_unused const struct rte_eth_rxconf *rx_conf, 1297 struct rte_mempool *mp) 1298 { 1299 struct ena_adapter *adapter = dev->data->dev_private; 1300 struct ena_ring *rxq = NULL; 1301 size_t buffer_size; 1302 int i; 1303 1304 rxq = &adapter->rx_ring[queue_idx]; 1305 if (rxq->configured) { 1306 PMD_DRV_LOG(CRIT, 1307 "API violation. 
Queue %d is already configured\n",
			queue_idx);
		return ENA_COM_FAULT;
	}

	if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE)
		nb_desc = adapter->rx_ring_size;

	if (!rte_is_power_of_2(nb_desc)) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of RX queue: %d is not a power of 2.\n",
			nb_desc);
		return -EINVAL;
	}

	if (nb_desc > adapter->rx_ring_size) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of RX queue (max size: %d)\n",
			adapter->rx_ring_size);
		return -EINVAL;
	}

	/* ENA doesn't support buffers smaller than 1400 bytes */
	buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
	if (buffer_size < ENA_RX_BUF_MIN_SIZE) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of RX buffer: %zu (min size: %d)\n",
			buffer_size, ENA_RX_BUF_MIN_SIZE);
		return -EINVAL;
	}

	rxq->port_id = dev->data->port_id;
	rxq->next_to_clean = 0;
	rxq->next_to_use = 0;
	rxq->ring_size = nb_desc;
	rxq->numa_socket_id = socket_id;
	rxq->mb_pool = mp;

	rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
		sizeof(struct rte_mbuf *) * nb_desc,
		RTE_CACHE_LINE_SIZE);
	if (!rxq->rx_buffer_info) {
		PMD_DRV_LOG(ERR, "failed to alloc mem for rx buffer info\n");
		return -ENOMEM;
	}

	rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
		sizeof(struct rte_mbuf *) * nb_desc,
		RTE_CACHE_LINE_SIZE);

	if (!rxq->rx_refill_buffer) {
		PMD_DRV_LOG(ERR, "failed to alloc mem for rx refill buffer\n");
		rte_free(rxq->rx_buffer_info);
		rxq->rx_buffer_info = NULL;
		return -ENOMEM;
	}

	rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
		sizeof(uint16_t) * nb_desc,
		RTE_CACHE_LINE_SIZE);
	if (!rxq->empty_rx_reqs) {
		PMD_DRV_LOG(ERR, "failed to alloc mem for empty rx reqs\n");
		rte_free(rxq->rx_buffer_info);
		rxq->rx_buffer_info = NULL;
		rte_free(rxq->rx_refill_buffer);
		rxq->rx_refill_buffer = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nb_desc; i++)
		rxq->empty_rx_reqs[i] = i;

	/* Store pointer to this queue in upper layer */
	rxq->configured = 1;
	dev->data->rx_queues[queue_idx] = rxq;

	return 0;
}

static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
{
	unsigned int i;
	int rc;
	uint16_t ring_size = rxq->ring_size;
	uint16_t ring_mask = ring_size - 1;
	uint16_t next_to_use = rxq->next_to_use;
	uint16_t in_use, req_id;
	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;

	if (unlikely(!count))
		return 0;

	in_use = rxq->next_to_use - rxq->next_to_clean;
	ena_assert_msg(((in_use + count) < ring_size), "bad ring state\n");

	/* get resources for incoming packets */
	rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
	if (unlikely(rc < 0)) {
		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
		++rxq->rx_stats.mbuf_alloc_fail;
		PMD_RX_LOG(DEBUG, "there are not enough free buffers");
		return 0;
	}

	for (i = 0; i < count; i++) {
		uint16_t next_to_use_masked = next_to_use & ring_mask;
		struct rte_mbuf *mbuf = mbufs[i];
		struct ena_com_buf ebuf;

		if (likely((i + 4) < count))
			rte_prefetch0(mbufs[i + 4]);

		req_id = rxq->empty_rx_reqs[next_to_use_masked];
		rc = validate_rx_req_id(rxq, req_id);
		if (unlikely(rc < 0))
			break;
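		/* req_id came from empty_rx_reqs, so this rx_buffer_info slot
		 * should be free to take ownership of the new mbuf.
		 */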
		rxq->rx_buffer_info[req_id] = mbuf;

		/* prepare physical address for DMA transaction */
		ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
		ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
		/* pass resource to device */
		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
						&ebuf, req_id);
		if (unlikely(rc)) {
			PMD_DRV_LOG(WARNING, "failed adding rx desc\n");
			rxq->rx_buffer_info[req_id] = NULL;
			break;
		}
		next_to_use++;
	}

	if (unlikely(i < count)) {
		PMD_DRV_LOG(WARNING, "refilled rx qid %d with only %d "
			"buffers (from %d)\n", rxq->id, i, count);
		rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
				     count - i);
		++rxq->rx_stats.refill_partial;
	}

	/* When we submitted free resources to the device... */
	if (likely(i > 0)) {
		/* ...let HW know that it can fill buffers with data
		 *
		 * Add a memory barrier to make sure the descriptors were
		 * written before issuing a doorbell
		 */
		rte_wmb();
		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);

		rxq->next_to_use = next_to_use;
	}

	return i;
}

static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state)
{
	uint32_t aenq_groups;
	int rc;
	bool readless_supported;

	/* Initialize mmio registers */
	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "failed to init mmio read less\n");
		return rc;
	}

	/* The PCIe configuration space revision id indicates whether mmio
	 * register read is disabled.
	 */
	readless_supported =
		!(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
			& ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	/* reset device */
	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (rc) {
		PMD_DRV_LOG(ERR, "cannot reset device\n");
		goto err_mmio_read_less;
	}

	/* check FW version */
	rc = ena_com_validate_version(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "device version is too low\n");
		goto err_mmio_read_less;
	}

	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);

	/* ENA device administration layer init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"cannot initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	/* To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information.
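	 *
	 * (Polling is switched back off in eth_ena_dev_init() once the
	 * interrupt handler is registered, via
	 * ena_com_set_admin_polling_mode(ena_dev, false).)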
1513 */ 1514 ena_com_set_admin_polling_mode(ena_dev, true); 1515 1516 ena_config_host_info(ena_dev); 1517 1518 /* Get Device Attributes and features */ 1519 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 1520 if (rc) { 1521 PMD_DRV_LOG(ERR, 1522 "cannot get attribute for ena device rc= %d\n", rc); 1523 goto err_admin_init; 1524 } 1525 1526 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1527 BIT(ENA_ADMIN_NOTIFICATION) | 1528 BIT(ENA_ADMIN_KEEP_ALIVE) | 1529 BIT(ENA_ADMIN_FATAL_ERROR) | 1530 BIT(ENA_ADMIN_WARNING); 1531 1532 aenq_groups &= get_feat_ctx->aenq.supported_groups; 1533 rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 1534 if (rc) { 1535 PMD_DRV_LOG(ERR, "Cannot configure aenq groups rc: %d\n", rc); 1536 goto err_admin_init; 1537 } 1538 1539 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 1540 1541 return 0; 1542 1543 err_admin_init: 1544 ena_com_admin_destroy(ena_dev); 1545 1546 err_mmio_read_less: 1547 ena_com_mmio_reg_read_request_destroy(ena_dev); 1548 1549 return rc; 1550 } 1551 1552 static void ena_interrupt_handler_rte(void *cb_arg) 1553 { 1554 struct ena_adapter *adapter = cb_arg; 1555 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1556 1557 ena_com_admin_q_comp_intr_handler(ena_dev); 1558 if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) 1559 ena_com_aenq_intr_handler(ena_dev, adapter); 1560 } 1561 1562 static void check_for_missing_keep_alive(struct ena_adapter *adapter) 1563 { 1564 if (!adapter->wd_state) 1565 return; 1566 1567 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 1568 return; 1569 1570 if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 1571 adapter->keep_alive_timeout)) { 1572 PMD_DRV_LOG(ERR, "Keep alive timeout\n"); 1573 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 1574 adapter->trigger_reset = true; 1575 ++adapter->dev_stats.wd_expired; 1576 } 1577 } 1578 1579 /* Check if admin queue is enabled */ 1580 static void check_for_admin_com_state(struct ena_adapter *adapter) 1581 { 1582 if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 1583 PMD_DRV_LOG(ERR, "ENA admin queue is not in running state!\n"); 1584 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 1585 adapter->trigger_reset = true; 1586 } 1587 } 1588 1589 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1590 void *arg) 1591 { 1592 struct ena_adapter *adapter = arg; 1593 struct rte_eth_dev *dev = adapter->rte_dev; 1594 1595 check_for_missing_keep_alive(adapter); 1596 check_for_admin_com_state(adapter); 1597 1598 if (unlikely(adapter->trigger_reset)) { 1599 PMD_DRV_LOG(ERR, "Trigger reset is on\n"); 1600 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 1601 NULL); 1602 } 1603 } 1604 1605 static inline void 1606 set_default_llq_configurations(struct ena_llq_configurations *llq_config) 1607 { 1608 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 1609 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; 1610 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 1611 llq_config->llq_num_decs_before_header = 1612 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 1613 llq_config->llq_ring_entry_size_value = 128; 1614 } 1615 1616 static int 1617 ena_set_queues_placement_policy(struct ena_adapter *adapter, 1618 struct ena_com_dev *ena_dev, 1619 struct ena_admin_feature_llq_desc *llq, 1620 struct ena_llq_configurations *llq_default_configurations) 1621 { 1622 int rc; 1623 u32 llq_feature_mask; 1624 1625 llq_feature_mask = 1 << ENA_ADMIN_LLQ; 1626 if 
(!(ena_dev->supported_features & llq_feature_mask)) {
		PMD_DRV_LOG(INFO,
			"LLQ is not supported. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
	if (unlikely(rc)) {
		PMD_INIT_LOG(WARNING, "Failed to config dev mode. "
			"Fallback to host mode policy.");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	/* Nothing to config, exit */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return 0;

	if (!adapter->dev_mem_base) {
		PMD_DRV_LOG(ERR, "Unable to access LLQ bar resource. "
			"Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	ena_dev->mem_bar = adapter->dev_mem_base;

	return 0;
}

static int ena_calc_io_queue_num(struct ena_com_dev *ena_dev,
				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;

	/* Regular queues capabilities */
	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&get_feat_ctx->max_queue_ext.max_queue_ext;
		io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,
				    max_queue_ext->max_rx_cq_num);
		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&get_feat_ctx->max_queues;
		io_tx_sq_num = max_queues->max_sq_num;
		io_tx_cq_num = max_queues->max_cq_num;
		io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);
	}

	/* In case of LLQ use the llq number in the get feature cmd */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;

	io_queue_num = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
	io_queue_num = RTE_MIN(io_queue_num, io_tx_sq_num);
	io_queue_num = RTE_MIN(io_queue_num, io_tx_cq_num);

	if (unlikely(io_queue_num == 0)) {
		PMD_DRV_LOG(ERR, "Number of IO queues should not be 0\n");
		return -EFAULT;
	}

	return io_queue_num;
}

static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
{
	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_llq_configurations llq_config;
	const char *queue_type_str;
	int rc;

	static int adapters_found;
	bool wd_state;

	eth_dev->dev_ops = &ena_dev_ops;
	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	memset(adapter, 0, sizeof(struct ena_adapter));
	ena_dev = &adapter->ena_dev;

	adapter->rte_eth_dev_data = eth_dev->data;
	adapter->rte_dev = eth_dev;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	adapter->pdev = pci_dev;

	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
		     pci_dev->addr.domain,
		     pci_dev->addr.bus,
pci_dev->addr.devid, 1730 pci_dev->addr.function); 1731 1732 intr_handle = &pci_dev->intr_handle; 1733 1734 adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; 1735 adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; 1736 1737 if (!adapter->regs) { 1738 PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)", 1739 ENA_REGS_BAR); 1740 return -ENXIO; 1741 } 1742 1743 ena_dev->reg_bar = adapter->regs; 1744 ena_dev->dmadev = adapter->pdev; 1745 1746 adapter->id_number = adapters_found; 1747 1748 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", 1749 adapter->id_number); 1750 1751 /* device specific initialization routine */ 1752 rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state); 1753 if (rc) { 1754 PMD_INIT_LOG(CRIT, "Failed to init ENA device"); 1755 goto err; 1756 } 1757 adapter->wd_state = wd_state; 1758 1759 set_default_llq_configurations(&llq_config); 1760 rc = ena_set_queues_placement_policy(adapter, ena_dev, 1761 &get_feat_ctx.llq, &llq_config); 1762 if (unlikely(rc)) { 1763 PMD_INIT_LOG(CRIT, "Failed to set placement policy"); 1764 return rc; 1765 } 1766 1767 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 1768 queue_type_str = "Regular"; 1769 else 1770 queue_type_str = "Low latency"; 1771 PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str); 1772 1773 calc_queue_ctx.ena_dev = ena_dev; 1774 calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 1775 adapter->num_queues = ena_calc_io_queue_num(ena_dev, 1776 &get_feat_ctx); 1777 1778 rc = ena_calc_queue_size(&calc_queue_ctx); 1779 if (unlikely((rc != 0) || (adapter->num_queues <= 0))) { 1780 rc = -EFAULT; 1781 goto err_device_destroy; 1782 } 1783 1784 adapter->tx_ring_size = calc_queue_ctx.tx_queue_size; 1785 adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; 1786 1787 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 1788 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 1789 1790 /* prepare ring structures */ 1791 ena_init_rings(adapter); 1792 1793 ena_config_debug_area(adapter); 1794 1795 /* Set max MTU for this device */ 1796 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 1797 1798 /* set device support for offloads */ 1799 adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx & 1800 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0; 1801 adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx & 1802 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0; 1803 adapter->offloads.rx_csum_supported = 1804 (get_feat_ctx.offload.rx_supported & 1805 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0; 1806 1807 /* Copy MAC address and point DPDK to it */ 1808 eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr; 1809 rte_ether_addr_copy((struct rte_ether_addr *) 1810 get_feat_ctx.dev_attr.mac_addr, 1811 (struct rte_ether_addr *)adapter->mac_addr); 1812 1813 /* 1814 * Pass the information to the rte_eth_dev_close() that it should also 1815 * release the private port resources. 
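	 * (This is requested by setting the RTE_ETH_DEV_CLOSE_REMOVE flag
	 * just below.)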
1816 */ 1817 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 1818 1819 adapter->drv_stats = rte_zmalloc("adapter stats", 1820 sizeof(*adapter->drv_stats), 1821 RTE_CACHE_LINE_SIZE); 1822 if (!adapter->drv_stats) { 1823 PMD_DRV_LOG(ERR, "failed to alloc mem for adapter stats\n"); 1824 rc = -ENOMEM; 1825 goto err_delete_debug_area; 1826 } 1827 1828 rte_intr_callback_register(intr_handle, 1829 ena_interrupt_handler_rte, 1830 adapter); 1831 rte_intr_enable(intr_handle); 1832 ena_com_set_admin_polling_mode(ena_dev, false); 1833 ena_com_admin_aenq_enable(ena_dev); 1834 1835 if (adapters_found == 0) 1836 rte_timer_subsystem_init(); 1837 rte_timer_init(&adapter->timer_wd); 1838 1839 adapters_found++; 1840 adapter->state = ENA_ADAPTER_STATE_INIT; 1841 1842 return 0; 1843 1844 err_delete_debug_area: 1845 ena_com_delete_debug_area(ena_dev); 1846 1847 err_device_destroy: 1848 ena_com_delete_host_info(ena_dev); 1849 ena_com_admin_destroy(ena_dev); 1850 1851 err: 1852 return rc; 1853 } 1854 1855 static void ena_destroy_device(struct rte_eth_dev *eth_dev) 1856 { 1857 struct ena_adapter *adapter = eth_dev->data->dev_private; 1858 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1859 1860 if (adapter->state == ENA_ADAPTER_STATE_FREE) 1861 return; 1862 1863 ena_com_set_admin_running_state(ena_dev, false); 1864 1865 if (adapter->state != ENA_ADAPTER_STATE_CLOSED) 1866 ena_close(eth_dev); 1867 1868 ena_com_delete_debug_area(ena_dev); 1869 ena_com_delete_host_info(ena_dev); 1870 1871 ena_com_abort_admin_commands(ena_dev); 1872 ena_com_wait_for_abort_completion(ena_dev); 1873 ena_com_admin_destroy(ena_dev); 1874 ena_com_mmio_reg_read_request_destroy(ena_dev); 1875 1876 adapter->state = ENA_ADAPTER_STATE_FREE; 1877 } 1878 1879 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) 1880 { 1881 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1882 return 0; 1883 1884 ena_destroy_device(eth_dev); 1885 1886 eth_dev->dev_ops = NULL; 1887 eth_dev->rx_pkt_burst = NULL; 1888 eth_dev->tx_pkt_burst = NULL; 1889 eth_dev->tx_pkt_prepare = NULL; 1890 1891 return 0; 1892 } 1893 1894 static int ena_dev_configure(struct rte_eth_dev *dev) 1895 { 1896 struct ena_adapter *adapter = dev->data->dev_private; 1897 1898 adapter->state = ENA_ADAPTER_STATE_CONFIG; 1899 1900 adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads; 1901 adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads; 1902 return 0; 1903 } 1904 1905 static void ena_init_rings(struct ena_adapter *adapter) 1906 { 1907 int i; 1908 1909 for (i = 0; i < adapter->num_queues; i++) { 1910 struct ena_ring *ring = &adapter->tx_ring[i]; 1911 1912 ring->configured = 0; 1913 ring->type = ENA_RING_TYPE_TX; 1914 ring->adapter = adapter; 1915 ring->id = i; 1916 ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 1917 ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 1918 ring->sgl_size = adapter->max_tx_sgl_size; 1919 } 1920 1921 for (i = 0; i < adapter->num_queues; i++) { 1922 struct ena_ring *ring = &adapter->rx_ring[i]; 1923 1924 ring->configured = 0; 1925 ring->type = ENA_RING_TYPE_RX; 1926 ring->adapter = adapter; 1927 ring->id = i; 1928 ring->sgl_size = adapter->max_rx_sgl_size; 1929 } 1930 } 1931 1932 static int ena_infos_get(struct rte_eth_dev *dev, 1933 struct rte_eth_dev_info *dev_info) 1934 { 1935 struct ena_adapter *adapter; 1936 struct ena_com_dev *ena_dev; 1937 uint64_t rx_feat = 0, tx_feat = 0; 1938 1939 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 1940 ena_assert_msg(dev->data->dev_private != 
NULL, "Uninitialized device\n"); 1941 adapter = dev->data->dev_private; 1942 1943 ena_dev = &adapter->ena_dev; 1944 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 1945 1946 dev_info->speed_capa = 1947 ETH_LINK_SPEED_1G | 1948 ETH_LINK_SPEED_2_5G | 1949 ETH_LINK_SPEED_5G | 1950 ETH_LINK_SPEED_10G | 1951 ETH_LINK_SPEED_25G | 1952 ETH_LINK_SPEED_40G | 1953 ETH_LINK_SPEED_50G | 1954 ETH_LINK_SPEED_100G; 1955 1956 /* Set Tx & Rx features available for device */ 1957 if (adapter->offloads.tso4_supported) 1958 tx_feat |= DEV_TX_OFFLOAD_TCP_TSO; 1959 1960 if (adapter->offloads.tx_csum_supported) 1961 tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM | 1962 DEV_TX_OFFLOAD_UDP_CKSUM | 1963 DEV_TX_OFFLOAD_TCP_CKSUM; 1964 1965 if (adapter->offloads.rx_csum_supported) 1966 rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM | 1967 DEV_RX_OFFLOAD_UDP_CKSUM | 1968 DEV_RX_OFFLOAD_TCP_CKSUM; 1969 1970 rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME; 1971 1972 /* Inform framework about available features */ 1973 dev_info->rx_offload_capa = rx_feat; 1974 dev_info->rx_queue_offload_capa = rx_feat; 1975 dev_info->tx_offload_capa = tx_feat; 1976 dev_info->tx_queue_offload_capa = tx_feat; 1977 1978 dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP | 1979 ETH_RSS_UDP; 1980 1981 dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 1982 dev_info->max_rx_pktlen = adapter->max_mtu; 1983 dev_info->max_mac_addrs = 1; 1984 1985 dev_info->max_rx_queues = adapter->num_queues; 1986 dev_info->max_tx_queues = adapter->num_queues; 1987 dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 1988 1989 adapter->tx_supported_offloads = tx_feat; 1990 adapter->rx_supported_offloads = rx_feat; 1991 1992 dev_info->rx_desc_lim.nb_max = adapter->rx_ring_size; 1993 dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 1994 dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 1995 adapter->max_rx_sgl_size); 1996 dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 1997 adapter->max_rx_sgl_size); 1998 1999 dev_info->tx_desc_lim.nb_max = adapter->tx_ring_size; 2000 dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2001 dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2002 adapter->max_tx_sgl_size); 2003 dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2004 adapter->max_tx_sgl_size); 2005 2006 return 0; 2007 } 2008 2009 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 2010 uint16_t nb_pkts) 2011 { 2012 struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 2013 unsigned int ring_size = rx_ring->ring_size; 2014 unsigned int ring_mask = ring_size - 1; 2015 uint16_t next_to_clean = rx_ring->next_to_clean; 2016 uint16_t desc_in_use = 0; 2017 uint16_t req_id; 2018 unsigned int recv_idx = 0; 2019 struct rte_mbuf *mbuf = NULL; 2020 struct rte_mbuf *mbuf_head = NULL; 2021 struct rte_mbuf *mbuf_prev = NULL; 2022 struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info; 2023 unsigned int completed; 2024 2025 struct ena_com_rx_ctx ena_rx_ctx; 2026 int rc = 0; 2027 2028 /* Check adapter state */ 2029 if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 2030 PMD_DRV_LOG(ALERT, 2031 "Trying to receive pkts while device is NOT running\n"); 2032 return 0; 2033 } 2034 2035 desc_in_use = rx_ring->next_to_use - next_to_clean; 2036 if (unlikely(nb_pkts > desc_in_use)) 2037 nb_pkts = desc_in_use; 2038 2039 for (completed = 0; completed < nb_pkts; completed++) { 2040 int segments = 0; 2041 2042 ena_rx_ctx.max_bufs = rx_ring->sgl_size; 2043 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 2044 ena_rx_ctx.descs = 
static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
	unsigned int ring_size = rx_ring->ring_size;
	unsigned int ring_mask = ring_size - 1;
	uint16_t next_to_clean = rx_ring->next_to_clean;
	uint16_t desc_in_use = 0;
	uint16_t req_id;
	unsigned int recv_idx = 0;
	struct rte_mbuf *mbuf = NULL;
	struct rte_mbuf *mbuf_head = NULL;
	struct rte_mbuf *mbuf_prev = NULL;
	struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info;
	unsigned int completed;

	struct ena_com_rx_ctx ena_rx_ctx;
	int rc = 0;

	/* Check adapter state */
	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
		PMD_DRV_LOG(ALERT,
			"Trying to receive pkts while device is NOT running\n");
		return 0;
	}

	desc_in_use = rx_ring->next_to_use - next_to_clean;
	if (unlikely(nb_pkts > desc_in_use))
		nb_pkts = desc_in_use;

	for (completed = 0; completed < nb_pkts; completed++) {
		int segments = 0;

		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.descs = 0;
		ena_rx_ctx.pkt_offset = 0;
		/* Receive the next packet's context from the device */
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc)) {
			PMD_DRV_LOG(ERR, "ena_com_rx_pkt error %d\n", rc);
			rx_ring->adapter->reset_reason =
				ENA_REGS_RESET_TOO_MANY_RX_DESCS;
			rx_ring->adapter->trigger_reset = true;
			++rx_ring->rx_stats.bad_desc_num;
			return 0;
		}

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		while (segments < ena_rx_ctx.descs) {
			req_id = ena_rx_ctx.ena_bufs[segments].req_id;
			rc = validate_rx_req_id(rx_ring, req_id);
			if (unlikely(rc)) {
				if (segments != 0)
					rte_mbuf_raw_free(mbuf_head);
				break;
			}

			mbuf = rx_buff_info[req_id];
			rx_buff_info[req_id] = NULL;
			mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
			mbuf->refcnt = 1;
			mbuf->next = NULL;
			if (unlikely(segments == 0)) {
				mbuf->nb_segs = ena_rx_ctx.descs;
				mbuf->port = rx_ring->port_id;
				mbuf->pkt_len = 0;
				mbuf->data_off += ena_rx_ctx.pkt_offset;
				mbuf_head = mbuf;
			} else {
				/* For multi-segment packets build the mbuf chain */
				mbuf_prev->next = mbuf;
			}
			mbuf_head->pkt_len += mbuf->data_len;

			mbuf_prev = mbuf;
			rx_ring->empty_rx_reqs[next_to_clean & ring_mask] =
				req_id;
			segments++;
			next_to_clean++;
		}
		if (unlikely(rc))
			break;

		/* fill mbuf attributes if any */
		ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);

		if (unlikely(mbuf_head->ol_flags &
				(PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) {
			rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
			++rx_ring->rx_stats.bad_csum;
		}

		mbuf_head->hash.rss = ena_rx_ctx.hash;

		/* Pass the head mbuf to the DPDK application */
		rx_pkts[recv_idx] = mbuf_head;
		recv_idx++;
		rx_ring->rx_stats.bytes += mbuf_head->pkt_len;
	}

	rx_ring->rx_stats.cnt += recv_idx;
	rx_ring->next_to_clean = next_to_clean;

	desc_in_use = desc_in_use - completed + 1;
	/* Refill in bursts to save doorbells and memory barriers, and to
	 * keep the refill interval roughly constant.
	 */
	if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size)) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
	}

	return recv_idx;
}
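
/**
 * DPDK Tx prepare callback.
 *
 * Checks that the offloads requested for each mbuf are supported by the
 * device and computes the partial (pseudo-header) checksums needed for the
 * Tx checksum offloads. Processing stops at the first invalid packet, with
 * rte_errno set accordingly.
 *
 * @param tx_queue
 *   Pointer to the Tx ring.
 * @param tx_pkts
 *   Array of packets to prepare.
 * @param nb_pkts
 *   Number of packets in the array.
 *
 * @return
 *   Number of successfully prepared packets.
 */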
static uint16_t
eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	int32_t ret;
	uint32_t i;
	struct rte_mbuf *m;
	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
	struct rte_ipv4_hdr *ip_hdr;
	uint64_t ol_flags;
	uint16_t frag_field;

	for (i = 0; i != nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;

		if (!(ol_flags & PKT_TX_IPV4))
			continue;

		/* If the L2 header length was not specified, assume it is
		 * the length of an Ethernet header.
		 */
		if (unlikely(m->l2_len == 0))
			m->l2_len = sizeof(struct rte_ether_hdr);

		ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
						 m->l2_len);
		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);

		if ((frag_field & RTE_IPV4_HDR_DF_FLAG) != 0) {
			m->packet_type |= RTE_PTYPE_L4_NONFRAG;

			/* If the IPv4 header has the DF flag set and TSO is
			 * not supported, the partial checksum should not be
			 * calculated.
			 */
			if (!tx_ring->adapter->offloads.tso4_supported)
				continue;
		}

		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
				(ol_flags & PKT_TX_L4_MASK) ==
				PKT_TX_SCTP_CKSUM) {
			rte_errno = ENOTSUP;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif

		/* If TSO is requested and the DF flag is not set (DF=0), the
		 * hardware must be provided with a partial checksum;
		 * otherwise it takes care of the necessary calculations
		 * itself.
		 */
		ret = rte_net_intel_cksum_flags_prepare(m,
			ol_flags & ~PKT_TX_TCP_SEG);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}
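
/**
 * Apply device hints received through the AENQ NOTIFICATION event.
 *
 * Updates the admin completion timeout, the MMIO read timeout and the
 * keep-alive (watchdog) timeout with the values suggested by the device.
 */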
static void ena_update_hints(struct ena_adapter *adapter,
			     struct ena_admin_ena_hw_hints *hints)
{
	if (hints->admin_completion_tx_timeout)
		adapter->ena_dev.admin_queue.completion_timeout =
			hints->admin_completion_tx_timeout * 1000;

	if (hints->mmio_read_timeout)
		/* convert to usec */
		adapter->ena_dev.mmio_read.reg_read_to =
			hints->mmio_read_timeout * 1000;

	if (hints->driver_watchdog_timeout) {
		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
		else
			/* Convert msecs to ticks */
			adapter->keep_alive_timeout =
				(hints->driver_watchdog_timeout *
				rte_get_timer_hz()) / 1000;
	}
}

static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
					struct rte_mbuf *mbuf)
{
	struct ena_com_dev *ena_dev;
	int num_segments, header_len, rc;

	ena_dev = &tx_ring->adapter->ena_dev;
	num_segments = mbuf->nb_segs;
	header_len = mbuf->data_len;

	if (likely(num_segments < tx_ring->sgl_size))
		return 0;

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
	    (num_segments == tx_ring->sgl_size) &&
	    (header_len < tx_ring->tx_max_header_size))
		return 0;

	++tx_ring->tx_stats.linearize;
	rc = rte_pktmbuf_linearize(mbuf);
	if (unlikely(rc)) {
		PMD_DRV_LOG(WARNING, "Mbuf linearize failed\n");
		rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
		++tx_ring->tx_stats.linearize_failed;
		return rc;
	}

	return rc;
}
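
/**
 * DPDK Tx burst callback.
 *
 * Maps each mbuf chain onto Tx descriptors (linearizing it first if it has
 * more segments than the device supports), pushes the packet header for
 * LLQ-mode queues, rings the doorbell and then cleans up already completed
 * packets, returning their descriptors to the ring.
 *
 * @param tx_queue
 *   Pointer to the Tx ring.
 * @param tx_pkts
 *   Array of packets to transmit.
 * @param nb_pkts
 *   Number of packets in the array.
 *
 * @return
 *   Number of packets accepted for transmission.
 */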
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts)
{
	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
	uint16_t next_to_use = tx_ring->next_to_use;
	uint16_t next_to_clean = tx_ring->next_to_clean;
	struct rte_mbuf *mbuf;
	uint16_t seg_len;
	unsigned int ring_size = tx_ring->ring_size;
	unsigned int ring_mask = ring_size - 1;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_tx_buffer *tx_info;
	struct ena_com_buf *ebuf;
	uint16_t rc, req_id, total_tx_descs = 0;
	uint16_t sent_idx = 0, empty_tx_reqs;
	uint16_t push_len = 0;
	uint16_t delta = 0;
	int nb_hw_desc;
	uint32_t total_length;

	/* Check adapter state */
	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
		PMD_DRV_LOG(ALERT,
			"Trying to xmit pkts while device is NOT running\n");
		return 0;
	}

	empty_tx_reqs = ring_size - (next_to_use - next_to_clean);
	if (nb_pkts > empty_tx_reqs)
		nb_pkts = empty_tx_reqs;

	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
		mbuf = tx_pkts[sent_idx];
		total_length = 0;

		rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
		if (unlikely(rc))
			break;

		req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
		tx_info = &tx_ring->tx_buffer_info[req_id];
		tx_info->mbuf = mbuf;
		tx_info->num_of_bufs = 0;
		ebuf = tx_info->bufs;

		/* Prepare TX context */
		memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
		memset(&ena_tx_ctx.ena_meta, 0x0,
		       sizeof(struct ena_com_tx_meta));
		ena_tx_ctx.ena_bufs = ebuf;
		ena_tx_ctx.req_id = req_id;

		delta = 0;
		seg_len = mbuf->data_len;

		if (tx_ring->tx_mem_queue_type ==
				ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			push_len = RTE_MIN(mbuf->pkt_len,
					   tx_ring->tx_max_header_size);
			ena_tx_ctx.header_len = push_len;

			if (likely(push_len <= seg_len)) {
				/* If the push header is in the single segment,
				 * then just point it to the 1st mbuf data.
				 */
				ena_tx_ctx.push_header =
					rte_pktmbuf_mtod(mbuf, uint8_t *);
			} else {
				/* If the push header lays in the several
				 * segments, copy it to the intermediate buffer.
				 */
				rte_pktmbuf_read(mbuf, 0, push_len,
					tx_ring->push_buf_intermediate_buf);
				ena_tx_ctx.push_header =
					tx_ring->push_buf_intermediate_buf;
				delta = push_len - seg_len;
			}
		} /* there's no else as we take advantage of memset zeroing */

		/* Set TX offloads flags, if applicable */
		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);

		rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]);

		/* Process first segment taking into
		 * consideration pushed header
		 */
		if (seg_len > push_len) {
			ebuf->paddr = mbuf->buf_iova +
				      mbuf->data_off +
				      push_len;
			ebuf->len = seg_len - push_len;
			ebuf++;
			tx_info->num_of_bufs++;
		}
		total_length += mbuf->data_len;

		while ((mbuf = mbuf->next) != NULL) {
			seg_len = mbuf->data_len;

			/* Skip mbufs if whole data is pushed as a header */
			if (unlikely(delta > seg_len)) {
				delta -= seg_len;
				continue;
			}

			ebuf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
			ebuf->len = seg_len - delta;
			total_length += ebuf->len;
			ebuf++;
			tx_info->num_of_bufs++;

			delta = 0;
		}

		ena_tx_ctx.num_bufs = tx_info->num_of_bufs;

		if (ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
					       &ena_tx_ctx)) {
			PMD_DRV_LOG(DEBUG, "llq tx max burst size of queue %d"
				" achieved, writing doorbell to send burst\n",
				tx_ring->id);
			rte_wmb();
			ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
		}

		/* Prepare the packet's descriptors for the DMA engine */
		rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
					&ena_tx_ctx, &nb_hw_desc);
		if (unlikely(rc)) {
			++tx_ring->tx_stats.prepare_ctx_err;
			break;
		}
		tx_info->tx_descs = nb_hw_desc;

		next_to_use++;
		tx_ring->tx_stats.cnt++;
		tx_ring->tx_stats.bytes += total_length;
	}
	tx_ring->tx_stats.available_desc =
		ena_com_free_q_entries(tx_ring->ena_com_io_sq);

	/* If there are ready packets to be xmitted... */
	if (sent_idx > 0) {
		/* ...let HW do its best :-) */
		rte_wmb();
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
		tx_ring->tx_stats.doorbells++;
		tx_ring->next_to_use = next_to_use;
	}

	/* Clear completed packets */
	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		/* Get Tx info & store how many descs were processed */
		tx_info = &tx_ring->tx_buffer_info[req_id];
		total_tx_descs += tx_info->tx_descs;

		/* Free whole mbuf chain */
		mbuf = tx_info->mbuf;
		rte_pktmbuf_free(mbuf);
		tx_info->mbuf = NULL;

		/* Put back descriptor to the ring for reuse */
		tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
		next_to_clean++;

		/* If too many descs to clean, leave it for another run */
		if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
			break;
	}
	tx_ring->tx_stats.available_desc =
		ena_com_free_q_entries(tx_ring->ena_com_io_sq);

	if (total_tx_descs > 0) {
		/* acknowledge completion of sent packets */
		tx_ring->next_to_clean = next_to_clean;
		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
		ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
	}

	tx_ring->tx_stats.tx_poll++;

	return sent_idx;
}
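
/*
 * Extended statistics are exposed as a flat array: the global (device)
 * counters come first, followed by the per-queue Rx counters and then the
 * per-queue Tx counters. The callbacks below build the name table and read
 * the counter values following that layout.
 */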
/**
 * DPDK callback to retrieve names of extended device statistics
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] xstats_names
 *   Buffer to insert names into.
 * @param n
 *   Number of names.
 *
 * @return
 *   Number of xstats names.
 */
static int ena_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int n)
{
	unsigned int xstats_count = ena_xstats_calc_num(dev);
	unsigned int stat, i, count = 0;

	if (n < xstats_count || !xstats_names)
		return xstats_count;

	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++)
		strcpy(xstats_names[count].name,
			ena_stats_global_strings[stat].name);

	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++)
		for (i = 0; i < dev->data->nb_rx_queues; i++, count++)
			snprintf(xstats_names[count].name,
				sizeof(xstats_names[count].name),
				"rx_q%d_%s", i,
				ena_stats_rx_strings[stat].name);

	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++)
		for (i = 0; i < dev->data->nb_tx_queues; i++, count++)
			snprintf(xstats_names[count].name,
				sizeof(xstats_names[count].name),
				"tx_q%d_%s", i,
				ena_stats_tx_strings[stat].name);

	return xstats_count;
}

/**
 * DPDK callback to get extended device statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] stats
 *   Stats table output buffer.
 * @param n
 *   The size of the stats table.
 *
 * @return
 *   Number of xstats on success, negative on failure.
 */
static int ena_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *xstats,
			  unsigned int n)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	unsigned int xstats_count = ena_xstats_calc_num(dev);
	unsigned int stat, i, count = 0;
	int stat_offset;
	void *stats_begin;

	if (n < xstats_count)
		return xstats_count;

	if (!xstats)
		return 0;

	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
		stat_offset = ena_stats_global_strings[stat].stat_offset;
		stats_begin = &adapter->dev_stats;

		xstats[count].id = count;
		xstats[count].value = *((uint64_t *)
			((char *)stats_begin + stat_offset));
	}

	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
			stat_offset = ena_stats_rx_strings[stat].stat_offset;
			stats_begin = &adapter->rx_ring[i].rx_stats;

			xstats[count].id = count;
			xstats[count].value = *((uint64_t *)
				((char *)stats_begin + stat_offset));
		}
	}

	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
			stat_offset = ena_stats_tx_strings[stat].stat_offset;
			stats_begin = &adapter->tx_ring[i].tx_stats;

			xstats[count].id = count;
			xstats[count].value = *((uint64_t *)
				((char *)stats_begin + stat_offset));
		}
	}

	return count;
}

static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	uint64_t id;
	uint64_t rx_entries, tx_entries;
	unsigned int i;
	int qid;
	int valid = 0;

	for (i = 0; i < n; ++i) {
		id = ids[i];
		/* Check if id belongs to global statistics */
		if (id < ENA_STATS_ARRAY_GLOBAL) {
			values[i] = *((uint64_t *)&adapter->dev_stats + id);
			++valid;
			continue;
		}

		/* Check if id belongs to rx queue statistics */
		id -= ENA_STATS_ARRAY_GLOBAL;
		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
		if (id < rx_entries) {
			qid = id % dev->data->nb_rx_queues;
			id /= dev->data->nb_rx_queues;
			values[i] = *((uint64_t *)
				&adapter->rx_ring[qid].rx_stats + id);
			++valid;
			continue;
		}
		/* Check if id belongs to tx queue statistics */
		id -= rx_entries;
		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
		if (id < tx_entries) {
			qid = id % dev->data->nb_tx_queues;
			id /= dev->data->nb_tx_queues;
			values[i] = *((uint64_t *)
				&adapter->tx_ring[qid].tx_stats + id);
			++valid;
			continue;
		}
	}

	return valid;
}
/*********************************************************************
 *  PMD configuration
 *********************************************************************/
static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct ena_adapter), eth_ena_dev_init);
}

static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
}

static struct rte_pci_driver rte_ena_pmd = {
	.id_table = pci_id_ena_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_WC_ACTIVATE,
	.probe = eth_ena_pci_probe,
	.remove = eth_ena_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(ena_init_log)
{
	ena_logtype_init = rte_log_register("pmd.net.ena.init");
	if (ena_logtype_init >= 0)
		rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE);
	ena_logtype_driver = rte_log_register("pmd.net.ena.driver");
	if (ena_logtype_driver >= 0)
		rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);

#ifdef RTE_LIBRTE_ENA_DEBUG_RX
	ena_logtype_rx = rte_log_register("pmd.net.ena.rx");
	if (ena_logtype_rx >= 0)
		rte_log_set_level(ena_logtype_rx, RTE_LOG_NOTICE);
#endif

#ifdef RTE_LIBRTE_ENA_DEBUG_TX
	ena_logtype_tx = rte_log_register("pmd.net.ena.tx");
	if (ena_logtype_tx >= 0)
		rte_log_set_level(ena_logtype_tx, RTE_LOG_NOTICE);
#endif

#ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE
	ena_logtype_tx_free = rte_log_register("pmd.net.ena.tx_free");
	if (ena_logtype_tx_free >= 0)
		rte_log_set_level(ena_logtype_tx_free, RTE_LOG_NOTICE);
#endif

#ifdef RTE_LIBRTE_ENA_COM_DEBUG
	ena_logtype_com = rte_log_register("pmd.net.ena.com");
	if (ena_logtype_com >= 0)
		rte_log_set_level(ena_logtype_com, RTE_LOG_NOTICE);
#endif
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev;
	struct ena_adapter *adapter;
	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
	uint32_t status;

	adapter = adapter_data;
	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
	eth_dev = adapter->rte_dev;

	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
	adapter->link_status = status;

	ena_link_update(eth_dev, 0);
	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static void ena_notification(void *data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = data;
	struct ena_admin_ena_hw_hints *hints;

	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
		PMD_DRV_LOG(WARNING, "Invalid group(%x) expected %x\n",
			aenq_e->aenq_common_desc.group,
			ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid AENQ notification syndrome %d\n",
			aenq_e->aenq_common_desc.syndrom);
	}
}
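
/**
 * AENQ keep-alive handler.
 *
 * Refreshes the watchdog timestamp and updates the Rx drops counter
 * reported by the device.
 */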
static void ena_keep_alive(void *adapter_data,
			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = adapter_data;
	struct ena_admin_aenq_keep_alive_desc *desc;
	uint64_t rx_drops;

	adapter->timestamp_wd = rte_get_timer_cycles();

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
	rte_atomic64_set(&adapter->drv_stats->rx_drops, rx_drops);
}

/**
 * This handler will be called for an unknown event group or for events with
 * unimplemented handlers.
 **/
static void unimplemented_aenq_handler(__rte_unused void *data,
				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	PMD_DRV_LOG(ERR, "Unknown event was received or event with "
		"unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
	},
	.unimplemented_handler = unimplemented_aenq_handler
};