/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_net.h>
#include <rte_kvargs.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	0
#define DRV_MODULE_VER_SUBMINOR	3

#define ENA_IO_TXQ_IDX(q)	(2 * (q))
#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
/* Reverse version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)	((q - 1) / 2)

/* While processing submitted and completed descriptors (rx and tx path
 * respectively) in a loop it is desired to:
 *  - perform batch submissions while populating the submission queue
 *  - avoid blocking transmission of other packets during the cleanup phase
 * Hence the utilization ratio of 1/8 of a queue size.
 */
#define ENA_RING_DESCS_RATIO(ring_size)	(ring_size / 8)

#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))

#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)

#define ENA_RX_RSS_TABLE_LOG_SIZE	7
#define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)
#define ENA_HASH_KEY_SIZE	40
#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

#define ENA_MIN_RING_DESC	128

enum ethtool_stringset {
	ETH_SS_TEST = 0,
	ETH_SS_STATS,
};

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

/* Device arguments */
#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"

/*
 * Each rte_memzone should have a unique name.
 * To satisfy this, count the number of allocations and append it to the name.
92 */ 93 rte_atomic32_t ena_alloc_cnt; 94 95 static const struct ena_stats ena_stats_global_strings[] = { 96 ENA_STAT_GLOBAL_ENTRY(wd_expired), 97 ENA_STAT_GLOBAL_ENTRY(dev_start), 98 ENA_STAT_GLOBAL_ENTRY(dev_stop), 99 ENA_STAT_GLOBAL_ENTRY(tx_drops), 100 }; 101 102 static const struct ena_stats ena_stats_tx_strings[] = { 103 ENA_STAT_TX_ENTRY(cnt), 104 ENA_STAT_TX_ENTRY(bytes), 105 ENA_STAT_TX_ENTRY(prepare_ctx_err), 106 ENA_STAT_TX_ENTRY(linearize), 107 ENA_STAT_TX_ENTRY(linearize_failed), 108 ENA_STAT_TX_ENTRY(tx_poll), 109 ENA_STAT_TX_ENTRY(doorbells), 110 ENA_STAT_TX_ENTRY(bad_req_id), 111 ENA_STAT_TX_ENTRY(available_desc), 112 }; 113 114 static const struct ena_stats ena_stats_rx_strings[] = { 115 ENA_STAT_RX_ENTRY(cnt), 116 ENA_STAT_RX_ENTRY(bytes), 117 ENA_STAT_RX_ENTRY(refill_partial), 118 ENA_STAT_RX_ENTRY(bad_csum), 119 ENA_STAT_RX_ENTRY(mbuf_alloc_fail), 120 ENA_STAT_RX_ENTRY(bad_desc_num), 121 ENA_STAT_RX_ENTRY(bad_req_id), 122 }; 123 124 #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) 125 #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) 126 #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) 127 128 #define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\ 129 DEV_TX_OFFLOAD_UDP_CKSUM |\ 130 DEV_TX_OFFLOAD_IPV4_CKSUM |\ 131 DEV_TX_OFFLOAD_TCP_TSO) 132 #define MBUF_OFFLOADS (PKT_TX_L4_MASK |\ 133 PKT_TX_IP_CKSUM |\ 134 PKT_TX_TCP_SEG) 135 136 /** Vendor ID used by Amazon devices */ 137 #define PCI_VENDOR_ID_AMAZON 0x1D0F 138 /** Amazon devices */ 139 #define PCI_DEVICE_ID_ENA_VF 0xEC20 140 #define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21 141 142 #define ENA_TX_OFFLOAD_MASK (\ 143 PKT_TX_L4_MASK | \ 144 PKT_TX_IPV6 | \ 145 PKT_TX_IPV4 | \ 146 PKT_TX_IP_CKSUM | \ 147 PKT_TX_TCP_SEG) 148 149 #define ENA_TX_OFFLOAD_NOTSUP_MASK \ 150 (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK) 151 152 int ena_logtype_init; 153 int ena_logtype_driver; 154 155 #ifdef RTE_LIBRTE_ENA_DEBUG_RX 156 int ena_logtype_rx; 157 #endif 158 #ifdef RTE_LIBRTE_ENA_DEBUG_TX 159 int ena_logtype_tx; 160 #endif 161 #ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE 162 int ena_logtype_tx_free; 163 #endif 164 #ifdef RTE_LIBRTE_ENA_COM_DEBUG 165 int ena_logtype_com; 166 #endif 167 168 static const struct rte_pci_id pci_id_ena_map[] = { 169 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) }, 170 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) }, 171 { .device_id = 0 }, 172 }; 173 174 static struct ena_aenq_handlers aenq_handlers; 175 176 static int ena_device_init(struct ena_com_dev *ena_dev, 177 struct ena_com_dev_get_features_ctx *get_feat_ctx, 178 bool *wd_state); 179 static int ena_dev_configure(struct rte_eth_dev *dev); 180 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 181 uint16_t nb_pkts); 182 static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 183 uint16_t nb_pkts); 184 static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 185 uint16_t nb_desc, unsigned int socket_id, 186 const struct rte_eth_txconf *tx_conf); 187 static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 188 uint16_t nb_desc, unsigned int socket_id, 189 const struct rte_eth_rxconf *rx_conf, 190 struct rte_mempool *mp); 191 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len); 192 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, 193 struct ena_com_rx_buf_info *ena_bufs, 194 uint32_t descs, 195 uint16_t *next_to_clean, 196 uint8_t offset); 197 static uint16_t eth_ena_recv_pkts(void 
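/*
 * The tables above pair a printable counter name with an offsetof() into the
 * matching ena_stats_{dev,tx,rx} structure, so the xstats callbacks further
 * down can read any counter generically. A minimal sketch of that lookup,
 * assuming the counters are 64-bit as declared in ena_ethdev.h (the helper
 * name is illustrative only, not part of this driver):
 *
 *	static uint64_t
 *	ena_stat_read(const void *stats, const struct ena_stats *desc)
 *	{
 *		uint64_t val;
 *
 *		rte_memcpy(&val, (const char *)stats + desc->stat_offset,
 *			   sizeof(val));
 *		return val;
 *	}
 *
 *	// ena_stat_read(&txq->tx_stats, &ena_stats_tx_strings[1]) -> "bytes"
 */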
*rx_queue, 198 struct rte_mbuf **rx_pkts, uint16_t nb_pkts); 199 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count); 200 static void ena_init_rings(struct ena_adapter *adapter, 201 bool disable_meta_caching); 202 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 203 static int ena_start(struct rte_eth_dev *dev); 204 static void ena_stop(struct rte_eth_dev *dev); 205 static void ena_close(struct rte_eth_dev *dev); 206 static int ena_dev_reset(struct rte_eth_dev *dev); 207 static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); 208 static void ena_rx_queue_release_all(struct rte_eth_dev *dev); 209 static void ena_tx_queue_release_all(struct rte_eth_dev *dev); 210 static void ena_rx_queue_release(void *queue); 211 static void ena_tx_queue_release(void *queue); 212 static void ena_rx_queue_release_bufs(struct ena_ring *ring); 213 static void ena_tx_queue_release_bufs(struct ena_ring *ring); 214 static int ena_link_update(struct rte_eth_dev *dev, 215 int wait_to_complete); 216 static int ena_create_io_queue(struct ena_ring *ring); 217 static void ena_queue_stop(struct ena_ring *ring); 218 static void ena_queue_stop_all(struct rte_eth_dev *dev, 219 enum ena_ring_type ring_type); 220 static int ena_queue_start(struct ena_ring *ring); 221 static int ena_queue_start_all(struct rte_eth_dev *dev, 222 enum ena_ring_type ring_type); 223 static void ena_stats_restart(struct rte_eth_dev *dev); 224 static int ena_infos_get(struct rte_eth_dev *dev, 225 struct rte_eth_dev_info *dev_info); 226 static int ena_rss_reta_update(struct rte_eth_dev *dev, 227 struct rte_eth_rss_reta_entry64 *reta_conf, 228 uint16_t reta_size); 229 static int ena_rss_reta_query(struct rte_eth_dev *dev, 230 struct rte_eth_rss_reta_entry64 *reta_conf, 231 uint16_t reta_size); 232 static void ena_interrupt_handler_rte(void *cb_arg); 233 static void ena_timer_wd_callback(struct rte_timer *timer, void *arg); 234 static void ena_destroy_device(struct rte_eth_dev *eth_dev); 235 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev); 236 static int ena_xstats_get_names(struct rte_eth_dev *dev, 237 struct rte_eth_xstat_name *xstats_names, 238 unsigned int n); 239 static int ena_xstats_get(struct rte_eth_dev *dev, 240 struct rte_eth_xstat *stats, 241 unsigned int n); 242 static int ena_xstats_get_by_id(struct rte_eth_dev *dev, 243 const uint64_t *ids, 244 uint64_t *values, 245 unsigned int n); 246 static int ena_process_bool_devarg(const char *key, 247 const char *value, 248 void *opaque); 249 static int ena_parse_devargs(struct ena_adapter *adapter, 250 struct rte_devargs *devargs); 251 252 static const struct eth_dev_ops ena_dev_ops = { 253 .dev_configure = ena_dev_configure, 254 .dev_infos_get = ena_infos_get, 255 .rx_queue_setup = ena_rx_queue_setup, 256 .tx_queue_setup = ena_tx_queue_setup, 257 .dev_start = ena_start, 258 .dev_stop = ena_stop, 259 .link_update = ena_link_update, 260 .stats_get = ena_stats_get, 261 .xstats_get_names = ena_xstats_get_names, 262 .xstats_get = ena_xstats_get, 263 .xstats_get_by_id = ena_xstats_get_by_id, 264 .mtu_set = ena_mtu_set, 265 .rx_queue_release = ena_rx_queue_release, 266 .tx_queue_release = ena_tx_queue_release, 267 .dev_close = ena_close, 268 .dev_reset = ena_dev_reset, 269 .reta_update = ena_rss_reta_update, 270 .reta_query = ena_rss_reta_query, 271 }; 272 273 void ena_rss_key_fill(void *key, size_t size) 274 { 275 static bool key_generated; 276 static uint8_t default_key[ENA_HASH_KEY_SIZE]; 277 size_t i; 278 279 RTE_ASSERT(size <= 
ENA_HASH_KEY_SIZE); 280 281 if (!key_generated) { 282 for (i = 0; i < ENA_HASH_KEY_SIZE; ++i) 283 default_key[i] = rte_rand() & 0xff; 284 key_generated = true; 285 } 286 287 rte_memcpy(key, default_key, size); 288 } 289 290 static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, 291 struct ena_com_rx_ctx *ena_rx_ctx) 292 { 293 uint64_t ol_flags = 0; 294 uint32_t packet_type = 0; 295 296 if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) 297 packet_type |= RTE_PTYPE_L4_TCP; 298 else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP) 299 packet_type |= RTE_PTYPE_L4_UDP; 300 301 if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) 302 packet_type |= RTE_PTYPE_L3_IPV4; 303 else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) 304 packet_type |= RTE_PTYPE_L3_IPV6; 305 306 if (!ena_rx_ctx->l4_csum_checked) 307 ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; 308 else 309 if (unlikely(ena_rx_ctx->l4_csum_err) && !ena_rx_ctx->frag) 310 ol_flags |= PKT_RX_L4_CKSUM_BAD; 311 else 312 ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; 313 314 if (unlikely(ena_rx_ctx->l3_csum_err)) 315 ol_flags |= PKT_RX_IP_CKSUM_BAD; 316 317 mbuf->ol_flags = ol_flags; 318 mbuf->packet_type = packet_type; 319 } 320 321 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, 322 struct ena_com_tx_ctx *ena_tx_ctx, 323 uint64_t queue_offloads, 324 bool disable_meta_caching) 325 { 326 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; 327 328 if ((mbuf->ol_flags & MBUF_OFFLOADS) && 329 (queue_offloads & QUEUE_OFFLOADS)) { 330 /* check if TSO is required */ 331 if ((mbuf->ol_flags & PKT_TX_TCP_SEG) && 332 (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) { 333 ena_tx_ctx->tso_enable = true; 334 335 ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf); 336 } 337 338 /* check if L3 checksum is needed */ 339 if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) && 340 (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) 341 ena_tx_ctx->l3_csum_enable = true; 342 343 if (mbuf->ol_flags & PKT_TX_IPV6) { 344 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 345 } else { 346 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 347 348 /* set don't fragment (DF) flag */ 349 if (mbuf->packet_type & 350 (RTE_PTYPE_L4_NONFRAG 351 | RTE_PTYPE_INNER_L4_NONFRAG)) 352 ena_tx_ctx->df = true; 353 } 354 355 /* check if L4 checksum is needed */ 356 if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) && 357 (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) { 358 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 359 ena_tx_ctx->l4_csum_enable = true; 360 } else if (((mbuf->ol_flags & PKT_TX_L4_MASK) == 361 PKT_TX_UDP_CKSUM) && 362 (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) { 363 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 364 ena_tx_ctx->l4_csum_enable = true; 365 } else { 366 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; 367 ena_tx_ctx->l4_csum_enable = false; 368 } 369 370 ena_meta->mss = mbuf->tso_segsz; 371 ena_meta->l3_hdr_len = mbuf->l3_len; 372 ena_meta->l3_hdr_offset = mbuf->l2_len; 373 374 ena_tx_ctx->meta_valid = true; 375 } else if (disable_meta_caching) { 376 memset(ena_meta, 0, sizeof(*ena_meta)); 377 ena_tx_ctx->meta_valid = true; 378 } else { 379 ena_tx_ctx->meta_valid = false; 380 } 381 } 382 383 static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) 384 { 385 if (likely(req_id < rx_ring->ring_size)) 386 return 0; 387 388 PMD_DRV_LOG(ERR, "Invalid rx req_id: %hu\n", req_id); 389 390 rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; 391 rx_ring->adapter->trigger_reset = true; 392 ++rx_ring->rx_stats.bad_req_id; 393 394 
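/*
 * For ena_tx_mbuf_prepare() above to enable checksum/TSO offload, the
 * application must both request the offload on the queue (see QUEUE_OFFLOADS)
 * and mark each mbuf accordingly. A hedged sketch of the mbuf side for an
 * IPv4/TCP packet (`m` and the header lengths are examples):
 *
 *	m->l2_len = sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->ol_flags = PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 *
 *	// For TSO, additionally:
 *	m->l4_len = sizeof(struct rte_tcp_hdr);
 *	m->tso_segsz = 1448;
 *	m->ol_flags |= PKT_TX_TCP_SEG;
 */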
return -EFAULT; 395 } 396 397 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) 398 { 399 struct ena_tx_buffer *tx_info = NULL; 400 401 if (likely(req_id < tx_ring->ring_size)) { 402 tx_info = &tx_ring->tx_buffer_info[req_id]; 403 if (likely(tx_info->mbuf)) 404 return 0; 405 } 406 407 if (tx_info) 408 PMD_DRV_LOG(ERR, "tx_info doesn't have valid mbuf\n"); 409 else 410 PMD_DRV_LOG(ERR, "Invalid req_id: %hu\n", req_id); 411 412 /* Trigger device reset */ 413 ++tx_ring->tx_stats.bad_req_id; 414 tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; 415 tx_ring->adapter->trigger_reset = true; 416 return -EFAULT; 417 } 418 419 static void ena_config_host_info(struct ena_com_dev *ena_dev) 420 { 421 struct ena_admin_host_info *host_info; 422 int rc; 423 424 /* Allocate only the host info */ 425 rc = ena_com_allocate_host_info(ena_dev); 426 if (rc) { 427 PMD_DRV_LOG(ERR, "Cannot allocate host info\n"); 428 return; 429 } 430 431 host_info = ena_dev->host_attr.host_info; 432 433 host_info->os_type = ENA_ADMIN_OS_DPDK; 434 host_info->kernel_ver = RTE_VERSION; 435 strlcpy((char *)host_info->kernel_ver_str, rte_version(), 436 sizeof(host_info->kernel_ver_str)); 437 host_info->os_dist = RTE_VERSION; 438 strlcpy((char *)host_info->os_dist_str, rte_version(), 439 sizeof(host_info->os_dist_str)); 440 host_info->driver_version = 441 (DRV_MODULE_VER_MAJOR) | 442 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 443 (DRV_MODULE_VER_SUBMINOR << 444 ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 445 host_info->num_cpus = rte_lcore_count(); 446 447 host_info->driver_supported_features = 448 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK; 449 450 rc = ena_com_set_host_attributes(ena_dev); 451 if (rc) { 452 if (rc == -ENA_COM_UNSUPPORTED) 453 PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); 454 else 455 PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); 456 457 goto err; 458 } 459 460 return; 461 462 err: 463 ena_com_delete_host_info(ena_dev); 464 } 465 466 /* This function calculates the number of xstats based on the current config */ 467 static unsigned int ena_xstats_calc_num(struct rte_eth_dev *dev) 468 { 469 return ENA_STATS_ARRAY_GLOBAL + 470 (dev->data->nb_tx_queues * ENA_STATS_ARRAY_TX) + 471 (dev->data->nb_rx_queues * ENA_STATS_ARRAY_RX); 472 } 473 474 static void ena_config_debug_area(struct ena_adapter *adapter) 475 { 476 u32 debug_area_size; 477 int rc, ss_count; 478 479 ss_count = ena_xstats_calc_num(adapter->rte_dev); 480 481 /* allocate 32 bytes for each string and 64bit for the value */ 482 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 483 484 rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); 485 if (rc) { 486 PMD_DRV_LOG(ERR, "Cannot allocate debug area\n"); 487 return; 488 } 489 490 rc = ena_com_set_host_attributes(&adapter->ena_dev); 491 if (rc) { 492 if (rc == -ENA_COM_UNSUPPORTED) 493 PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); 494 else 495 PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); 496 497 goto err; 498 } 499 500 return; 501 err: 502 ena_com_delete_debug_area(&adapter->ena_dev); 503 } 504 505 static void ena_close(struct rte_eth_dev *dev) 506 { 507 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 508 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 509 struct ena_adapter *adapter = dev->data->dev_private; 510 511 if (adapter->state == ENA_ADAPTER_STATE_RUNNING) 512 ena_stop(dev); 513 adapter->state = ENA_ADAPTER_STATE_CLOSED; 514 515 ena_rx_queue_release_all(dev); 516 
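/*
 * Worked example for the driver_version packing in ena_config_host_info():
 * assuming ENA_ADMIN_HOST_INFO_MINOR_SHIFT is 8 and _SUB_MINOR_SHIFT is 16
 * (their values in ena_admin_defs.h), the 2.0.3 version defined at the top of
 * this file encodes as:
 *
 *	(3 << 16) | (0 << 8) | 2 == 0x00030002
 */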
ena_tx_queue_release_all(dev); 517 518 rte_free(adapter->drv_stats); 519 adapter->drv_stats = NULL; 520 521 rte_intr_disable(intr_handle); 522 rte_intr_callback_unregister(intr_handle, 523 ena_interrupt_handler_rte, 524 adapter); 525 526 /* 527 * MAC is not allocated dynamically. Setting NULL should prevent from 528 * release of the resource in the rte_eth_dev_release_port(). 529 */ 530 dev->data->mac_addrs = NULL; 531 } 532 533 static int 534 ena_dev_reset(struct rte_eth_dev *dev) 535 { 536 int rc = 0; 537 538 ena_destroy_device(dev); 539 rc = eth_ena_dev_init(dev); 540 if (rc) 541 PMD_INIT_LOG(CRIT, "Cannot initialize device"); 542 543 return rc; 544 } 545 546 static int ena_rss_reta_update(struct rte_eth_dev *dev, 547 struct rte_eth_rss_reta_entry64 *reta_conf, 548 uint16_t reta_size) 549 { 550 struct ena_adapter *adapter = dev->data->dev_private; 551 struct ena_com_dev *ena_dev = &adapter->ena_dev; 552 int rc, i; 553 u16 entry_value; 554 int conf_idx; 555 int idx; 556 557 if ((reta_size == 0) || (reta_conf == NULL)) 558 return -EINVAL; 559 560 if (reta_size > ENA_RX_RSS_TABLE_SIZE) { 561 PMD_DRV_LOG(WARNING, 562 "indirection table %d is bigger than supported (%d)\n", 563 reta_size, ENA_RX_RSS_TABLE_SIZE); 564 return -EINVAL; 565 } 566 567 for (i = 0 ; i < reta_size ; i++) { 568 /* each reta_conf is for 64 entries. 569 * to support 128 we use 2 conf of 64 570 */ 571 conf_idx = i / RTE_RETA_GROUP_SIZE; 572 idx = i % RTE_RETA_GROUP_SIZE; 573 if (TEST_BIT(reta_conf[conf_idx].mask, idx)) { 574 entry_value = 575 ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]); 576 577 rc = ena_com_indirect_table_fill_entry(ena_dev, 578 i, 579 entry_value); 580 if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 581 PMD_DRV_LOG(ERR, 582 "Cannot fill indirect table\n"); 583 return rc; 584 } 585 } 586 } 587 588 rc = ena_com_indirect_table_set(ena_dev); 589 if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 590 PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n"); 591 return rc; 592 } 593 594 PMD_DRV_LOG(DEBUG, "%s(): RSS configured %d entries for port %d\n", 595 __func__, reta_size, adapter->rte_dev->data->port_id); 596 597 return 0; 598 } 599 600 /* Query redirection table. 
*/ 601 static int ena_rss_reta_query(struct rte_eth_dev *dev, 602 struct rte_eth_rss_reta_entry64 *reta_conf, 603 uint16_t reta_size) 604 { 605 struct ena_adapter *adapter = dev->data->dev_private; 606 struct ena_com_dev *ena_dev = &adapter->ena_dev; 607 int rc; 608 int i; 609 u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0}; 610 int reta_conf_idx; 611 int reta_idx; 612 613 if (reta_size == 0 || reta_conf == NULL || 614 (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL))) 615 return -EINVAL; 616 617 rc = ena_com_indirect_table_get(ena_dev, indirect_table); 618 if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 619 PMD_DRV_LOG(ERR, "cannot get indirect table\n"); 620 return -ENOTSUP; 621 } 622 623 for (i = 0 ; i < reta_size ; i++) { 624 reta_conf_idx = i / RTE_RETA_GROUP_SIZE; 625 reta_idx = i % RTE_RETA_GROUP_SIZE; 626 if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx)) 627 reta_conf[reta_conf_idx].reta[reta_idx] = 628 ENA_IO_RXQ_IDX_REV(indirect_table[i]); 629 } 630 631 return 0; 632 } 633 634 static int ena_rss_init_default(struct ena_adapter *adapter) 635 { 636 struct ena_com_dev *ena_dev = &adapter->ena_dev; 637 uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues; 638 int rc, i; 639 u32 val; 640 641 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 642 if (unlikely(rc)) { 643 PMD_DRV_LOG(ERR, "Cannot init indirect table\n"); 644 goto err_rss_init; 645 } 646 647 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 648 val = i % nb_rx_queues; 649 rc = ena_com_indirect_table_fill_entry(ena_dev, i, 650 ENA_IO_RXQ_IDX(val)); 651 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 652 PMD_DRV_LOG(ERR, "Cannot fill indirect table\n"); 653 goto err_fill_indir; 654 } 655 } 656 657 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 658 ENA_HASH_KEY_SIZE, 0xFFFFFFFF); 659 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 660 PMD_DRV_LOG(INFO, "Cannot fill hash function\n"); 661 goto err_fill_indir; 662 } 663 664 rc = ena_com_set_default_hash_ctrl(ena_dev); 665 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 666 PMD_DRV_LOG(INFO, "Cannot fill hash control\n"); 667 goto err_fill_indir; 668 } 669 670 rc = ena_com_indirect_table_set(ena_dev); 671 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 672 PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n"); 673 goto err_fill_indir; 674 } 675 PMD_DRV_LOG(DEBUG, "RSS configured for port %d\n", 676 adapter->rte_dev->data->port_id); 677 678 return 0; 679 680 err_fill_indir: 681 ena_com_rss_destroy(ena_dev); 682 err_rss_init: 683 684 return rc; 685 } 686 687 static void ena_rx_queue_release_all(struct rte_eth_dev *dev) 688 { 689 struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues; 690 int nb_queues = dev->data->nb_rx_queues; 691 int i; 692 693 for (i = 0; i < nb_queues; i++) 694 ena_rx_queue_release(queues[i]); 695 } 696 697 static void ena_tx_queue_release_all(struct rte_eth_dev *dev) 698 { 699 struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues; 700 int nb_queues = dev->data->nb_tx_queues; 701 int i; 702 703 for (i = 0; i < nb_queues; i++) 704 ena_tx_queue_release(queues[i]); 705 } 706 707 static void ena_rx_queue_release(void *queue) 708 { 709 struct ena_ring *ring = (struct ena_ring *)queue; 710 711 /* Free ring resources */ 712 if (ring->rx_buffer_info) 713 rte_free(ring->rx_buffer_info); 714 ring->rx_buffer_info = NULL; 715 716 if (ring->rx_refill_buffer) 717 rte_free(ring->rx_refill_buffer); 718 ring->rx_refill_buffer = NULL; 719 720 if (ring->empty_rx_reqs) 721 
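/*
 * Worked example for ena_rss_init_default() above: the 128-entry RETA is
 * filled round-robin over the configured Rx queues and stored as device
 * queue indexes via ENA_IO_RXQ_IDX(). With e.g. 4 Rx queues:
 *
 *	entry i -> queue (i % 4) -> device index 2 * (i % 4) + 1
 *	entries 0..7 therefore hold 1, 3, 5, 7, 1, 3, 5, 7
 *
 * ena_rss_reta_query() reverses the mapping with ENA_IO_RXQ_IDX_REV(), while
 * ena_rss_reta_update() expects application-visible queue ids as input.
 */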
rte_free(ring->empty_rx_reqs); 722 ring->empty_rx_reqs = NULL; 723 724 ring->configured = 0; 725 726 PMD_DRV_LOG(NOTICE, "RX Queue %d:%d released\n", 727 ring->port_id, ring->id); 728 } 729 730 static void ena_tx_queue_release(void *queue) 731 { 732 struct ena_ring *ring = (struct ena_ring *)queue; 733 734 /* Free ring resources */ 735 if (ring->push_buf_intermediate_buf) 736 rte_free(ring->push_buf_intermediate_buf); 737 738 if (ring->tx_buffer_info) 739 rte_free(ring->tx_buffer_info); 740 741 if (ring->empty_tx_reqs) 742 rte_free(ring->empty_tx_reqs); 743 744 ring->empty_tx_reqs = NULL; 745 ring->tx_buffer_info = NULL; 746 ring->push_buf_intermediate_buf = NULL; 747 748 ring->configured = 0; 749 750 PMD_DRV_LOG(NOTICE, "TX Queue %d:%d released\n", 751 ring->port_id, ring->id); 752 } 753 754 static void ena_rx_queue_release_bufs(struct ena_ring *ring) 755 { 756 unsigned int i; 757 758 for (i = 0; i < ring->ring_size; ++i) { 759 struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i]; 760 if (rx_info->mbuf) { 761 rte_mbuf_raw_free(rx_info->mbuf); 762 rx_info->mbuf = NULL; 763 } 764 } 765 } 766 767 static void ena_tx_queue_release_bufs(struct ena_ring *ring) 768 { 769 unsigned int i; 770 771 for (i = 0; i < ring->ring_size; ++i) { 772 struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i]; 773 774 if (tx_buf->mbuf) 775 rte_pktmbuf_free(tx_buf->mbuf); 776 } 777 } 778 779 static int ena_link_update(struct rte_eth_dev *dev, 780 __rte_unused int wait_to_complete) 781 { 782 struct rte_eth_link *link = &dev->data->dev_link; 783 struct ena_adapter *adapter = dev->data->dev_private; 784 785 link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; 786 link->link_speed = ETH_SPEED_NUM_NONE; 787 link->link_duplex = ETH_LINK_FULL_DUPLEX; 788 789 return 0; 790 } 791 792 static int ena_queue_start_all(struct rte_eth_dev *dev, 793 enum ena_ring_type ring_type) 794 { 795 struct ena_adapter *adapter = dev->data->dev_private; 796 struct ena_ring *queues = NULL; 797 int nb_queues; 798 int i = 0; 799 int rc = 0; 800 801 if (ring_type == ENA_RING_TYPE_RX) { 802 queues = adapter->rx_ring; 803 nb_queues = dev->data->nb_rx_queues; 804 } else { 805 queues = adapter->tx_ring; 806 nb_queues = dev->data->nb_tx_queues; 807 } 808 for (i = 0; i < nb_queues; i++) { 809 if (queues[i].configured) { 810 if (ring_type == ENA_RING_TYPE_RX) { 811 ena_assert_msg( 812 dev->data->rx_queues[i] == &queues[i], 813 "Inconsistent state of rx queues\n"); 814 } else { 815 ena_assert_msg( 816 dev->data->tx_queues[i] == &queues[i], 817 "Inconsistent state of tx queues\n"); 818 } 819 820 rc = ena_queue_start(&queues[i]); 821 822 if (rc) { 823 PMD_INIT_LOG(ERR, 824 "failed to start queue %d type(%d)", 825 i, ring_type); 826 goto err; 827 } 828 } 829 } 830 831 return 0; 832 833 err: 834 while (i--) 835 if (queues[i].configured) 836 ena_queue_stop(&queues[i]); 837 838 return rc; 839 } 840 841 static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter) 842 { 843 uint32_t max_frame_len = adapter->max_mtu; 844 845 if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads & 846 DEV_RX_OFFLOAD_JUMBO_FRAME) 847 max_frame_len = 848 adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len; 849 850 return max_frame_len; 851 } 852 853 static int ena_check_valid_conf(struct ena_adapter *adapter) 854 { 855 uint32_t max_frame_len = ena_get_mtu_conf(adapter); 856 857 if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) { 858 PMD_INIT_LOG(ERR, "Unsupported MTU of %d. 
" 859 "max mtu: %d, min mtu: %d", 860 max_frame_len, adapter->max_mtu, ENA_MIN_MTU); 861 return ENA_COM_UNSUPPORTED; 862 } 863 864 return 0; 865 } 866 867 static int 868 ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx, 869 bool use_large_llq_hdr) 870 { 871 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 872 struct ena_com_dev *ena_dev = ctx->ena_dev; 873 uint32_t max_tx_queue_size; 874 uint32_t max_rx_queue_size; 875 876 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 877 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 878 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 879 max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth, 880 max_queue_ext->max_rx_sq_depth); 881 max_tx_queue_size = max_queue_ext->max_tx_cq_depth; 882 883 if (ena_dev->tx_mem_queue_type == 884 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 885 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 886 llq->max_llq_depth); 887 } else { 888 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 889 max_queue_ext->max_tx_sq_depth); 890 } 891 892 ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 893 max_queue_ext->max_per_packet_rx_descs); 894 ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 895 max_queue_ext->max_per_packet_tx_descs); 896 } else { 897 struct ena_admin_queue_feature_desc *max_queues = 898 &ctx->get_feat_ctx->max_queues; 899 max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth, 900 max_queues->max_sq_depth); 901 max_tx_queue_size = max_queues->max_cq_depth; 902 903 if (ena_dev->tx_mem_queue_type == 904 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 905 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 906 llq->max_llq_depth); 907 } else { 908 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 909 max_queues->max_sq_depth); 910 } 911 912 ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 913 max_queues->max_packet_rx_descs); 914 ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 915 max_queues->max_packet_tx_descs); 916 } 917 918 /* Round down to the nearest power of 2 */ 919 max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size); 920 max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size); 921 922 if (use_large_llq_hdr) { 923 if ((llq->entry_size_ctrl_supported & 924 ENA_ADMIN_LIST_ENTRY_SIZE_256B) && 925 (ena_dev->tx_mem_queue_type == 926 ENA_ADMIN_PLACEMENT_POLICY_DEV)) { 927 max_tx_queue_size /= 2; 928 PMD_INIT_LOG(INFO, 929 "Forcing large headers and decreasing maximum TX queue size to %d\n", 930 max_tx_queue_size); 931 } else { 932 PMD_INIT_LOG(ERR, 933 "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); 934 } 935 } 936 937 if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) { 938 PMD_INIT_LOG(ERR, "Invalid queue size"); 939 return -EFAULT; 940 } 941 942 ctx->max_tx_queue_size = max_tx_queue_size; 943 ctx->max_rx_queue_size = max_rx_queue_size; 944 945 return 0; 946 } 947 948 static void ena_stats_restart(struct rte_eth_dev *dev) 949 { 950 struct ena_adapter *adapter = dev->data->dev_private; 951 952 rte_atomic64_init(&adapter->drv_stats->ierrors); 953 rte_atomic64_init(&adapter->drv_stats->oerrors); 954 rte_atomic64_init(&adapter->drv_stats->rx_nombuf); 955 adapter->drv_stats->rx_drops = 0; 956 } 957 958 static int ena_stats_get(struct rte_eth_dev *dev, 959 struct rte_eth_stats *stats) 960 { 961 struct ena_admin_basic_stats ena_stats; 962 struct ena_adapter *adapter = dev->data->dev_private; 963 struct ena_com_dev *ena_dev = &adapter->ena_dev; 964 int rc; 965 int i; 966 int max_rings_stats; 967 968 if (rte_eal_process_type() != 
RTE_PROC_PRIMARY) 969 return -ENOTSUP; 970 971 memset(&ena_stats, 0, sizeof(ena_stats)); 972 rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats); 973 if (unlikely(rc)) { 974 PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n"); 975 return rc; 976 } 977 978 /* Set of basic statistics from ENA */ 979 stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, 980 ena_stats.rx_pkts_low); 981 stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, 982 ena_stats.tx_pkts_low); 983 stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, 984 ena_stats.rx_bytes_low); 985 stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, 986 ena_stats.tx_bytes_low); 987 988 /* Driver related stats */ 989 stats->imissed = adapter->drv_stats->rx_drops; 990 stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); 991 stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); 992 stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); 993 994 max_rings_stats = RTE_MIN(dev->data->nb_rx_queues, 995 RTE_ETHDEV_QUEUE_STAT_CNTRS); 996 for (i = 0; i < max_rings_stats; ++i) { 997 struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats; 998 999 stats->q_ibytes[i] = rx_stats->bytes; 1000 stats->q_ipackets[i] = rx_stats->cnt; 1001 stats->q_errors[i] = rx_stats->bad_desc_num + 1002 rx_stats->bad_req_id; 1003 } 1004 1005 max_rings_stats = RTE_MIN(dev->data->nb_tx_queues, 1006 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1007 for (i = 0; i < max_rings_stats; ++i) { 1008 struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats; 1009 1010 stats->q_obytes[i] = tx_stats->bytes; 1011 stats->q_opackets[i] = tx_stats->cnt; 1012 } 1013 1014 return 0; 1015 } 1016 1017 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1018 { 1019 struct ena_adapter *adapter; 1020 struct ena_com_dev *ena_dev; 1021 int rc = 0; 1022 1023 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 1024 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 1025 adapter = dev->data->dev_private; 1026 1027 ena_dev = &adapter->ena_dev; 1028 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 1029 1030 if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) { 1031 PMD_DRV_LOG(ERR, 1032 "Invalid MTU setting. 
new_mtu: %d " 1033 "max mtu: %d min mtu: %d\n", 1034 mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU); 1035 return -EINVAL; 1036 } 1037 1038 rc = ena_com_set_dev_mtu(ena_dev, mtu); 1039 if (rc) 1040 PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu); 1041 else 1042 PMD_DRV_LOG(NOTICE, "Set MTU: %d\n", mtu); 1043 1044 return rc; 1045 } 1046 1047 static int ena_start(struct rte_eth_dev *dev) 1048 { 1049 struct ena_adapter *adapter = dev->data->dev_private; 1050 uint64_t ticks; 1051 int rc = 0; 1052 1053 rc = ena_check_valid_conf(adapter); 1054 if (rc) 1055 return rc; 1056 1057 rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX); 1058 if (rc) 1059 return rc; 1060 1061 rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX); 1062 if (rc) 1063 goto err_start_tx; 1064 1065 if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode & 1066 ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) { 1067 rc = ena_rss_init_default(adapter); 1068 if (rc) 1069 goto err_rss_init; 1070 } 1071 1072 ena_stats_restart(dev); 1073 1074 adapter->timestamp_wd = rte_get_timer_cycles(); 1075 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 1076 1077 ticks = rte_get_timer_hz(); 1078 rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), 1079 ena_timer_wd_callback, adapter); 1080 1081 ++adapter->dev_stats.dev_start; 1082 adapter->state = ENA_ADAPTER_STATE_RUNNING; 1083 1084 return 0; 1085 1086 err_rss_init: 1087 ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 1088 err_start_tx: 1089 ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 1090 return rc; 1091 } 1092 1093 static void ena_stop(struct rte_eth_dev *dev) 1094 { 1095 struct ena_adapter *adapter = dev->data->dev_private; 1096 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1097 int rc; 1098 1099 rte_timer_stop_sync(&adapter->timer_wd); 1100 ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 1101 ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 1102 1103 if (adapter->trigger_reset) { 1104 rc = ena_com_dev_reset(ena_dev, adapter->reset_reason); 1105 if (rc) 1106 PMD_DRV_LOG(ERR, "Device reset failed rc=%d\n", rc); 1107 } 1108 1109 ++adapter->dev_stats.dev_stop; 1110 adapter->state = ENA_ADAPTER_STATE_STOPPED; 1111 } 1112 1113 static int ena_create_io_queue(struct ena_ring *ring) 1114 { 1115 struct ena_adapter *adapter; 1116 struct ena_com_dev *ena_dev; 1117 struct ena_com_create_io_ctx ctx = 1118 /* policy set to _HOST just to satisfy icc compiler */ 1119 { ENA_ADMIN_PLACEMENT_POLICY_HOST, 1120 0, 0, 0, 0, 0 }; 1121 uint16_t ena_qid; 1122 unsigned int i; 1123 int rc; 1124 1125 adapter = ring->adapter; 1126 ena_dev = &adapter->ena_dev; 1127 1128 if (ring->type == ENA_RING_TYPE_TX) { 1129 ena_qid = ENA_IO_TXQ_IDX(ring->id); 1130 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1131 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1132 for (i = 0; i < ring->ring_size; i++) 1133 ring->empty_tx_reqs[i] = i; 1134 } else { 1135 ena_qid = ENA_IO_RXQ_IDX(ring->id); 1136 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 1137 for (i = 0; i < ring->ring_size; i++) 1138 ring->empty_rx_reqs[i] = i; 1139 } 1140 ctx.queue_size = ring->ring_size; 1141 ctx.qid = ena_qid; 1142 ctx.msix_vector = -1; /* interrupts not used */ 1143 ctx.numa_node = ring->numa_socket_id; 1144 1145 rc = ena_com_create_io_queue(ena_dev, &ctx); 1146 if (rc) { 1147 PMD_DRV_LOG(ERR, 1148 "failed to create io queue #%d (qid:%d) rc: %d\n", 1149 ring->id, ena_qid, rc); 1150 return rc; 1151 } 1152 1153 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1154 &ring->ena_com_io_sq, 1155 &ring->ena_com_io_cq); 1156 if (rc) { 1157 
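/*
 * ena_start()/ena_stop() above are reached through the standard ethdev calls.
 * A minimal, illustrative bring-up sequence on the application side (port id,
 * descriptor counts and the mempool are placeholders; ring sizes must be
 * powers of two within the limits advertised by ena_infos_get()):
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *	};
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	for (q = 0; q < nb_rxq; q++)
 *		rte_eth_rx_queue_setup(port_id, q, 1024, SOCKET_ID_ANY,
 *				       NULL, mbuf_pool);
 *	for (q = 0; q < nb_txq; q++)
 *		rte_eth_tx_queue_setup(port_id, q, 1024, SOCKET_ID_ANY, NULL);
 *	rte_eth_dev_start(port_id);	// ends up in ena_start()
 */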
PMD_DRV_LOG(ERR, 1158 "Failed to get io queue handlers. queue num %d rc: %d\n", 1159 ring->id, rc); 1160 ena_com_destroy_io_queue(ena_dev, ena_qid); 1161 return rc; 1162 } 1163 1164 if (ring->type == ENA_RING_TYPE_TX) 1165 ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); 1166 1167 return 0; 1168 } 1169 1170 static void ena_queue_stop(struct ena_ring *ring) 1171 { 1172 struct ena_com_dev *ena_dev = &ring->adapter->ena_dev; 1173 1174 if (ring->type == ENA_RING_TYPE_RX) { 1175 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id)); 1176 ena_rx_queue_release_bufs(ring); 1177 } else { 1178 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id)); 1179 ena_tx_queue_release_bufs(ring); 1180 } 1181 } 1182 1183 static void ena_queue_stop_all(struct rte_eth_dev *dev, 1184 enum ena_ring_type ring_type) 1185 { 1186 struct ena_adapter *adapter = dev->data->dev_private; 1187 struct ena_ring *queues = NULL; 1188 uint16_t nb_queues, i; 1189 1190 if (ring_type == ENA_RING_TYPE_RX) { 1191 queues = adapter->rx_ring; 1192 nb_queues = dev->data->nb_rx_queues; 1193 } else { 1194 queues = adapter->tx_ring; 1195 nb_queues = dev->data->nb_tx_queues; 1196 } 1197 1198 for (i = 0; i < nb_queues; ++i) 1199 if (queues[i].configured) 1200 ena_queue_stop(&queues[i]); 1201 } 1202 1203 static int ena_queue_start(struct ena_ring *ring) 1204 { 1205 int rc, bufs_num; 1206 1207 ena_assert_msg(ring->configured == 1, 1208 "Trying to start unconfigured queue\n"); 1209 1210 rc = ena_create_io_queue(ring); 1211 if (rc) { 1212 PMD_INIT_LOG(ERR, "Failed to create IO queue!"); 1213 return rc; 1214 } 1215 1216 ring->next_to_clean = 0; 1217 ring->next_to_use = 0; 1218 1219 if (ring->type == ENA_RING_TYPE_TX) { 1220 ring->tx_stats.available_desc = 1221 ena_com_free_q_entries(ring->ena_com_io_sq); 1222 return 0; 1223 } 1224 1225 bufs_num = ring->ring_size - 1; 1226 rc = ena_populate_rx_queue(ring, bufs_num); 1227 if (rc != bufs_num) { 1228 ena_com_destroy_io_queue(&ring->adapter->ena_dev, 1229 ENA_IO_RXQ_IDX(ring->id)); 1230 PMD_INIT_LOG(ERR, "Failed to populate rx ring !"); 1231 return ENA_COM_FAULT; 1232 } 1233 1234 return 0; 1235 } 1236 1237 static int ena_tx_queue_setup(struct rte_eth_dev *dev, 1238 uint16_t queue_idx, 1239 uint16_t nb_desc, 1240 unsigned int socket_id, 1241 const struct rte_eth_txconf *tx_conf) 1242 { 1243 struct ena_ring *txq = NULL; 1244 struct ena_adapter *adapter = dev->data->dev_private; 1245 unsigned int i; 1246 1247 txq = &adapter->tx_ring[queue_idx]; 1248 1249 if (txq->configured) { 1250 PMD_DRV_LOG(CRIT, 1251 "API violation. 
Queue %d is already configured\n", 1252 queue_idx); 1253 return ENA_COM_FAULT; 1254 } 1255 1256 if (!rte_is_power_of_2(nb_desc)) { 1257 PMD_DRV_LOG(ERR, 1258 "Unsupported size of TX queue: %d is not a power of 2.\n", 1259 nb_desc); 1260 return -EINVAL; 1261 } 1262 1263 if (nb_desc > adapter->max_tx_ring_size) { 1264 PMD_DRV_LOG(ERR, 1265 "Unsupported size of TX queue (max size: %d)\n", 1266 adapter->max_tx_ring_size); 1267 return -EINVAL; 1268 } 1269 1270 if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE) 1271 nb_desc = adapter->max_tx_ring_size; 1272 1273 txq->port_id = dev->data->port_id; 1274 txq->next_to_clean = 0; 1275 txq->next_to_use = 0; 1276 txq->ring_size = nb_desc; 1277 txq->numa_socket_id = socket_id; 1278 1279 txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info", 1280 sizeof(struct ena_tx_buffer) * 1281 txq->ring_size, 1282 RTE_CACHE_LINE_SIZE); 1283 if (!txq->tx_buffer_info) { 1284 PMD_DRV_LOG(ERR, "failed to alloc mem for tx buffer info\n"); 1285 return -ENOMEM; 1286 } 1287 1288 txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs", 1289 sizeof(u16) * txq->ring_size, 1290 RTE_CACHE_LINE_SIZE); 1291 if (!txq->empty_tx_reqs) { 1292 PMD_DRV_LOG(ERR, "failed to alloc mem for tx reqs\n"); 1293 rte_free(txq->tx_buffer_info); 1294 return -ENOMEM; 1295 } 1296 1297 txq->push_buf_intermediate_buf = 1298 rte_zmalloc("txq->push_buf_intermediate_buf", 1299 txq->tx_max_header_size, 1300 RTE_CACHE_LINE_SIZE); 1301 if (!txq->push_buf_intermediate_buf) { 1302 PMD_DRV_LOG(ERR, "failed to alloc push buff for LLQ\n"); 1303 rte_free(txq->tx_buffer_info); 1304 rte_free(txq->empty_tx_reqs); 1305 return -ENOMEM; 1306 } 1307 1308 for (i = 0; i < txq->ring_size; i++) 1309 txq->empty_tx_reqs[i] = i; 1310 1311 if (tx_conf != NULL) { 1312 txq->offloads = 1313 tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 1314 } 1315 /* Store pointer to this queue in upper layer */ 1316 txq->configured = 1; 1317 dev->data->tx_queues[queue_idx] = txq; 1318 1319 return 0; 1320 } 1321 1322 static int ena_rx_queue_setup(struct rte_eth_dev *dev, 1323 uint16_t queue_idx, 1324 uint16_t nb_desc, 1325 unsigned int socket_id, 1326 __rte_unused const struct rte_eth_rxconf *rx_conf, 1327 struct rte_mempool *mp) 1328 { 1329 struct ena_adapter *adapter = dev->data->dev_private; 1330 struct ena_ring *rxq = NULL; 1331 size_t buffer_size; 1332 int i; 1333 1334 rxq = &adapter->rx_ring[queue_idx]; 1335 if (rxq->configured) { 1336 PMD_DRV_LOG(CRIT, 1337 "API violation. 
Queue %d is already configured\n",
			queue_idx);
		return ENA_COM_FAULT;
	}

	if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE)
		nb_desc = adapter->max_rx_ring_size;

	if (!rte_is_power_of_2(nb_desc)) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of RX queue: %d is not a power of 2.\n",
			nb_desc);
		return -EINVAL;
	}

	if (nb_desc > adapter->max_rx_ring_size) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of RX queue (max size: %d)\n",
			adapter->max_rx_ring_size);
		return -EINVAL;
	}

	/* ENA doesn't support buffers smaller than 1400 bytes */
	buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
	if (buffer_size < ENA_RX_BUF_MIN_SIZE) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of RX buffer: %zu (min size: %d)\n",
			buffer_size, ENA_RX_BUF_MIN_SIZE);
		return -EINVAL;
	}

	rxq->port_id = dev->data->port_id;
	rxq->next_to_clean = 0;
	rxq->next_to_use = 0;
	rxq->ring_size = nb_desc;
	rxq->numa_socket_id = socket_id;
	rxq->mb_pool = mp;

	rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
		sizeof(struct ena_rx_buffer) * nb_desc,
		RTE_CACHE_LINE_SIZE);
	if (!rxq->rx_buffer_info) {
		PMD_DRV_LOG(ERR, "failed to alloc mem for rx buffer info\n");
		return -ENOMEM;
	}

	rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
		sizeof(struct rte_mbuf *) * nb_desc,
		RTE_CACHE_LINE_SIZE);

	if (!rxq->rx_refill_buffer) {
		PMD_DRV_LOG(ERR, "failed to alloc mem for rx refill buffer\n");
		rte_free(rxq->rx_buffer_info);
		rxq->rx_buffer_info = NULL;
		return -ENOMEM;
	}

	rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
		sizeof(uint16_t) * nb_desc,
		RTE_CACHE_LINE_SIZE);
	if (!rxq->empty_rx_reqs) {
		PMD_DRV_LOG(ERR, "failed to alloc mem for empty rx reqs\n");
		rte_free(rxq->rx_buffer_info);
		rxq->rx_buffer_info = NULL;
		rte_free(rxq->rx_refill_buffer);
		rxq->rx_refill_buffer = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nb_desc; i++)
		rxq->empty_rx_reqs[i] = i;

	/* Store pointer to this queue in upper layer */
	rxq->configured = 1;
	dev->data->rx_queues[queue_idx] = rxq;

	return 0;
}

static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
{
	unsigned int i;
	int rc;
	uint16_t ring_size = rxq->ring_size;
	uint16_t ring_mask = ring_size - 1;
	uint16_t next_to_use = rxq->next_to_use;
	uint16_t in_use, req_id;
	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;

	if (unlikely(!count))
		return 0;

	in_use = rxq->next_to_use - rxq->next_to_clean;
	ena_assert_msg(((in_use + count) < ring_size), "bad ring state\n");

	/* get resources for incoming packets */
	rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
	if (unlikely(rc < 0)) {
		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
		++rxq->rx_stats.mbuf_alloc_fail;
		PMD_RX_LOG(DEBUG, "there are not enough free buffers");
		return 0;
	}

	for (i = 0; i < count; i++) {
		uint16_t next_to_use_masked = next_to_use & ring_mask;
		struct rte_mbuf *mbuf = mbufs[i];
		struct ena_com_buf ebuf;
		struct ena_rx_buffer *rx_info;

		if (likely((i + 4) < count))
			rte_prefetch0(mbufs[i + 4]);

		req_id = rxq->empty_rx_reqs[next_to_use_masked];
		rc = validate_rx_req_id(rxq, req_id);
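		/*
		 * The buffers pulled from rxq->mb_pool here must leave at
		 * least ENA_RX_BUF_MIN_SIZE (1400 B) of data room after the
		 * headroom, as checked in ena_rx_queue_setup(). Illustrative
		 * pool creation on the application side (name and sizes are
		 * placeholders):
		 *
		 *	mp = rte_pktmbuf_pool_create("ena_rx_pool", 8192, 256,
		 *		0, RTE_PKTMBUF_HEADROOM + 2048,
		 *		rte_socket_id());
		 */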
		if (unlikely(rc))
			break;

		rx_info = &rxq->rx_buffer_info[req_id];

		/* prepare physical address for DMA transaction */
		ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
		ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
		/* pass resource to device */
		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
						&ebuf, req_id);
		if (unlikely(rc)) {
			PMD_DRV_LOG(WARNING, "failed adding rx desc\n");
			break;
		}
		rx_info->mbuf = mbuf;
		next_to_use++;
	}

	if (unlikely(i < count)) {
		PMD_DRV_LOG(WARNING, "refilled rx qid %d with only %d "
			"buffers (from %d)\n", rxq->id, i, count);
		rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
				     count - i);
		++rxq->rx_stats.refill_partial;
	}

	/* When we submitted free resources to device... */
	if (likely(i > 0)) {
		/* ...let HW know that it can fill buffers with data. */
		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);

		rxq->next_to_use = next_to_use;
	}

	return i;
}

static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state)
{
	uint32_t aenq_groups;
	int rc;
	bool readless_supported;

	/* Initialize mmio registers */
	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "failed to init mmio read less\n");
		return rc;
	}

	/* The PCIe configuration space revision ID indicates whether mmio
	 * register read is disabled.
	 */
	readless_supported =
		!(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
			& ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	/* reset device */
	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (rc) {
		PMD_DRV_LOG(ERR, "cannot reset device\n");
		goto err_mmio_read_less;
	}

	/* check FW version */
	rc = ena_com_validate_version(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "device version is too low\n");
		goto err_mmio_read_less;
	}

	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);

	/* ENA device administration layer init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"cannot initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	/* To enable the MSI-X interrupts the driver needs to know the number
	 * of queues, so the driver uses polling mode to retrieve this
	 * information.
1540 */ 1541 ena_com_set_admin_polling_mode(ena_dev, true); 1542 1543 ena_config_host_info(ena_dev); 1544 1545 /* Get Device Attributes and features */ 1546 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 1547 if (rc) { 1548 PMD_DRV_LOG(ERR, 1549 "cannot get attribute for ena device rc= %d\n", rc); 1550 goto err_admin_init; 1551 } 1552 1553 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1554 BIT(ENA_ADMIN_NOTIFICATION) | 1555 BIT(ENA_ADMIN_KEEP_ALIVE) | 1556 BIT(ENA_ADMIN_FATAL_ERROR) | 1557 BIT(ENA_ADMIN_WARNING); 1558 1559 aenq_groups &= get_feat_ctx->aenq.supported_groups; 1560 rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 1561 if (rc) { 1562 PMD_DRV_LOG(ERR, "Cannot configure aenq groups rc: %d\n", rc); 1563 goto err_admin_init; 1564 } 1565 1566 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 1567 1568 return 0; 1569 1570 err_admin_init: 1571 ena_com_admin_destroy(ena_dev); 1572 1573 err_mmio_read_less: 1574 ena_com_mmio_reg_read_request_destroy(ena_dev); 1575 1576 return rc; 1577 } 1578 1579 static void ena_interrupt_handler_rte(void *cb_arg) 1580 { 1581 struct ena_adapter *adapter = cb_arg; 1582 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1583 1584 ena_com_admin_q_comp_intr_handler(ena_dev); 1585 if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) 1586 ena_com_aenq_intr_handler(ena_dev, adapter); 1587 } 1588 1589 static void check_for_missing_keep_alive(struct ena_adapter *adapter) 1590 { 1591 if (!adapter->wd_state) 1592 return; 1593 1594 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 1595 return; 1596 1597 if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 1598 adapter->keep_alive_timeout)) { 1599 PMD_DRV_LOG(ERR, "Keep alive timeout\n"); 1600 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 1601 adapter->trigger_reset = true; 1602 ++adapter->dev_stats.wd_expired; 1603 } 1604 } 1605 1606 /* Check if admin queue is enabled */ 1607 static void check_for_admin_com_state(struct ena_adapter *adapter) 1608 { 1609 if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 1610 PMD_DRV_LOG(ERR, "ENA admin queue is not in running state!\n"); 1611 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 1612 adapter->trigger_reset = true; 1613 } 1614 } 1615 1616 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1617 void *arg) 1618 { 1619 struct ena_adapter *adapter = arg; 1620 struct rte_eth_dev *dev = adapter->rte_dev; 1621 1622 check_for_missing_keep_alive(adapter); 1623 check_for_admin_com_state(adapter); 1624 1625 if (unlikely(adapter->trigger_reset)) { 1626 PMD_DRV_LOG(ERR, "Trigger reset is on\n"); 1627 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 1628 NULL); 1629 } 1630 } 1631 1632 static inline void 1633 set_default_llq_configurations(struct ena_llq_configurations *llq_config, 1634 struct ena_admin_feature_llq_desc *llq, 1635 bool use_large_llq_hdr) 1636 { 1637 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 1638 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 1639 llq_config->llq_num_decs_before_header = 1640 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 1641 1642 if (use_large_llq_hdr && 1643 (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) { 1644 llq_config->llq_ring_entry_size = 1645 ENA_ADMIN_LIST_ENTRY_SIZE_256B; 1646 llq_config->llq_ring_entry_size_value = 256; 1647 } else { 1648 llq_config->llq_ring_entry_size = 1649 ENA_ADMIN_LIST_ENTRY_SIZE_128B; 1650 llq_config->llq_ring_entry_size_value = 128; 1651 } 1652 } 1653 1654 static int 1655 
ena_set_queues_placement_policy(struct ena_adapter *adapter,
	struct ena_com_dev *ena_dev,
	struct ena_admin_feature_llq_desc *llq,
	struct ena_llq_configurations *llq_default_configurations)
{
	int rc;
	u32 llq_feature_mask;

	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
	if (!(ena_dev->supported_features & llq_feature_mask)) {
		PMD_DRV_LOG(INFO,
			"LLQ is not supported. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
	if (unlikely(rc)) {
		PMD_INIT_LOG(WARNING, "Failed to config dev mode. "
			"Fallback to host mode policy.");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	/* Nothing to config, exit */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return 0;

	if (!adapter->dev_mem_base) {
		PMD_DRV_LOG(ERR, "Unable to access LLQ BAR resource. "
			"Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	ena_dev->mem_bar = adapter->dev_mem_base;

	return 0;
}

static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
	struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;

	/* Regular queues capabilities */
	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&get_feat_ctx->max_queue_ext.max_queue_ext;
		io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,
				    max_queue_ext->max_rx_cq_num);
		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&get_feat_ctx->max_queues;
		io_tx_sq_num = max_queues->max_sq_num;
		io_tx_cq_num = max_queues->max_cq_num;
		io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);
	}

	/* In case of LLQ use the llq number in the get feature cmd */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;

	max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);

	if (unlikely(max_num_io_queues == 0)) {
		PMD_DRV_LOG(ERR, "Number of IO queues should not be 0\n");
		return -EFAULT;
	}

	return max_num_io_queues;
}

static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
{
	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_llq_configurations llq_config;
	const char *queue_type_str;
	uint32_t max_num_io_queues;
	int rc;
	static int adapters_found;
	bool disable_meta_caching;
	bool wd_state;

	eth_dev->dev_ops = &ena_dev_ops;
	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;

	if (rte_eal_process_type()
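	/*
	 * The LLQ placement configured above can be tuned with the
	 * "large_llq_hdr" device argument parsed in ena_parse_devargs(). An
	 * illustrative invocation using this DPDK generation's -w PCI device
	 * option (the BDF is an example):
	 *
	 *	testpmd -l 0-1 -n 4 -w 00:06.0,large_llq_hdr=1 -- -i
	 *
	 * It only takes effect when LLQ is active and the device supports
	 * 256-byte LLQ entries (see ena_calc_io_queue_size()).
	 */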
!= RTE_PROC_PRIMARY) 1754 return 0; 1755 1756 memset(adapter, 0, sizeof(struct ena_adapter)); 1757 ena_dev = &adapter->ena_dev; 1758 1759 adapter->rte_eth_dev_data = eth_dev->data; 1760 adapter->rte_dev = eth_dev; 1761 1762 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1763 adapter->pdev = pci_dev; 1764 1765 PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d", 1766 pci_dev->addr.domain, 1767 pci_dev->addr.bus, 1768 pci_dev->addr.devid, 1769 pci_dev->addr.function); 1770 1771 intr_handle = &pci_dev->intr_handle; 1772 1773 adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; 1774 adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; 1775 1776 if (!adapter->regs) { 1777 PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)", 1778 ENA_REGS_BAR); 1779 return -ENXIO; 1780 } 1781 1782 ena_dev->reg_bar = adapter->regs; 1783 ena_dev->dmadev = adapter->pdev; 1784 1785 adapter->id_number = adapters_found; 1786 1787 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", 1788 adapter->id_number); 1789 1790 rc = ena_parse_devargs(adapter, pci_dev->device.devargs); 1791 if (rc != 0) { 1792 PMD_INIT_LOG(CRIT, "Failed to parse devargs\n"); 1793 goto err; 1794 } 1795 1796 /* device specific initialization routine */ 1797 rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state); 1798 if (rc) { 1799 PMD_INIT_LOG(CRIT, "Failed to init ENA device"); 1800 goto err; 1801 } 1802 adapter->wd_state = wd_state; 1803 1804 set_default_llq_configurations(&llq_config, &get_feat_ctx.llq, 1805 adapter->use_large_llq_hdr); 1806 rc = ena_set_queues_placement_policy(adapter, ena_dev, 1807 &get_feat_ctx.llq, &llq_config); 1808 if (unlikely(rc)) { 1809 PMD_INIT_LOG(CRIT, "Failed to set placement policy"); 1810 return rc; 1811 } 1812 1813 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 1814 queue_type_str = "Regular"; 1815 else 1816 queue_type_str = "Low latency"; 1817 PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str); 1818 1819 calc_queue_ctx.ena_dev = ena_dev; 1820 calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 1821 1822 max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx); 1823 rc = ena_calc_io_queue_size(&calc_queue_ctx, 1824 adapter->use_large_llq_hdr); 1825 if (unlikely((rc != 0) || (max_num_io_queues == 0))) { 1826 rc = -EFAULT; 1827 goto err_device_destroy; 1828 } 1829 1830 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size; 1831 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size; 1832 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 1833 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 1834 adapter->max_num_io_queues = max_num_io_queues; 1835 1836 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1837 disable_meta_caching = 1838 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags & 1839 BIT(ENA_ADMIN_DISABLE_META_CACHING)); 1840 } else { 1841 disable_meta_caching = false; 1842 } 1843 1844 /* prepare ring structures */ 1845 ena_init_rings(adapter, disable_meta_caching); 1846 1847 ena_config_debug_area(adapter); 1848 1849 /* Set max MTU for this device */ 1850 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 1851 1852 /* set device support for offloads */ 1853 adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx & 1854 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0; 1855 adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx & 1856 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0; 1857 adapter->offloads.rx_csum_supported = 1858 (get_feat_ctx.offload.rx_supported & 1859 
ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0; 1860 1861 /* Copy MAC address and point DPDK to it */ 1862 eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr; 1863 rte_ether_addr_copy((struct rte_ether_addr *) 1864 get_feat_ctx.dev_attr.mac_addr, 1865 (struct rte_ether_addr *)adapter->mac_addr); 1866 1867 /* 1868 * Pass the information to the rte_eth_dev_close() that it should also 1869 * release the private port resources. 1870 */ 1871 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 1872 1873 adapter->drv_stats = rte_zmalloc("adapter stats", 1874 sizeof(*adapter->drv_stats), 1875 RTE_CACHE_LINE_SIZE); 1876 if (!adapter->drv_stats) { 1877 PMD_DRV_LOG(ERR, "failed to alloc mem for adapter stats\n"); 1878 rc = -ENOMEM; 1879 goto err_delete_debug_area; 1880 } 1881 1882 rte_intr_callback_register(intr_handle, 1883 ena_interrupt_handler_rte, 1884 adapter); 1885 rte_intr_enable(intr_handle); 1886 ena_com_set_admin_polling_mode(ena_dev, false); 1887 ena_com_admin_aenq_enable(ena_dev); 1888 1889 if (adapters_found == 0) 1890 rte_timer_subsystem_init(); 1891 rte_timer_init(&adapter->timer_wd); 1892 1893 adapters_found++; 1894 adapter->state = ENA_ADAPTER_STATE_INIT; 1895 1896 return 0; 1897 1898 err_delete_debug_area: 1899 ena_com_delete_debug_area(ena_dev); 1900 1901 err_device_destroy: 1902 ena_com_delete_host_info(ena_dev); 1903 ena_com_admin_destroy(ena_dev); 1904 1905 err: 1906 return rc; 1907 } 1908 1909 static void ena_destroy_device(struct rte_eth_dev *eth_dev) 1910 { 1911 struct ena_adapter *adapter = eth_dev->data->dev_private; 1912 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1913 1914 if (adapter->state == ENA_ADAPTER_STATE_FREE) 1915 return; 1916 1917 ena_com_set_admin_running_state(ena_dev, false); 1918 1919 if (adapter->state != ENA_ADAPTER_STATE_CLOSED) 1920 ena_close(eth_dev); 1921 1922 ena_com_delete_debug_area(ena_dev); 1923 ena_com_delete_host_info(ena_dev); 1924 1925 ena_com_abort_admin_commands(ena_dev); 1926 ena_com_wait_for_abort_completion(ena_dev); 1927 ena_com_admin_destroy(ena_dev); 1928 ena_com_mmio_reg_read_request_destroy(ena_dev); 1929 1930 adapter->state = ENA_ADAPTER_STATE_FREE; 1931 } 1932 1933 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) 1934 { 1935 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1936 return 0; 1937 1938 ena_destroy_device(eth_dev); 1939 1940 eth_dev->dev_ops = NULL; 1941 eth_dev->rx_pkt_burst = NULL; 1942 eth_dev->tx_pkt_burst = NULL; 1943 eth_dev->tx_pkt_prepare = NULL; 1944 1945 return 0; 1946 } 1947 1948 static int ena_dev_configure(struct rte_eth_dev *dev) 1949 { 1950 struct ena_adapter *adapter = dev->data->dev_private; 1951 1952 adapter->state = ENA_ADAPTER_STATE_CONFIG; 1953 1954 adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads; 1955 adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads; 1956 return 0; 1957 } 1958 1959 static void ena_init_rings(struct ena_adapter *adapter, 1960 bool disable_meta_caching) 1961 { 1962 size_t i; 1963 1964 for (i = 0; i < adapter->max_num_io_queues; i++) { 1965 struct ena_ring *ring = &adapter->tx_ring[i]; 1966 1967 ring->configured = 0; 1968 ring->type = ENA_RING_TYPE_TX; 1969 ring->adapter = adapter; 1970 ring->id = i; 1971 ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 1972 ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 1973 ring->sgl_size = adapter->max_tx_sgl_size; 1974 ring->disable_meta_caching = disable_meta_caching; 1975 } 1976 1977 for (i = 0; i < adapter->max_num_io_queues; i++) { 
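		/* Rx rings need only the generic fields; the LLQ-related
		 * parameters (placement policy, max pushed header size,
		 * meta caching) apply to Tx rings only and are therefore
		 * not set here.
		 */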
1978 struct ena_ring *ring = &adapter->rx_ring[i]; 1979 1980 ring->configured = 0; 1981 ring->type = ENA_RING_TYPE_RX; 1982 ring->adapter = adapter; 1983 ring->id = i; 1984 ring->sgl_size = adapter->max_rx_sgl_size; 1985 } 1986 } 1987 1988 static int ena_infos_get(struct rte_eth_dev *dev, 1989 struct rte_eth_dev_info *dev_info) 1990 { 1991 struct ena_adapter *adapter; 1992 struct ena_com_dev *ena_dev; 1993 uint64_t rx_feat = 0, tx_feat = 0; 1994 1995 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 1996 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 1997 adapter = dev->data->dev_private; 1998 1999 ena_dev = &adapter->ena_dev; 2000 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 2001 2002 dev_info->speed_capa = 2003 ETH_LINK_SPEED_1G | 2004 ETH_LINK_SPEED_2_5G | 2005 ETH_LINK_SPEED_5G | 2006 ETH_LINK_SPEED_10G | 2007 ETH_LINK_SPEED_25G | 2008 ETH_LINK_SPEED_40G | 2009 ETH_LINK_SPEED_50G | 2010 ETH_LINK_SPEED_100G; 2011 2012 /* Set Tx & Rx features available for device */ 2013 if (adapter->offloads.tso4_supported) 2014 tx_feat |= DEV_TX_OFFLOAD_TCP_TSO; 2015 2016 if (adapter->offloads.tx_csum_supported) 2017 tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM | 2018 DEV_TX_OFFLOAD_UDP_CKSUM | 2019 DEV_TX_OFFLOAD_TCP_CKSUM; 2020 2021 if (adapter->offloads.rx_csum_supported) 2022 rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM | 2023 DEV_RX_OFFLOAD_UDP_CKSUM | 2024 DEV_RX_OFFLOAD_TCP_CKSUM; 2025 2026 rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME; 2027 2028 /* Inform framework about available features */ 2029 dev_info->rx_offload_capa = rx_feat; 2030 dev_info->rx_queue_offload_capa = rx_feat; 2031 dev_info->tx_offload_capa = tx_feat; 2032 dev_info->tx_queue_offload_capa = tx_feat; 2033 2034 dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP | 2035 ETH_RSS_UDP; 2036 2037 dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 2038 dev_info->max_rx_pktlen = adapter->max_mtu; 2039 dev_info->max_mac_addrs = 1; 2040 2041 dev_info->max_rx_queues = adapter->max_num_io_queues; 2042 dev_info->max_tx_queues = adapter->max_num_io_queues; 2043 dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 2044 2045 adapter->tx_supported_offloads = tx_feat; 2046 adapter->rx_supported_offloads = rx_feat; 2047 2048 dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size; 2049 dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2050 dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2051 adapter->max_rx_sgl_size); 2052 dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2053 adapter->max_rx_sgl_size); 2054 2055 dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size; 2056 dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2057 dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2058 adapter->max_tx_sgl_size); 2059 dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2060 adapter->max_tx_sgl_size); 2061 2062 return 0; 2063 } 2064 2065 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len) 2066 { 2067 mbuf->data_len = len; 2068 mbuf->data_off = RTE_PKTMBUF_HEADROOM; 2069 mbuf->refcnt = 1; 2070 mbuf->next = NULL; 2071 } 2072 2073 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, 2074 struct ena_com_rx_buf_info *ena_bufs, 2075 uint32_t descs, 2076 uint16_t *next_to_clean, 2077 uint8_t offset) 2078 { 2079 struct rte_mbuf *mbuf; 2080 struct rte_mbuf *mbuf_head; 2081 struct ena_rx_buffer *rx_info; 2082 unsigned int ring_mask = rx_ring->ring_size - 1; 2083 uint16_t ntc, len, req_id, buf = 0; 2084 2085 if (unlikely(descs == 0)) 2086 return NULL; 2087 
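	/* The first completed descriptor becomes the head of the mbuf chain;
	 * any remaining descriptors of the same packet are linked to it in
	 * the loop below and only contribute to pkt_len.
	 */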
2088 ntc = *next_to_clean; 2089 2090 len = ena_bufs[buf].len; 2091 req_id = ena_bufs[buf].req_id; 2092 if (unlikely(validate_rx_req_id(rx_ring, req_id))) 2093 return NULL; 2094 2095 rx_info = &rx_ring->rx_buffer_info[req_id]; 2096 2097 mbuf = rx_info->mbuf; 2098 RTE_ASSERT(mbuf != NULL); 2099 2100 ena_init_rx_mbuf(mbuf, len); 2101 2102 /* Fill the mbuf head with the data specific for 1st segment. */ 2103 mbuf_head = mbuf; 2104 mbuf_head->nb_segs = descs; 2105 mbuf_head->port = rx_ring->port_id; 2106 mbuf_head->pkt_len = len; 2107 mbuf_head->data_off += offset; 2108 2109 rx_info->mbuf = NULL; 2110 rx_ring->empty_rx_reqs[ntc & ring_mask] = req_id; 2111 ++ntc; 2112 2113 while (--descs) { 2114 ++buf; 2115 len = ena_bufs[buf].len; 2116 req_id = ena_bufs[buf].req_id; 2117 if (unlikely(validate_rx_req_id(rx_ring, req_id))) { 2118 rte_mbuf_raw_free(mbuf_head); 2119 return NULL; 2120 } 2121 2122 rx_info = &rx_ring->rx_buffer_info[req_id]; 2123 RTE_ASSERT(rx_info->mbuf != NULL); 2124 2125 /* Create an mbuf chain. */ 2126 mbuf->next = rx_info->mbuf; 2127 mbuf = mbuf->next; 2128 2129 ena_init_rx_mbuf(mbuf, len); 2130 mbuf_head->pkt_len += len; 2131 2132 rx_info->mbuf = NULL; 2133 rx_ring->empty_rx_reqs[ntc & ring_mask] = req_id; 2134 ++ntc; 2135 } 2136 2137 *next_to_clean = ntc; 2138 2139 return mbuf_head; 2140 } 2141 2142 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 2143 uint16_t nb_pkts) 2144 { 2145 struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 2146 unsigned int ring_size = rx_ring->ring_size; 2147 unsigned int ring_mask = ring_size - 1; 2148 uint16_t next_to_clean = rx_ring->next_to_clean; 2149 uint16_t desc_in_use = 0; 2150 struct rte_mbuf *mbuf; 2151 uint16_t completed; 2152 struct ena_com_rx_ctx ena_rx_ctx; 2153 int i, rc = 0; 2154 2155 /* Check adapter state */ 2156 if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 2157 PMD_DRV_LOG(ALERT, 2158 "Trying to receive pkts while device is NOT running\n"); 2159 return 0; 2160 } 2161 2162 desc_in_use = rx_ring->next_to_use - next_to_clean; 2163 if (unlikely(nb_pkts > desc_in_use)) 2164 nb_pkts = desc_in_use; 2165 2166 for (completed = 0; completed < nb_pkts; completed++) { 2167 ena_rx_ctx.max_bufs = rx_ring->sgl_size; 2168 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 2169 ena_rx_ctx.descs = 0; 2170 ena_rx_ctx.pkt_offset = 0; 2171 /* receive packet context */ 2172 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 2173 rx_ring->ena_com_io_sq, 2174 &ena_rx_ctx); 2175 if (unlikely(rc)) { 2176 PMD_DRV_LOG(ERR, "ena_com_rx_pkt error %d\n", rc); 2177 rx_ring->adapter->reset_reason = 2178 ENA_REGS_RESET_TOO_MANY_RX_DESCS; 2179 rx_ring->adapter->trigger_reset = true; 2180 ++rx_ring->rx_stats.bad_desc_num; 2181 return 0; 2182 } 2183 2184 mbuf = ena_rx_mbuf(rx_ring, 2185 ena_rx_ctx.ena_bufs, 2186 ena_rx_ctx.descs, 2187 &next_to_clean, 2188 ena_rx_ctx.pkt_offset); 2189 if (unlikely(mbuf == NULL)) { 2190 for (i = 0; i < ena_rx_ctx.descs; ++i) { 2191 rx_ring->empty_rx_reqs[next_to_clean & ring_mask] = 2192 rx_ring->ena_bufs[i].req_id; 2193 ++next_to_clean; 2194 } 2195 break; 2196 } 2197 2198 /* fill mbuf attributes if any */ 2199 ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx); 2200 2201 if (unlikely(mbuf->ol_flags & 2202 (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) { 2203 rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors); 2204 ++rx_ring->rx_stats.bad_csum; 2205 } 2206 2207 mbuf->hash.rss = ena_rx_ctx.hash; 2208 2209 rx_pkts[completed] = mbuf; 2210 rx_ring->rx_stats.bytes += mbuf->pkt_len; 2211 } 2212 2213 
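	/* Account for the completed packets and refill the Rx ring in bursts:
	 * buffers are handed back to the device only once more than 1/8 of
	 * the ring (ENA_RING_DESCS_RATIO) can be re-armed, which batches
	 * doorbell writes and memory barriers.
	 */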
	rx_ring->rx_stats.cnt += completed;
	rx_ring->next_to_clean = next_to_clean;

	desc_in_use = desc_in_use - completed + 1;
	/* Burst refill to save doorbells, memory barriers, const interval */
	if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size)) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
	}

	return completed;
}

static uint16_t
eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	int32_t ret;
	uint32_t i;
	struct rte_mbuf *m;
	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
	struct rte_ipv4_hdr *ip_hdr;
	uint64_t ol_flags;
	uint16_t frag_field;

	for (i = 0; i != nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;

		if (!(ol_flags & PKT_TX_IPV4))
			continue;

		/* If the L2 header length was not specified, assume it is the
		 * length of the Ethernet header.
		 */
		if (unlikely(m->l2_len == 0))
			m->l2_len = sizeof(struct rte_ether_hdr);

		ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
						 m->l2_len);
		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);

		if ((frag_field & RTE_IPV4_HDR_DF_FLAG) != 0) {
			m->packet_type |= RTE_PTYPE_L4_NONFRAG;

			/* If the IPv4 header has the DF flag enabled and TSO
			 * support is disabled, the partial checksum should not
			 * be calculated.
			 */
			if (!tx_ring->adapter->offloads.tso4_supported)
				continue;
		}

		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
				(ol_flags & PKT_TX_L4_MASK) ==
				PKT_TX_SCTP_CKSUM) {
			rte_errno = ENOTSUP;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif

		/* When TSO is requested and the DF flag is not set (DF=0), the
		 * hardware must be provided with a partial checksum; otherwise
		 * it takes care of the necessary calculations itself.
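		 * The rte_net_intel_cksum_flags_prepare() call right below
		 * fills in that partial (pseudo-header) checksum in software
		 * before the Tx descriptors are built; PKT_TX_TCP_SEG is
		 * masked out of the flags passed to it.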
2283 */ 2284 2285 ret = rte_net_intel_cksum_flags_prepare(m, 2286 ol_flags & ~PKT_TX_TCP_SEG); 2287 if (ret != 0) { 2288 rte_errno = -ret; 2289 return i; 2290 } 2291 } 2292 2293 return i; 2294 } 2295 2296 static void ena_update_hints(struct ena_adapter *adapter, 2297 struct ena_admin_ena_hw_hints *hints) 2298 { 2299 if (hints->admin_completion_tx_timeout) 2300 adapter->ena_dev.admin_queue.completion_timeout = 2301 hints->admin_completion_tx_timeout * 1000; 2302 2303 if (hints->mmio_read_timeout) 2304 /* convert to usec */ 2305 adapter->ena_dev.mmio_read.reg_read_to = 2306 hints->mmio_read_timeout * 1000; 2307 2308 if (hints->driver_watchdog_timeout) { 2309 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2310 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2311 else 2312 // Convert msecs to ticks 2313 adapter->keep_alive_timeout = 2314 (hints->driver_watchdog_timeout * 2315 rte_get_timer_hz()) / 1000; 2316 } 2317 } 2318 2319 static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring, 2320 struct rte_mbuf *mbuf) 2321 { 2322 struct ena_com_dev *ena_dev; 2323 int num_segments, header_len, rc; 2324 2325 ena_dev = &tx_ring->adapter->ena_dev; 2326 num_segments = mbuf->nb_segs; 2327 header_len = mbuf->data_len; 2328 2329 if (likely(num_segments < tx_ring->sgl_size)) 2330 return 0; 2331 2332 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && 2333 (num_segments == tx_ring->sgl_size) && 2334 (header_len < tx_ring->tx_max_header_size)) 2335 return 0; 2336 2337 ++tx_ring->tx_stats.linearize; 2338 rc = rte_pktmbuf_linearize(mbuf); 2339 if (unlikely(rc)) { 2340 PMD_DRV_LOG(WARNING, "Mbuf linearize failed\n"); 2341 rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); 2342 ++tx_ring->tx_stats.linearize_failed; 2343 return rc; 2344 } 2345 2346 return rc; 2347 } 2348 2349 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2350 uint16_t nb_pkts) 2351 { 2352 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 2353 uint16_t next_to_use = tx_ring->next_to_use; 2354 uint16_t next_to_clean = tx_ring->next_to_clean; 2355 struct rte_mbuf *mbuf; 2356 uint16_t seg_len; 2357 unsigned int ring_size = tx_ring->ring_size; 2358 unsigned int ring_mask = ring_size - 1; 2359 struct ena_com_tx_ctx ena_tx_ctx; 2360 struct ena_tx_buffer *tx_info; 2361 struct ena_com_buf *ebuf; 2362 uint16_t rc, req_id, total_tx_descs = 0; 2363 uint16_t sent_idx = 0, empty_tx_reqs; 2364 uint16_t push_len = 0; 2365 uint16_t delta = 0; 2366 int nb_hw_desc; 2367 uint32_t total_length; 2368 2369 /* Check adapter state */ 2370 if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 2371 PMD_DRV_LOG(ALERT, 2372 "Trying to xmit pkts while device is NOT running\n"); 2373 return 0; 2374 } 2375 2376 empty_tx_reqs = ring_size - (next_to_use - next_to_clean); 2377 if (nb_pkts > empty_tx_reqs) 2378 nb_pkts = empty_tx_reqs; 2379 2380 for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { 2381 mbuf = tx_pkts[sent_idx]; 2382 total_length = 0; 2383 2384 rc = ena_check_and_linearize_mbuf(tx_ring, mbuf); 2385 if (unlikely(rc)) 2386 break; 2387 2388 req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask]; 2389 tx_info = &tx_ring->tx_buffer_info[req_id]; 2390 tx_info->mbuf = mbuf; 2391 tx_info->num_of_bufs = 0; 2392 ebuf = tx_info->bufs; 2393 2394 /* Prepare TX context */ 2395 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); 2396 memset(&ena_tx_ctx.ena_meta, 0x0, 2397 sizeof(struct ena_com_tx_meta)); 2398 ena_tx_ctx.ena_bufs = ebuf; 2399 ena_tx_ctx.req_id = req_id; 
2400 2401 delta = 0; 2402 seg_len = mbuf->data_len; 2403 2404 if (tx_ring->tx_mem_queue_type == 2405 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2406 push_len = RTE_MIN(mbuf->pkt_len, 2407 tx_ring->tx_max_header_size); 2408 ena_tx_ctx.header_len = push_len; 2409 2410 if (likely(push_len <= seg_len)) { 2411 /* If the push header is in the single segment, 2412 * then just point it to the 1st mbuf data. 2413 */ 2414 ena_tx_ctx.push_header = 2415 rte_pktmbuf_mtod(mbuf, uint8_t *); 2416 } else { 2417 /* If the push header lays in the several 2418 * segments, copy it to the intermediate buffer. 2419 */ 2420 rte_pktmbuf_read(mbuf, 0, push_len, 2421 tx_ring->push_buf_intermediate_buf); 2422 ena_tx_ctx.push_header = 2423 tx_ring->push_buf_intermediate_buf; 2424 delta = push_len - seg_len; 2425 } 2426 } /* there's no else as we take advantage of memset zeroing */ 2427 2428 /* Set TX offloads flags, if applicable */ 2429 ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads, 2430 tx_ring->disable_meta_caching); 2431 2432 rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]); 2433 2434 /* Process first segment taking into 2435 * consideration pushed header 2436 */ 2437 if (seg_len > push_len) { 2438 ebuf->paddr = mbuf->buf_iova + 2439 mbuf->data_off + 2440 push_len; 2441 ebuf->len = seg_len - push_len; 2442 ebuf++; 2443 tx_info->num_of_bufs++; 2444 } 2445 total_length += mbuf->data_len; 2446 2447 while ((mbuf = mbuf->next) != NULL) { 2448 seg_len = mbuf->data_len; 2449 2450 /* Skip mbufs if whole data is pushed as a header */ 2451 if (unlikely(delta > seg_len)) { 2452 delta -= seg_len; 2453 continue; 2454 } 2455 2456 ebuf->paddr = mbuf->buf_iova + mbuf->data_off + delta; 2457 ebuf->len = seg_len - delta; 2458 total_length += ebuf->len; 2459 ebuf++; 2460 tx_info->num_of_bufs++; 2461 2462 delta = 0; 2463 } 2464 2465 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 2466 2467 if (ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, 2468 &ena_tx_ctx)) { 2469 PMD_DRV_LOG(DEBUG, "llq tx max burst size of queue %d" 2470 " achieved, writing doorbell to send burst\n", 2471 tx_ring->id); 2472 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2473 } 2474 2475 /* prepare the packet's descriptors to dma engine */ 2476 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, 2477 &ena_tx_ctx, &nb_hw_desc); 2478 if (unlikely(rc)) { 2479 ++tx_ring->tx_stats.prepare_ctx_err; 2480 break; 2481 } 2482 tx_info->tx_descs = nb_hw_desc; 2483 2484 next_to_use++; 2485 tx_ring->tx_stats.cnt++; 2486 tx_ring->tx_stats.bytes += total_length; 2487 } 2488 tx_ring->tx_stats.available_desc = 2489 ena_com_free_q_entries(tx_ring->ena_com_io_sq); 2490 2491 /* If there are ready packets to be xmitted... 
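	 * A single doorbell write below publishes every descriptor prepared
	 * in this burst, keeping the number of MMIO writes per call low.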
*/ 2492 if (sent_idx > 0) { 2493 /* ...let HW do its best :-) */ 2494 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2495 tx_ring->tx_stats.doorbells++; 2496 tx_ring->next_to_use = next_to_use; 2497 } 2498 2499 /* Clear complete packets */ 2500 while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) { 2501 rc = validate_tx_req_id(tx_ring, req_id); 2502 if (rc) 2503 break; 2504 2505 /* Get Tx info & store how many descs were processed */ 2506 tx_info = &tx_ring->tx_buffer_info[req_id]; 2507 total_tx_descs += tx_info->tx_descs; 2508 2509 /* Free whole mbuf chain */ 2510 mbuf = tx_info->mbuf; 2511 rte_pktmbuf_free(mbuf); 2512 tx_info->mbuf = NULL; 2513 2514 /* Put back descriptor to the ring for reuse */ 2515 tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id; 2516 next_to_clean++; 2517 2518 /* If too many descs to clean, leave it for another run */ 2519 if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size))) 2520 break; 2521 } 2522 tx_ring->tx_stats.available_desc = 2523 ena_com_free_q_entries(tx_ring->ena_com_io_sq); 2524 2525 if (total_tx_descs > 0) { 2526 /* acknowledge completion of sent packets */ 2527 tx_ring->next_to_clean = next_to_clean; 2528 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); 2529 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); 2530 } 2531 2532 tx_ring->tx_stats.tx_poll++; 2533 2534 return sent_idx; 2535 } 2536 2537 /** 2538 * DPDK callback to retrieve names of extended device statistics 2539 * 2540 * @param dev 2541 * Pointer to Ethernet device structure. 2542 * @param[out] xstats_names 2543 * Buffer to insert names into. 2544 * @param n 2545 * Number of names. 2546 * 2547 * @return 2548 * Number of xstats names. 2549 */ 2550 static int ena_xstats_get_names(struct rte_eth_dev *dev, 2551 struct rte_eth_xstat_name *xstats_names, 2552 unsigned int n) 2553 { 2554 unsigned int xstats_count = ena_xstats_calc_num(dev); 2555 unsigned int stat, i, count = 0; 2556 2557 if (n < xstats_count || !xstats_names) 2558 return xstats_count; 2559 2560 for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) 2561 strcpy(xstats_names[count].name, 2562 ena_stats_global_strings[stat].name); 2563 2564 for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) 2565 for (i = 0; i < dev->data->nb_rx_queues; i++, count++) 2566 snprintf(xstats_names[count].name, 2567 sizeof(xstats_names[count].name), 2568 "rx_q%d_%s", i, 2569 ena_stats_rx_strings[stat].name); 2570 2571 for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) 2572 for (i = 0; i < dev->data->nb_tx_queues; i++, count++) 2573 snprintf(xstats_names[count].name, 2574 sizeof(xstats_names[count].name), 2575 "tx_q%d_%s", i, 2576 ena_stats_tx_strings[stat].name); 2577 2578 return xstats_count; 2579 } 2580 2581 /** 2582 * DPDK callback to get extended device statistics. 2583 * 2584 * @param dev 2585 * Pointer to Ethernet device structure. 2586 * @param[out] stats 2587 * Stats table output buffer. 2588 * @param n 2589 * The size of the stats table. 2590 * 2591 * @return 2592 * Number of xstats on success, negative on failure. 
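 *   If n is smaller than the number of required entries, the required array
 *   size is returned and no entries are written.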
2593 */ 2594 static int ena_xstats_get(struct rte_eth_dev *dev, 2595 struct rte_eth_xstat *xstats, 2596 unsigned int n) 2597 { 2598 struct ena_adapter *adapter = dev->data->dev_private; 2599 unsigned int xstats_count = ena_xstats_calc_num(dev); 2600 unsigned int stat, i, count = 0; 2601 int stat_offset; 2602 void *stats_begin; 2603 2604 if (n < xstats_count) 2605 return xstats_count; 2606 2607 if (!xstats) 2608 return 0; 2609 2610 for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) { 2611 stat_offset = ena_stats_rx_strings[stat].stat_offset; 2612 stats_begin = &adapter->dev_stats; 2613 2614 xstats[count].id = count; 2615 xstats[count].value = *((uint64_t *) 2616 ((char *)stats_begin + stat_offset)); 2617 } 2618 2619 for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) { 2620 for (i = 0; i < dev->data->nb_rx_queues; i++, count++) { 2621 stat_offset = ena_stats_rx_strings[stat].stat_offset; 2622 stats_begin = &adapter->rx_ring[i].rx_stats; 2623 2624 xstats[count].id = count; 2625 xstats[count].value = *((uint64_t *) 2626 ((char *)stats_begin + stat_offset)); 2627 } 2628 } 2629 2630 for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) { 2631 for (i = 0; i < dev->data->nb_tx_queues; i++, count++) { 2632 stat_offset = ena_stats_tx_strings[stat].stat_offset; 2633 stats_begin = &adapter->tx_ring[i].rx_stats; 2634 2635 xstats[count].id = count; 2636 xstats[count].value = *((uint64_t *) 2637 ((char *)stats_begin + stat_offset)); 2638 } 2639 } 2640 2641 return count; 2642 } 2643 2644 static int ena_xstats_get_by_id(struct rte_eth_dev *dev, 2645 const uint64_t *ids, 2646 uint64_t *values, 2647 unsigned int n) 2648 { 2649 struct ena_adapter *adapter = dev->data->dev_private; 2650 uint64_t id; 2651 uint64_t rx_entries, tx_entries; 2652 unsigned int i; 2653 int qid; 2654 int valid = 0; 2655 for (i = 0; i < n; ++i) { 2656 id = ids[i]; 2657 /* Check if id belongs to global statistics */ 2658 if (id < ENA_STATS_ARRAY_GLOBAL) { 2659 values[i] = *((uint64_t *)&adapter->dev_stats + id); 2660 ++valid; 2661 continue; 2662 } 2663 2664 /* Check if id belongs to rx queue statistics */ 2665 id -= ENA_STATS_ARRAY_GLOBAL; 2666 rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues; 2667 if (id < rx_entries) { 2668 qid = id % dev->data->nb_rx_queues; 2669 id /= dev->data->nb_rx_queues; 2670 values[i] = *((uint64_t *) 2671 &adapter->rx_ring[qid].rx_stats + id); 2672 ++valid; 2673 continue; 2674 } 2675 /* Check if id belongs to rx queue statistics */ 2676 id -= rx_entries; 2677 tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues; 2678 if (id < tx_entries) { 2679 qid = id % dev->data->nb_tx_queues; 2680 id /= dev->data->nb_tx_queues; 2681 values[i] = *((uint64_t *) 2682 &adapter->tx_ring[qid].tx_stats + id); 2683 ++valid; 2684 continue; 2685 } 2686 } 2687 2688 return valid; 2689 } 2690 2691 static int ena_process_bool_devarg(const char *key, 2692 const char *value, 2693 void *opaque) 2694 { 2695 struct ena_adapter *adapter = opaque; 2696 bool bool_value; 2697 2698 /* Parse the value. */ 2699 if (strcmp(value, "1") == 0) { 2700 bool_value = true; 2701 } else if (strcmp(value, "0") == 0) { 2702 bool_value = false; 2703 } else { 2704 PMD_INIT_LOG(ERR, 2705 "Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n", 2706 value, key); 2707 return -EINVAL; 2708 } 2709 2710 /* Now, assign it to the proper adapter field. 
	 */
	if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0)
		adapter->use_large_llq_hdr = bool_value;

	return 0;
}

static int ena_parse_devargs(struct ena_adapter *adapter,
			     struct rte_devargs *devargs)
{
	static const char * const allowed_args[] = {
		ENA_DEVARG_LARGE_LLQ_HDR,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int rc;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, allowed_args);
	if (kvlist == NULL) {
		PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n",
			devargs->args);
		return -EINVAL;
	}

	rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR,
		ena_process_bool_devarg, adapter);

	rte_kvargs_free(kvlist);

	return rc;
}

/*********************************************************************
 * PMD configuration
 *********************************************************************/
static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct ena_adapter), eth_ena_dev_init);
}

static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
}

static struct rte_pci_driver rte_ena_pmd = {
	.id_table = pci_id_ena_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_WC_ACTIVATE,
	.probe = eth_ena_pci_probe,
	.remove = eth_ena_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>");

RTE_INIT(ena_init_log)
{
	ena_logtype_init = rte_log_register("pmd.net.ena.init");
	if (ena_logtype_init >= 0)
		rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE);
	ena_logtype_driver = rte_log_register("pmd.net.ena.driver");
	if (ena_logtype_driver >= 0)
		rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);

#ifdef RTE_LIBRTE_ENA_DEBUG_RX
	ena_logtype_rx = rte_log_register("pmd.net.ena.rx");
	if (ena_logtype_rx >= 0)
		rte_log_set_level(ena_logtype_rx, RTE_LOG_NOTICE);
#endif

#ifdef RTE_LIBRTE_ENA_DEBUG_TX
	ena_logtype_tx = rte_log_register("pmd.net.ena.tx");
	if (ena_logtype_tx >= 0)
		rte_log_set_level(ena_logtype_tx, RTE_LOG_NOTICE);
#endif

#ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE
	ena_logtype_tx_free = rte_log_register("pmd.net.ena.tx_free");
	if (ena_logtype_tx_free >= 0)
		rte_log_set_level(ena_logtype_tx_free, RTE_LOG_NOTICE);
#endif

#ifdef RTE_LIBRTE_ENA_COM_DEBUG
	ena_logtype_com = rte_log_register("pmd.net.ena.com");
	if (ena_logtype_com >= 0)
		rte_log_set_level(ena_logtype_com, RTE_LOG_NOTICE);
#endif
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
static void ena_update_on_link_change(void *adapter_data,
	struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev;
	struct ena_adapter *adapter;
	struct ena_admin_aenq_link_change_desc
		*aenq_link_desc;
	uint32_t status;

	adapter = adapter_data;
	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
	eth_dev = adapter->rte_dev;

	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
	adapter->link_status = status;

	ena_link_update(eth_dev, 0);
	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static void ena_notification(void *data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = data;
	struct ena_admin_ena_hw_hints *hints;

	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
		PMD_DRV_LOG(WARNING, "Invalid group(%x) expected %x\n",
			aenq_e->aenq_common_desc.group,
			ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid AENQ notification syndrome %d\n",
			aenq_e->aenq_common_desc.syndrom);
	}
}

static void ena_keep_alive(void *adapter_data,
			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = adapter_data;
	struct ena_admin_aenq_keep_alive_desc *desc;
	uint64_t rx_drops;
	uint64_t tx_drops;

	adapter->timestamp_wd = rte_get_timer_cycles();

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;

	adapter->drv_stats->rx_drops = rx_drops;
	adapter->dev_stats.tx_drops = tx_drops;
}

/**
 * This handler will be called for an unknown event group or for events with
 * unimplemented handlers.
 **/
static void unimplemented_aenq_handler(__rte_unused void *data,
				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	PMD_DRV_LOG(ERR, "Unknown event was received or event with "
		"unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
	},
	.unimplemented_handler = unimplemented_aenq_handler
};
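/* Note: the aenq_handlers table above is registered with the ena_com layer
 * during device initialization (see ena_device_init()); the shared ena_com
 * code is then expected to dispatch each asynchronous event by its group
 * index and to fall back to unimplemented_aenq_handler for groups without a
 * dedicated entry.
 */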