/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of copyright holder nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_eal_memconfig.h>
#include <rte_net.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR    1
#define DRV_MODULE_VER_MINOR    1
#define DRV_MODULE_VER_SUBMINOR 1

#define ENA_IO_TXQ_IDX(q)       (2 * (q))
#define ENA_IO_RXQ_IDX(q)       (2 * (q) + 1)
/* Reverse version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)   ((q - 1) / 2)

/* While processing submitted and completed descriptors (rx and tx path
 * respectively) in a loop it is desired to:
 * - perform batch submissions while populating the submission queue
 * - avoid blocking transmission of other packets during cleanup phase
 * Hence the utilization ratio of 1/8 of a queue size.
 */
#define ENA_RING_DESCS_RATIO(ring_size) (ring_size / 8)
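
/* For example, with a 1024-entry ring the Rx refill in eth_ena_recv_pkts()
 * is only triggered once at least ENA_RING_DESCS_RATIO(1024) = 128
 * descriptors have been consumed, so doorbells and memory barriers are
 * batched instead of being issued per packet.
 */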

#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))

#define GET_L4_HDR_LEN(mbuf)                                    \
    ((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *,           \
        mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)

#define ENA_RX_RSS_TABLE_LOG_SIZE 7
#define ENA_RX_RSS_TABLE_SIZE   (1 << ENA_RX_RSS_TABLE_LOG_SIZE)
#define ENA_HASH_KEY_SIZE       40
#define ENA_ETH_SS_STATS        0xFF
#define ETH_GSTRING_LEN         32

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

#define ENA_MAX_RING_DESC       ENA_DEFAULT_RING_SIZE
#define ENA_MIN_RING_DESC       128

enum ethtool_stringset {
    ETH_SS_TEST = 0,
    ETH_SS_STATS,
};

struct ena_stats {
    char name[ETH_GSTRING_LEN];
    int stat_offset;
};

#define ENA_STAT_ENA_COM_ENTRY(stat) { \
    .name = #stat, \
    .stat_offset = offsetof(struct ena_com_stats_admin, stat) \
}

#define ENA_STAT_ENTRY(stat, stat_type) { \
    .name = #stat, \
    .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
    ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
    ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
    ENA_STAT_ENTRY(stat, dev)

/*
 * Each rte_memzone should have a unique name.
 * To satisfy this, count the number of allocations and append it to the name.
 */
uint32_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
    ENA_STAT_GLOBAL_ENTRY(tx_timeout),
    ENA_STAT_GLOBAL_ENTRY(io_suspend),
    ENA_STAT_GLOBAL_ENTRY(io_resume),
    ENA_STAT_GLOBAL_ENTRY(wd_expired),
    ENA_STAT_GLOBAL_ENTRY(interface_up),
    ENA_STAT_GLOBAL_ENTRY(interface_down),
    ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
};

static const struct ena_stats ena_stats_tx_strings[] = {
    ENA_STAT_TX_ENTRY(cnt),
    ENA_STAT_TX_ENTRY(bytes),
    ENA_STAT_TX_ENTRY(queue_stop),
    ENA_STAT_TX_ENTRY(queue_wakeup),
    ENA_STAT_TX_ENTRY(dma_mapping_err),
    ENA_STAT_TX_ENTRY(linearize),
    ENA_STAT_TX_ENTRY(linearize_failed),
    ENA_STAT_TX_ENTRY(tx_poll),
    ENA_STAT_TX_ENTRY(doorbells),
    ENA_STAT_TX_ENTRY(prepare_ctx_err),
    ENA_STAT_TX_ENTRY(missing_tx_comp),
    ENA_STAT_TX_ENTRY(bad_req_id),
};

static const struct ena_stats ena_stats_rx_strings[] = {
    ENA_STAT_RX_ENTRY(cnt),
    ENA_STAT_RX_ENTRY(bytes),
    ENA_STAT_RX_ENTRY(refil_partial),
    ENA_STAT_RX_ENTRY(bad_csum),
    ENA_STAT_RX_ENTRY(page_alloc_fail),
    ENA_STAT_RX_ENTRY(skb_alloc_fail),
    ENA_STAT_RX_ENTRY(dma_mapping_err),
    ENA_STAT_RX_ENTRY(bad_desc_num),
    ENA_STAT_RX_ENTRY(small_copy_len_pkt),
};

static const struct ena_stats ena_stats_ena_com_strings[] = {
    ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
    ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
    ENA_STAT_ENA_COM_ENTRY(completed_cmd),
    ENA_STAT_ENA_COM_ENTRY(out_of_space),
    ENA_STAT_ENA_COM_ENTRY(no_completion),
};

#define ENA_STATS_ARRAY_GLOBAL  ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX      ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX      ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)

#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
            DEV_TX_OFFLOAD_UDP_CKSUM |\
            DEV_TX_OFFLOAD_IPV4_CKSUM |\
            DEV_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
               PKT_TX_IP_CKSUM |\
               PKT_TX_TCP_SEG)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF        0xEC20
#define PCI_DEVICE_ID_ENA_LLQ_VF    0xEC21

#define ENA_TX_OFFLOAD_MASK (\
    PKT_TX_L4_MASK |         \
    PKT_TX_IP_CKSUM |        \
    PKT_TX_TCP_SEG)

#define ENA_TX_OFFLOAD_NOTSUP_MASK  \
    (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)

int ena_logtype_init;
int ena_logtype_driver;

static const struct rte_pci_id pci_id_ena_map[] = {
    { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
    { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
    { .device_id = 0 },
};

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_com_dev *ena_dev,
               struct ena_com_dev_get_features_ctx *get_feat_ctx,
               bool *wd_state);
static int ena_dev_configure(struct rte_eth_dev *dev);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                  uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                  uint16_t nb_desc, unsigned int socket_id,
                  const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                  uint16_t nb_desc, unsigned int socket_id,
                  const struct rte_eth_rxconf *rx_conf,
                  struct rte_mempool *mp);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static void ena_stop(struct rte_eth_dev *dev);
static void ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(void *queue);
static void ena_tx_queue_release(void *queue);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
               int wait_to_complete);
static int ena_create_io_queue(struct ena_ring *ring);
static void ena_free_io_queues_all(struct ena_adapter *adapter);
static int ena_queue_restart(struct ena_ring *ring);
static int ena_queue_restart_all(struct rte_eth_dev *dev,
                 enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static void ena_infos_get(struct rte_eth_dev *dev,
              struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
                   struct rte_eth_rss_reta_entry64 *reta_conf,
                   uint16_t reta_size);
static int ena_rss_reta_query(struct rte_eth_dev *dev,
                  struct rte_eth_rss_reta_entry64 *reta_conf,
                  uint16_t reta_size);
static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
static void ena_interrupt_handler_rte(void *cb_arg);
static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);

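/* Generic ethdev control-path callbacks implemented by this PMD. Note that
 * the burst Rx/Tx handlers are not part of this table; they are assigned
 * directly to eth_dev->rx_pkt_burst/tx_pkt_burst/tx_pkt_prepare in
 * eth_ena_dev_init().
 */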
static const struct eth_dev_ops ena_dev_ops = {
    .dev_configure    = ena_dev_configure,
    .dev_infos_get    = ena_infos_get,
    .rx_queue_setup   = ena_rx_queue_setup,
    .tx_queue_setup   = ena_tx_queue_setup,
    .dev_start        = ena_start,
    .dev_stop         = ena_stop,
    .link_update      = ena_link_update,
    .stats_get        = ena_stats_get,
    .mtu_set          = ena_mtu_set,
    .rx_queue_release = ena_rx_queue_release,
    .tx_queue_release = ena_tx_queue_release,
    .dev_close        = ena_close,
    .dev_reset        = ena_dev_reset,
    .reta_update      = ena_rss_reta_update,
    .reta_query       = ena_rss_reta_query,
};

#define NUMA_NO_NODE    SOCKET_ID_ANY

static inline int ena_cpu_to_node(int cpu)
{
    struct rte_config *config = rte_eal_get_configuration();
    struct rte_fbarray *arr = &config->mem_config->memzones;
    const struct rte_memzone *mz;

    if (unlikely(cpu >= RTE_MAX_MEMZONE))
        return NUMA_NO_NODE;

    mz = rte_fbarray_get(arr, cpu);

    return mz->socket_id;
}

static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
                       struct ena_com_rx_ctx *ena_rx_ctx)
{
    uint64_t ol_flags = 0;
    uint32_t packet_type = 0;

    if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
        packet_type |= RTE_PTYPE_L4_TCP;
    else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
        packet_type |= RTE_PTYPE_L4_UDP;

    if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
        packet_type |= RTE_PTYPE_L3_IPV4;
    else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
        packet_type |= RTE_PTYPE_L3_IPV6;

    if (unlikely(ena_rx_ctx->l4_csum_err))
        ol_flags |= PKT_RX_L4_CKSUM_BAD;
    if (unlikely(ena_rx_ctx->l3_csum_err))
        ol_flags |= PKT_RX_IP_CKSUM_BAD;

    mbuf->ol_flags = ol_flags;
    mbuf->packet_type = packet_type;
}

static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
                       struct ena_com_tx_ctx *ena_tx_ctx,
                       uint64_t queue_offloads)
{
    struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

    if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
        (queue_offloads & QUEUE_OFFLOADS)) {
        /* check if TSO is required */
        if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
            (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
            ena_tx_ctx->tso_enable = true;

            ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
        }

        /* check if L3 checksum is needed */
        if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
            (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
            ena_tx_ctx->l3_csum_enable = true;

        if (mbuf->ol_flags & PKT_TX_IPV6) {
            ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
        } else {
            ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

            /* set don't fragment (DF) flag */
            if (mbuf->packet_type &
                (RTE_PTYPE_L4_NONFRAG
                 | RTE_PTYPE_INNER_L4_NONFRAG))
                ena_tx_ctx->df = true;
        }

        /* check if L4 checksum is needed */
        if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) &&
            (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
            ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
            ena_tx_ctx->l4_csum_enable = true;
        } else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) &&
               (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
            ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
            ena_tx_ctx->l4_csum_enable = true;
        } else {
            ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
            ena_tx_ctx->l4_csum_enable = false;
        }

        ena_meta->mss = mbuf->tso_segsz;
        ena_meta->l3_hdr_len = mbuf->l3_len;
        ena_meta->l3_hdr_offset = mbuf->l2_len;

        ena_tx_ctx->meta_valid = true;
    } else {
        ena_tx_ctx->meta_valid = false;
    }
}

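/* The req_id validators below guard the datapath against corrupted
 * descriptor indices. On an out-of-range (or already freed) request id the
 * driver does not try to recover in place: it records a reset reason, raises
 * trigger_reset, and the watchdog timer callback then asks the application
 * to reset the port via RTE_ETH_EVENT_INTR_RESET.
 */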
static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
{
    if (likely(req_id < rx_ring->ring_size))
        return 0;

    RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id);

    rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
    rx_ring->adapter->trigger_reset = true;

    return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
    struct ena_tx_buffer *tx_info = NULL;

    if (likely(req_id < tx_ring->ring_size)) {
        tx_info = &tx_ring->tx_buffer_info[req_id];
        if (likely(tx_info->mbuf))
            return 0;
    }

    if (tx_info)
        RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n");
    else
        RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id);

    /* Trigger device reset */
    tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
    tx_ring->adapter->trigger_reset = true;
    return -EFAULT;
}

static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
    struct ena_admin_host_info *host_info;
    int rc;

    /* Allocate only the host info */
    rc = ena_com_allocate_host_info(ena_dev);
    if (rc) {
        RTE_LOG(ERR, PMD, "Cannot allocate host info\n");
        return;
    }

    host_info = ena_dev->host_attr.host_info;

    host_info->os_type = ENA_ADMIN_OS_DPDK;
    host_info->kernel_ver = RTE_VERSION;
    snprintf((char *)host_info->kernel_ver_str,
         sizeof(host_info->kernel_ver_str),
         "%s", rte_version());
    host_info->os_dist = RTE_VERSION;
    snprintf((char *)host_info->os_dist_str,
         sizeof(host_info->os_dist_str),
         "%s", rte_version());
    host_info->driver_version =
        (DRV_MODULE_VER_MAJOR) |
        (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
        (DRV_MODULE_VER_SUBMINOR <<
            ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);

    rc = ena_com_set_host_attributes(ena_dev);
    if (rc) {
        if (rc == -ENA_COM_UNSUPPORTED)
            RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
        else
            RTE_LOG(ERR, PMD, "Cannot set host attributes\n");

        goto err;
    }

    return;

err:
    ena_com_delete_host_info(ena_dev);
}

static int
ena_get_sset_count(struct rte_eth_dev *dev, int sset)
{
    if (sset != ETH_SS_STATS)
        return -EOPNOTSUPP;

    /* Workaround for clang:
     * touch internal structures to prevent
     * compiler error
     */
    ENA_TOUCH(ena_stats_global_strings);
    ENA_TOUCH(ena_stats_tx_strings);
    ENA_TOUCH(ena_stats_rx_strings);
    ENA_TOUCH(ena_stats_ena_com_strings);

    return dev->data->nb_tx_queues *
        (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) +
        ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}

static void ena_config_debug_area(struct ena_adapter *adapter)
{
    u32 debug_area_size;
    int rc, ss_count;

    ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS);
    if (ss_count <= 0) {
        RTE_LOG(ERR, PMD, "SS count is negative\n");
        return;
    }

    /* allocate 32 bytes for each string and 64bit for the value */
    debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

    rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
    if (rc) {
        RTE_LOG(ERR, PMD, "Cannot allocate debug area\n");
        return;
    }

    rc = ena_com_set_host_attributes(&adapter->ena_dev);
    if (rc) {
        if (rc == -ENA_COM_UNSUPPORTED)
            RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
        else
            RTE_LOG(ERR, PMD, "Cannot set host attributes\n");

        goto err;
    }

    return;
err:
    ena_com_delete_debug_area(&adapter->ena_dev);
}

static void ena_close(struct rte_eth_dev *dev)
{
    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
    struct ena_adapter *adapter =
        (struct ena_adapter *)(dev->data->dev_private);

    if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
        ena_stop(dev);
    adapter->state = ENA_ADAPTER_STATE_CLOSED;

    ena_rx_queue_release_all(dev);
    ena_tx_queue_release_all(dev);

    rte_free(adapter->drv_stats);
    adapter->drv_stats = NULL;

    rte_intr_disable(intr_handle);
    rte_intr_callback_unregister(intr_handle,
                     ena_interrupt_handler_rte,
                     adapter);

    /*
     * MAC is not allocated dynamically. Setting NULL should prevent from
     * release of the resource in the rte_eth_dev_release_port().
     */
    dev->data->mac_addrs = NULL;
}

static int
ena_dev_reset(struct rte_eth_dev *dev)
{
    struct rte_mempool *mb_pool_rx[ENA_MAX_NUM_QUEUES];
    struct rte_eth_dev *eth_dev;
    struct rte_pci_device *pci_dev;
    struct rte_intr_handle *intr_handle;
    struct ena_com_dev *ena_dev;
    struct ena_com_dev_get_features_ctx get_feat_ctx;
    struct ena_adapter *adapter;
    int nb_queues;
    int rc, i;
    bool wd_state;

    adapter = (struct ena_adapter *)(dev->data->dev_private);
    ena_dev = &adapter->ena_dev;
    eth_dev = adapter->rte_dev;
    pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
    intr_handle = &pci_dev->intr_handle;
    nb_queues = eth_dev->data->nb_rx_queues;

    ena_com_set_admin_running_state(ena_dev, false);

    rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
    if (rc)
        RTE_LOG(ERR, PMD, "Device reset failed\n");

    for (i = 0; i < nb_queues; i++)
        mb_pool_rx[i] = adapter->rx_ring[i].mb_pool;

    ena_rx_queue_release_all(eth_dev);
    ena_tx_queue_release_all(eth_dev);

    rte_intr_disable(intr_handle);

    ena_com_abort_admin_commands(ena_dev);
    ena_com_wait_for_abort_completion(ena_dev);
    ena_com_admin_destroy(ena_dev);
    ena_com_mmio_reg_read_request_destroy(ena_dev);

    rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
    if (rc) {
        PMD_INIT_LOG(CRIT, "Cannot initialize device\n");
        return rc;
    }
    adapter->wd_state = wd_state;

    rte_intr_enable(intr_handle);
    ena_com_set_admin_polling_mode(ena_dev, false);
    ena_com_admin_aenq_enable(ena_dev);

    for (i = 0; i < nb_queues; ++i)
        ena_rx_queue_setup(eth_dev, i, adapter->rx_ring_size, 0, NULL,
                   mb_pool_rx[i]);

    for (i = 0; i < nb_queues; ++i)
        ena_tx_queue_setup(eth_dev, i, adapter->tx_ring_size, 0, NULL);

    adapter->trigger_reset = false;

    return 0;
}

static int ena_rss_reta_update(struct rte_eth_dev *dev,
                   struct rte_eth_rss_reta_entry64 *reta_conf,
                   uint16_t reta_size)
{
    struct ena_adapter *adapter =
        (struct ena_adapter *)(dev->data->dev_private);
    struct ena_com_dev *ena_dev = &adapter->ena_dev;
    int rc, i;
    u16 entry_value;
    int conf_idx;
    int idx;

    if ((reta_size == 0) || (reta_conf == NULL))
        return -EINVAL;

    if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
        RTE_LOG(WARNING, PMD,
            "indirection table %d is bigger than supported (%d)\n",
            reta_size, ENA_RX_RSS_TABLE_SIZE);
        return -EINVAL;
    }

    for (i = 0 ; i < reta_size ; i++) {
        /* each reta_conf is for 64 entries.
         * to support 128 we use 2 conf of 64
         */
        conf_idx = i / RTE_RETA_GROUP_SIZE;
        idx = i % RTE_RETA_GROUP_SIZE;
        if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
            entry_value =
                ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);

            rc = ena_com_indirect_table_fill_entry(ena_dev,
                                   i,
                                   entry_value);
            if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
                RTE_LOG(ERR, PMD,
                    "Cannot fill indirect table\n");
                return rc;
            }
        }
    }

    rc = ena_com_indirect_table_set(ena_dev);
    if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
        RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
        return rc;
    }

    RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
        __func__, reta_size, adapter->rte_dev->data->port_id);

    return 0;
}

/* Query redirection table. */
static int ena_rss_reta_query(struct rte_eth_dev *dev,
                  struct rte_eth_rss_reta_entry64 *reta_conf,
                  uint16_t reta_size)
{
    struct ena_adapter *adapter =
        (struct ena_adapter *)(dev->data->dev_private);
    struct ena_com_dev *ena_dev = &adapter->ena_dev;
    int rc;
    int i;
    u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
    int reta_conf_idx;
    int reta_idx;

    if (reta_size == 0 || reta_conf == NULL ||
        (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
        return -EINVAL;

    rc = ena_com_indirect_table_get(ena_dev, indirect_table);
    if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
        RTE_LOG(ERR, PMD, "cannot get indirect table\n");
        return -ENOTSUP;
    }

    for (i = 0 ; i < reta_size ; i++) {
        reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
        reta_idx = i % RTE_RETA_GROUP_SIZE;
        if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
            reta_conf[reta_conf_idx].reta[reta_idx] =
                ENA_IO_RXQ_IDX_REV(indirect_table[i]);
    }

    return 0;
}

static int ena_rss_init_default(struct ena_adapter *adapter)
{
    struct ena_com_dev *ena_dev = &adapter->ena_dev;
    uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues;
    int rc, i;
    u32 val;

    rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
    if (unlikely(rc)) {
        RTE_LOG(ERR, PMD, "Cannot init indirect table\n");
        goto err_rss_init;
    }

    for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
        val = i % nb_rx_queues;
        rc = ena_com_indirect_table_fill_entry(ena_dev, i,
                               ENA_IO_RXQ_IDX(val));
        if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
            RTE_LOG(ERR, PMD, "Cannot fill indirect table\n");
            goto err_fill_indir;
        }
    }

    rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
                    ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
    if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
        RTE_LOG(INFO, PMD, "Cannot fill hash function\n");
        goto err_fill_indir;
    }

    rc = ena_com_set_default_hash_ctrl(ena_dev);
    if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
        RTE_LOG(INFO, PMD, "Cannot fill hash control\n");
        goto err_fill_indir;
    }

    rc = ena_com_indirect_table_set(ena_dev);
    if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
        RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
        goto err_fill_indir;
    }
    RTE_LOG(DEBUG, PMD, "RSS configured for port %d\n",
        adapter->rte_dev->data->port_id);

    return 0;

err_fill_indir:
    ena_com_rss_destroy(ena_dev);
err_rss_init:

    return rc;
}

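/* The default RSS setup above spreads the 128-entry indirection table
 * round-robin over the configured Rx queues (entry i maps to queue
 * i % nb_rx_queues; with 4 queues the pattern is 0, 1, 2, 3, 0, ...) and
 * selects the CRC32 hash function.
 */
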
static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
    struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
    int nb_queues = dev->data->nb_rx_queues;
    int i;

    for (i = 0; i < nb_queues; i++)
        ena_rx_queue_release(queues[i]);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
    struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
    int nb_queues = dev->data->nb_tx_queues;
    int i;

    for (i = 0; i < nb_queues; i++)
        ena_tx_queue_release(queues[i]);
}

static void ena_rx_queue_release(void *queue)
{
    struct ena_ring *ring = (struct ena_ring *)queue;

    ena_assert_msg(ring->configured,
               "API violation - releasing not configured queue");
    ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
               "API violation");

    /* Free ring resources */
    if (ring->rx_buffer_info)
        rte_free(ring->rx_buffer_info);
    ring->rx_buffer_info = NULL;

    if (ring->empty_rx_reqs)
        rte_free(ring->empty_rx_reqs);
    ring->empty_rx_reqs = NULL;

    ring->configured = 0;

    RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n",
        ring->port_id, ring->id);
}

static void ena_tx_queue_release(void *queue)
{
    struct ena_ring *ring = (struct ena_ring *)queue;

    ena_assert_msg(ring->configured,
               "API violation. Releasing not configured queue");
    ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
               "API violation");

    /* Free all bufs */
    ena_tx_queue_release_bufs(ring);

    /* Free ring resources */
    if (ring->tx_buffer_info)
        rte_free(ring->tx_buffer_info);

    if (ring->empty_tx_reqs)
        rte_free(ring->empty_tx_reqs);

    ring->empty_tx_reqs = NULL;
    ring->tx_buffer_info = NULL;

    ring->configured = 0;

    RTE_LOG(NOTICE, PMD, "TX Queue %d:%d released\n",
        ring->port_id, ring->id);
}

static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
    unsigned int ring_mask = ring->ring_size - 1;

    while (ring->next_to_clean != ring->next_to_use) {
        struct rte_mbuf *m =
            ring->rx_buffer_info[ring->next_to_clean & ring_mask];

        if (m)
            rte_mbuf_raw_free(m);

        ring->next_to_clean++;
    }
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
    unsigned int i;

    for (i = 0; i < ring->ring_size; ++i) {
        struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

        if (tx_buf->mbuf)
            rte_pktmbuf_free(tx_buf->mbuf);

        ring->next_to_clean++;
    }
}

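/* Ring indices above (and in the Rx/Tx burst paths below) are wrapped with
 * "index & (ring_size - 1)" rather than a modulo. This relies on ring sizes
 * being powers of two, which ena_rx_queue_setup()/ena_tx_queue_setup()
 * enforce with rte_is_power_of_2().
 */
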
static int ena_link_update(struct rte_eth_dev *dev,
               __rte_unused int wait_to_complete)
{
    struct rte_eth_link *link = &dev->data->dev_link;
    struct ena_adapter *adapter;

    adapter = (struct ena_adapter *)(dev->data->dev_private);

    link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
    link->link_speed = ETH_SPEED_NUM_NONE;
    link->link_duplex = ETH_LINK_FULL_DUPLEX;

    return 0;
}

static int ena_queue_restart_all(struct rte_eth_dev *dev,
                 enum ena_ring_type ring_type)
{
    struct ena_adapter *adapter =
        (struct ena_adapter *)(dev->data->dev_private);
    struct ena_ring *queues = NULL;
    int nb_queues;
    int i = 0;
    int rc = 0;

    if (ring_type == ENA_RING_TYPE_RX) {
        queues = adapter->rx_ring;
        nb_queues = dev->data->nb_rx_queues;
    } else {
        queues = adapter->tx_ring;
        nb_queues = dev->data->nb_tx_queues;
    }
    for (i = 0; i < nb_queues; i++) {
        if (queues[i].configured) {
            if (ring_type == ENA_RING_TYPE_RX) {
                ena_assert_msg(
                    dev->data->rx_queues[i] == &queues[i],
                    "Inconsistent state of rx queues\n");
            } else {
                ena_assert_msg(
                    dev->data->tx_queues[i] == &queues[i],
                    "Inconsistent state of tx queues\n");
            }

            rc = ena_queue_restart(&queues[i]);

            if (rc) {
                PMD_INIT_LOG(ERR,
                         "failed to restart queue %d type(%d)",
                         i, ring_type);
                return rc;
            }
        }
    }

    return 0;
}

static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
{
    uint32_t max_frame_len = adapter->max_mtu;

    if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
        DEV_RX_OFFLOAD_JUMBO_FRAME)
        max_frame_len =
            adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;

    return max_frame_len;
}

static int ena_check_valid_conf(struct ena_adapter *adapter)
{
    uint32_t max_frame_len = ena_get_mtu_conf(adapter);

    if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
        PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
                  "max mtu: %d, min mtu: %d\n",
                 max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
        return ENA_COM_UNSUPPORTED;
    }

    return 0;
}

static int
ena_calc_queue_size(struct ena_com_dev *ena_dev,
            u16 *max_tx_sgl_size,
            struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
    uint32_t queue_size = ENA_DEFAULT_RING_SIZE;

    queue_size = RTE_MIN(queue_size,
                 get_feat_ctx->max_queues.max_cq_depth);
    queue_size = RTE_MIN(queue_size,
                 get_feat_ctx->max_queues.max_sq_depth);

    if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
        queue_size = RTE_MIN(queue_size,
                     get_feat_ctx->max_queues.max_llq_depth);

    /* Round down to power of 2 */
    if (!rte_is_power_of_2(queue_size))
        queue_size = rte_align32pow2(queue_size >> 1);

    if (unlikely(queue_size == 0)) {
        PMD_INIT_LOG(ERR, "Invalid queue size");
        return -EFAULT;
    }

    *max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
                   get_feat_ctx->max_queues.max_packet_tx_descs);

    return queue_size;
}

static void ena_stats_restart(struct rte_eth_dev *dev)
{
    struct ena_adapter *adapter =
        (struct ena_adapter *)(dev->data->dev_private);

    rte_atomic64_init(&adapter->drv_stats->ierrors);
    rte_atomic64_init(&adapter->drv_stats->oerrors);
    rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
}

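/* Port statistics combine two sources: basic counters fetched from the
 * device (reported as 32-bit high/low pairs and merged with
 * __MERGE_64B_H_L) and driver-side error counters kept in rte_atomic64
 * variables maintained by the driver.
 */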
static int ena_stats_get(struct rte_eth_dev *dev,
             struct rte_eth_stats *stats)
{
    struct ena_admin_basic_stats ena_stats;
    struct ena_adapter *adapter =
        (struct ena_adapter *)(dev->data->dev_private);
    struct ena_com_dev *ena_dev = &adapter->ena_dev;
    int rc;

    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return -ENOTSUP;

    memset(&ena_stats, 0, sizeof(ena_stats));
    rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
    if (unlikely(rc)) {
        RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA");
        return rc;
    }

    /* Set of basic statistics from ENA */
    stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
                      ena_stats.rx_pkts_low);
    stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
                      ena_stats.tx_pkts_low);
    stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
                    ena_stats.rx_bytes_low);
    stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
                    ena_stats.tx_bytes_low);
    stats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high,
                     ena_stats.rx_drops_low);

    /* Driver related stats */
    stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
    stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
    stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
    return 0;
}

static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
    struct ena_adapter *adapter;
    struct ena_com_dev *ena_dev;
    int rc = 0;

    ena_assert_msg(dev->data != NULL, "Uninitialized device");
    ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device");
    adapter = (struct ena_adapter *)(dev->data->dev_private);

    ena_dev = &adapter->ena_dev;
    ena_assert_msg(ena_dev != NULL, "Uninitialized device");

    if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
        RTE_LOG(ERR, PMD,
            "Invalid MTU setting. new_mtu: %d "
            "max mtu: %d min mtu: %d\n",
            mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
        return -EINVAL;
    }

    rc = ena_com_set_dev_mtu(ena_dev, mtu);
    if (rc)
        RTE_LOG(ERR, PMD, "Could not set MTU: %d\n", mtu);
    else
        RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu);

    return rc;
}

static int ena_start(struct rte_eth_dev *dev)
{
    struct ena_adapter *adapter =
        (struct ena_adapter *)(dev->data->dev_private);
    uint64_t ticks;
    int rc = 0;

    rc = ena_check_valid_conf(adapter);
    if (rc)
        return rc;

    rc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX);
    if (rc)
        return rc;

    rc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX);
    if (rc)
        return rc;

    if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
        ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
        rc = ena_rss_init_default(adapter);
        if (rc)
            return rc;
    }

    ena_stats_restart(dev);

    adapter->timestamp_wd = rte_get_timer_cycles();
    adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;

    ticks = rte_get_timer_hz();
    rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
            ena_timer_wd_callback, adapter);

    adapter->state = ENA_ADAPTER_STATE_RUNNING;

    return 0;
}

static void ena_stop(struct rte_eth_dev *dev)
{
    struct ena_adapter *adapter =
        (struct ena_adapter *)(dev->data->dev_private);

    rte_timer_stop_sync(&adapter->timer_wd);
    ena_free_io_queues_all(adapter);

    adapter->state = ENA_ADAPTER_STATE_STOPPED;
}

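/* I/O queue creation. Logical ring ids are mapped onto hardware queue ids
 * with ENA_IO_TXQ_IDX()/ENA_IO_RXQ_IDX() (even ids for Tx, odd for Rx), and
 * msix_vector is set to -1 because this PMD polls for completions instead
 * of using per-queue interrupts.
 */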
static int ena_create_io_queue(struct ena_ring *ring)
{
    struct ena_adapter *adapter;
    struct ena_com_dev *ena_dev;
    struct ena_com_create_io_ctx ctx =
        /* policy set to _HOST just to satisfy icc compiler */
        { ENA_ADMIN_PLACEMENT_POLICY_HOST,
          0, 0, 0, 0, 0 };
    uint16_t ena_qid;
    unsigned int i;
    int rc;

    adapter = ring->adapter;
    ena_dev = &adapter->ena_dev;

    if (ring->type == ENA_RING_TYPE_TX) {
        ena_qid = ENA_IO_TXQ_IDX(ring->id);
        ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
        ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
        ctx.queue_size = adapter->tx_ring_size;
        for (i = 0; i < ring->ring_size; i++)
            ring->empty_tx_reqs[i] = i;
    } else {
        ena_qid = ENA_IO_RXQ_IDX(ring->id);
        ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
        ctx.queue_size = adapter->rx_ring_size;
        for (i = 0; i < ring->ring_size; i++)
            ring->empty_rx_reqs[i] = i;
    }
    ctx.qid = ena_qid;
    ctx.msix_vector = -1; /* interrupts not used */
    ctx.numa_node = ena_cpu_to_node(ring->id);

    rc = ena_com_create_io_queue(ena_dev, &ctx);
    if (rc) {
        RTE_LOG(ERR, PMD,
            "failed to create io queue #%d (qid:%d) rc: %d\n",
            ring->id, ena_qid, rc);
        return rc;
    }

    rc = ena_com_get_io_handlers(ena_dev, ena_qid,
                     &ring->ena_com_io_sq,
                     &ring->ena_com_io_cq);
    if (rc) {
        RTE_LOG(ERR, PMD,
            "Failed to get io queue handlers. queue num %d rc: %d\n",
            ring->id, rc);
        ena_com_destroy_io_queue(ena_dev, ena_qid);
        return rc;
    }

    if (ring->type == ENA_RING_TYPE_TX)
        ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);

    return 0;
}

static void ena_free_io_queues_all(struct ena_adapter *adapter)
{
    struct rte_eth_dev *eth_dev = adapter->rte_dev;
    struct ena_com_dev *ena_dev = &adapter->ena_dev;
    int i;
    uint16_t ena_qid;
    uint16_t nb_rxq = eth_dev->data->nb_rx_queues;
    uint16_t nb_txq = eth_dev->data->nb_tx_queues;

    for (i = 0; i < nb_txq; ++i) {
        ena_qid = ENA_IO_TXQ_IDX(i);
        ena_com_destroy_io_queue(ena_dev, ena_qid);

        ena_tx_queue_release_bufs(&adapter->tx_ring[i]);
    }

    for (i = 0; i < nb_rxq; ++i) {
        ena_qid = ENA_IO_RXQ_IDX(i);
        ena_com_destroy_io_queue(ena_dev, ena_qid);

        ena_rx_queue_release_bufs(&adapter->rx_ring[i]);
    }
}

static int ena_queue_restart(struct ena_ring *ring)
{
    int rc, bufs_num;

    ena_assert_msg(ring->configured == 1,
               "Trying to restart unconfigured queue\n");

    rc = ena_create_io_queue(ring);
    if (rc) {
        PMD_INIT_LOG(ERR, "Failed to create IO queue!\n");
        return rc;
    }

    ring->next_to_clean = 0;
    ring->next_to_use = 0;

    if (ring->type == ENA_RING_TYPE_TX)
        return 0;

    bufs_num = ring->ring_size - 1;
    rc = ena_populate_rx_queue(ring, bufs_num);
    if (rc != bufs_num) {
        PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
        return ENA_COM_FAULT;
    }

    return 0;
}

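/* Note that ena_queue_restart() above fills an Rx ring with ring_size - 1
 * buffers, leaving one descriptor unused so that next_to_use ==
 * next_to_clean can only mean an empty ring and never aliases the full
 * state.
 */
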
static int ena_tx_queue_setup(struct rte_eth_dev *dev,
                  uint16_t queue_idx,
                  uint16_t nb_desc,
                  __rte_unused unsigned int socket_id,
                  const struct rte_eth_txconf *tx_conf)
{
    struct ena_ring *txq = NULL;
    struct ena_adapter *adapter =
        (struct ena_adapter *)(dev->data->dev_private);
    unsigned int i;

    txq = &adapter->tx_ring[queue_idx];

    if (txq->configured) {
        RTE_LOG(CRIT, PMD,
            "API violation. Queue %d is already configured\n",
            queue_idx);
        return ENA_COM_FAULT;
    }

    if (!rte_is_power_of_2(nb_desc)) {
        RTE_LOG(ERR, PMD,
            "Unsupported size of TX queue: %d is not a power of 2.",
            nb_desc);
        return -EINVAL;
    }

    if (nb_desc > adapter->tx_ring_size) {
        RTE_LOG(ERR, PMD,
            "Unsupported size of TX queue (max size: %d)\n",
            adapter->tx_ring_size);
        return -EINVAL;
    }

    txq->port_id = dev->data->port_id;
    txq->next_to_clean = 0;
    txq->next_to_use = 0;
    txq->ring_size = nb_desc;

    txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
                      sizeof(struct ena_tx_buffer) *
                      txq->ring_size,
                      RTE_CACHE_LINE_SIZE);
    if (!txq->tx_buffer_info) {
        RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n");
        return -ENOMEM;
    }

    txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
                     sizeof(u16) * txq->ring_size,
                     RTE_CACHE_LINE_SIZE);
    if (!txq->empty_tx_reqs) {
        RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n");
        rte_free(txq->tx_buffer_info);
        return -ENOMEM;
    }

    for (i = 0; i < txq->ring_size; i++)
        txq->empty_tx_reqs[i] = i;

    if (tx_conf != NULL) {
        txq->offloads =
            tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
    }

    /* Store pointer to this queue in upper layer */
    txq->configured = 1;
    dev->data->tx_queues[queue_idx] = txq;

    return 0;
}

static int ena_rx_queue_setup(struct rte_eth_dev *dev,
                  uint16_t queue_idx,
                  uint16_t nb_desc,
                  __rte_unused unsigned int socket_id,
                  __rte_unused const struct rte_eth_rxconf *rx_conf,
                  struct rte_mempool *mp)
{
    struct ena_adapter *adapter =
        (struct ena_adapter *)(dev->data->dev_private);
    struct ena_ring *rxq = NULL;
    int i;

    rxq = &adapter->rx_ring[queue_idx];
    if (rxq->configured) {
        RTE_LOG(CRIT, PMD,
            "API violation. Queue %d is already configured\n",
            queue_idx);
        return ENA_COM_FAULT;
    }

    if (!rte_is_power_of_2(nb_desc)) {
        RTE_LOG(ERR, PMD,
            "Unsupported size of RX queue: %d is not a power of 2.",
            nb_desc);
        return -EINVAL;
    }

    if (nb_desc > adapter->rx_ring_size) {
        RTE_LOG(ERR, PMD,
            "Unsupported size of RX queue (max size: %d)\n",
            adapter->rx_ring_size);
        return -EINVAL;
    }

    rxq->port_id = dev->data->port_id;
    rxq->next_to_clean = 0;
    rxq->next_to_use = 0;
    rxq->ring_size = nb_desc;
    rxq->mb_pool = mp;

    rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
                      sizeof(struct rte_mbuf *) * nb_desc,
                      RTE_CACHE_LINE_SIZE);
    if (!rxq->rx_buffer_info) {
        RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n");
        return -ENOMEM;
    }

    rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
                     sizeof(uint16_t) * nb_desc,
                     RTE_CACHE_LINE_SIZE);
    if (!rxq->empty_rx_reqs) {
        RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
        rte_free(rxq->rx_buffer_info);
        rxq->rx_buffer_info = NULL;
        return -ENOMEM;
    }

    for (i = 0; i < nb_desc; i++)
        rxq->empty_rx_reqs[i] = i;

    /* Store pointer to this queue in upper layer */
    rxq->configured = 1;
    dev->data->rx_queues[queue_idx] = rxq;

    return 0;
}

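/* Rx ring refill: bulk-allocate mbufs from the queue's mempool, post their
 * buffer addresses as free Rx descriptors tagged with req_ids, and ring the
 * doorbell once for the whole batch so a single write barrier covers the
 * burst.
 */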
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
{
    unsigned int i;
    int rc;
    uint16_t ring_size = rxq->ring_size;
    uint16_t ring_mask = ring_size - 1;
    uint16_t next_to_use = rxq->next_to_use;
    uint16_t in_use, req_id;
    struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];

    if (unlikely(!count))
        return 0;

    in_use = rxq->next_to_use - rxq->next_to_clean;
    ena_assert_msg(((in_use + count) < ring_size), "bad ring state");

    count = RTE_MIN(count,
            (uint16_t)(ring_size - (next_to_use & ring_mask)));

    /* get resources for incoming packets */
    rc = rte_mempool_get_bulk(rxq->mb_pool,
                  (void **)(&mbufs[next_to_use & ring_mask]),
                  count);
    if (unlikely(rc < 0)) {
        rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
        PMD_RX_LOG(DEBUG, "there are not enough free buffers");
        return 0;
    }

    for (i = 0; i < count; i++) {
        uint16_t next_to_use_masked = next_to_use & ring_mask;
        struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
        struct ena_com_buf ebuf;

        rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);

        req_id = rxq->empty_rx_reqs[next_to_use_masked];
        rc = validate_rx_req_id(rxq, req_id);
        if (unlikely(rc < 0))
            break;

        /* prepare physical address for DMA transaction */
        ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
        ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
        /* pass resource to device */
        rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
                        &ebuf, req_id);
        if (unlikely(rc)) {
            rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
                         count - i);
            RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
            break;
        }
        next_to_use++;
    }

    if (unlikely(i < count))
        RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
            "buffers (from %d)\n", rxq->id, i, count);

    /* When we submitted free resources to the device... */
    if (likely(i > 0)) {
        /* ...let HW know that it can fill buffers with data.
         *
         * Add a memory barrier to make sure the descriptors were
         * written before issuing the doorbell.
         */
        rte_wmb();
        ena_com_write_sq_doorbell(rxq->ena_com_io_sq);

        rxq->next_to_use = next_to_use;
    }

    return i;
}

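/* One-time device bring-up shared by probe and reset: set up MMIO register
 * reads, reset the device, validate the firmware version, initialize the
 * admin queue in polling mode, fetch device attributes, and configure which
 * async event (AENQ) groups the driver wants to receive.
 */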
static int ena_device_init(struct ena_com_dev *ena_dev,
               struct ena_com_dev_get_features_ctx *get_feat_ctx,
               bool *wd_state)
{
    uint32_t aenq_groups;
    int rc;
    bool readless_supported;

    /* Initialize mmio registers */
    rc = ena_com_mmio_reg_read_request_init(ena_dev);
    if (rc) {
        RTE_LOG(ERR, PMD, "failed to init mmio read less\n");
        return rc;
    }

    /* The PCIe configuration space revision id indicates whether mmio
     * register read is disabled.
     */
    readless_supported =
        !(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
                   & ENA_MMIO_DISABLE_REG_READ);
    ena_com_set_mmio_read_mode(ena_dev, readless_supported);

    /* reset device */
    rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
    if (rc) {
        RTE_LOG(ERR, PMD, "cannot reset device\n");
        goto err_mmio_read_less;
    }

    /* check FW version */
    rc = ena_com_validate_version(ena_dev);
    if (rc) {
        RTE_LOG(ERR, PMD, "device version is too low\n");
        goto err_mmio_read_less;
    }

    ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);

    /* ENA device administration layer init */
    rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
    if (rc) {
        RTE_LOG(ERR, PMD,
            "cannot initialize ena admin queue with device\n");
        goto err_mmio_read_less;
    }

    /* To enable the msix interrupts the driver needs to know the number
     * of queues. So the driver uses polling mode to retrieve this
     * information.
     */
    ena_com_set_admin_polling_mode(ena_dev, true);

    ena_config_host_info(ena_dev);

    /* Get Device Attributes and features */
    rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
    if (rc) {
        RTE_LOG(ERR, PMD,
            "cannot get attribute for ena device rc= %d\n", rc);
        goto err_admin_init;
    }

    aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
              BIT(ENA_ADMIN_NOTIFICATION) |
              BIT(ENA_ADMIN_KEEP_ALIVE) |
              BIT(ENA_ADMIN_FATAL_ERROR) |
              BIT(ENA_ADMIN_WARNING);

    aenq_groups &= get_feat_ctx->aenq.supported_groups;
    rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
    if (rc) {
        RTE_LOG(ERR, PMD, "Cannot configure aenq groups rc: %d\n", rc);
        goto err_admin_init;
    }

    *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));

    return 0;

err_admin_init:
    ena_com_admin_destroy(ena_dev);

err_mmio_read_less:
    ena_com_mmio_reg_read_request_destroy(ena_dev);

    return rc;
}

static void ena_interrupt_handler_rte(void *cb_arg)
{
    struct ena_adapter *adapter = (struct ena_adapter *)cb_arg;
    struct ena_com_dev *ena_dev = &adapter->ena_dev;

    ena_com_admin_q_comp_intr_handler(ena_dev);
    if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
        ena_com_aenq_intr_handler(ena_dev, adapter);
}

static void check_for_missing_keep_alive(struct ena_adapter *adapter)
{
    if (!adapter->wd_state)
        return;

    if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
        return;

    if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
        adapter->keep_alive_timeout)) {
        RTE_LOG(ERR, PMD, "Keep alive timeout\n");
        adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
        adapter->trigger_reset = true;
    }
}

/* Check if admin queue is enabled */
static void check_for_admin_com_state(struct ena_adapter *adapter)
{
    if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
        RTE_LOG(ERR, PMD, "ENA admin queue is not in running state!\n");
        adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
        adapter->trigger_reset = true;
    }
}

static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
                  void *arg)
{
    struct ena_adapter *adapter = (struct ena_adapter *)arg;
    struct rte_eth_dev *dev = adapter->rte_dev;

    check_for_missing_keep_alive(adapter);
    check_for_admin_com_state(adapter);

    if (unlikely(adapter->trigger_reset)) {
        RTE_LOG(ERR, PMD, "Trigger reset is on\n");
        _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
                          NULL);
    }
}

static int ena_calc_io_queue_num(__rte_unused struct ena_com_dev *ena_dev,
                 struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
    int io_sq_num, io_cq_num, io_queue_num;

    io_sq_num = get_feat_ctx->max_queues.max_sq_num;
    io_cq_num = get_feat_ctx->max_queues.max_cq_num;

    io_queue_num = RTE_MIN(io_sq_num, io_cq_num);

    if (unlikely(io_queue_num == 0)) {
        RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n");
        return -EFAULT;
    }

    return io_queue_num;
}

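/* PCI probe-time initialization: map the register and memory BARs, run
 * ena_device_init(), size the rings from the device limits, copy the MAC
 * address, register the admin interrupt handler, switch admin completions
 * from polling to interrupt mode, and prepare the watchdog timer.
 */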
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
{
    struct rte_pci_device *pci_dev;
    struct rte_intr_handle *intr_handle;
    struct ena_adapter *adapter =
        (struct ena_adapter *)(eth_dev->data->dev_private);
    struct ena_com_dev *ena_dev = &adapter->ena_dev;
    struct ena_com_dev_get_features_ctx get_feat_ctx;
    int queue_size, rc;
    u16 tx_sgl_size = 0;

    static int adapters_found;
    bool wd_state;

    memset(adapter, 0, sizeof(struct ena_adapter));
    ena_dev = &adapter->ena_dev;

    eth_dev->dev_ops = &ena_dev_ops;
    eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
    eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
    eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
    adapter->rte_eth_dev_data = eth_dev->data;
    adapter->rte_dev = eth_dev;

    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return 0;

    pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
    adapter->pdev = pci_dev;

    PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
             pci_dev->addr.domain,
             pci_dev->addr.bus,
             pci_dev->addr.devid,
             pci_dev->addr.function);

    intr_handle = &pci_dev->intr_handle;

    adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
    adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;

    if (!adapter->regs) {
        PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
                 ENA_REGS_BAR);
        return -ENXIO;
    }

    ena_dev->reg_bar = adapter->regs;
    ena_dev->dmadev = adapter->pdev;

    adapter->id_number = adapters_found;

    snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
         adapter->id_number);

    /* device specific initialization routine */
    rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
    if (rc) {
        PMD_INIT_LOG(CRIT, "Failed to init ENA device");
        goto err;
    }
    adapter->wd_state = wd_state;

    ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
    adapter->num_queues = ena_calc_io_queue_num(ena_dev,
                            &get_feat_ctx);

    queue_size = ena_calc_queue_size(ena_dev, &tx_sgl_size, &get_feat_ctx);
    if (queue_size <= 0 || adapter->num_queues <= 0) {
        rc = -EFAULT;
        goto err_device_destroy;
    }

    adapter->tx_ring_size = queue_size;
    adapter->rx_ring_size = queue_size;

    adapter->max_tx_sgl_size = tx_sgl_size;

    /* prepare ring structures */
    ena_init_rings(adapter);

    ena_config_debug_area(adapter);

    /* Set max MTU for this device */
    adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;

    /* set device support for TSO */
    adapter->tso4_supported = get_feat_ctx.offload.tx &
                  ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;

    /* Copy MAC address and point DPDK to it */
    eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr;
    ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr,
            (struct ether_addr *)adapter->mac_addr);

    /*
     * Pass the information to the rte_eth_dev_close() that it should also
     * release the private port resources.
     */
    eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

    adapter->drv_stats = rte_zmalloc("adapter stats",
                     sizeof(*adapter->drv_stats),
                     RTE_CACHE_LINE_SIZE);
    if (!adapter->drv_stats) {
        RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n");
        rc = -ENOMEM;
        goto err_delete_debug_area;
    }

    rte_intr_callback_register(intr_handle,
                   ena_interrupt_handler_rte,
                   adapter);
    rte_intr_enable(intr_handle);
    ena_com_set_admin_polling_mode(ena_dev, false);
    ena_com_admin_aenq_enable(ena_dev);

    if (adapters_found == 0)
        rte_timer_subsystem_init();
    rte_timer_init(&adapter->timer_wd);

    adapters_found++;
    adapter->state = ENA_ADAPTER_STATE_INIT;

    return 0;

err_delete_debug_area:
    ena_com_delete_debug_area(ena_dev);

err_device_destroy:
    ena_com_delete_host_info(ena_dev);
    ena_com_admin_destroy(ena_dev);

err:
    return rc;
}

static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
{
    struct ena_adapter *adapter =
        (struct ena_adapter *)(eth_dev->data->dev_private);

    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return 0;

    if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
        ena_close(eth_dev);

    eth_dev->dev_ops = NULL;
    eth_dev->rx_pkt_burst = NULL;
    eth_dev->tx_pkt_burst = NULL;
    eth_dev->tx_pkt_prepare = NULL;

    adapter->state = ENA_ADAPTER_STATE_FREE;

    return 0;
}

static int ena_dev_configure(struct rte_eth_dev *dev)
{
    struct ena_adapter *adapter =
        (struct ena_adapter *)(dev->data->dev_private);

    adapter->state = ENA_ADAPTER_STATE_CONFIG;

    adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
    adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
    return 0;
}

static void ena_init_rings(struct ena_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_queues; i++) {
        struct ena_ring *ring = &adapter->tx_ring[i];

        ring->configured = 0;
        ring->type = ENA_RING_TYPE_TX;
        ring->adapter = adapter;
        ring->id = i;
        ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
        ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
        ring->sgl_size = adapter->max_tx_sgl_size;
    }

    for (i = 0; i < adapter->num_queues; i++) {
        struct ena_ring *ring = &adapter->rx_ring[i];

        ring->configured = 0;
        ring->type = ENA_RING_TYPE_RX;
        ring->adapter = adapter;
        ring->id = i;
    }
}

static void ena_infos_get(struct rte_eth_dev *dev,
              struct rte_eth_dev_info *dev_info)
{
    struct ena_adapter *adapter;
    struct ena_com_dev *ena_dev;
    struct ena_com_dev_get_features_ctx feat;
    uint64_t rx_feat = 0, tx_feat = 0;
    int rc = 0;

    ena_assert_msg(dev->data != NULL, "Uninitialized device");
    ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device");
    adapter = (struct ena_adapter *)(dev->data->dev_private);

    ena_dev = &adapter->ena_dev;
    ena_assert_msg(ena_dev != NULL, "Uninitialized device");

    dev_info->speed_capa =
            ETH_LINK_SPEED_1G   |
            ETH_LINK_SPEED_2_5G |
            ETH_LINK_SPEED_5G   |
            ETH_LINK_SPEED_10G  |
            ETH_LINK_SPEED_25G  |
            ETH_LINK_SPEED_40G  |
            ETH_LINK_SPEED_50G  |
            ETH_LINK_SPEED_100G;

    /* Get supported features from HW */
    rc = ena_com_get_dev_attr_feat(ena_dev, &feat);
    if (unlikely(rc)) {
        RTE_LOG(ERR, PMD,
            "Cannot get attribute for ena device rc= %d\n", rc);
        return;
    }

    /* Set Tx & Rx features available for device */
    if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
        tx_feat |= DEV_TX_OFFLOAD_TCP_TSO;

    if (feat.offload.tx &
        ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
        tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
            DEV_TX_OFFLOAD_UDP_CKSUM |
            DEV_TX_OFFLOAD_TCP_CKSUM;

    if (feat.offload.rx_supported &
        ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
        rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
            DEV_RX_OFFLOAD_UDP_CKSUM |
            DEV_RX_OFFLOAD_TCP_CKSUM;

    rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;

    /* Inform framework about available features */
    dev_info->rx_offload_capa = rx_feat;
    dev_info->rx_queue_offload_capa = rx_feat;
    dev_info->tx_offload_capa = tx_feat;
    dev_info->tx_queue_offload_capa = tx_feat;

    dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
    dev_info->max_rx_pktlen = adapter->max_mtu;
    dev_info->max_mac_addrs = 1;

    dev_info->max_rx_queues = adapter->num_queues;
    dev_info->max_tx_queues = adapter->num_queues;
    dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;

    adapter->tx_supported_offloads = tx_feat;
    adapter->rx_supported_offloads = rx_feat;

    dev_info->rx_desc_lim.nb_max = ENA_MAX_RING_DESC;
    dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;

    dev_info->tx_desc_lim.nb_max = ENA_MAX_RING_DESC;
    dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
    dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
                    feat.max_queues.max_packet_tx_descs);
    dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
                    feat.max_queues.max_packet_tx_descs);
}

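/* Rx burst handler: polls the completion queue via ena_com_rx_pkt(), chains
 * multi-descriptor packets into a single mbuf chain, fills ptype/checksum
 * flags from the Rx context, and refills the ring in bursts once at least
 * 1/8 of it (ENA_RING_DESCS_RATIO) has been consumed.
 */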
static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                  uint16_t nb_pkts)
{
    struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
    unsigned int ring_size = rx_ring->ring_size;
    unsigned int ring_mask = ring_size - 1;
    uint16_t next_to_clean = rx_ring->next_to_clean;
    uint16_t desc_in_use = 0;
    uint16_t req_id;
    unsigned int recv_idx = 0;
    struct rte_mbuf *mbuf = NULL;
    struct rte_mbuf *mbuf_head = NULL;
    struct rte_mbuf *mbuf_prev = NULL;
    struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info;
    unsigned int completed;

    struct ena_com_rx_ctx ena_rx_ctx;
    int rc = 0;

    /* Check adapter state */
    if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
        RTE_LOG(ALERT, PMD,
            "Trying to receive pkts while device is NOT running\n");
        return 0;
    }

    desc_in_use = rx_ring->next_to_use - next_to_clean;
    if (unlikely(nb_pkts > desc_in_use))
        nb_pkts = desc_in_use;

    for (completed = 0; completed < nb_pkts; completed++) {
        int segments = 0;

        ena_rx_ctx.max_bufs = rx_ring->ring_size;
        ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
        ena_rx_ctx.descs = 0;
        /* receive packet context */
        rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
                    rx_ring->ena_com_io_sq,
                    &ena_rx_ctx);
        if (unlikely(rc)) {
            RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
            rx_ring->adapter->trigger_reset = true;
            return 0;
        }

        if (unlikely(ena_rx_ctx.descs == 0))
            break;

        while (segments < ena_rx_ctx.descs) {
            req_id = ena_rx_ctx.ena_bufs[segments].req_id;
            rc = validate_rx_req_id(rx_ring, req_id);
            if (unlikely(rc))
                break;

            mbuf = rx_buff_info[req_id];
            mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
            mbuf->data_off = RTE_PKTMBUF_HEADROOM;
            mbuf->refcnt = 1;
            mbuf->next = NULL;
            if (unlikely(segments == 0)) {
                mbuf->nb_segs = ena_rx_ctx.descs;
                mbuf->port = rx_ring->port_id;
                mbuf->pkt_len = 0;
                mbuf_head = mbuf;
            } else {
                /* for multi-segment pkts create mbuf chain */
                mbuf_prev->next = mbuf;
            }
            mbuf_head->pkt_len += mbuf->data_len;

            mbuf_prev = mbuf;
            rx_ring->empty_rx_reqs[next_to_clean & ring_mask] =
                req_id;
            segments++;
            next_to_clean++;
        }

        /* fill mbuf attributes if any */
        ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
        mbuf_head->hash.rss = ena_rx_ctx.hash;

        /* pass to DPDK application head mbuf */
        rx_pkts[recv_idx] = mbuf_head;
        recv_idx++;
    }

    rx_ring->next_to_clean = next_to_clean;

    desc_in_use = desc_in_use - completed + 1;
    /* Burst refill to save doorbells, memory barriers, const interval */
    if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
        ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);

    return recv_idx;
}

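/* Tx prepare handler (tx_pkt_prepare): rejects mbufs requesting offloads
 * this device cannot do (ENA_TX_OFFLOAD_NOTSUP_MASK, SCTP checksum) and, for
 * IPv4 packets that need it, computes the pseudo-header checksum with
 * rte_net_intel_cksum_flags_prepare() before the mbufs reach
 * eth_ena_xmit_pkts().
 */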
static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
	unsigned int ring_size = rx_ring->ring_size;
	unsigned int ring_mask = ring_size - 1;
	uint16_t next_to_clean = rx_ring->next_to_clean;
	uint16_t desc_in_use = 0;
	uint16_t req_id;
	unsigned int recv_idx = 0;
	struct rte_mbuf *mbuf = NULL;
	struct rte_mbuf *mbuf_head = NULL;
	struct rte_mbuf *mbuf_prev = NULL;
	struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info;
	unsigned int completed;

	struct ena_com_rx_ctx ena_rx_ctx;
	int rc = 0;

	/* Check adapter state */
	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
		RTE_LOG(ALERT, PMD,
			"Trying to receive pkts while device is NOT running\n");
		return 0;
	}

	desc_in_use = rx_ring->next_to_use - next_to_clean;
	if (unlikely(nb_pkts > desc_in_use))
		nb_pkts = desc_in_use;

	for (completed = 0; completed < nb_pkts; completed++) {
		int segments = 0;

		ena_rx_ctx.max_bufs = rx_ring->ring_size;
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.descs = 0;
		/* receive packet context */
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc)) {
			RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
			rx_ring->adapter->trigger_reset = true;
			return 0;
		}

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		while (segments < ena_rx_ctx.descs) {
			req_id = ena_rx_ctx.ena_bufs[segments].req_id;
			rc = validate_rx_req_id(rx_ring, req_id);
			if (unlikely(rc))
				break;

			mbuf = rx_buff_info[req_id];
			mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
			mbuf->refcnt = 1;
			mbuf->next = NULL;
			if (unlikely(segments == 0)) {
				mbuf->nb_segs = ena_rx_ctx.descs;
				mbuf->port = rx_ring->port_id;
				mbuf->pkt_len = 0;
				mbuf_head = mbuf;
			} else {
				/* for multi-segment pkts create mbuf chain */
				mbuf_prev->next = mbuf;
			}
			mbuf_head->pkt_len += mbuf->data_len;

			mbuf_prev = mbuf;
			rx_ring->empty_rx_reqs[next_to_clean & ring_mask] =
				req_id;
			segments++;
			next_to_clean++;
		}

		/* fill mbuf attributes if any */
		ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
		mbuf_head->hash.rss = ena_rx_ctx.hash;

		/* pass to DPDK application head mbuf */
		rx_pkts[recv_idx] = mbuf_head;
		recv_idx++;
	}

	rx_ring->next_to_clean = next_to_clean;

	desc_in_use = desc_in_use - completed + 1;
	/* Burst refill to save doorbells, memory barriers, const interval */
	if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
		ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);

	return recv_idx;
}
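/*
 * Tx prepare handler, reached by applications through rte_eth_tx_prepare().
 * It rejects offload requests the adapter cannot handle and lets
 * rte_net_intel_cksum_flags_prepare() fill in the pseudo-header checksums
 * the hardware expects for partial checksum offload.
 *
 * Illustrative usage sketch (not part of the driver), assuming a
 * hypothetical port_id/queue_id and an already populated pkts[] array:
 *
 *	uint16_t nb_prep, nb_sent;
 *
 *	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	if (nb_prep != nb_pkts)
 *		handle_offload_error(rte_errno);  (* hypothetical helper *)
 *	nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 */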
static uint16_t
eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	int32_t ret;
	uint32_t i;
	struct rte_mbuf *m;
	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
	struct ipv4_hdr *ip_hdr;
	uint64_t ol_flags;
	uint16_t frag_field;

	for (i = 0; i != nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;

		if (!(ol_flags & PKT_TX_IPV4))
			continue;

		/* If the L2 header length was not specified, assume it is
		 * the length of the Ethernet header.
		 */
		if (unlikely(m->l2_len == 0))
			m->l2_len = sizeof(struct ether_hdr);

		ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
						 m->l2_len);
		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);

		if ((frag_field & IPV4_HDR_DF_FLAG) != 0) {
			m->packet_type |= RTE_PTYPE_L4_NONFRAG;

			/* If the IPv4 header has the DF flag set and TSO is
			 * not supported, the partial checksum should not be
			 * calculated.
			 */
			if (!tx_ring->adapter->tso4_supported)
				continue;
		}

		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
				(ol_flags & PKT_TX_L4_MASK) ==
				PKT_TX_SCTP_CKSUM) {
			rte_errno = ENOTSUP;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif

		/* If TSO is requested and the DF flag is not set (DF=0),
		 * the hardware must be provided with a partial checksum;
		 * otherwise it will take care of the necessary calculations.
		 */
		ret = rte_net_intel_cksum_flags_prepare(m,
			ol_flags & ~PKT_TX_TCP_SEG);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}

static void ena_update_hints(struct ena_adapter *adapter,
			     struct ena_admin_ena_hw_hints *hints)
{
	if (hints->admin_completion_tx_timeout)
		adapter->ena_dev.admin_queue.completion_timeout =
			hints->admin_completion_tx_timeout * 1000;

	if (hints->mmio_read_timeout)
		/* convert to usec */
		adapter->ena_dev.mmio_read.reg_read_to =
			hints->mmio_read_timeout * 1000;

	if (hints->driver_watchdog_timeout) {
		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
		else
			/* Convert msecs to ticks */
			adapter->keep_alive_timeout =
				(hints->driver_watchdog_timeout *
					rte_get_timer_hz()) / 1000;
	}
}

static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
					struct rte_mbuf *mbuf)
{
	int num_segments, rc;

	num_segments = mbuf->nb_segs;

	if (likely(num_segments < tx_ring->sgl_size))
		return 0;

	rc = rte_pktmbuf_linearize(mbuf);
	if (unlikely(rc))
		RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n");

	return rc;
}
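/*
 * Tx burst handler.
 *
 * For every mbuf it reserves a req_id from empty_tx_reqs, linearizes chains
 * that exceed the device SGL limit, points ena_tx_ctx.push_header at the
 * first tx_max_header_size bytes when the placement policy is
 * ENA_ADMIN_PLACEMENT_POLICY_DEV, and hands the scatter-gather list to
 * ena_com_prepare_tx(). A single doorbell is written for the whole burst,
 * after which completed requests are reclaimed; cleanup is capped at
 * ENA_RING_DESCS_RATIO() descriptors per call so it does not block
 * transmission of subsequent bursts.
 */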
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts)
{
	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
	uint16_t next_to_use = tx_ring->next_to_use;
	uint16_t next_to_clean = tx_ring->next_to_clean;
	struct rte_mbuf *mbuf;
	unsigned int ring_size = tx_ring->ring_size;
	unsigned int ring_mask = ring_size - 1;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_tx_buffer *tx_info;
	struct ena_com_buf *ebuf;
	uint16_t rc, req_id, total_tx_descs = 0;
	uint16_t sent_idx = 0, empty_tx_reqs;
	int nb_hw_desc;

	/* Check adapter state */
	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
		RTE_LOG(ALERT, PMD,
			"Trying to xmit pkts while device is NOT running\n");
		return 0;
	}

	empty_tx_reqs = ring_size - (next_to_use - next_to_clean);
	if (nb_pkts > empty_tx_reqs)
		nb_pkts = empty_tx_reqs;

	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
		mbuf = tx_pkts[sent_idx];

		rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
		if (unlikely(rc))
			break;

		req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
		tx_info = &tx_ring->tx_buffer_info[req_id];
		tx_info->mbuf = mbuf;
		tx_info->num_of_bufs = 0;
		ebuf = tx_info->bufs;

		/* Prepare TX context */
		memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
		memset(&ena_tx_ctx.ena_meta, 0x0,
		       sizeof(struct ena_com_tx_meta));
		ena_tx_ctx.ena_bufs = ebuf;
		ena_tx_ctx.req_id = req_id;
		if (tx_ring->tx_mem_queue_type ==
				ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			/* prepare the push buffer with
			 * virtual address of the data
			 */
			ena_tx_ctx.header_len =
				RTE_MIN(mbuf->data_len,
					tx_ring->tx_max_header_size);
			ena_tx_ctx.push_header =
				(void *)((char *)mbuf->buf_addr +
					 mbuf->data_off);
		} /* there's no else as we take advantage of memset zeroing */

		/* Set TX offloads flags, if applicable */
		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);

		if (unlikely(mbuf->ol_flags &
			     (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
			rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);

		rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]);

		/* Process first segment taking into
		 * consideration pushed header
		 */
		if (mbuf->data_len > ena_tx_ctx.header_len) {
			ebuf->paddr = mbuf->buf_iova +
				      mbuf->data_off +
				      ena_tx_ctx.header_len;
			ebuf->len = mbuf->data_len - ena_tx_ctx.header_len;
			ebuf++;
			tx_info->num_of_bufs++;
		}

		while ((mbuf = mbuf->next) != NULL) {
			ebuf->paddr = mbuf->buf_iova + mbuf->data_off;
			ebuf->len = mbuf->data_len;
			ebuf++;
			tx_info->num_of_bufs++;
		}

		ena_tx_ctx.num_bufs = tx_info->num_of_bufs;

		/* Write data to device */
		rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
					&ena_tx_ctx, &nb_hw_desc);
		if (unlikely(rc))
			break;

		tx_info->tx_descs = nb_hw_desc;

		next_to_use++;
	}

	/* If there are ready packets to be xmitted... */
	if (sent_idx > 0) {
		/* ...let HW do its best :-) */
		rte_wmb();
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);

		tx_ring->next_to_use = next_to_use;
	}

	/* Clear complete packets */
	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		/* Get Tx info & store how many descs were processed */
		tx_info = &tx_ring->tx_buffer_info[req_id];
		total_tx_descs += tx_info->tx_descs;

		/* Free whole mbuf chain */
		mbuf = tx_info->mbuf;
		rte_pktmbuf_free(mbuf);
		tx_info->mbuf = NULL;

		/* Put back descriptor to the ring for reuse */
		tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
		next_to_clean++;

		/* If too many descs to clean, leave it for another run */
		if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
			break;
	}

	if (total_tx_descs > 0) {
		/* acknowledge completion of sent packets */
		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
		tx_ring->next_to_clean = next_to_clean;
	}

	return sent_idx;
}

/*********************************************************************
 *  PMD configuration
 *********************************************************************/
static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct ena_adapter), eth_ena_dev_init);
}

static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
}

static struct rte_pci_driver rte_ena_pmd = {
	.id_table = pci_id_ena_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_WC_ACTIVATE,
	.probe = eth_ena_pci_probe,
	.remove = eth_ena_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(ena_init_log)
{
	ena_logtype_init = rte_log_register("pmd.net.ena.init");
	if (ena_logtype_init >= 0)
		rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE);
	ena_logtype_driver = rte_log_register("pmd.net.ena.driver");
	if (ena_logtype_driver >= 0)
		rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);
}
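/*
 * The log types registered above default to NOTICE. Assuming a recent EAL,
 * they can be raised at start-up without rebuilding the PMD; illustrative
 * command line only (application name and core/channel options are
 * placeholders):
 *
 *	./app -l 0-1 -n 4 \
 *		--log-level=pmd.net.ena.init:debug \
 *		--log-level=pmd.net.ena.driver:debug -- ...
 */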
/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev;
	struct ena_adapter *adapter;
	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
	uint32_t status;

	adapter = (struct ena_adapter *)adapter_data;
	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
	eth_dev = adapter->rte_dev;

	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
	adapter->link_status = status;

	ena_link_update(eth_dev, 0);
	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static void ena_notification(void *data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;
	struct ena_admin_ena_hw_hints *hints;

	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
		RTE_LOG(WARNING, PMD, "Invalid group(%x) expected %x\n",
			aenq_e->aenq_common_desc.group,
			ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		RTE_LOG(ERR, PMD, "Invalid AENQ notification syndrome %d\n",
			aenq_e->aenq_common_desc.syndrom);
	}
}

static void ena_keep_alive(void *adapter_data,
			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;

	adapter->timestamp_wd = rte_get_timer_cycles();
}

/**
 * This handler will be called for an unknown event group or for a group
 * whose handler is not implemented.
 **/
static void unimplemented_aenq_handler(__rte_unused void *data,
				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	RTE_LOG(ERR, PMD, "Unknown event was received or event with "
			  "unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
	},
	.unimplemented_handler = unimplemented_aenq_handler
};
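/*
 * The aenq_handlers table is indexed by AENQ group: ena_com dispatches each
 * received event to the matching entry and falls back to
 * unimplemented_aenq_handler for groups that are not listed. As an
 * illustrative sketch only (not part of the driver), handling an additional
 * group would mean adding a callback with the same signature and listing it
 * in the table, assuming ena_admin_defs.h defines the group value:
 *
 *	static void ena_fatal_error_handler(void *adapter_data,
 *		__rte_unused struct ena_admin_aenq_entry *aenq_e)
 *	{
 *		struct ena_adapter *adapter =
 *			(struct ena_adapter *)adapter_data;
 *
 *		adapter->trigger_reset = true;
 *	}
 *
 *	...
 *	[ENA_ADMIN_FATAL_ERROR] = ena_fatal_error_handler,
 */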