/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_net.h>
#include <rte_kvargs.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	4
#define DRV_MODULE_VER_SUBMINOR	0

#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)

#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)

#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) RTE_DIM(x)

#define ENA_MIN_RING_DESC	128

#define ENA_PTYPE_HAS_HASH	(RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)

enum ethtool_stringset {
	ETH_SS_TEST = 0,
	ETH_SS_STATS,
};

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_ENI_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, eni)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

/* Device arguments */
#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"

/*
 * Each rte_memzone should have a unique name.
 * To satisfy it, count the number of allocations and add it to the name.
 */
rte_atomic64_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(dev_start),
	ENA_STAT_GLOBAL_ENTRY(dev_stop),
	ENA_STAT_GLOBAL_ENTRY(tx_drops),
};

static const struct ena_stats ena_stats_eni_strings[] = {
	ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(available_desc),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refill_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_ENI	ARRAY_SIZE(ena_stats_eni_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)

#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
			DEV_TX_OFFLOAD_UDP_CKSUM |\
			DEV_TX_OFFLOAD_IPV4_CKSUM |\
			DEV_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
		       PKT_TX_IP_CKSUM |\
		       PKT_TX_TCP_SEG)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF		0xEC20
#define PCI_DEVICE_ID_ENA_VF_RSERV0	0xEC21

#define ENA_TX_OFFLOAD_MASK (\
	PKT_TX_L4_MASK |     \
	PKT_TX_IPV6 |        \
	PKT_TX_IPV4 |        \
	PKT_TX_IP_CKSUM |    \
	PKT_TX_TCP_SEG)

#define ENA_TX_OFFLOAD_NOTSUP_MASK	\
	(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)

static const struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) },
	{ .device_id = 0 },
};

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct rte_pci_device *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state);
static int ena_dev_configure(struct rte_eth_dev *dev);
static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
	struct ena_tx_buffer *tx_info,
	struct rte_mbuf *mbuf,
	void **push_header,
	uint16_t *header_len);
static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
static void ena_tx_cleanup(struct ena_ring *tx_ring);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
				    struct ena_com_rx_buf_info *ena_bufs,
				    uint32_t descs,
				    uint16_t *next_to_clean,
				    uint8_t offset);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
				  struct rte_mbuf *mbuf, uint16_t id);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter,
			   bool disable_meta_caching);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static int ena_stop(struct rte_eth_dev *dev);
static int ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring);
static void ena_queue_stop(struct ena_ring *ring);
static void ena_queue_stop_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring);
static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static int ena_infos_get(struct rte_eth_dev *dev,
			 struct rte_eth_dev_info *dev_info);
static void ena_interrupt_handler_rte(void *cb_arg);
static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
static void ena_destroy_device(struct rte_eth_dev *eth_dev);
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
static int ena_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int n);
static int ena_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *stats,
			  unsigned int n);
static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n);
static int ena_process_bool_devarg(const char *key,
				   const char *value,
				   void *opaque);
static int ena_parse_devargs(struct ena_adapter *adapter,
			     struct rte_devargs *devargs);
static int ena_copy_eni_stats(struct ena_adapter *adapter);
static int ena_setup_rx_intr(struct rte_eth_dev *dev);
static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);

static const struct eth_dev_ops ena_dev_ops = {
	.dev_configure = ena_dev_configure,
	.dev_infos_get = ena_infos_get,
	.rx_queue_setup = ena_rx_queue_setup,
	.tx_queue_setup = ena_tx_queue_setup,
	.dev_start = ena_start,
	.dev_stop = ena_stop,
	.link_update = ena_link_update,
	.stats_get = ena_stats_get,
	.xstats_get_names = ena_xstats_get_names,
	.xstats_get = ena_xstats_get,
	.xstats_get_by_id = ena_xstats_get_by_id,
	.mtu_set = ena_mtu_set,
	.rx_queue_release = ena_rx_queue_release,
	.tx_queue_release = ena_tx_queue_release,
	.dev_close = ena_close,
	.dev_reset = ena_dev_reset,
	.reta_update = ena_rss_reta_update,
	.reta_query = ena_rss_reta_query,
	.rx_queue_intr_enable = ena_rx_queue_intr_enable,
	.rx_queue_intr_disable = ena_rx_queue_intr_disable,
	.rss_hash_update = ena_rss_hash_update,
	.rss_hash_conf_get = ena_rss_hash_conf_get,
};

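/*
 * Translate the Rx completion context provided by ena_com into mbuf
 * offload flags (IP/L4 checksum status, RSS hash) and packet type bits.
 */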
static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx,
				       bool fill_hash)
{
	uint64_t ol_flags = 0;
	uint32_t packet_type = 0;

	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		packet_type |= RTE_PTYPE_L4_TCP;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		packet_type |= RTE_PTYPE_L4_UDP;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
		packet_type |= RTE_PTYPE_L3_IPV4;
		if (unlikely(ena_rx_ctx->l3_csum_err))
			ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else
			ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
		packet_type |= RTE_PTYPE_L3_IPV6;
	}

	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag)
		ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
	else
		if (unlikely(ena_rx_ctx->l4_csum_err))
			ol_flags |= PKT_RX_L4_CKSUM_BAD;
		else
			ol_flags |= PKT_RX_L4_CKSUM_GOOD;

	if (fill_hash &&
	    likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
		ol_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = ena_rx_ctx->hash;
	}

	mbuf->ol_flags = ol_flags;
	mbuf->packet_type = packet_type;
}

static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_tx_ctx *ena_tx_ctx,
				       uint64_t queue_offloads,
				       bool disable_meta_caching)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
	    (queue_offloads & QUEUE_OFFLOADS)) {
		/* check if TSO is required */
		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
			ena_tx_ctx->tso_enable = true;

			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
		}

		/* check if L3 checksum is needed */
		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
			ena_tx_ctx->l3_csum_enable = true;

		if (mbuf->ol_flags & PKT_TX_IPV6) {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
		} else {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

			/* set don't fragment (DF) flag */
			if (mbuf->packet_type &
				(RTE_PTYPE_L4_NONFRAG
					| RTE_PTYPE_INNER_L4_NONFRAG))
				ena_tx_ctx->df = true;
		}

		/* check if L4 checksum is needed */
		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
			ena_tx_ctx->l4_csum_enable = true;
		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
				PKT_TX_UDP_CKSUM) &&
				(queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
			ena_tx_ctx->l4_csum_enable = true;
		} else {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
			ena_tx_ctx->l4_csum_enable = false;
		}

		ena_meta->mss = mbuf->tso_segsz;
		ena_meta->l3_hdr_len = mbuf->l3_len;
		ena_meta->l3_hdr_offset = mbuf->l2_len;

		ena_tx_ctx->meta_valid = true;
	} else if (disable_meta_caching) {
		memset(ena_meta, 0, sizeof(*ena_meta));
		ena_tx_ctx->meta_valid = true;
	} else {
		ena_tx_ctx->meta_valid = false;
	}
}

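/*
 * Sanity check the req_id returned by the device for a Tx completion.
 * On an invalid id the bad_req_id statistic is bumped and a device reset is
 * requested, as the ring state can no longer be trusted.
 */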
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->mbuf))
			return 0;
	}

	if (tx_info)
		PMD_TX_LOG(ERR, "tx_info doesn't have valid mbuf\n");
	else
		PMD_TX_LOG(ERR, "Invalid req_id: %hu\n", req_id);

	/* Trigger device reset */
	++tx_ring->tx_stats.bad_req_id;
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	tx_ring->adapter->trigger_reset = true;
	return -EFAULT;
}

static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_DPDK;
	host_info->kernel_ver = RTE_VERSION;
	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
		sizeof(host_info->kernel_ver_str));
	host_info->os_dist = RTE_VERSION;
	strlcpy((char *)host_info->os_dist_str, rte_version(),
		sizeof(host_info->os_dist_str));
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR <<
			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
	host_info->num_cpus = rte_lcore_count();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

/* This function calculates the number of xstats based on the current config */
static unsigned int ena_xstats_calc_num(struct rte_eth_dev_data *data)
{
	return ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENI +
		(data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
		(data->nb_rx_queues * ENA_STATS_ARRAY_RX);
}

static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_xstats_calc_num(adapter->edev_data);

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(&adapter->ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(&adapter->ena_dev);
}

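/*
 * Stop the port (if still running), release all Rx/Tx queue resources and
 * detach the interrupt handler. The MAC address belongs to the adapter
 * structure, so it must not be freed by the ethdev layer.
 */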
static int ena_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ena_adapter *adapter = dev->data->dev_private;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
		ret = ena_stop(dev);
	adapter->state = ENA_ADAPTER_STATE_CLOSED;

	ena_rx_queue_release_all(dev);
	ena_tx_queue_release_all(dev);

	rte_free(adapter->drv_stats);
	adapter->drv_stats = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ena_interrupt_handler_rte,
				     dev);

	/*
	 * MAC is not allocated dynamically. Setting NULL should prevent from
	 * release of the resource in the rte_eth_dev_release_port().
	 */
	dev->data->mac_addrs = NULL;

	return ret;
}

static int
ena_dev_reset(struct rte_eth_dev *dev)
{
	int rc = 0;

	/* Cannot release memory in secondary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_DRV_LOG(WARNING, "dev_reset not supported in secondary.\n");
		return -EPERM;
	}

	ena_destroy_device(dev);
	rc = eth_ena_dev_init(dev);
	if (rc)
		PMD_INIT_LOG(CRIT, "Cannot initialize device\n");

	return rc;
}

static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
	int nb_queues = dev->data->nb_rx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_rx_queue_release(dev, i);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
	int nb_queues = dev->data->nb_tx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_tx_queue_release(dev, i);
}

static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ena_ring *ring = dev->data->rx_queues[qid];

	/* Free ring resources */
	if (ring->rx_buffer_info)
		rte_free(ring->rx_buffer_info);
	ring->rx_buffer_info = NULL;

	if (ring->rx_refill_buffer)
		rte_free(ring->rx_refill_buffer);
	ring->rx_refill_buffer = NULL;

	if (ring->empty_rx_reqs)
		rte_free(ring->empty_rx_reqs);
	ring->empty_rx_reqs = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "Rx queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ena_ring *ring = dev->data->tx_queues[qid];

	/* Free ring resources */
	if (ring->push_buf_intermediate_buf)
		rte_free(ring->push_buf_intermediate_buf);

	if (ring->tx_buffer_info)
		rte_free(ring->tx_buffer_info);

	if (ring->empty_tx_reqs)
		rte_free(ring->empty_tx_reqs);

	ring->empty_tx_reqs = NULL;
	ring->tx_buffer_info = NULL;
	ring->push_buf_intermediate_buf = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "Tx queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i];
		if (rx_info->mbuf) {
			rte_mbuf_raw_free(rx_info->mbuf);
			rx_info->mbuf = NULL;
		}
	}
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

		if (tx_buf->mbuf) {
			rte_pktmbuf_free(tx_buf->mbuf);
			tx_buf->mbuf = NULL;
		}
	}
}

static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete)
{
	struct rte_eth_link *link = &dev->data->dev_link;
	struct ena_adapter *adapter = dev->data->dev_private;

	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
	link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;

	return 0;
}

static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *queues = NULL;
	int nb_queues;
	int i = 0;
	int rc = 0;

	if (ring_type == ENA_RING_TYPE_RX) {
		queues = adapter->rx_ring;
		nb_queues = dev->data->nb_rx_queues;
	} else {
		queues = adapter->tx_ring;
		nb_queues = dev->data->nb_tx_queues;
	}
	for (i = 0; i < nb_queues; i++) {
		if (queues[i].configured) {
			if (ring_type == ENA_RING_TYPE_RX) {
				ena_assert_msg(
					dev->data->rx_queues[i] == &queues[i],
					"Inconsistent state of Rx queues\n");
			} else {
				ena_assert_msg(
					dev->data->tx_queues[i] == &queues[i],
					"Inconsistent state of Tx queues\n");
			}

			rc = ena_queue_start(dev, &queues[i]);

			if (rc) {
				PMD_INIT_LOG(ERR,
					"Failed to start queue[%d] of type(%d)\n",
					i, ring_type);
				goto err;
			}
		}
	}

	return 0;

err:
	while (i--)
		if (queues[i].configured)
			ena_queue_stop(&queues[i]);

	return rc;
}

static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = adapter->max_mtu;

	if (adapter->edev_data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_JUMBO_FRAME)
		max_frame_len =
			adapter->edev_data->dev_conf.rxmode.max_rx_pkt_len;

	return max_frame_len;
}

static int ena_check_valid_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = ena_get_mtu_conf(adapter);

	if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
		PMD_INIT_LOG(ERR,
			"Unsupported MTU of %d. Max MTU: %d, min MTU: %d\n",
			max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	return 0;
}

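/*
 * Derive the maximum Rx/Tx queue depths and scatter-gather list sizes from
 * the device feature context. LLQ placement caps the Tx depth at the LLQ
 * depth, and the large LLQ headers devarg halves it further to make room
 * for 256B descriptor entries.
 */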
static int
ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
		       bool use_large_llq_hdr)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	uint32_t max_tx_queue_size;
	uint32_t max_rx_queue_size;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
			max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queue_ext->max_tx_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_tx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
			max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queues->max_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_tx_descs);
	}

	/* Round down to the nearest power of 2 */
	max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
	max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);

	if (use_large_llq_hdr) {
		if ((llq->entry_size_ctrl_supported &
		     ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
		    (ena_dev->tx_mem_queue_type ==
		     ENA_ADMIN_PLACEMENT_POLICY_DEV)) {
			max_tx_queue_size /= 2;
			PMD_INIT_LOG(INFO,
				"Forcing large headers and decreasing maximum Tx queue size to %d\n",
				max_tx_queue_size);
		} else {
			PMD_INIT_LOG(ERR,
				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
		}
	}

	if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
		PMD_INIT_LOG(ERR, "Invalid queue size\n");
		return -EFAULT;
	}

	ctx->max_tx_queue_size = max_tx_queue_size;
	ctx->max_rx_queue_size = max_rx_queue_size;

	return 0;
}

static void ena_stats_restart(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter = dev->data->dev_private;

	rte_atomic64_init(&adapter->drv_stats->ierrors);
	rte_atomic64_init(&adapter->drv_stats->oerrors);
	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
	adapter->drv_stats->rx_drops = 0;
}

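/*
 * Fill the generic rte_eth_stats structure: the basic counters are fetched
 * from the device over the admin queue (under the admin lock), while drops,
 * errors and the per-queue counters come from the driver's own statistics.
 */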
static int ena_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats)
{
	struct ena_admin_basic_stats ena_stats;
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	int max_rings_stats;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -ENOTSUP;

	memset(&ena_stats, 0, sizeof(ena_stats));

	rte_spinlock_lock(&adapter->admin_lock);
	rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
	rte_spinlock_unlock(&adapter->admin_lock);
	if (unlikely(rc)) {
		PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n");
		return rc;
	}

	/* Set of basic statistics from ENA */
	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
					  ena_stats.rx_pkts_low);
	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
					  ena_stats.tx_pkts_low);
	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
					ena_stats.rx_bytes_low);
	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
					ena_stats.tx_bytes_low);

	/* Driver related stats */
	stats->imissed = adapter->drv_stats->rx_drops;
	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);

	max_rings_stats = RTE_MIN(dev->data->nb_rx_queues,
		RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < max_rings_stats; ++i) {
		struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats;

		stats->q_ibytes[i] = rx_stats->bytes;
		stats->q_ipackets[i] = rx_stats->cnt;
		stats->q_errors[i] = rx_stats->bad_desc_num +
			rx_stats->bad_req_id;
	}

	max_rings_stats = RTE_MIN(dev->data->nb_tx_queues,
		RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < max_rings_stats; ++i) {
		struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats;

		stats->q_obytes[i] = tx_stats->bytes;
		stats->q_opackets[i] = tx_stats->cnt;
	}

	return 0;
}

static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	int rc = 0;

	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
	adapter = dev->data->dev_private;

	ena_dev = &adapter->ena_dev;
	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");

	if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
		PMD_DRV_LOG(ERR,
			"Invalid MTU setting. New MTU: %d, max MTU: %d, min MTU: %d\n",
			mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
		return -EINVAL;
	}

	rc = ena_com_set_dev_mtu(ena_dev, mtu);
	if (rc)
		PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu);
	else
		PMD_DRV_LOG(NOTICE, "MTU set to: %d\n", mtu);

	return rc;
}

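/*
 * dev_start callback: validate the MTU configuration, set up Rx interrupts,
 * start all Rx and Tx queues, configure RSS if requested and arm the
 * watchdog timer. Only supported in the primary process.
 */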
static int ena_start(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	uint64_t ticks;
	int rc = 0;

	/* Cannot allocate memory in secondary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_DRV_LOG(WARNING, "dev_start not supported in secondary.\n");
		return -EPERM;
	}

	rc = ena_check_valid_conf(adapter);
	if (rc)
		return rc;

	rc = ena_setup_rx_intr(dev);
	if (rc)
		return rc;

	rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX);
	if (rc)
		return rc;

	rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX);
	if (rc)
		goto err_start_tx;

	if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		rc = ena_rss_configure(adapter);
		if (rc)
			goto err_rss_init;
	}

	ena_stats_restart(dev);

	adapter->timestamp_wd = rte_get_timer_cycles();
	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;

	ticks = rte_get_timer_hz();
	rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
			ena_timer_wd_callback, dev);

	++adapter->dev_stats.dev_start;
	adapter->state = ENA_ADAPTER_STATE_RUNNING;

	return 0;

err_rss_init:
	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
err_start_tx:
	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
	return rc;
}

static int ena_stop(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int rc;

	/* Cannot free memory in secondary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_DRV_LOG(WARNING, "dev_stop not supported in secondary.\n");
		return -EPERM;
	}

	rte_timer_stop_sync(&adapter->timer_wd);
	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);

	if (adapter->trigger_reset) {
		rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
		if (rc)
			PMD_DRV_LOG(ERR, "Device reset failed, rc: %d\n", rc);
	}

	rte_intr_disable(intr_handle);

	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	rte_intr_enable(intr_handle);

	++adapter->dev_stats.dev_stop;
	adapter->state = ENA_ADAPTER_STATE_STOPPED;
	dev->data->dev_started = 0;

	return 0;
}

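/*
 * Create the ena_com IO SQ/CQ pair backing a single Rx or Tx ring and fetch
 * its IO handlers. For Rx rings with interrupt mode enabled, the queue is
 * bound to its MSI-X vector and starts with the interrupt masked.
 */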
static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
{
	struct ena_adapter *adapter = ring->adapter;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ena_com_create_io_ctx ctx =
		/* policy set to _HOST just to satisfy icc compiler */
		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
		  0, 0, 0, 0, 0 };
	uint16_t ena_qid;
	unsigned int i;
	int rc;

	ctx.msix_vector = -1;
	if (ring->type == ENA_RING_TYPE_TX) {
		ena_qid = ENA_IO_TXQ_IDX(ring->id);
		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
		for (i = 0; i < ring->ring_size; i++)
			ring->empty_tx_reqs[i] = i;
	} else {
		ena_qid = ENA_IO_RXQ_IDX(ring->id);
		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
		if (rte_intr_dp_is_en(intr_handle))
			ctx.msix_vector = intr_handle->intr_vec[ring->id];
		for (i = 0; i < ring->ring_size; i++)
			ring->empty_rx_reqs[i] = i;
	}
	ctx.queue_size = ring->ring_size;
	ctx.qid = ena_qid;
	ctx.numa_node = ring->numa_socket_id;

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Failed to create IO queue[%d] (qid:%d), rc: %d\n",
			ring->id, ena_qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &ring->ena_com_io_sq,
				     &ring->ena_com_io_cq);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Failed to get IO queue[%d] handlers, rc: %d\n",
			ring->id, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	if (ring->type == ENA_RING_TYPE_TX)
		ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);

	/* Start with Rx interrupts being masked. */
	if (ring->type == ENA_RING_TYPE_RX && rte_intr_dp_is_en(intr_handle))
		ena_rx_queue_intr_disable(dev, ring->id);

	return 0;
}

static void ena_queue_stop(struct ena_ring *ring)
{
	struct ena_com_dev *ena_dev = &ring->adapter->ena_dev;

	if (ring->type == ENA_RING_TYPE_RX) {
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id));
		ena_rx_queue_release_bufs(ring);
	} else {
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id));
		ena_tx_queue_release_bufs(ring);
	}
}

static void ena_queue_stop_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *queues = NULL;
	uint16_t nb_queues, i;

	if (ring_type == ENA_RING_TYPE_RX) {
		queues = adapter->rx_ring;
		nb_queues = dev->data->nb_rx_queues;
	} else {
		queues = adapter->tx_ring;
		nb_queues = dev->data->nb_tx_queues;
	}

	for (i = 0; i < nb_queues; ++i)
		if (queues[i].configured)
			ena_queue_stop(&queues[i]);
}

static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring)
{
	int rc, bufs_num;

	ena_assert_msg(ring->configured == 1,
		       "Trying to start unconfigured queue\n");

	rc = ena_create_io_queue(dev, ring);
	if (rc) {
		PMD_INIT_LOG(ERR, "Failed to create IO queue\n");
		return rc;
	}

	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	if (ring->type == ENA_RING_TYPE_TX) {
		ring->tx_stats.available_desc =
			ena_com_free_q_entries(ring->ena_com_io_sq);
		return 0;
	}

	bufs_num = ring->ring_size - 1;
	rc = ena_populate_rx_queue(ring, bufs_num);
	if (rc != bufs_num) {
		ena_com_destroy_io_queue(&ring->adapter->ena_dev,
					 ENA_IO_RXQ_IDX(ring->id));
		PMD_INIT_LOG(ERR, "Failed to populate Rx ring\n");
		return ENA_COM_FAULT;
	}
	/* Flush the per-core Rx buffer pool cache, as the buffers can be used
	 * by other cores as well.
	 */
	rte_mempool_cache_flush(NULL, ring->mb_pool);

	return 0;
}

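/*
 * tx_queue_setup callback: validate the requested descriptor count (power of
 * two, within device limits) and allocate the per-queue bookkeeping arrays
 * plus the intermediate push buffer used for LLQ.
 */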
static int ena_tx_queue_setup(struct rte_eth_dev *dev,
			      uint16_t queue_idx,
			      uint16_t nb_desc,
			      unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf)
{
	struct ena_ring *txq = NULL;
	struct ena_adapter *adapter = dev->data->dev_private;
	unsigned int i;

	txq = &adapter->tx_ring[queue_idx];

	if (txq->configured) {
		PMD_DRV_LOG(CRIT,
			"API violation. Queue[%d] is already configured\n",
			queue_idx);
		return ENA_COM_FAULT;
	}

	if (!rte_is_power_of_2(nb_desc)) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of Tx queue: %d is not a power of 2.\n",
			nb_desc);
		return -EINVAL;
	}

	if (nb_desc > adapter->max_tx_ring_size) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of Tx queue (max size: %d)\n",
			adapter->max_tx_ring_size);
		return -EINVAL;
	}

	txq->port_id = dev->data->port_id;
	txq->next_to_clean = 0;
	txq->next_to_use = 0;
	txq->ring_size = nb_desc;
	txq->size_mask = nb_desc - 1;
	txq->numa_socket_id = socket_id;
	txq->pkts_without_db = false;

	txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
					  sizeof(struct ena_tx_buffer) *
					  txq->ring_size,
					  RTE_CACHE_LINE_SIZE);
	if (!txq->tx_buffer_info) {
		PMD_DRV_LOG(ERR,
			"Failed to allocate memory for Tx buffer info\n");
		return -ENOMEM;
	}

	txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
					 sizeof(u16) * txq->ring_size,
					 RTE_CACHE_LINE_SIZE);
	if (!txq->empty_tx_reqs) {
		PMD_DRV_LOG(ERR,
			"Failed to allocate memory for empty Tx requests\n");
		rte_free(txq->tx_buffer_info);
		return -ENOMEM;
	}

	txq->push_buf_intermediate_buf =
		rte_zmalloc("txq->push_buf_intermediate_buf",
			    txq->tx_max_header_size,
			    RTE_CACHE_LINE_SIZE);
	if (!txq->push_buf_intermediate_buf) {
		PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n");
		rte_free(txq->tx_buffer_info);
		rte_free(txq->empty_tx_reqs);
		return -ENOMEM;
	}

	for (i = 0; i < txq->ring_size; i++)
		txq->empty_tx_reqs[i] = i;

	if (tx_conf != NULL) {
		txq->offloads =
			tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
	}
	/* Store pointer to this queue in upper layer */
	txq->configured = 1;
	dev->data->tx_queues[queue_idx] = txq;

	return 0;
}

static int ena_rx_queue_setup(struct rte_eth_dev *dev,
			      uint16_t queue_idx,
			      uint16_t nb_desc,
			      unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *rxq = NULL;
	size_t buffer_size;
	int i;

	rxq = &adapter->rx_ring[queue_idx];
	if (rxq->configured) {
		PMD_DRV_LOG(CRIT,
			"API violation. Queue[%d] is already configured\n",
			queue_idx);
		return ENA_COM_FAULT;
	}

	if (!rte_is_power_of_2(nb_desc)) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of Rx queue: %d is not a power of 2.\n",
			nb_desc);
		return -EINVAL;
	}

	if (nb_desc > adapter->max_rx_ring_size) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of Rx queue (max size: %d)\n",
			adapter->max_rx_ring_size);
		return -EINVAL;
	}

	/* ENA doesn't support buffers smaller than 1400 bytes */
	buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
	if (buffer_size < ENA_RX_BUF_MIN_SIZE) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of Rx buffer: %zu (min size: %d)\n",
			buffer_size, ENA_RX_BUF_MIN_SIZE);
		return -EINVAL;
	}

	rxq->port_id = dev->data->port_id;
	rxq->next_to_clean = 0;
	rxq->next_to_use = 0;
	rxq->ring_size = nb_desc;
	rxq->size_mask = nb_desc - 1;
	rxq->numa_socket_id = socket_id;
	rxq->mb_pool = mp;

	rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
		sizeof(struct ena_rx_buffer) * nb_desc,
		RTE_CACHE_LINE_SIZE);
	if (!rxq->rx_buffer_info) {
		PMD_DRV_LOG(ERR,
			"Failed to allocate memory for Rx buffer info\n");
		return -ENOMEM;
	}

	rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
					    sizeof(struct rte_mbuf *) * nb_desc,
					    RTE_CACHE_LINE_SIZE);

	if (!rxq->rx_refill_buffer) {
		PMD_DRV_LOG(ERR,
			"Failed to allocate memory for Rx refill buffer\n");
		rte_free(rxq->rx_buffer_info);
		rxq->rx_buffer_info = NULL;
		return -ENOMEM;
	}

	rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
					 sizeof(uint16_t) * nb_desc,
					 RTE_CACHE_LINE_SIZE);
	if (!rxq->empty_rx_reqs) {
		PMD_DRV_LOG(ERR,
			"Failed to allocate memory for empty Rx requests\n");
		rte_free(rxq->rx_buffer_info);
		rxq->rx_buffer_info = NULL;
		rte_free(rxq->rx_refill_buffer);
		rxq->rx_refill_buffer = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nb_desc; i++)
		rxq->empty_rx_reqs[i] = i;

	rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;

	/* Store pointer to this queue in upper layer */
	rxq->configured = 1;
	dev->data->rx_queues[queue_idx] = rxq;

	return 0;
}

static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
				  struct rte_mbuf *mbuf, uint16_t id)
{
	struct ena_com_buf ebuf;
	int rc;

	/* prepare physical address for DMA transaction */
	ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
	ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;

	/* pass resource to device */
	rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id);
	if (unlikely(rc != 0))
		PMD_RX_LOG(WARNING, "Failed adding Rx desc\n");

	return rc;
}

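/*
 * Refill the Rx ring with up to 'count' freshly allocated mbufs and ring the
 * SQ doorbell so the device can start filling them with data. Returns the
 * number of buffers actually submitted.
 */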
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
{
	unsigned int i;
	int rc;
	uint16_t next_to_use = rxq->next_to_use;
	uint16_t req_id;
#ifdef RTE_ETHDEV_DEBUG_RX
	uint16_t in_use;
#endif
	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;

	if (unlikely(!count))
		return 0;

#ifdef RTE_ETHDEV_DEBUG_RX
	in_use = rxq->ring_size - 1 -
		ena_com_free_q_entries(rxq->ena_com_io_sq);
	if (unlikely((in_use + count) >= rxq->ring_size))
		PMD_RX_LOG(ERR, "Bad Rx ring state\n");
#endif

	/* get resources for incoming packets */
	rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, count);
	if (unlikely(rc < 0)) {
		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
		++rxq->rx_stats.mbuf_alloc_fail;
		PMD_RX_LOG(DEBUG, "There are not enough free buffers\n");
		return 0;
	}

	for (i = 0; i < count; i++) {
		struct rte_mbuf *mbuf = mbufs[i];
		struct ena_rx_buffer *rx_info;

		if (likely((i + 4) < count))
			rte_prefetch0(mbufs[i + 4]);

		req_id = rxq->empty_rx_reqs[next_to_use];
		rx_info = &rxq->rx_buffer_info[req_id];

		rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id);
		if (unlikely(rc != 0))
			break;

		rx_info->mbuf = mbuf;
		next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask);
	}

	if (unlikely(i < count)) {
		PMD_RX_LOG(WARNING,
			"Refilled Rx queue[%d] with only %d/%d buffers\n",
			rxq->id, i, count);
		rte_pktmbuf_free_bulk(&mbufs[i], count - i);
		++rxq->rx_stats.refill_partial;
	}

	/* When we submitted free resources to device... */
	if (likely(i > 0)) {
		/* ...let HW know that it can fill buffers with data. */
		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);

		rxq->next_to_use = next_to_use;
	}

	return i;
}

static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct rte_pci_device *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state)
{
	uint32_t aenq_groups;
	int rc;
	bool readless_supported;

	/* Initialize mmio registers */
	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Failed to init MMIO read less\n");
		return rc;
	}

	/* The PCIe configuration space revision id indicates whether MMIO
	 * register read is disabled.
	 */
	readless_supported = !(pdev->id.class_id & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	/* reset device */
	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot reset device\n");
		goto err_mmio_read_less;
	}

	/* check FW version */
	rc = ena_com_validate_version(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Device version is too low\n");
		goto err_mmio_read_less;
	}

	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);

	/* ENA device administration layer init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Cannot initialize ENA admin queue\n");
		goto err_mmio_read_less;
	}

	/* To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information.
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	ena_config_host_info(ena_dev);

	/* Get Device Attributes and features */
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Cannot get attribute for ENA device, rc: %d\n", rc);
		goto err_admin_init;
	}

	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
		      BIT(ENA_ADMIN_NOTIFICATION) |
		      BIT(ENA_ADMIN_KEEP_ALIVE) |
		      BIT(ENA_ADMIN_FATAL_ERROR) |
		      BIT(ENA_ADMIN_WARNING);

	aenq_groups &= get_feat_ctx->aenq.supported_groups;
	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc: %d\n", rc);
		goto err_admin_init;
	}

	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));

	return 0;

err_admin_init:
	ena_com_admin_destroy(ena_dev);

err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return rc;
}

static void ena_interrupt_handler_rte(void *cb_arg)
{
	struct rte_eth_dev *dev = cb_arg;
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;

	ena_com_admin_q_comp_intr_handler(ena_dev);
	if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
		ena_com_aenq_intr_handler(ena_dev, dev);
}

static void check_for_missing_keep_alive(struct ena_adapter *adapter)
{
	if (!adapter->wd_state)
		return;

	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
	    adapter->keep_alive_timeout)) {
		PMD_DRV_LOG(ERR, "Keep alive timeout\n");
		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
		adapter->trigger_reset = true;
		++adapter->dev_stats.wd_expired;
	}
}

/* Check if admin queue is enabled */
static void check_for_admin_com_state(struct ena_adapter *adapter)
{
	if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
		PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n");
		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
		adapter->trigger_reset = true;
	}
}

static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
				  void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct ena_adapter *adapter = dev->data->dev_private;

	check_for_missing_keep_alive(adapter);
	check_for_admin_com_state(adapter);

	if (unlikely(adapter->trigger_reset)) {
		PMD_DRV_LOG(ERR, "Trigger reset is on\n");
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
					     NULL);
	}
}

static inline void
set_default_llq_configurations(struct ena_llq_configurations *llq_config,
			       struct ena_admin_feature_llq_desc *llq,
			       bool use_large_llq_hdr)
{
	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
	llq_config->llq_num_decs_before_header =
		ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;

	if (use_large_llq_hdr &&
	    (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) {
		llq_config->llq_ring_entry_size =
			ENA_ADMIN_LIST_ENTRY_SIZE_256B;
		llq_config->llq_ring_entry_size_value = 256;
	} else {
		llq_config->llq_ring_entry_size =
			ENA_ADMIN_LIST_ENTRY_SIZE_128B;
		llq_config->llq_ring_entry_size_value = 128;
	}
}

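/*
 * Choose between LLQ (low latency queue, with descriptors pushed to device
 * memory) and the regular host-memory Tx placement. Any failure to configure
 * LLQ falls back to the host mode policy instead of failing initialization.
 */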
static int
ena_set_queues_placement_policy(struct ena_adapter *adapter,
	struct ena_com_dev *ena_dev,
	struct ena_admin_feature_llq_desc *llq,
	struct ena_llq_configurations *llq_default_configurations)
{
	int rc;
	u32 llq_feature_mask;

	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
	if (!(ena_dev->supported_features & llq_feature_mask)) {
		PMD_DRV_LOG(INFO,
			"LLQ is not supported. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
	if (unlikely(rc)) {
		PMD_INIT_LOG(WARNING,
			"Failed to config dev mode. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	/* Nothing to config, exit */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return 0;

	if (!adapter->dev_mem_base) {
		PMD_DRV_LOG(ERR,
			"Unable to access LLQ BAR resource. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	ena_dev->mem_bar = adapter->dev_mem_base;

	return 0;
}

static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
	struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;

	/* Regular queues capabilities */
	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&get_feat_ctx->max_queue_ext.max_queue_ext;
		io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,
				    max_queue_ext->max_rx_cq_num);
		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&get_feat_ctx->max_queues;
		io_tx_sq_num = max_queues->max_sq_num;
		io_tx_cq_num = max_queues->max_cq_num;
		io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);
	}

	/* In case of LLQ use the llq number in the get feature cmd */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;

	max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);

	if (unlikely(max_num_io_queues == 0)) {
		PMD_DRV_LOG(ERR, "Number of IO queues cannot be 0\n");
		return -EFAULT;
	}

	return max_num_io_queues;
}

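/*
 * PCI probe-time initialization: map the BARs, bring up the ena_com admin
 * layer, negotiate the placement policy and queue limits, read the supported
 * offloads and MAC address, and register the admin interrupt handler and
 * watchdog timer.
 */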
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
{
	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_llq_configurations llq_config;
	const char *queue_type_str;
	uint32_t max_num_io_queues;
	int rc;
	static int adapters_found;
	bool disable_meta_caching;
	bool wd_state = false;

	eth_dev->dev_ops = &ena_dev_ops;
	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	memset(adapter, 0, sizeof(struct ena_adapter));
	ena_dev = &adapter->ena_dev;

	adapter->edev_data = eth_dev->data;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
		     pci_dev->addr.domain,
		     pci_dev->addr.bus,
		     pci_dev->addr.devid,
		     pci_dev->addr.function);

	intr_handle = &pci_dev->intr_handle;

	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;

	if (!adapter->regs) {
		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
			     ENA_REGS_BAR);
		return -ENXIO;
	}

	ena_dev->reg_bar = adapter->regs;
	/* This is a dummy pointer for ena_com functions. */
	ena_dev->dmadev = adapter;

	adapter->id_number = adapters_found;

	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
		 adapter->id_number);

	rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
	if (rc != 0) {
		PMD_INIT_LOG(CRIT, "Failed to parse devargs\n");
		goto err;
	}

	/* device specific initialization routine */
	rc = ena_device_init(ena_dev, pci_dev, &get_feat_ctx, &wd_state);
	if (rc) {
		PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
		goto err;
	}
	adapter->wd_state = wd_state;

	set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
		adapter->use_large_llq_hdr);
	rc = ena_set_queues_placement_policy(adapter, ena_dev,
					     &get_feat_ctx.llq, &llq_config);
	if (unlikely(rc)) {
		PMD_INIT_LOG(CRIT, "Failed to set placement policy\n");
		return rc;
	}

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		queue_type_str = "Regular";
	else
		queue_type_str = "Low latency";
	PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str);

	calc_queue_ctx.ena_dev = ena_dev;
	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;

	max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
	rc = ena_calc_io_queue_size(&calc_queue_ctx,
		adapter->use_large_llq_hdr);
	if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
		rc = -EFAULT;
		goto err_device_destroy;
	}

	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
	adapter->max_num_io_queues = max_num_io_queues;

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		disable_meta_caching =
			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
			BIT(ENA_ADMIN_DISABLE_META_CACHING));
	} else {
		disable_meta_caching = false;
	}

	/* prepare ring structures */
	ena_init_rings(adapter, disable_meta_caching);

	ena_config_debug_area(adapter);

	/* Set max MTU for this device */
	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;

	/* set device support for offloads */
	adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx &
		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0;
	adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx &
		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0;
	adapter->offloads.rx_csum_supported =
		(get_feat_ctx.offload.rx_supported &
		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0;
	adapter->offloads.rss_hash_supported =
		(get_feat_ctx.offload.rx_supported &
		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) != 0;

	/* Copy MAC address and point DPDK to it */
	eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
	rte_ether_addr_copy((struct rte_ether_addr *)
			get_feat_ctx.dev_attr.mac_addr,
			(struct rte_ether_addr *)adapter->mac_addr);

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc != 0)) {
		PMD_DRV_LOG(ERR, "Failed to initialize RSS in ENA device\n");
		goto err_delete_debug_area;
	}

	adapter->drv_stats = rte_zmalloc("adapter stats",
					 sizeof(*adapter->drv_stats),
					 RTE_CACHE_LINE_SIZE);
	if (!adapter->drv_stats) {
		PMD_DRV_LOG(ERR,
			"Failed to allocate memory for adapter statistics\n");
		rc = -ENOMEM;
		goto err_rss_destroy;
	}

	rte_spinlock_init(&adapter->admin_lock);

	rte_intr_callback_register(intr_handle,
				   ena_interrupt_handler_rte,
				   eth_dev);
	rte_intr_enable(intr_handle);
	ena_com_set_admin_polling_mode(ena_dev, false);
	ena_com_admin_aenq_enable(ena_dev);

	if (adapters_found == 0)
		rte_timer_subsystem_init();
	rte_timer_init(&adapter->timer_wd);

	adapters_found++;
	adapter->state = ENA_ADAPTER_STATE_INIT;

	return 0;

err_rss_destroy:
	ena_com_rss_destroy(ena_dev);
err_delete_debug_area:
	ena_com_delete_debug_area(ena_dev);

err_device_destroy:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);

err:
	return rc;
}

static void ena_destroy_device(struct rte_eth_dev *eth_dev)
{
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;

	if (adapter->state == ENA_ADAPTER_STATE_FREE)
		return;

	ena_com_set_admin_running_state(ena_dev, false);

	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
		ena_close(eth_dev);

	ena_com_rss_destroy(ena_dev);

	ena_com_delete_debug_area(ena_dev);
	ena_com_delete_host_info(ena_dev);

	ena_com_abort_admin_commands(ena_dev);
	ena_com_wait_for_abort_completion(ena_dev);
	ena_com_admin_destroy(ena_dev);
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	adapter->state = ENA_ADAPTER_STATE_FREE;
}

static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ena_destroy_device(eth_dev);

	return 0;
}

static int ena_dev_configure(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter = dev->data->dev_private;

	adapter->state = ENA_ADAPTER_STATE_CONFIG;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
	dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;

	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
	return 0;
}

static void ena_init_rings(struct ena_adapter *adapter,
			   bool disable_meta_caching)
{
	size_t i;
1868 1869 for (i = 0; i < adapter->max_num_io_queues; i++) { 1870 struct ena_ring *ring = &adapter->tx_ring[i]; 1871 1872 ring->configured = 0; 1873 ring->type = ENA_RING_TYPE_TX; 1874 ring->adapter = adapter; 1875 ring->id = i; 1876 ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 1877 ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 1878 ring->sgl_size = adapter->max_tx_sgl_size; 1879 ring->disable_meta_caching = disable_meta_caching; 1880 } 1881 1882 for (i = 0; i < adapter->max_num_io_queues; i++) { 1883 struct ena_ring *ring = &adapter->rx_ring[i]; 1884 1885 ring->configured = 0; 1886 ring->type = ENA_RING_TYPE_RX; 1887 ring->adapter = adapter; 1888 ring->id = i; 1889 ring->sgl_size = adapter->max_rx_sgl_size; 1890 } 1891 } 1892 1893 static int ena_infos_get(struct rte_eth_dev *dev, 1894 struct rte_eth_dev_info *dev_info) 1895 { 1896 struct ena_adapter *adapter; 1897 struct ena_com_dev *ena_dev; 1898 uint64_t rx_feat = 0, tx_feat = 0; 1899 1900 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 1901 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 1902 adapter = dev->data->dev_private; 1903 1904 ena_dev = &adapter->ena_dev; 1905 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 1906 1907 dev_info->speed_capa = 1908 ETH_LINK_SPEED_1G | 1909 ETH_LINK_SPEED_2_5G | 1910 ETH_LINK_SPEED_5G | 1911 ETH_LINK_SPEED_10G | 1912 ETH_LINK_SPEED_25G | 1913 ETH_LINK_SPEED_40G | 1914 ETH_LINK_SPEED_50G | 1915 ETH_LINK_SPEED_100G; 1916 1917 /* Set Tx & Rx features available for device */ 1918 if (adapter->offloads.tso4_supported) 1919 tx_feat |= DEV_TX_OFFLOAD_TCP_TSO; 1920 1921 if (adapter->offloads.tx_csum_supported) 1922 tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM | 1923 DEV_TX_OFFLOAD_UDP_CKSUM | 1924 DEV_TX_OFFLOAD_TCP_CKSUM; 1925 1926 if (adapter->offloads.rx_csum_supported) 1927 rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM | 1928 DEV_RX_OFFLOAD_UDP_CKSUM | 1929 DEV_RX_OFFLOAD_TCP_CKSUM; 1930 1931 rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME; 1932 tx_feat |= DEV_TX_OFFLOAD_MULTI_SEGS; 1933 1934 /* Inform framework about available features */ 1935 dev_info->rx_offload_capa = rx_feat; 1936 if (adapter->offloads.rss_hash_supported) 1937 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH; 1938 dev_info->rx_queue_offload_capa = rx_feat; 1939 dev_info->tx_offload_capa = tx_feat; 1940 dev_info->tx_queue_offload_capa = tx_feat; 1941 1942 dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF; 1943 dev_info->hash_key_size = ENA_HASH_KEY_SIZE; 1944 1945 dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 1946 dev_info->max_rx_pktlen = adapter->max_mtu; 1947 dev_info->max_mac_addrs = 1; 1948 1949 dev_info->max_rx_queues = adapter->max_num_io_queues; 1950 dev_info->max_tx_queues = adapter->max_num_io_queues; 1951 dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 1952 1953 adapter->tx_supported_offloads = tx_feat; 1954 adapter->rx_supported_offloads = rx_feat; 1955 1956 dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size; 1957 dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 1958 dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 1959 adapter->max_rx_sgl_size); 1960 dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 1961 adapter->max_rx_sgl_size); 1962 1963 dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size; 1964 dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 1965 dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 1966 adapter->max_tx_sgl_size); 1967 dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 1968 
adapter->max_tx_sgl_size); 1969 1970 dev_info->default_rxportconf.ring_size = ENA_DEFAULT_RING_SIZE; 1971 dev_info->default_txportconf.ring_size = ENA_DEFAULT_RING_SIZE; 1972 1973 return 0; 1974 } 1975 1976 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len) 1977 { 1978 mbuf->data_len = len; 1979 mbuf->data_off = RTE_PKTMBUF_HEADROOM; 1980 mbuf->refcnt = 1; 1981 mbuf->next = NULL; 1982 } 1983 1984 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, 1985 struct ena_com_rx_buf_info *ena_bufs, 1986 uint32_t descs, 1987 uint16_t *next_to_clean, 1988 uint8_t offset) 1989 { 1990 struct rte_mbuf *mbuf; 1991 struct rte_mbuf *mbuf_head; 1992 struct ena_rx_buffer *rx_info; 1993 int rc; 1994 uint16_t ntc, len, req_id, buf = 0; 1995 1996 if (unlikely(descs == 0)) 1997 return NULL; 1998 1999 ntc = *next_to_clean; 2000 2001 len = ena_bufs[buf].len; 2002 req_id = ena_bufs[buf].req_id; 2003 2004 rx_info = &rx_ring->rx_buffer_info[req_id]; 2005 2006 mbuf = rx_info->mbuf; 2007 RTE_ASSERT(mbuf != NULL); 2008 2009 ena_init_rx_mbuf(mbuf, len); 2010 2011 /* Fill the mbuf head with the data specific for 1st segment. */ 2012 mbuf_head = mbuf; 2013 mbuf_head->nb_segs = descs; 2014 mbuf_head->port = rx_ring->port_id; 2015 mbuf_head->pkt_len = len; 2016 mbuf_head->data_off += offset; 2017 2018 rx_info->mbuf = NULL; 2019 rx_ring->empty_rx_reqs[ntc] = req_id; 2020 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 2021 2022 while (--descs) { 2023 ++buf; 2024 len = ena_bufs[buf].len; 2025 req_id = ena_bufs[buf].req_id; 2026 2027 rx_info = &rx_ring->rx_buffer_info[req_id]; 2028 RTE_ASSERT(rx_info->mbuf != NULL); 2029 2030 if (unlikely(len == 0)) { 2031 /* 2032 * Some devices can pass descriptor with the length 0. 2033 * To avoid confusion, the PMD is simply putting the 2034 * descriptor back, as it was never used. We'll avoid 2035 * mbuf allocation that way. 2036 */ 2037 rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq, 2038 rx_info->mbuf, req_id); 2039 if (unlikely(rc != 0)) { 2040 /* Free the mbuf in case of an error. */ 2041 rte_mbuf_raw_free(rx_info->mbuf); 2042 } else { 2043 /* 2044 * If there was no error, just exit the loop as 2045 * 0 length descriptor is always the last one. 2046 */ 2047 break; 2048 } 2049 } else { 2050 /* Create an mbuf chain. */ 2051 mbuf->next = rx_info->mbuf; 2052 mbuf = mbuf->next; 2053 2054 ena_init_rx_mbuf(mbuf, len); 2055 mbuf_head->pkt_len += len; 2056 } 2057 2058 /* 2059 * Mark the descriptor as depleted and perform necessary 2060 * cleanup. 2061 * This code will execute in two cases: 2062 * 1. Descriptor len was greater than 0 - normal situation. 2063 * 2. Descriptor len was 0 and we failed to add the descriptor 2064 * to the device. In that situation, we should try to add 2065 * the mbuf again in the populate routine and mark the 2066 * descriptor as used up by the device. 
2067 */ 2068 rx_info->mbuf = NULL; 2069 rx_ring->empty_rx_reqs[ntc] = req_id; 2070 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 2071 } 2072 2073 *next_to_clean = ntc; 2074 2075 return mbuf_head; 2076 } 2077 2078 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 2079 uint16_t nb_pkts) 2080 { 2081 struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 2082 unsigned int free_queue_entries; 2083 unsigned int refill_threshold; 2084 uint16_t next_to_clean = rx_ring->next_to_clean; 2085 uint16_t descs_in_use; 2086 struct rte_mbuf *mbuf; 2087 uint16_t completed; 2088 struct ena_com_rx_ctx ena_rx_ctx; 2089 int i, rc = 0; 2090 bool fill_hash; 2091 2092 #ifdef RTE_ETHDEV_DEBUG_RX 2093 /* Check adapter state */ 2094 if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 2095 PMD_RX_LOG(ALERT, 2096 "Trying to receive pkts while device is NOT running\n"); 2097 return 0; 2098 } 2099 #endif 2100 2101 fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH; 2102 2103 descs_in_use = rx_ring->ring_size - 2104 ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1; 2105 nb_pkts = RTE_MIN(descs_in_use, nb_pkts); 2106 2107 for (completed = 0; completed < nb_pkts; completed++) { 2108 ena_rx_ctx.max_bufs = rx_ring->sgl_size; 2109 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 2110 ena_rx_ctx.descs = 0; 2111 ena_rx_ctx.pkt_offset = 0; 2112 /* receive packet context */ 2113 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 2114 rx_ring->ena_com_io_sq, 2115 &ena_rx_ctx); 2116 if (unlikely(rc)) { 2117 PMD_RX_LOG(ERR, 2118 "Failed to get the packet from the device, rc: %d\n", 2119 rc); 2120 if (rc == ENA_COM_NO_SPACE) { 2121 ++rx_ring->rx_stats.bad_desc_num; 2122 rx_ring->adapter->reset_reason = 2123 ENA_REGS_RESET_TOO_MANY_RX_DESCS; 2124 } else { 2125 ++rx_ring->rx_stats.bad_req_id; 2126 rx_ring->adapter->reset_reason = 2127 ENA_REGS_RESET_INV_RX_REQ_ID; 2128 } 2129 rx_ring->adapter->trigger_reset = true; 2130 return 0; 2131 } 2132 2133 mbuf = ena_rx_mbuf(rx_ring, 2134 ena_rx_ctx.ena_bufs, 2135 ena_rx_ctx.descs, 2136 &next_to_clean, 2137 ena_rx_ctx.pkt_offset); 2138 if (unlikely(mbuf == NULL)) { 2139 for (i = 0; i < ena_rx_ctx.descs; ++i) { 2140 rx_ring->empty_rx_reqs[next_to_clean] = 2141 rx_ring->ena_bufs[i].req_id; 2142 next_to_clean = ENA_IDX_NEXT_MASKED( 2143 next_to_clean, rx_ring->size_mask); 2144 } 2145 break; 2146 } 2147 2148 /* fill mbuf attributes if any */ 2149 ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx, fill_hash); 2150 2151 if (unlikely(mbuf->ol_flags & 2152 (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) { 2153 rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors); 2154 ++rx_ring->rx_stats.bad_csum; 2155 } 2156 2157 rx_pkts[completed] = mbuf; 2158 rx_ring->rx_stats.bytes += mbuf->pkt_len; 2159 } 2160 2161 rx_ring->rx_stats.cnt += completed; 2162 rx_ring->next_to_clean = next_to_clean; 2163 2164 free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq); 2165 refill_threshold = 2166 RTE_MIN(rx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER, 2167 (unsigned int)ENA_REFILL_THRESH_PACKET); 2168 2169 /* Burst refill to save doorbells, memory barriers, const interval */ 2170 if (free_queue_entries > refill_threshold) { 2171 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); 2172 ena_populate_rx_queue(rx_ring, free_queue_entries); 2173 } 2174 2175 return completed; 2176 } 2177 2178 static uint16_t 2179 eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2180 uint16_t nb_pkts) 2181 { 2182 int32_t ret; 2183 uint32_t i; 2184 struct rte_mbuf *m; 2185 struct ena_ring 
*tx_ring = (struct ena_ring *)(tx_queue);
2186 struct rte_ipv4_hdr *ip_hdr;
2187 uint64_t ol_flags;
2188 uint16_t frag_field;
2189 
2190 for (i = 0; i != nb_pkts; i++) {
2191 m = tx_pkts[i];
2192 ol_flags = m->ol_flags;
2193 
2194 if (!(ol_flags & PKT_TX_IPV4))
2195 continue;
2196 
2197 /* If the L2 header length was not specified, assume it is the
2198 * length of the Ethernet header.
2199 */
2200 if (unlikely(m->l2_len == 0))
2201 m->l2_len = sizeof(struct rte_ether_hdr);
2202 
2203 ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
2204 m->l2_len);
2205 frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
2206 
2207 if ((frag_field & RTE_IPV4_HDR_DF_FLAG) != 0) {
2208 m->packet_type |= RTE_PTYPE_L4_NONFRAG;
2209 
2210 /* If the IPv4 header has the DF flag enabled and TSO support is
2211 * disabled, the partial checksum should not be calculated.
2212 */
2213 if (!tx_ring->adapter->offloads.tso4_supported)
2214 continue;
2215 }
2216 
2217 if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
2218 (ol_flags & PKT_TX_L4_MASK) ==
2219 PKT_TX_SCTP_CKSUM) {
2220 rte_errno = ENOTSUP;
2221 return i;
2222 }
2223 
2224 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2225 ret = rte_validate_tx_offload(m);
2226 if (ret != 0) {
2227 rte_errno = -ret;
2228 return i;
2229 }
2230 #endif
2231 
2232 /* If TSO is requested and the DF flag is not set (DF=0), the
2233 * hardware must be provided with a partial checksum; otherwise,
2234 * it will take care of the necessary calculations.
2235 */
2236 
2237 ret = rte_net_intel_cksum_flags_prepare(m,
2238 ol_flags & ~PKT_TX_TCP_SEG);
2239 if (ret != 0) {
2240 rte_errno = -ret;
2241 return i;
2242 }
2243 }
2244 
2245 return i;
2246 }
2247 
2248 static void ena_update_hints(struct ena_adapter *adapter,
2249 struct ena_admin_ena_hw_hints *hints)
2250 {
2251 if (hints->admin_completion_tx_timeout)
2252 adapter->ena_dev.admin_queue.completion_timeout =
2253 hints->admin_completion_tx_timeout * 1000;
2254 
2255 if (hints->mmio_read_timeout)
2256 /* Convert to usec */
2257 adapter->ena_dev.mmio_read.reg_read_to =
2258 hints->mmio_read_timeout * 1000;
2259 
2260 if (hints->driver_watchdog_timeout) {
2261 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2262 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2263 else
2264 /* Convert msecs to ticks */
2265 adapter->keep_alive_timeout =
2266 (hints->driver_watchdog_timeout *
2267 rte_get_timer_hz()) / 1000;
2268 }
2269 }
2270 
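/*
 * Illustrative example of the watchdog hint conversion above (assumed
 * numbers): with driver_watchdog_timeout == 2000 msec and a timer frequency
 * of 1 GHz (rte_get_timer_hz() == 1000000000), keep_alive_timeout becomes
 * 2000 * 1000000000 / 1000 == 2000000000 ticks, i.e. 2 seconds.
 */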
2271 static int ena_check_space_and_linearize_mbuf(struct ena_ring *tx_ring,
2272 struct rte_mbuf *mbuf)
2273 {
2274 struct ena_com_dev *ena_dev;
2275 int num_segments, header_len, rc;
2276 
2277 ena_dev = &tx_ring->adapter->ena_dev;
2278 num_segments = mbuf->nb_segs;
2279 header_len = mbuf->data_len;
2280 
2281 if (likely(num_segments < tx_ring->sgl_size))
2282 goto checkspace;
2283 
2284 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
2285 (num_segments == tx_ring->sgl_size) &&
2286 (header_len < tx_ring->tx_max_header_size))
2287 goto checkspace;
2288 
2289 /* Check for space for 2 additional descriptors due to the possible
2290 * header split and the metadata descriptor. Linearization will
2291 * be needed, so the segment count is reduced from num_segments to 1.
2292 */
2293 if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3)) {
2294 PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n");
2295 return ENA_COM_NO_MEM;
2296 }
2297 ++tx_ring->tx_stats.linearize;
2298 rc = rte_pktmbuf_linearize(mbuf);
2299 if (unlikely(rc)) {
2300 PMD_TX_LOG(WARNING, "Mbuf linearize failed\n");
2301 rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
2302 ++tx_ring->tx_stats.linearize_failed;
2303 return rc;
2304 }
2305 
2306 return 0;
2307 
2308 checkspace:
2309 /* Check for space for 2 additional descriptors due to the possible
2310 * header split and the metadata descriptor.
2311 */
2312 if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2313 num_segments + 2)) {
2314 PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n");
2315 return ENA_COM_NO_MEM;
2316 }
2317 
2318 return 0;
2319 }
2320 
2321 static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
2322 struct ena_tx_buffer *tx_info,
2323 struct rte_mbuf *mbuf,
2324 void **push_header,
2325 uint16_t *header_len)
2326 {
2327 struct ena_com_buf *ena_buf;
2328 uint16_t delta, seg_len, push_len;
2329 
2330 delta = 0;
2331 seg_len = mbuf->data_len;
2332 
2333 tx_info->mbuf = mbuf;
2334 ena_buf = tx_info->bufs;
2335 
2336 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2337 /*
2338 * The Tx header might be (and in most cases will be) smaller than
2339 * tx_max_header_size. It is not an issue to send more data
2340 * to the device than is actually needed if the mbuf size is
2341 * greater than tx_max_header_size.
2342 */
2343 push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size);
2344 *header_len = push_len;
2345 
2346 if (likely(push_len <= seg_len)) {
2347 /* If the push header fits in a single segment, then
2348 * just point it to the first mbuf's data.
2349 */
2350 *push_header = rte_pktmbuf_mtod(mbuf, uint8_t *);
2351 } else {
2352 /* If the push header spans several segments, copy
2353 * it to the intermediate buffer.
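 * The header bytes taken from the following segments (push_len - seg_len)
 * are tracked in 'delta', so they can be skipped when the remaining
 * segments are mapped below.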
2354 */ 2355 rte_pktmbuf_read(mbuf, 0, push_len, 2356 tx_ring->push_buf_intermediate_buf); 2357 *push_header = tx_ring->push_buf_intermediate_buf; 2358 delta = push_len - seg_len; 2359 } 2360 } else { 2361 *push_header = NULL; 2362 *header_len = 0; 2363 push_len = 0; 2364 } 2365 2366 /* Process first segment taking into consideration pushed header */ 2367 if (seg_len > push_len) { 2368 ena_buf->paddr = mbuf->buf_iova + 2369 mbuf->data_off + 2370 push_len; 2371 ena_buf->len = seg_len - push_len; 2372 ena_buf++; 2373 tx_info->num_of_bufs++; 2374 } 2375 2376 while ((mbuf = mbuf->next) != NULL) { 2377 seg_len = mbuf->data_len; 2378 2379 /* Skip mbufs if whole data is pushed as a header */ 2380 if (unlikely(delta > seg_len)) { 2381 delta -= seg_len; 2382 continue; 2383 } 2384 2385 ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta; 2386 ena_buf->len = seg_len - delta; 2387 ena_buf++; 2388 tx_info->num_of_bufs++; 2389 2390 delta = 0; 2391 } 2392 } 2393 2394 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) 2395 { 2396 struct ena_tx_buffer *tx_info; 2397 struct ena_com_tx_ctx ena_tx_ctx = { { 0 } }; 2398 uint16_t next_to_use; 2399 uint16_t header_len; 2400 uint16_t req_id; 2401 void *push_header; 2402 int nb_hw_desc; 2403 int rc; 2404 2405 rc = ena_check_space_and_linearize_mbuf(tx_ring, mbuf); 2406 if (unlikely(rc)) 2407 return rc; 2408 2409 next_to_use = tx_ring->next_to_use; 2410 2411 req_id = tx_ring->empty_tx_reqs[next_to_use]; 2412 tx_info = &tx_ring->tx_buffer_info[req_id]; 2413 tx_info->num_of_bufs = 0; 2414 2415 ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len); 2416 2417 ena_tx_ctx.ena_bufs = tx_info->bufs; 2418 ena_tx_ctx.push_header = push_header; 2419 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 2420 ena_tx_ctx.req_id = req_id; 2421 ena_tx_ctx.header_len = header_len; 2422 2423 /* Set Tx offloads flags, if applicable */ 2424 ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads, 2425 tx_ring->disable_meta_caching); 2426 2427 if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, 2428 &ena_tx_ctx))) { 2429 PMD_TX_LOG(DEBUG, 2430 "LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n", 2431 tx_ring->id); 2432 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2433 tx_ring->tx_stats.doorbells++; 2434 tx_ring->pkts_without_db = false; 2435 } 2436 2437 /* prepare the packet's descriptors to dma engine */ 2438 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, 2439 &nb_hw_desc); 2440 if (unlikely(rc)) { 2441 PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc); 2442 ++tx_ring->tx_stats.prepare_ctx_err; 2443 tx_ring->adapter->reset_reason = 2444 ENA_REGS_RESET_DRIVER_INVALID_STATE; 2445 tx_ring->adapter->trigger_reset = true; 2446 return rc; 2447 } 2448 2449 tx_info->tx_descs = nb_hw_desc; 2450 2451 tx_ring->tx_stats.cnt++; 2452 tx_ring->tx_stats.bytes += mbuf->pkt_len; 2453 2454 tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, 2455 tx_ring->size_mask); 2456 2457 return 0; 2458 } 2459 2460 static void ena_tx_cleanup(struct ena_ring *tx_ring) 2461 { 2462 unsigned int cleanup_budget; 2463 unsigned int total_tx_descs = 0; 2464 uint16_t next_to_clean = tx_ring->next_to_clean; 2465 2466 cleanup_budget = RTE_MIN(tx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER, 2467 (unsigned int)ENA_REFILL_THRESH_PACKET); 2468 2469 while (likely(total_tx_descs < cleanup_budget)) { 2470 struct rte_mbuf *mbuf; 2471 struct ena_tx_buffer *tx_info; 2472 uint16_t req_id; 2473 2474 if 
(ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0) 2475 break; 2476 2477 if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0)) 2478 break; 2479 2480 /* Get Tx info & store how many descs were processed */ 2481 tx_info = &tx_ring->tx_buffer_info[req_id]; 2482 2483 mbuf = tx_info->mbuf; 2484 rte_pktmbuf_free(mbuf); 2485 2486 tx_info->mbuf = NULL; 2487 tx_ring->empty_tx_reqs[next_to_clean] = req_id; 2488 2489 total_tx_descs += tx_info->tx_descs; 2490 2491 /* Put back descriptor to the ring for reuse */ 2492 next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean, 2493 tx_ring->size_mask); 2494 } 2495 2496 if (likely(total_tx_descs > 0)) { 2497 /* acknowledge completion of sent packets */ 2498 tx_ring->next_to_clean = next_to_clean; 2499 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); 2500 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); 2501 } 2502 } 2503 2504 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2505 uint16_t nb_pkts) 2506 { 2507 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 2508 uint16_t sent_idx = 0; 2509 2510 #ifdef RTE_ETHDEV_DEBUG_TX 2511 /* Check adapter state */ 2512 if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 2513 PMD_TX_LOG(ALERT, 2514 "Trying to xmit pkts while device is NOT running\n"); 2515 return 0; 2516 } 2517 #endif 2518 2519 for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { 2520 if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx])) 2521 break; 2522 tx_ring->pkts_without_db = true; 2523 rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4, 2524 tx_ring->size_mask)]); 2525 } 2526 2527 tx_ring->tx_stats.available_desc = 2528 ena_com_free_q_entries(tx_ring->ena_com_io_sq); 2529 2530 /* If there are ready packets to be xmitted... */ 2531 if (likely(tx_ring->pkts_without_db)) { 2532 /* ...let HW do its best :-) */ 2533 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2534 tx_ring->tx_stats.doorbells++; 2535 tx_ring->pkts_without_db = false; 2536 } 2537 2538 ena_tx_cleanup(tx_ring); 2539 2540 tx_ring->tx_stats.available_desc = 2541 ena_com_free_q_entries(tx_ring->ena_com_io_sq); 2542 tx_ring->tx_stats.tx_poll++; 2543 2544 return sent_idx; 2545 } 2546 2547 int ena_copy_eni_stats(struct ena_adapter *adapter) 2548 { 2549 struct ena_admin_eni_stats admin_eni_stats; 2550 int rc; 2551 2552 rte_spinlock_lock(&adapter->admin_lock); 2553 rc = ena_com_get_eni_stats(&adapter->ena_dev, &admin_eni_stats); 2554 rte_spinlock_unlock(&adapter->admin_lock); 2555 if (rc != 0) { 2556 if (rc == ENA_COM_UNSUPPORTED) { 2557 PMD_DRV_LOG(DEBUG, 2558 "Retrieving ENI metrics is not supported\n"); 2559 } else { 2560 PMD_DRV_LOG(WARNING, 2561 "Failed to get ENI metrics, rc: %d\n", rc); 2562 } 2563 return rc; 2564 } 2565 2566 rte_memcpy(&adapter->eni_stats, &admin_eni_stats, 2567 sizeof(struct ena_stats_eni)); 2568 2569 return 0; 2570 } 2571 2572 /** 2573 * DPDK callback to retrieve names of extended device statistics 2574 * 2575 * @param dev 2576 * Pointer to Ethernet device structure. 2577 * @param[out] xstats_names 2578 * Buffer to insert names into. 2579 * @param n 2580 * Number of names. 2581 * 2582 * @return 2583 * Number of xstats names. 
2584 */ 2585 static int ena_xstats_get_names(struct rte_eth_dev *dev, 2586 struct rte_eth_xstat_name *xstats_names, 2587 unsigned int n) 2588 { 2589 unsigned int xstats_count = ena_xstats_calc_num(dev->data); 2590 unsigned int stat, i, count = 0; 2591 2592 if (n < xstats_count || !xstats_names) 2593 return xstats_count; 2594 2595 for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) 2596 strcpy(xstats_names[count].name, 2597 ena_stats_global_strings[stat].name); 2598 2599 for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) 2600 strcpy(xstats_names[count].name, 2601 ena_stats_eni_strings[stat].name); 2602 2603 for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) 2604 for (i = 0; i < dev->data->nb_rx_queues; i++, count++) 2605 snprintf(xstats_names[count].name, 2606 sizeof(xstats_names[count].name), 2607 "rx_q%d_%s", i, 2608 ena_stats_rx_strings[stat].name); 2609 2610 for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) 2611 for (i = 0; i < dev->data->nb_tx_queues; i++, count++) 2612 snprintf(xstats_names[count].name, 2613 sizeof(xstats_names[count].name), 2614 "tx_q%d_%s", i, 2615 ena_stats_tx_strings[stat].name); 2616 2617 return xstats_count; 2618 } 2619 2620 /** 2621 * DPDK callback to get extended device statistics. 2622 * 2623 * @param dev 2624 * Pointer to Ethernet device structure. 2625 * @param[out] stats 2626 * Stats table output buffer. 2627 * @param n 2628 * The size of the stats table. 2629 * 2630 * @return 2631 * Number of xstats on success, negative on failure. 2632 */ 2633 static int ena_xstats_get(struct rte_eth_dev *dev, 2634 struct rte_eth_xstat *xstats, 2635 unsigned int n) 2636 { 2637 struct ena_adapter *adapter = dev->data->dev_private; 2638 unsigned int xstats_count = ena_xstats_calc_num(dev->data); 2639 unsigned int stat, i, count = 0; 2640 int stat_offset; 2641 void *stats_begin; 2642 2643 if (n < xstats_count) 2644 return xstats_count; 2645 2646 if (!xstats) 2647 return 0; 2648 2649 for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) { 2650 stat_offset = ena_stats_global_strings[stat].stat_offset; 2651 stats_begin = &adapter->dev_stats; 2652 2653 xstats[count].id = count; 2654 xstats[count].value = *((uint64_t *) 2655 ((char *)stats_begin + stat_offset)); 2656 } 2657 2658 /* Even if the function below fails, we should copy previous (or initial 2659 * values) to keep structure of rte_eth_xstat consistent. 
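 * ena_copy_eni_stats() already logs the failure reason, so its return value
 * is intentionally ignored here and the cached values are reported instead.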
2660 */
2661 ena_copy_eni_stats(adapter);
2662 for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) {
2663 stat_offset = ena_stats_eni_strings[stat].stat_offset;
2664 stats_begin = &adapter->eni_stats;
2665 
2666 xstats[count].id = count;
2667 xstats[count].value = *((uint64_t *)
2668 ((char *)stats_begin + stat_offset));
2669 }
2670 
2671 for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
2672 for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
2673 stat_offset = ena_stats_rx_strings[stat].stat_offset;
2674 stats_begin = &adapter->rx_ring[i].rx_stats;
2675 
2676 xstats[count].id = count;
2677 xstats[count].value = *((uint64_t *)
2678 ((char *)stats_begin + stat_offset));
2679 }
2680 }
2681 
2682 for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
2683 for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
2684 stat_offset = ena_stats_tx_strings[stat].stat_offset;
2685 stats_begin = &adapter->tx_ring[i].tx_stats;
2686 
2687 xstats[count].id = count;
2688 xstats[count].value = *((uint64_t *)
2689 ((char *)stats_begin + stat_offset));
2690 }
2691 }
2692 
2693 return count;
2694 }
2695 
2696 static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
2697 const uint64_t *ids,
2698 uint64_t *values,
2699 unsigned int n)
2700 {
2701 struct ena_adapter *adapter = dev->data->dev_private;
2702 uint64_t id;
2703 uint64_t rx_entries, tx_entries;
2704 unsigned int i;
2705 int qid;
2706 int valid = 0;
2707 bool was_eni_copied = false;
2708 
2709 for (i = 0; i < n; ++i) {
2710 id = ids[i];
2711 /* Check if id belongs to global statistics */
2712 if (id < ENA_STATS_ARRAY_GLOBAL) {
2713 values[i] = *((uint64_t *)&adapter->dev_stats + id);
2714 ++valid;
2715 continue;
2716 }
2717 
2718 /* Check if id belongs to ENI statistics */
2719 id -= ENA_STATS_ARRAY_GLOBAL;
2720 if (id < ENA_STATS_ARRAY_ENI) {
2721 /* Avoid reading ENI stats multiple times in a single
2722 * function call, as it requires communication with the
2723 * admin queue.
2724 */
2725 if (!was_eni_copied) {
2726 was_eni_copied = true;
2727 ena_copy_eni_stats(adapter);
2728 }
2729 values[i] = *((uint64_t *)&adapter->eni_stats + id);
2730 ++valid;
2731 continue;
2732 }
2733 
2734 /* Check if id belongs to rx queue statistics */
2735 id -= ENA_STATS_ARRAY_ENI;
2736 rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
2737 if (id < rx_entries) {
2738 qid = id % dev->data->nb_rx_queues;
2739 id /= dev->data->nb_rx_queues;
2740 values[i] = *((uint64_t *)
2741 &adapter->rx_ring[qid].rx_stats + id);
2742 ++valid;
2743 continue;
2744 }
2745 /* Check if id belongs to tx queue statistics */
2746 id -= rx_entries;
2747 tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
2748 if (id < tx_entries) {
2749 qid = id % dev->data->nb_tx_queues;
2750 id /= dev->data->nb_tx_queues;
2751 values[i] = *((uint64_t *)
2752 &adapter->tx_ring[qid].tx_stats + id);
2753 ++valid;
2754 continue;
2755 }
2756 }
2757 
2758 return valid;
2759 }
2760 
2761 static int ena_process_bool_devarg(const char *key,
2762 const char *value,
2763 void *opaque)
2764 {
2765 struct ena_adapter *adapter = opaque;
2766 bool bool_value;
2767 
2768 /* Parse the value. */
2769 if (strcmp(value, "1") == 0) {
2770 bool_value = true;
2771 } else if (strcmp(value, "0") == 0) {
2772 bool_value = false;
2773 } else {
2774 PMD_INIT_LOG(ERR,
2775 "Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
2776 value, key);
2777 return -EINVAL;
2778 }
2779 
2780 /* Now, assign it to the proper adapter field.
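 * Only the ENA_DEVARG_LARGE_LLQ_HDR key is accepted by ena_parse_devargs(),
 * so any other key has already been rejected by rte_kvargs_parse().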
*/ 2781 if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0) 2782 adapter->use_large_llq_hdr = bool_value; 2783 2784 return 0; 2785 } 2786 2787 static int ena_parse_devargs(struct ena_adapter *adapter, 2788 struct rte_devargs *devargs) 2789 { 2790 static const char * const allowed_args[] = { 2791 ENA_DEVARG_LARGE_LLQ_HDR, 2792 NULL, 2793 }; 2794 struct rte_kvargs *kvlist; 2795 int rc; 2796 2797 if (devargs == NULL) 2798 return 0; 2799 2800 kvlist = rte_kvargs_parse(devargs->args, allowed_args); 2801 if (kvlist == NULL) { 2802 PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n", 2803 devargs->args); 2804 return -EINVAL; 2805 } 2806 2807 rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR, 2808 ena_process_bool_devarg, adapter); 2809 2810 rte_kvargs_free(kvlist); 2811 2812 return rc; 2813 } 2814 2815 static int ena_setup_rx_intr(struct rte_eth_dev *dev) 2816 { 2817 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2818 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2819 int rc; 2820 uint16_t vectors_nb, i; 2821 bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq; 2822 2823 if (!rx_intr_requested) 2824 return 0; 2825 2826 if (!rte_intr_cap_multiple(intr_handle)) { 2827 PMD_DRV_LOG(ERR, 2828 "Rx interrupt requested, but it isn't supported by the PCI driver\n"); 2829 return -ENOTSUP; 2830 } 2831 2832 /* Disable interrupt mapping before the configuration starts. */ 2833 rte_intr_disable(intr_handle); 2834 2835 /* Verify if there are enough vectors available. */ 2836 vectors_nb = dev->data->nb_rx_queues; 2837 if (vectors_nb > RTE_MAX_RXTX_INTR_VEC_ID) { 2838 PMD_DRV_LOG(ERR, 2839 "Too many Rx interrupts requested, maximum number: %d\n", 2840 RTE_MAX_RXTX_INTR_VEC_ID); 2841 rc = -ENOTSUP; 2842 goto enable_intr; 2843 } 2844 2845 intr_handle->intr_vec = rte_zmalloc("intr_vec", 2846 dev->data->nb_rx_queues * sizeof(*intr_handle->intr_vec), 0); 2847 if (intr_handle->intr_vec == NULL) { 2848 PMD_DRV_LOG(ERR, 2849 "Failed to allocate interrupt vector for %d queues\n", 2850 dev->data->nb_rx_queues); 2851 rc = -ENOMEM; 2852 goto enable_intr; 2853 } 2854 2855 rc = rte_intr_efd_enable(intr_handle, vectors_nb); 2856 if (rc != 0) 2857 goto free_intr_vec; 2858 2859 if (!rte_intr_allow_others(intr_handle)) { 2860 PMD_DRV_LOG(ERR, 2861 "Not enough interrupts available to use both ENA Admin and Rx interrupts\n"); 2862 goto disable_intr_efd; 2863 } 2864 2865 for (i = 0; i < vectors_nb; ++i) 2866 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i; 2867 2868 rte_intr_enable(intr_handle); 2869 return 0; 2870 2871 disable_intr_efd: 2872 rte_intr_efd_disable(intr_handle); 2873 free_intr_vec: 2874 rte_free(intr_handle->intr_vec); 2875 intr_handle->intr_vec = NULL; 2876 enable_intr: 2877 rte_intr_enable(intr_handle); 2878 return rc; 2879 } 2880 2881 static void ena_rx_queue_intr_set(struct rte_eth_dev *dev, 2882 uint16_t queue_id, 2883 bool unmask) 2884 { 2885 struct ena_adapter *adapter = dev->data->dev_private; 2886 struct ena_ring *rxq = &adapter->rx_ring[queue_id]; 2887 struct ena_eth_io_intr_reg intr_reg; 2888 2889 ena_com_update_intr_reg(&intr_reg, 0, 0, unmask); 2890 ena_com_unmask_intr(rxq->ena_com_io_cq, &intr_reg); 2891 } 2892 2893 static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev, 2894 uint16_t queue_id) 2895 { 2896 ena_rx_queue_intr_set(dev, queue_id, true); 2897 2898 return 0; 2899 } 2900 2901 static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev, 2902 uint16_t queue_id) 2903 { 2904 ena_rx_queue_intr_set(dev, queue_id, false); 2905 2906 return 0; 2907 } 2908 
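/*
 * Illustrative usage of the device argument handled above (the PCI address
 * and application name are placeholders): the large_llq_hdr option
 * registered below can be enabled from the EAL command line, e.g.:
 *   dpdk-testpmd -a 00:06.0,large_llq_hdr=1 -- -i
 */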
2909 /********************************************************************* 2910 * PMD configuration 2911 *********************************************************************/ 2912 static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2913 struct rte_pci_device *pci_dev) 2914 { 2915 return rte_eth_dev_pci_generic_probe(pci_dev, 2916 sizeof(struct ena_adapter), eth_ena_dev_init); 2917 } 2918 2919 static int eth_ena_pci_remove(struct rte_pci_device *pci_dev) 2920 { 2921 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit); 2922 } 2923 2924 static struct rte_pci_driver rte_ena_pmd = { 2925 .id_table = pci_id_ena_map, 2926 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 2927 RTE_PCI_DRV_WC_ACTIVATE, 2928 .probe = eth_ena_pci_probe, 2929 .remove = eth_ena_pci_remove, 2930 }; 2931 2932 RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd); 2933 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map); 2934 RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci"); 2935 RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>"); 2936 RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE); 2937 RTE_LOG_REGISTER_SUFFIX(ena_logtype_driver, driver, NOTICE); 2938 #ifdef RTE_ETHDEV_DEBUG_RX 2939 RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, DEBUG); 2940 #endif 2941 #ifdef RTE_ETHDEV_DEBUG_TX 2942 RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, DEBUG); 2943 #endif 2944 RTE_LOG_REGISTER_SUFFIX(ena_logtype_com, com, WARNING); 2945 2946 /****************************************************************************** 2947 ******************************** AENQ Handlers ******************************* 2948 *****************************************************************************/ 2949 static void ena_update_on_link_change(void *adapter_data, 2950 struct ena_admin_aenq_entry *aenq_e) 2951 { 2952 struct rte_eth_dev *eth_dev = adapter_data; 2953 struct ena_adapter *adapter = eth_dev->data->dev_private; 2954 struct ena_admin_aenq_link_change_desc *aenq_link_desc; 2955 uint32_t status; 2956 2957 aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 2958 2959 status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc); 2960 adapter->link_status = status; 2961 2962 ena_link_update(eth_dev, 0); 2963 rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); 2964 } 2965 2966 static void ena_notification(void *adapter_data, 2967 struct ena_admin_aenq_entry *aenq_e) 2968 { 2969 struct rte_eth_dev *eth_dev = adapter_data; 2970 struct ena_adapter *adapter = eth_dev->data->dev_private; 2971 struct ena_admin_ena_hw_hints *hints; 2972 2973 if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION) 2974 PMD_DRV_LOG(WARNING, "Invalid AENQ group: %x. 
Expected: %x\n",
2975 aenq_e->aenq_common_desc.group,
2976 ENA_ADMIN_NOTIFICATION);
2977 
2978 switch (aenq_e->aenq_common_desc.syndrome) {
2979 case ENA_ADMIN_UPDATE_HINTS:
2980 hints = (struct ena_admin_ena_hw_hints *)
2981 (&aenq_e->inline_data_w4);
2982 ena_update_hints(adapter, hints);
2983 break;
2984 default:
2985 PMD_DRV_LOG(ERR, "Invalid AENQ notification syndrome: %d\n",
2986 aenq_e->aenq_common_desc.syndrome);
2987 }
2988 }
2989 
2990 static void ena_keep_alive(void *adapter_data,
2991 __rte_unused struct ena_admin_aenq_entry *aenq_e)
2992 {
2993 struct rte_eth_dev *eth_dev = adapter_data;
2994 struct ena_adapter *adapter = eth_dev->data->dev_private;
2995 struct ena_admin_aenq_keep_alive_desc *desc;
2996 uint64_t rx_drops;
2997 uint64_t tx_drops;
2998 
2999 adapter->timestamp_wd = rte_get_timer_cycles();
3000 
3001 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
3002 rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
3003 tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
3004 
3005 adapter->drv_stats->rx_drops = rx_drops;
3006 adapter->dev_stats.tx_drops = tx_drops;
3007 }
3008 
3009 /**
3010 * This handler will be called for an unknown event group or for events with unimplemented handlers
3011 **/
3012 static void unimplemented_aenq_handler(__rte_unused void *data,
3013 __rte_unused struct ena_admin_aenq_entry *aenq_e)
3014 {
3015 PMD_DRV_LOG(ERR,
3016 "Unknown event was received or event with unimplemented handler\n");
3017 }
3018 
3019 static struct ena_aenq_handlers aenq_handlers = {
3020 .handlers = {
3021 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3022 [ENA_ADMIN_NOTIFICATION] = ena_notification,
3023 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
3024 },
3025 .unimplemented_handler = unimplemented_aenq_handler
3026 };
3027
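/*
 * Any AENQ group without an entry in the handlers table above is routed to
 * unimplemented_aenq_handler(). A new event type would be handled by adding
 * a callback with the same signature and listing it under its group index,
 * for example (hypothetical handler name, assuming the group is defined in
 * ena_admin_defs.h):
 *   [ENA_ADMIN_WARNING] = ena_warning_handler,
 */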