1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. 3 * All rights reserved. 4 */ 5 6 #include <rte_string_fns.h> 7 #include <rte_ether.h> 8 #include <ethdev_driver.h> 9 #include <ethdev_pci.h> 10 #include <rte_tcp.h> 11 #include <rte_atomic.h> 12 #include <rte_dev.h> 13 #include <rte_errno.h> 14 #include <rte_version.h> 15 #include <rte_net.h> 16 #include <rte_kvargs.h> 17 18 #include "ena_ethdev.h" 19 #include "ena_logs.h" 20 #include "ena_platform.h" 21 #include "ena_com.h" 22 #include "ena_eth_com.h" 23 24 #include <ena_common_defs.h> 25 #include <ena_regs_defs.h> 26 #include <ena_admin_defs.h> 27 #include <ena_eth_io_defs.h> 28 29 #define DRV_MODULE_VER_MAJOR 2 30 #define DRV_MODULE_VER_MINOR 2 31 #define DRV_MODULE_VER_SUBMINOR 1 32 33 #define ENA_IO_TXQ_IDX(q) (2 * (q)) 34 #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1) 35 /*reverse version of ENA_IO_RXQ_IDX*/ 36 #define ENA_IO_RXQ_IDX_REV(q) ((q - 1) / 2) 37 38 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l) 39 #define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift)) 40 41 #define GET_L4_HDR_LEN(mbuf) \ 42 ((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *, \ 43 mbuf->l3_len + mbuf->l2_len)->data_off) >> 4) 44 45 #define ENA_RX_RSS_TABLE_LOG_SIZE 7 46 #define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE) 47 #define ENA_HASH_KEY_SIZE 40 48 #define ETH_GSTRING_LEN 32 49 50 #define ARRAY_SIZE(x) RTE_DIM(x) 51 52 #define ENA_MIN_RING_DESC 128 53 54 enum ethtool_stringset { 55 ETH_SS_TEST = 0, 56 ETH_SS_STATS, 57 }; 58 59 struct ena_stats { 60 char name[ETH_GSTRING_LEN]; 61 int stat_offset; 62 }; 63 64 #define ENA_STAT_ENTRY(stat, stat_type) { \ 65 .name = #stat, \ 66 .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \ 67 } 68 69 #define ENA_STAT_RX_ENTRY(stat) \ 70 ENA_STAT_ENTRY(stat, rx) 71 72 #define ENA_STAT_TX_ENTRY(stat) \ 73 ENA_STAT_ENTRY(stat, tx) 74 75 #define ENA_STAT_ENI_ENTRY(stat) \ 76 ENA_STAT_ENTRY(stat, eni) 77 78 #define ENA_STAT_GLOBAL_ENTRY(stat) \ 79 ENA_STAT_ENTRY(stat, dev) 80 81 /* Device arguments */ 82 #define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr" 83 84 /* 85 * Each rte_memzone should have unique name. 86 * To satisfy it, count number of allocation and add it to name. 87 */ 88 rte_atomic32_t ena_alloc_cnt; 89 90 static const struct ena_stats ena_stats_global_strings[] = { 91 ENA_STAT_GLOBAL_ENTRY(wd_expired), 92 ENA_STAT_GLOBAL_ENTRY(dev_start), 93 ENA_STAT_GLOBAL_ENTRY(dev_stop), 94 ENA_STAT_GLOBAL_ENTRY(tx_drops), 95 }; 96 97 static const struct ena_stats ena_stats_eni_strings[] = { 98 ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded), 99 ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded), 100 ENA_STAT_ENI_ENTRY(pps_allowance_exceeded), 101 ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded), 102 ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded), 103 }; 104 105 static const struct ena_stats ena_stats_tx_strings[] = { 106 ENA_STAT_TX_ENTRY(cnt), 107 ENA_STAT_TX_ENTRY(bytes), 108 ENA_STAT_TX_ENTRY(prepare_ctx_err), 109 ENA_STAT_TX_ENTRY(linearize), 110 ENA_STAT_TX_ENTRY(linearize_failed), 111 ENA_STAT_TX_ENTRY(tx_poll), 112 ENA_STAT_TX_ENTRY(doorbells), 113 ENA_STAT_TX_ENTRY(bad_req_id), 114 ENA_STAT_TX_ENTRY(available_desc), 115 }; 116 117 static const struct ena_stats ena_stats_rx_strings[] = { 118 ENA_STAT_RX_ENTRY(cnt), 119 ENA_STAT_RX_ENTRY(bytes), 120 ENA_STAT_RX_ENTRY(refill_partial), 121 ENA_STAT_RX_ENTRY(bad_csum), 122 ENA_STAT_RX_ENTRY(mbuf_alloc_fail), 123 ENA_STAT_RX_ENTRY(bad_desc_num), 124 ENA_STAT_RX_ENTRY(bad_req_id), 125 }; 126 127 #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) 128 #define ENA_STATS_ARRAY_ENI ARRAY_SIZE(ena_stats_eni_strings) 129 #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) 130 #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) 131 132 #define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\ 133 DEV_TX_OFFLOAD_UDP_CKSUM |\ 134 DEV_TX_OFFLOAD_IPV4_CKSUM |\ 135 DEV_TX_OFFLOAD_TCP_TSO) 136 #define MBUF_OFFLOADS (PKT_TX_L4_MASK |\ 137 PKT_TX_IP_CKSUM |\ 138 PKT_TX_TCP_SEG) 139 140 /** Vendor ID used by Amazon devices */ 141 #define PCI_VENDOR_ID_AMAZON 0x1D0F 142 /** Amazon devices */ 143 #define PCI_DEVICE_ID_ENA_VF 0xEC20 144 #define PCI_DEVICE_ID_ENA_VF_RSERV0 0xEC21 145 146 #define ENA_TX_OFFLOAD_MASK (\ 147 PKT_TX_L4_MASK | \ 148 PKT_TX_IPV6 | \ 149 PKT_TX_IPV4 | \ 150 PKT_TX_IP_CKSUM | \ 151 PKT_TX_TCP_SEG) 152 153 #define ENA_TX_OFFLOAD_NOTSUP_MASK \ 154 (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK) 155 156 static const struct rte_pci_id pci_id_ena_map[] = { 157 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) }, 158 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) }, 159 { .device_id = 0 }, 160 }; 161 162 static struct ena_aenq_handlers aenq_handlers; 163 164 static int ena_device_init(struct ena_com_dev *ena_dev, 165 struct ena_com_dev_get_features_ctx *get_feat_ctx, 166 bool *wd_state); 167 static int ena_dev_configure(struct rte_eth_dev *dev); 168 static void ena_tx_map_mbuf(struct ena_ring *tx_ring, 169 struct ena_tx_buffer *tx_info, 170 struct rte_mbuf *mbuf, 171 void **push_header, 172 uint16_t *header_len); 173 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf); 174 static void ena_tx_cleanup(struct ena_ring *tx_ring); 175 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 176 uint16_t nb_pkts); 177 static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 178 uint16_t nb_pkts); 179 static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 180 uint16_t nb_desc, unsigned int socket_id, 181 const struct rte_eth_txconf *tx_conf); 182 static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 183 uint16_t nb_desc, unsigned int socket_id, 184 const struct rte_eth_rxconf *rx_conf, 185 struct rte_mempool *mp); 186 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len); 187 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, 188 struct ena_com_rx_buf_info *ena_bufs, 189 uint32_t descs, 190 uint16_t *next_to_clean, 191 uint8_t offset); 192 static uint16_t eth_ena_recv_pkts(void *rx_queue, 193 struct rte_mbuf **rx_pkts, uint16_t nb_pkts); 194 static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq, 195 struct rte_mbuf *mbuf, uint16_t id); 196 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count); 197 static void ena_init_rings(struct ena_adapter *adapter, 198 bool disable_meta_caching); 199 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 200 static int ena_start(struct rte_eth_dev *dev); 201 static int ena_stop(struct rte_eth_dev *dev); 202 static int ena_close(struct rte_eth_dev *dev); 203 static int ena_dev_reset(struct rte_eth_dev *dev); 204 static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); 205 static void ena_rx_queue_release_all(struct rte_eth_dev *dev); 206 static void ena_tx_queue_release_all(struct rte_eth_dev *dev); 207 static void ena_rx_queue_release(void *queue); 208 static void ena_tx_queue_release(void *queue); 209 static void ena_rx_queue_release_bufs(struct ena_ring *ring); 210 static void ena_tx_queue_release_bufs(struct ena_ring *ring); 211 static int ena_link_update(struct rte_eth_dev *dev, 212 int wait_to_complete); 213 static int ena_create_io_queue(struct ena_ring *ring); 214 static void ena_queue_stop(struct ena_ring *ring); 215 static void ena_queue_stop_all(struct rte_eth_dev *dev, 216 enum ena_ring_type ring_type); 217 static int ena_queue_start(struct ena_ring *ring); 218 static int ena_queue_start_all(struct rte_eth_dev *dev, 219 enum ena_ring_type ring_type); 220 static void ena_stats_restart(struct rte_eth_dev *dev); 221 static int ena_infos_get(struct rte_eth_dev *dev, 222 struct rte_eth_dev_info *dev_info); 223 static int ena_rss_reta_update(struct rte_eth_dev *dev, 224 struct rte_eth_rss_reta_entry64 *reta_conf, 225 uint16_t reta_size); 226 static int ena_rss_reta_query(struct rte_eth_dev *dev, 227 struct rte_eth_rss_reta_entry64 *reta_conf, 228 uint16_t reta_size); 229 static void ena_interrupt_handler_rte(void *cb_arg); 230 static void ena_timer_wd_callback(struct rte_timer *timer, void *arg); 231 static void ena_destroy_device(struct rte_eth_dev *eth_dev); 232 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev); 233 static int ena_xstats_get_names(struct rte_eth_dev *dev, 234 struct rte_eth_xstat_name *xstats_names, 235 unsigned int n); 236 static int ena_xstats_get(struct rte_eth_dev *dev, 237 struct rte_eth_xstat *stats, 238 unsigned int n); 239 static int ena_xstats_get_by_id(struct rte_eth_dev *dev, 240 const uint64_t *ids, 241 uint64_t *values, 242 unsigned int n); 243 static int ena_process_bool_devarg(const char *key, 244 const char *value, 245 void *opaque); 246 static int ena_parse_devargs(struct ena_adapter *adapter, 247 struct rte_devargs *devargs); 248 static int ena_copy_eni_stats(struct ena_adapter *adapter); 249 250 static const struct eth_dev_ops ena_dev_ops = { 251 .dev_configure = ena_dev_configure, 252 .dev_infos_get = ena_infos_get, 253 .rx_queue_setup = ena_rx_queue_setup, 254 .tx_queue_setup = ena_tx_queue_setup, 255 .dev_start = ena_start, 256 .dev_stop = ena_stop, 257 .link_update = ena_link_update, 258 .stats_get = ena_stats_get, 259 .xstats_get_names = ena_xstats_get_names, 260 .xstats_get = ena_xstats_get, 261 .xstats_get_by_id = ena_xstats_get_by_id, 262 .mtu_set = ena_mtu_set, 263 .rx_queue_release = ena_rx_queue_release, 264 .tx_queue_release = ena_tx_queue_release, 265 .dev_close = ena_close, 266 .dev_reset = ena_dev_reset, 267 .reta_update = ena_rss_reta_update, 268 .reta_query = ena_rss_reta_query, 269 }; 270 271 void ena_rss_key_fill(void *key, size_t size) 272 { 273 static bool key_generated; 274 static uint8_t default_key[ENA_HASH_KEY_SIZE]; 275 size_t i; 276 277 RTE_ASSERT(size <= ENA_HASH_KEY_SIZE); 278 279 if (!key_generated) { 280 for (i = 0; i < ENA_HASH_KEY_SIZE; ++i) 281 default_key[i] = rte_rand() & 0xff; 282 key_generated = true; 283 } 284 285 rte_memcpy(key, default_key, size); 286 } 287 288 static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, 289 struct ena_com_rx_ctx *ena_rx_ctx) 290 { 291 uint64_t ol_flags = 0; 292 uint32_t packet_type = 0; 293 294 if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) 295 packet_type |= RTE_PTYPE_L4_TCP; 296 else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP) 297 packet_type |= RTE_PTYPE_L4_UDP; 298 299 if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) { 300 packet_type |= RTE_PTYPE_L3_IPV4; 301 if (unlikely(ena_rx_ctx->l3_csum_err)) 302 ol_flags |= PKT_RX_IP_CKSUM_BAD; 303 else 304 ol_flags |= PKT_RX_IP_CKSUM_GOOD; 305 } else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) { 306 packet_type |= RTE_PTYPE_L3_IPV6; 307 } 308 309 if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) 310 ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; 311 else 312 if (unlikely(ena_rx_ctx->l4_csum_err)) 313 ol_flags |= PKT_RX_L4_CKSUM_BAD; 314 else 315 ol_flags |= PKT_RX_L4_CKSUM_GOOD; 316 317 mbuf->ol_flags = ol_flags; 318 mbuf->packet_type = packet_type; 319 } 320 321 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, 322 struct ena_com_tx_ctx *ena_tx_ctx, 323 uint64_t queue_offloads, 324 bool disable_meta_caching) 325 { 326 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; 327 328 if ((mbuf->ol_flags & MBUF_OFFLOADS) && 329 (queue_offloads & QUEUE_OFFLOADS)) { 330 /* check if TSO is required */ 331 if ((mbuf->ol_flags & PKT_TX_TCP_SEG) && 332 (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) { 333 ena_tx_ctx->tso_enable = true; 334 335 ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf); 336 } 337 338 /* check if L3 checksum is needed */ 339 if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) && 340 (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) 341 ena_tx_ctx->l3_csum_enable = true; 342 343 if (mbuf->ol_flags & PKT_TX_IPV6) { 344 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 345 } else { 346 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 347 348 /* set don't fragment (DF) flag */ 349 if (mbuf->packet_type & 350 (RTE_PTYPE_L4_NONFRAG 351 | RTE_PTYPE_INNER_L4_NONFRAG)) 352 ena_tx_ctx->df = true; 353 } 354 355 /* check if L4 checksum is needed */ 356 if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) && 357 (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) { 358 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 359 ena_tx_ctx->l4_csum_enable = true; 360 } else if (((mbuf->ol_flags & PKT_TX_L4_MASK) == 361 PKT_TX_UDP_CKSUM) && 362 (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) { 363 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 364 ena_tx_ctx->l4_csum_enable = true; 365 } else { 366 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; 367 ena_tx_ctx->l4_csum_enable = false; 368 } 369 370 ena_meta->mss = mbuf->tso_segsz; 371 ena_meta->l3_hdr_len = mbuf->l3_len; 372 ena_meta->l3_hdr_offset = mbuf->l2_len; 373 374 ena_tx_ctx->meta_valid = true; 375 } else if (disable_meta_caching) { 376 memset(ena_meta, 0, sizeof(*ena_meta)); 377 ena_tx_ctx->meta_valid = true; 378 } else { 379 ena_tx_ctx->meta_valid = false; 380 } 381 } 382 383 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) 384 { 385 struct ena_tx_buffer *tx_info = NULL; 386 387 if (likely(req_id < tx_ring->ring_size)) { 388 tx_info = &tx_ring->tx_buffer_info[req_id]; 389 if (likely(tx_info->mbuf)) 390 return 0; 391 } 392 393 if (tx_info) 394 PMD_DRV_LOG(ERR, "tx_info doesn't have valid mbuf\n"); 395 else 396 PMD_DRV_LOG(ERR, "Invalid req_id: %hu\n", req_id); 397 398 /* Trigger device reset */ 399 ++tx_ring->tx_stats.bad_req_id; 400 tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; 401 tx_ring->adapter->trigger_reset = true; 402 return -EFAULT; 403 } 404 405 static void ena_config_host_info(struct ena_com_dev *ena_dev) 406 { 407 struct ena_admin_host_info *host_info; 408 int rc; 409 410 /* Allocate only the host info */ 411 rc = ena_com_allocate_host_info(ena_dev); 412 if (rc) { 413 PMD_DRV_LOG(ERR, "Cannot allocate host info\n"); 414 return; 415 } 416 417 host_info = ena_dev->host_attr.host_info; 418 419 host_info->os_type = ENA_ADMIN_OS_DPDK; 420 host_info->kernel_ver = RTE_VERSION; 421 strlcpy((char *)host_info->kernel_ver_str, rte_version(), 422 sizeof(host_info->kernel_ver_str)); 423 host_info->os_dist = RTE_VERSION; 424 strlcpy((char *)host_info->os_dist_str, rte_version(), 425 sizeof(host_info->os_dist_str)); 426 host_info->driver_version = 427 (DRV_MODULE_VER_MAJOR) | 428 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 429 (DRV_MODULE_VER_SUBMINOR << 430 ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 431 host_info->num_cpus = rte_lcore_count(); 432 433 host_info->driver_supported_features = 434 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK; 435 436 rc = ena_com_set_host_attributes(ena_dev); 437 if (rc) { 438 if (rc == -ENA_COM_UNSUPPORTED) 439 PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); 440 else 441 PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); 442 443 goto err; 444 } 445 446 return; 447 448 err: 449 ena_com_delete_host_info(ena_dev); 450 } 451 452 /* This function calculates the number of xstats based on the current config */ 453 static unsigned int ena_xstats_calc_num(struct rte_eth_dev *dev) 454 { 455 return ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENI + 456 (dev->data->nb_tx_queues * ENA_STATS_ARRAY_TX) + 457 (dev->data->nb_rx_queues * ENA_STATS_ARRAY_RX); 458 } 459 460 static void ena_config_debug_area(struct ena_adapter *adapter) 461 { 462 u32 debug_area_size; 463 int rc, ss_count; 464 465 ss_count = ena_xstats_calc_num(adapter->rte_dev); 466 467 /* allocate 32 bytes for each string and 64bit for the value */ 468 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 469 470 rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); 471 if (rc) { 472 PMD_DRV_LOG(ERR, "Cannot allocate debug area\n"); 473 return; 474 } 475 476 rc = ena_com_set_host_attributes(&adapter->ena_dev); 477 if (rc) { 478 if (rc == -ENA_COM_UNSUPPORTED) 479 PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); 480 else 481 PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); 482 483 goto err; 484 } 485 486 return; 487 err: 488 ena_com_delete_debug_area(&adapter->ena_dev); 489 } 490 491 static int ena_close(struct rte_eth_dev *dev) 492 { 493 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 494 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 495 struct ena_adapter *adapter = dev->data->dev_private; 496 int ret = 0; 497 498 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 499 return 0; 500 501 if (adapter->state == ENA_ADAPTER_STATE_RUNNING) 502 ret = ena_stop(dev); 503 adapter->state = ENA_ADAPTER_STATE_CLOSED; 504 505 ena_rx_queue_release_all(dev); 506 ena_tx_queue_release_all(dev); 507 508 rte_free(adapter->drv_stats); 509 adapter->drv_stats = NULL; 510 511 rte_intr_disable(intr_handle); 512 rte_intr_callback_unregister(intr_handle, 513 ena_interrupt_handler_rte, 514 adapter); 515 516 /* 517 * MAC is not allocated dynamically. Setting NULL should prevent from 518 * release of the resource in the rte_eth_dev_release_port(). 519 */ 520 dev->data->mac_addrs = NULL; 521 522 return ret; 523 } 524 525 static int 526 ena_dev_reset(struct rte_eth_dev *dev) 527 { 528 int rc = 0; 529 530 ena_destroy_device(dev); 531 rc = eth_ena_dev_init(dev); 532 if (rc) 533 PMD_INIT_LOG(CRIT, "Cannot initialize device"); 534 535 return rc; 536 } 537 538 static int ena_rss_reta_update(struct rte_eth_dev *dev, 539 struct rte_eth_rss_reta_entry64 *reta_conf, 540 uint16_t reta_size) 541 { 542 struct ena_adapter *adapter = dev->data->dev_private; 543 struct ena_com_dev *ena_dev = &adapter->ena_dev; 544 int rc, i; 545 u16 entry_value; 546 int conf_idx; 547 int idx; 548 549 if ((reta_size == 0) || (reta_conf == NULL)) 550 return -EINVAL; 551 552 if (reta_size > ENA_RX_RSS_TABLE_SIZE) { 553 PMD_DRV_LOG(WARNING, 554 "indirection table %d is bigger than supported (%d)\n", 555 reta_size, ENA_RX_RSS_TABLE_SIZE); 556 return -EINVAL; 557 } 558 559 for (i = 0 ; i < reta_size ; i++) { 560 /* each reta_conf is for 64 entries. 561 * to support 128 we use 2 conf of 64 562 */ 563 conf_idx = i / RTE_RETA_GROUP_SIZE; 564 idx = i % RTE_RETA_GROUP_SIZE; 565 if (TEST_BIT(reta_conf[conf_idx].mask, idx)) { 566 entry_value = 567 ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]); 568 569 rc = ena_com_indirect_table_fill_entry(ena_dev, 570 i, 571 entry_value); 572 if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 573 PMD_DRV_LOG(ERR, 574 "Cannot fill indirect table\n"); 575 return rc; 576 } 577 } 578 } 579 580 rte_spinlock_lock(&adapter->admin_lock); 581 rc = ena_com_indirect_table_set(ena_dev); 582 rte_spinlock_unlock(&adapter->admin_lock); 583 if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 584 PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n"); 585 return rc; 586 } 587 588 PMD_DRV_LOG(DEBUG, "%s(): RSS configured %d entries for port %d\n", 589 __func__, reta_size, adapter->rte_dev->data->port_id); 590 591 return 0; 592 } 593 594 /* Query redirection table. */ 595 static int ena_rss_reta_query(struct rte_eth_dev *dev, 596 struct rte_eth_rss_reta_entry64 *reta_conf, 597 uint16_t reta_size) 598 { 599 struct ena_adapter *adapter = dev->data->dev_private; 600 struct ena_com_dev *ena_dev = &adapter->ena_dev; 601 int rc; 602 int i; 603 u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0}; 604 int reta_conf_idx; 605 int reta_idx; 606 607 if (reta_size == 0 || reta_conf == NULL || 608 (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL))) 609 return -EINVAL; 610 611 rte_spinlock_lock(&adapter->admin_lock); 612 rc = ena_com_indirect_table_get(ena_dev, indirect_table); 613 rte_spinlock_unlock(&adapter->admin_lock); 614 if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 615 PMD_DRV_LOG(ERR, "cannot get indirect table\n"); 616 return -ENOTSUP; 617 } 618 619 for (i = 0 ; i < reta_size ; i++) { 620 reta_conf_idx = i / RTE_RETA_GROUP_SIZE; 621 reta_idx = i % RTE_RETA_GROUP_SIZE; 622 if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx)) 623 reta_conf[reta_conf_idx].reta[reta_idx] = 624 ENA_IO_RXQ_IDX_REV(indirect_table[i]); 625 } 626 627 return 0; 628 } 629 630 static int ena_rss_init_default(struct ena_adapter *adapter) 631 { 632 struct ena_com_dev *ena_dev = &adapter->ena_dev; 633 uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues; 634 int rc, i; 635 u32 val; 636 637 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 638 if (unlikely(rc)) { 639 PMD_DRV_LOG(ERR, "Cannot init indirect table\n"); 640 goto err_rss_init; 641 } 642 643 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 644 val = i % nb_rx_queues; 645 rc = ena_com_indirect_table_fill_entry(ena_dev, i, 646 ENA_IO_RXQ_IDX(val)); 647 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 648 PMD_DRV_LOG(ERR, "Cannot fill indirect table\n"); 649 goto err_fill_indir; 650 } 651 } 652 653 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 654 ENA_HASH_KEY_SIZE, 0xFFFFFFFF); 655 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 656 PMD_DRV_LOG(INFO, "Cannot fill hash function\n"); 657 goto err_fill_indir; 658 } 659 660 rc = ena_com_set_default_hash_ctrl(ena_dev); 661 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 662 PMD_DRV_LOG(INFO, "Cannot fill hash control\n"); 663 goto err_fill_indir; 664 } 665 666 rc = ena_com_indirect_table_set(ena_dev); 667 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 668 PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n"); 669 goto err_fill_indir; 670 } 671 PMD_DRV_LOG(DEBUG, "RSS configured for port %d\n", 672 adapter->rte_dev->data->port_id); 673 674 return 0; 675 676 err_fill_indir: 677 ena_com_rss_destroy(ena_dev); 678 err_rss_init: 679 680 return rc; 681 } 682 683 static void ena_rx_queue_release_all(struct rte_eth_dev *dev) 684 { 685 struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues; 686 int nb_queues = dev->data->nb_rx_queues; 687 int i; 688 689 for (i = 0; i < nb_queues; i++) 690 ena_rx_queue_release(queues[i]); 691 } 692 693 static void ena_tx_queue_release_all(struct rte_eth_dev *dev) 694 { 695 struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues; 696 int nb_queues = dev->data->nb_tx_queues; 697 int i; 698 699 for (i = 0; i < nb_queues; i++) 700 ena_tx_queue_release(queues[i]); 701 } 702 703 static void ena_rx_queue_release(void *queue) 704 { 705 struct ena_ring *ring = (struct ena_ring *)queue; 706 707 /* Free ring resources */ 708 if (ring->rx_buffer_info) 709 rte_free(ring->rx_buffer_info); 710 ring->rx_buffer_info = NULL; 711 712 if (ring->rx_refill_buffer) 713 rte_free(ring->rx_refill_buffer); 714 ring->rx_refill_buffer = NULL; 715 716 if (ring->empty_rx_reqs) 717 rte_free(ring->empty_rx_reqs); 718 ring->empty_rx_reqs = NULL; 719 720 ring->configured = 0; 721 722 PMD_DRV_LOG(NOTICE, "RX Queue %d:%d released\n", 723 ring->port_id, ring->id); 724 } 725 726 static void ena_tx_queue_release(void *queue) 727 { 728 struct ena_ring *ring = (struct ena_ring *)queue; 729 730 /* Free ring resources */ 731 if (ring->push_buf_intermediate_buf) 732 rte_free(ring->push_buf_intermediate_buf); 733 734 if (ring->tx_buffer_info) 735 rte_free(ring->tx_buffer_info); 736 737 if (ring->empty_tx_reqs) 738 rte_free(ring->empty_tx_reqs); 739 740 ring->empty_tx_reqs = NULL; 741 ring->tx_buffer_info = NULL; 742 ring->push_buf_intermediate_buf = NULL; 743 744 ring->configured = 0; 745 746 PMD_DRV_LOG(NOTICE, "TX Queue %d:%d released\n", 747 ring->port_id, ring->id); 748 } 749 750 static void ena_rx_queue_release_bufs(struct ena_ring *ring) 751 { 752 unsigned int i; 753 754 for (i = 0; i < ring->ring_size; ++i) { 755 struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i]; 756 if (rx_info->mbuf) { 757 rte_mbuf_raw_free(rx_info->mbuf); 758 rx_info->mbuf = NULL; 759 } 760 } 761 } 762 763 static void ena_tx_queue_release_bufs(struct ena_ring *ring) 764 { 765 unsigned int i; 766 767 for (i = 0; i < ring->ring_size; ++i) { 768 struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i]; 769 770 if (tx_buf->mbuf) 771 rte_pktmbuf_free(tx_buf->mbuf); 772 } 773 } 774 775 static int ena_link_update(struct rte_eth_dev *dev, 776 __rte_unused int wait_to_complete) 777 { 778 struct rte_eth_link *link = &dev->data->dev_link; 779 struct ena_adapter *adapter = dev->data->dev_private; 780 781 link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; 782 link->link_speed = ETH_SPEED_NUM_NONE; 783 link->link_duplex = ETH_LINK_FULL_DUPLEX; 784 785 return 0; 786 } 787 788 static int ena_queue_start_all(struct rte_eth_dev *dev, 789 enum ena_ring_type ring_type) 790 { 791 struct ena_adapter *adapter = dev->data->dev_private; 792 struct ena_ring *queues = NULL; 793 int nb_queues; 794 int i = 0; 795 int rc = 0; 796 797 if (ring_type == ENA_RING_TYPE_RX) { 798 queues = adapter->rx_ring; 799 nb_queues = dev->data->nb_rx_queues; 800 } else { 801 queues = adapter->tx_ring; 802 nb_queues = dev->data->nb_tx_queues; 803 } 804 for (i = 0; i < nb_queues; i++) { 805 if (queues[i].configured) { 806 if (ring_type == ENA_RING_TYPE_RX) { 807 ena_assert_msg( 808 dev->data->rx_queues[i] == &queues[i], 809 "Inconsistent state of rx queues\n"); 810 } else { 811 ena_assert_msg( 812 dev->data->tx_queues[i] == &queues[i], 813 "Inconsistent state of tx queues\n"); 814 } 815 816 rc = ena_queue_start(&queues[i]); 817 818 if (rc) { 819 PMD_INIT_LOG(ERR, 820 "failed to start queue %d type(%d)", 821 i, ring_type); 822 goto err; 823 } 824 } 825 } 826 827 return 0; 828 829 err: 830 while (i--) 831 if (queues[i].configured) 832 ena_queue_stop(&queues[i]); 833 834 return rc; 835 } 836 837 static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter) 838 { 839 uint32_t max_frame_len = adapter->max_mtu; 840 841 if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads & 842 DEV_RX_OFFLOAD_JUMBO_FRAME) 843 max_frame_len = 844 adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len; 845 846 return max_frame_len; 847 } 848 849 static int ena_check_valid_conf(struct ena_adapter *adapter) 850 { 851 uint32_t max_frame_len = ena_get_mtu_conf(adapter); 852 853 if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) { 854 PMD_INIT_LOG(ERR, "Unsupported MTU of %d. " 855 "max mtu: %d, min mtu: %d", 856 max_frame_len, adapter->max_mtu, ENA_MIN_MTU); 857 return ENA_COM_UNSUPPORTED; 858 } 859 860 return 0; 861 } 862 863 static int 864 ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx, 865 bool use_large_llq_hdr) 866 { 867 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 868 struct ena_com_dev *ena_dev = ctx->ena_dev; 869 uint32_t max_tx_queue_size; 870 uint32_t max_rx_queue_size; 871 872 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 873 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 874 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 875 max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth, 876 max_queue_ext->max_rx_sq_depth); 877 max_tx_queue_size = max_queue_ext->max_tx_cq_depth; 878 879 if (ena_dev->tx_mem_queue_type == 880 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 881 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 882 llq->max_llq_depth); 883 } else { 884 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 885 max_queue_ext->max_tx_sq_depth); 886 } 887 888 ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 889 max_queue_ext->max_per_packet_rx_descs); 890 ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 891 max_queue_ext->max_per_packet_tx_descs); 892 } else { 893 struct ena_admin_queue_feature_desc *max_queues = 894 &ctx->get_feat_ctx->max_queues; 895 max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth, 896 max_queues->max_sq_depth); 897 max_tx_queue_size = max_queues->max_cq_depth; 898 899 if (ena_dev->tx_mem_queue_type == 900 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 901 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 902 llq->max_llq_depth); 903 } else { 904 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 905 max_queues->max_sq_depth); 906 } 907 908 ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 909 max_queues->max_packet_rx_descs); 910 ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 911 max_queues->max_packet_tx_descs); 912 } 913 914 /* Round down to the nearest power of 2 */ 915 max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size); 916 max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size); 917 918 if (use_large_llq_hdr) { 919 if ((llq->entry_size_ctrl_supported & 920 ENA_ADMIN_LIST_ENTRY_SIZE_256B) && 921 (ena_dev->tx_mem_queue_type == 922 ENA_ADMIN_PLACEMENT_POLICY_DEV)) { 923 max_tx_queue_size /= 2; 924 PMD_INIT_LOG(INFO, 925 "Forcing large headers and decreasing maximum TX queue size to %d\n", 926 max_tx_queue_size); 927 } else { 928 PMD_INIT_LOG(ERR, 929 "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); 930 } 931 } 932 933 if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) { 934 PMD_INIT_LOG(ERR, "Invalid queue size"); 935 return -EFAULT; 936 } 937 938 ctx->max_tx_queue_size = max_tx_queue_size; 939 ctx->max_rx_queue_size = max_rx_queue_size; 940 941 return 0; 942 } 943 944 static void ena_stats_restart(struct rte_eth_dev *dev) 945 { 946 struct ena_adapter *adapter = dev->data->dev_private; 947 948 rte_atomic64_init(&adapter->drv_stats->ierrors); 949 rte_atomic64_init(&adapter->drv_stats->oerrors); 950 rte_atomic64_init(&adapter->drv_stats->rx_nombuf); 951 adapter->drv_stats->rx_drops = 0; 952 } 953 954 static int ena_stats_get(struct rte_eth_dev *dev, 955 struct rte_eth_stats *stats) 956 { 957 struct ena_admin_basic_stats ena_stats; 958 struct ena_adapter *adapter = dev->data->dev_private; 959 struct ena_com_dev *ena_dev = &adapter->ena_dev; 960 int rc; 961 int i; 962 int max_rings_stats; 963 964 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 965 return -ENOTSUP; 966 967 memset(&ena_stats, 0, sizeof(ena_stats)); 968 969 rte_spinlock_lock(&adapter->admin_lock); 970 rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats); 971 rte_spinlock_unlock(&adapter->admin_lock); 972 if (unlikely(rc)) { 973 PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n"); 974 return rc; 975 } 976 977 /* Set of basic statistics from ENA */ 978 stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, 979 ena_stats.rx_pkts_low); 980 stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, 981 ena_stats.tx_pkts_low); 982 stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, 983 ena_stats.rx_bytes_low); 984 stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, 985 ena_stats.tx_bytes_low); 986 987 /* Driver related stats */ 988 stats->imissed = adapter->drv_stats->rx_drops; 989 stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); 990 stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); 991 stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); 992 993 max_rings_stats = RTE_MIN(dev->data->nb_rx_queues, 994 RTE_ETHDEV_QUEUE_STAT_CNTRS); 995 for (i = 0; i < max_rings_stats; ++i) { 996 struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats; 997 998 stats->q_ibytes[i] = rx_stats->bytes; 999 stats->q_ipackets[i] = rx_stats->cnt; 1000 stats->q_errors[i] = rx_stats->bad_desc_num + 1001 rx_stats->bad_req_id; 1002 } 1003 1004 max_rings_stats = RTE_MIN(dev->data->nb_tx_queues, 1005 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1006 for (i = 0; i < max_rings_stats; ++i) { 1007 struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats; 1008 1009 stats->q_obytes[i] = tx_stats->bytes; 1010 stats->q_opackets[i] = tx_stats->cnt; 1011 } 1012 1013 return 0; 1014 } 1015 1016 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1017 { 1018 struct ena_adapter *adapter; 1019 struct ena_com_dev *ena_dev; 1020 int rc = 0; 1021 1022 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 1023 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 1024 adapter = dev->data->dev_private; 1025 1026 ena_dev = &adapter->ena_dev; 1027 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 1028 1029 if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) { 1030 PMD_DRV_LOG(ERR, 1031 "Invalid MTU setting. new_mtu: %d " 1032 "max mtu: %d min mtu: %d\n", 1033 mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU); 1034 return -EINVAL; 1035 } 1036 1037 rc = ena_com_set_dev_mtu(ena_dev, mtu); 1038 if (rc) 1039 PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu); 1040 else 1041 PMD_DRV_LOG(NOTICE, "Set MTU: %d\n", mtu); 1042 1043 return rc; 1044 } 1045 1046 static int ena_start(struct rte_eth_dev *dev) 1047 { 1048 struct ena_adapter *adapter = dev->data->dev_private; 1049 uint64_t ticks; 1050 int rc = 0; 1051 1052 rc = ena_check_valid_conf(adapter); 1053 if (rc) 1054 return rc; 1055 1056 rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX); 1057 if (rc) 1058 return rc; 1059 1060 rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX); 1061 if (rc) 1062 goto err_start_tx; 1063 1064 if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode & 1065 ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) { 1066 rc = ena_rss_init_default(adapter); 1067 if (rc) 1068 goto err_rss_init; 1069 } 1070 1071 ena_stats_restart(dev); 1072 1073 adapter->timestamp_wd = rte_get_timer_cycles(); 1074 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 1075 1076 ticks = rte_get_timer_hz(); 1077 rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), 1078 ena_timer_wd_callback, adapter); 1079 1080 ++adapter->dev_stats.dev_start; 1081 adapter->state = ENA_ADAPTER_STATE_RUNNING; 1082 1083 return 0; 1084 1085 err_rss_init: 1086 ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 1087 err_start_tx: 1088 ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 1089 return rc; 1090 } 1091 1092 static int ena_stop(struct rte_eth_dev *dev) 1093 { 1094 struct ena_adapter *adapter = dev->data->dev_private; 1095 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1096 int rc; 1097 1098 rte_timer_stop_sync(&adapter->timer_wd); 1099 ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 1100 ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 1101 1102 if (adapter->trigger_reset) { 1103 rc = ena_com_dev_reset(ena_dev, adapter->reset_reason); 1104 if (rc) 1105 PMD_DRV_LOG(ERR, "Device reset failed rc=%d\n", rc); 1106 } 1107 1108 ++adapter->dev_stats.dev_stop; 1109 adapter->state = ENA_ADAPTER_STATE_STOPPED; 1110 dev->data->dev_started = 0; 1111 1112 return 0; 1113 } 1114 1115 static int ena_create_io_queue(struct ena_ring *ring) 1116 { 1117 struct ena_adapter *adapter; 1118 struct ena_com_dev *ena_dev; 1119 struct ena_com_create_io_ctx ctx = 1120 /* policy set to _HOST just to satisfy icc compiler */ 1121 { ENA_ADMIN_PLACEMENT_POLICY_HOST, 1122 0, 0, 0, 0, 0 }; 1123 uint16_t ena_qid; 1124 unsigned int i; 1125 int rc; 1126 1127 adapter = ring->adapter; 1128 ena_dev = &adapter->ena_dev; 1129 1130 if (ring->type == ENA_RING_TYPE_TX) { 1131 ena_qid = ENA_IO_TXQ_IDX(ring->id); 1132 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1133 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1134 for (i = 0; i < ring->ring_size; i++) 1135 ring->empty_tx_reqs[i] = i; 1136 } else { 1137 ena_qid = ENA_IO_RXQ_IDX(ring->id); 1138 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 1139 for (i = 0; i < ring->ring_size; i++) 1140 ring->empty_rx_reqs[i] = i; 1141 } 1142 ctx.queue_size = ring->ring_size; 1143 ctx.qid = ena_qid; 1144 ctx.msix_vector = -1; /* interrupts not used */ 1145 ctx.numa_node = ring->numa_socket_id; 1146 1147 rc = ena_com_create_io_queue(ena_dev, &ctx); 1148 if (rc) { 1149 PMD_DRV_LOG(ERR, 1150 "failed to create io queue #%d (qid:%d) rc: %d\n", 1151 ring->id, ena_qid, rc); 1152 return rc; 1153 } 1154 1155 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1156 &ring->ena_com_io_sq, 1157 &ring->ena_com_io_cq); 1158 if (rc) { 1159 PMD_DRV_LOG(ERR, 1160 "Failed to get io queue handlers. queue num %d rc: %d\n", 1161 ring->id, rc); 1162 ena_com_destroy_io_queue(ena_dev, ena_qid); 1163 return rc; 1164 } 1165 1166 if (ring->type == ENA_RING_TYPE_TX) 1167 ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); 1168 1169 return 0; 1170 } 1171 1172 static void ena_queue_stop(struct ena_ring *ring) 1173 { 1174 struct ena_com_dev *ena_dev = &ring->adapter->ena_dev; 1175 1176 if (ring->type == ENA_RING_TYPE_RX) { 1177 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id)); 1178 ena_rx_queue_release_bufs(ring); 1179 } else { 1180 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id)); 1181 ena_tx_queue_release_bufs(ring); 1182 } 1183 } 1184 1185 static void ena_queue_stop_all(struct rte_eth_dev *dev, 1186 enum ena_ring_type ring_type) 1187 { 1188 struct ena_adapter *adapter = dev->data->dev_private; 1189 struct ena_ring *queues = NULL; 1190 uint16_t nb_queues, i; 1191 1192 if (ring_type == ENA_RING_TYPE_RX) { 1193 queues = adapter->rx_ring; 1194 nb_queues = dev->data->nb_rx_queues; 1195 } else { 1196 queues = adapter->tx_ring; 1197 nb_queues = dev->data->nb_tx_queues; 1198 } 1199 1200 for (i = 0; i < nb_queues; ++i) 1201 if (queues[i].configured) 1202 ena_queue_stop(&queues[i]); 1203 } 1204 1205 static int ena_queue_start(struct ena_ring *ring) 1206 { 1207 int rc, bufs_num; 1208 1209 ena_assert_msg(ring->configured == 1, 1210 "Trying to start unconfigured queue\n"); 1211 1212 rc = ena_create_io_queue(ring); 1213 if (rc) { 1214 PMD_INIT_LOG(ERR, "Failed to create IO queue!"); 1215 return rc; 1216 } 1217 1218 ring->next_to_clean = 0; 1219 ring->next_to_use = 0; 1220 1221 if (ring->type == ENA_RING_TYPE_TX) { 1222 ring->tx_stats.available_desc = 1223 ena_com_free_q_entries(ring->ena_com_io_sq); 1224 return 0; 1225 } 1226 1227 bufs_num = ring->ring_size - 1; 1228 rc = ena_populate_rx_queue(ring, bufs_num); 1229 if (rc != bufs_num) { 1230 ena_com_destroy_io_queue(&ring->adapter->ena_dev, 1231 ENA_IO_RXQ_IDX(ring->id)); 1232 PMD_INIT_LOG(ERR, "Failed to populate rx ring !"); 1233 return ENA_COM_FAULT; 1234 } 1235 /* Flush per-core RX buffers pools cache as they can be used on other 1236 * cores as well. 1237 */ 1238 rte_mempool_cache_flush(NULL, ring->mb_pool); 1239 1240 return 0; 1241 } 1242 1243 static int ena_tx_queue_setup(struct rte_eth_dev *dev, 1244 uint16_t queue_idx, 1245 uint16_t nb_desc, 1246 unsigned int socket_id, 1247 const struct rte_eth_txconf *tx_conf) 1248 { 1249 struct ena_ring *txq = NULL; 1250 struct ena_adapter *adapter = dev->data->dev_private; 1251 unsigned int i; 1252 1253 txq = &adapter->tx_ring[queue_idx]; 1254 1255 if (txq->configured) { 1256 PMD_DRV_LOG(CRIT, 1257 "API violation. Queue %d is already configured\n", 1258 queue_idx); 1259 return ENA_COM_FAULT; 1260 } 1261 1262 if (!rte_is_power_of_2(nb_desc)) { 1263 PMD_DRV_LOG(ERR, 1264 "Unsupported size of TX queue: %d is not a power of 2.\n", 1265 nb_desc); 1266 return -EINVAL; 1267 } 1268 1269 if (nb_desc > adapter->max_tx_ring_size) { 1270 PMD_DRV_LOG(ERR, 1271 "Unsupported size of TX queue (max size: %d)\n", 1272 adapter->max_tx_ring_size); 1273 return -EINVAL; 1274 } 1275 1276 if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE) 1277 nb_desc = adapter->max_tx_ring_size; 1278 1279 txq->port_id = dev->data->port_id; 1280 txq->next_to_clean = 0; 1281 txq->next_to_use = 0; 1282 txq->ring_size = nb_desc; 1283 txq->size_mask = nb_desc - 1; 1284 txq->numa_socket_id = socket_id; 1285 txq->pkts_without_db = false; 1286 1287 txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info", 1288 sizeof(struct ena_tx_buffer) * 1289 txq->ring_size, 1290 RTE_CACHE_LINE_SIZE); 1291 if (!txq->tx_buffer_info) { 1292 PMD_DRV_LOG(ERR, "failed to alloc mem for tx buffer info\n"); 1293 return -ENOMEM; 1294 } 1295 1296 txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs", 1297 sizeof(u16) * txq->ring_size, 1298 RTE_CACHE_LINE_SIZE); 1299 if (!txq->empty_tx_reqs) { 1300 PMD_DRV_LOG(ERR, "failed to alloc mem for tx reqs\n"); 1301 rte_free(txq->tx_buffer_info); 1302 return -ENOMEM; 1303 } 1304 1305 txq->push_buf_intermediate_buf = 1306 rte_zmalloc("txq->push_buf_intermediate_buf", 1307 txq->tx_max_header_size, 1308 RTE_CACHE_LINE_SIZE); 1309 if (!txq->push_buf_intermediate_buf) { 1310 PMD_DRV_LOG(ERR, "failed to alloc push buff for LLQ\n"); 1311 rte_free(txq->tx_buffer_info); 1312 rte_free(txq->empty_tx_reqs); 1313 return -ENOMEM; 1314 } 1315 1316 for (i = 0; i < txq->ring_size; i++) 1317 txq->empty_tx_reqs[i] = i; 1318 1319 if (tx_conf != NULL) { 1320 txq->offloads = 1321 tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 1322 } 1323 /* Store pointer to this queue in upper layer */ 1324 txq->configured = 1; 1325 dev->data->tx_queues[queue_idx] = txq; 1326 1327 return 0; 1328 } 1329 1330 static int ena_rx_queue_setup(struct rte_eth_dev *dev, 1331 uint16_t queue_idx, 1332 uint16_t nb_desc, 1333 unsigned int socket_id, 1334 __rte_unused const struct rte_eth_rxconf *rx_conf, 1335 struct rte_mempool *mp) 1336 { 1337 struct ena_adapter *adapter = dev->data->dev_private; 1338 struct ena_ring *rxq = NULL; 1339 size_t buffer_size; 1340 int i; 1341 1342 rxq = &adapter->rx_ring[queue_idx]; 1343 if (rxq->configured) { 1344 PMD_DRV_LOG(CRIT, 1345 "API violation. Queue %d is already configured\n", 1346 queue_idx); 1347 return ENA_COM_FAULT; 1348 } 1349 1350 if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE) 1351 nb_desc = adapter->max_rx_ring_size; 1352 1353 if (!rte_is_power_of_2(nb_desc)) { 1354 PMD_DRV_LOG(ERR, 1355 "Unsupported size of RX queue: %d is not a power of 2.\n", 1356 nb_desc); 1357 return -EINVAL; 1358 } 1359 1360 if (nb_desc > adapter->max_rx_ring_size) { 1361 PMD_DRV_LOG(ERR, 1362 "Unsupported size of RX queue (max size: %d)\n", 1363 adapter->max_rx_ring_size); 1364 return -EINVAL; 1365 } 1366 1367 /* ENA isn't supporting buffers smaller than 1400 bytes */ 1368 buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; 1369 if (buffer_size < ENA_RX_BUF_MIN_SIZE) { 1370 PMD_DRV_LOG(ERR, 1371 "Unsupported size of RX buffer: %zu (min size: %d)\n", 1372 buffer_size, ENA_RX_BUF_MIN_SIZE); 1373 return -EINVAL; 1374 } 1375 1376 rxq->port_id = dev->data->port_id; 1377 rxq->next_to_clean = 0; 1378 rxq->next_to_use = 0; 1379 rxq->ring_size = nb_desc; 1380 rxq->size_mask = nb_desc - 1; 1381 rxq->numa_socket_id = socket_id; 1382 rxq->mb_pool = mp; 1383 1384 rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info", 1385 sizeof(struct ena_rx_buffer) * nb_desc, 1386 RTE_CACHE_LINE_SIZE); 1387 if (!rxq->rx_buffer_info) { 1388 PMD_DRV_LOG(ERR, "failed to alloc mem for rx buffer info\n"); 1389 return -ENOMEM; 1390 } 1391 1392 rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer", 1393 sizeof(struct rte_mbuf *) * nb_desc, 1394 RTE_CACHE_LINE_SIZE); 1395 1396 if (!rxq->rx_refill_buffer) { 1397 PMD_DRV_LOG(ERR, "failed to alloc mem for rx refill buffer\n"); 1398 rte_free(rxq->rx_buffer_info); 1399 rxq->rx_buffer_info = NULL; 1400 return -ENOMEM; 1401 } 1402 1403 rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs", 1404 sizeof(uint16_t) * nb_desc, 1405 RTE_CACHE_LINE_SIZE); 1406 if (!rxq->empty_rx_reqs) { 1407 PMD_DRV_LOG(ERR, "failed to alloc mem for empty rx reqs\n"); 1408 rte_free(rxq->rx_buffer_info); 1409 rxq->rx_buffer_info = NULL; 1410 rte_free(rxq->rx_refill_buffer); 1411 rxq->rx_refill_buffer = NULL; 1412 return -ENOMEM; 1413 } 1414 1415 for (i = 0; i < nb_desc; i++) 1416 rxq->empty_rx_reqs[i] = i; 1417 1418 /* Store pointer to this queue in upper layer */ 1419 rxq->configured = 1; 1420 dev->data->rx_queues[queue_idx] = rxq; 1421 1422 return 0; 1423 } 1424 1425 static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq, 1426 struct rte_mbuf *mbuf, uint16_t id) 1427 { 1428 struct ena_com_buf ebuf; 1429 int rc; 1430 1431 /* prepare physical address for DMA transaction */ 1432 ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; 1433 ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; 1434 1435 /* pass resource to device */ 1436 rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id); 1437 if (unlikely(rc != 0)) 1438 PMD_DRV_LOG(WARNING, "failed adding rx desc\n"); 1439 1440 return rc; 1441 } 1442 1443 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) 1444 { 1445 unsigned int i; 1446 int rc; 1447 uint16_t next_to_use = rxq->next_to_use; 1448 uint16_t in_use, req_id; 1449 struct rte_mbuf **mbufs = rxq->rx_refill_buffer; 1450 1451 if (unlikely(!count)) 1452 return 0; 1453 1454 in_use = rxq->ring_size - 1 - 1455 ena_com_free_q_entries(rxq->ena_com_io_sq); 1456 ena_assert_msg(((in_use + count) < rxq->ring_size), 1457 "bad ring state\n"); 1458 1459 /* get resources for incoming packets */ 1460 rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count); 1461 if (unlikely(rc < 0)) { 1462 rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); 1463 ++rxq->rx_stats.mbuf_alloc_fail; 1464 PMD_RX_LOG(DEBUG, "there are no enough free buffers"); 1465 return 0; 1466 } 1467 1468 for (i = 0; i < count; i++) { 1469 struct rte_mbuf *mbuf = mbufs[i]; 1470 struct ena_rx_buffer *rx_info; 1471 1472 if (likely((i + 4) < count)) 1473 rte_prefetch0(mbufs[i + 4]); 1474 1475 req_id = rxq->empty_rx_reqs[next_to_use]; 1476 rx_info = &rxq->rx_buffer_info[req_id]; 1477 1478 rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id); 1479 if (unlikely(rc != 0)) 1480 break; 1481 1482 rx_info->mbuf = mbuf; 1483 next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask); 1484 } 1485 1486 if (unlikely(i < count)) { 1487 PMD_DRV_LOG(WARNING, "refilled rx qid %d with only %d " 1488 "buffers (from %d)\n", rxq->id, i, count); 1489 rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]), 1490 count - i); 1491 ++rxq->rx_stats.refill_partial; 1492 } 1493 1494 /* When we submitted free recources to device... */ 1495 if (likely(i > 0)) { 1496 /* ...let HW know that it can fill buffers with data. */ 1497 ena_com_write_sq_doorbell(rxq->ena_com_io_sq); 1498 1499 rxq->next_to_use = next_to_use; 1500 } 1501 1502 return i; 1503 } 1504 1505 static int ena_device_init(struct ena_com_dev *ena_dev, 1506 struct ena_com_dev_get_features_ctx *get_feat_ctx, 1507 bool *wd_state) 1508 { 1509 uint32_t aenq_groups; 1510 int rc; 1511 bool readless_supported; 1512 1513 /* Initialize mmio registers */ 1514 rc = ena_com_mmio_reg_read_request_init(ena_dev); 1515 if (rc) { 1516 PMD_DRV_LOG(ERR, "failed to init mmio read less\n"); 1517 return rc; 1518 } 1519 1520 /* The PCIe configuration space revision id indicate if mmio reg 1521 * read is disabled. 1522 */ 1523 readless_supported = 1524 !(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id 1525 & ENA_MMIO_DISABLE_REG_READ); 1526 ena_com_set_mmio_read_mode(ena_dev, readless_supported); 1527 1528 /* reset device */ 1529 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 1530 if (rc) { 1531 PMD_DRV_LOG(ERR, "cannot reset device\n"); 1532 goto err_mmio_read_less; 1533 } 1534 1535 /* check FW version */ 1536 rc = ena_com_validate_version(ena_dev); 1537 if (rc) { 1538 PMD_DRV_LOG(ERR, "device version is too low\n"); 1539 goto err_mmio_read_less; 1540 } 1541 1542 ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); 1543 1544 /* ENA device administration layer init */ 1545 rc = ena_com_admin_init(ena_dev, &aenq_handlers); 1546 if (rc) { 1547 PMD_DRV_LOG(ERR, 1548 "cannot initialize ena admin queue with device\n"); 1549 goto err_mmio_read_less; 1550 } 1551 1552 /* To enable the msix interrupts the driver needs to know the number 1553 * of queues. So the driver uses polling mode to retrieve this 1554 * information. 1555 */ 1556 ena_com_set_admin_polling_mode(ena_dev, true); 1557 1558 ena_config_host_info(ena_dev); 1559 1560 /* Get Device Attributes and features */ 1561 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 1562 if (rc) { 1563 PMD_DRV_LOG(ERR, 1564 "cannot get attribute for ena device rc= %d\n", rc); 1565 goto err_admin_init; 1566 } 1567 1568 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1569 BIT(ENA_ADMIN_NOTIFICATION) | 1570 BIT(ENA_ADMIN_KEEP_ALIVE) | 1571 BIT(ENA_ADMIN_FATAL_ERROR) | 1572 BIT(ENA_ADMIN_WARNING); 1573 1574 aenq_groups &= get_feat_ctx->aenq.supported_groups; 1575 rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 1576 if (rc) { 1577 PMD_DRV_LOG(ERR, "Cannot configure aenq groups rc: %d\n", rc); 1578 goto err_admin_init; 1579 } 1580 1581 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 1582 1583 return 0; 1584 1585 err_admin_init: 1586 ena_com_admin_destroy(ena_dev); 1587 1588 err_mmio_read_less: 1589 ena_com_mmio_reg_read_request_destroy(ena_dev); 1590 1591 return rc; 1592 } 1593 1594 static void ena_interrupt_handler_rte(void *cb_arg) 1595 { 1596 struct ena_adapter *adapter = cb_arg; 1597 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1598 1599 ena_com_admin_q_comp_intr_handler(ena_dev); 1600 if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) 1601 ena_com_aenq_intr_handler(ena_dev, adapter); 1602 } 1603 1604 static void check_for_missing_keep_alive(struct ena_adapter *adapter) 1605 { 1606 if (!adapter->wd_state) 1607 return; 1608 1609 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 1610 return; 1611 1612 if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 1613 adapter->keep_alive_timeout)) { 1614 PMD_DRV_LOG(ERR, "Keep alive timeout\n"); 1615 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 1616 adapter->trigger_reset = true; 1617 ++adapter->dev_stats.wd_expired; 1618 } 1619 } 1620 1621 /* Check if admin queue is enabled */ 1622 static void check_for_admin_com_state(struct ena_adapter *adapter) 1623 { 1624 if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 1625 PMD_DRV_LOG(ERR, "ENA admin queue is not in running state!\n"); 1626 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 1627 adapter->trigger_reset = true; 1628 } 1629 } 1630 1631 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1632 void *arg) 1633 { 1634 struct ena_adapter *adapter = arg; 1635 struct rte_eth_dev *dev = adapter->rte_dev; 1636 1637 check_for_missing_keep_alive(adapter); 1638 check_for_admin_com_state(adapter); 1639 1640 if (unlikely(adapter->trigger_reset)) { 1641 PMD_DRV_LOG(ERR, "Trigger reset is on\n"); 1642 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 1643 NULL); 1644 } 1645 } 1646 1647 static inline void 1648 set_default_llq_configurations(struct ena_llq_configurations *llq_config, 1649 struct ena_admin_feature_llq_desc *llq, 1650 bool use_large_llq_hdr) 1651 { 1652 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 1653 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 1654 llq_config->llq_num_decs_before_header = 1655 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 1656 1657 if (use_large_llq_hdr && 1658 (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) { 1659 llq_config->llq_ring_entry_size = 1660 ENA_ADMIN_LIST_ENTRY_SIZE_256B; 1661 llq_config->llq_ring_entry_size_value = 256; 1662 } else { 1663 llq_config->llq_ring_entry_size = 1664 ENA_ADMIN_LIST_ENTRY_SIZE_128B; 1665 llq_config->llq_ring_entry_size_value = 128; 1666 } 1667 } 1668 1669 static int 1670 ena_set_queues_placement_policy(struct ena_adapter *adapter, 1671 struct ena_com_dev *ena_dev, 1672 struct ena_admin_feature_llq_desc *llq, 1673 struct ena_llq_configurations *llq_default_configurations) 1674 { 1675 int rc; 1676 u32 llq_feature_mask; 1677 1678 llq_feature_mask = 1 << ENA_ADMIN_LLQ; 1679 if (!(ena_dev->supported_features & llq_feature_mask)) { 1680 PMD_DRV_LOG(INFO, 1681 "LLQ is not supported. Fallback to host mode policy.\n"); 1682 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 1683 return 0; 1684 } 1685 1686 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 1687 if (unlikely(rc)) { 1688 PMD_INIT_LOG(WARNING, "Failed to config dev mode. " 1689 "Fallback to host mode policy."); 1690 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 1691 return 0; 1692 } 1693 1694 /* Nothing to config, exit */ 1695 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 1696 return 0; 1697 1698 if (!adapter->dev_mem_base) { 1699 PMD_DRV_LOG(ERR, "Unable to access LLQ bar resource. " 1700 "Fallback to host mode policy.\n."); 1701 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 1702 return 0; 1703 } 1704 1705 ena_dev->mem_bar = adapter->dev_mem_base; 1706 1707 return 0; 1708 } 1709 1710 static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev, 1711 struct ena_com_dev_get_features_ctx *get_feat_ctx) 1712 { 1713 uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; 1714 1715 /* Regular queues capabilities */ 1716 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 1717 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 1718 &get_feat_ctx->max_queue_ext.max_queue_ext; 1719 io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num, 1720 max_queue_ext->max_rx_cq_num); 1721 io_tx_sq_num = max_queue_ext->max_tx_sq_num; 1722 io_tx_cq_num = max_queue_ext->max_tx_cq_num; 1723 } else { 1724 struct ena_admin_queue_feature_desc *max_queues = 1725 &get_feat_ctx->max_queues; 1726 io_tx_sq_num = max_queues->max_sq_num; 1727 io_tx_cq_num = max_queues->max_cq_num; 1728 io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num); 1729 } 1730 1731 /* In case of LLQ use the llq number in the get feature cmd */ 1732 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 1733 io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 1734 1735 max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num); 1736 max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num); 1737 max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num); 1738 1739 if (unlikely(max_num_io_queues == 0)) { 1740 PMD_DRV_LOG(ERR, "Number of IO queues should not be 0\n"); 1741 return -EFAULT; 1742 } 1743 1744 return max_num_io_queues; 1745 } 1746 1747 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) 1748 { 1749 struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 1750 struct rte_pci_device *pci_dev; 1751 struct rte_intr_handle *intr_handle; 1752 struct ena_adapter *adapter = eth_dev->data->dev_private; 1753 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1754 struct ena_com_dev_get_features_ctx get_feat_ctx; 1755 struct ena_llq_configurations llq_config; 1756 const char *queue_type_str; 1757 uint32_t max_num_io_queues; 1758 int rc; 1759 static int adapters_found; 1760 bool disable_meta_caching; 1761 bool wd_state = false; 1762 1763 eth_dev->dev_ops = &ena_dev_ops; 1764 eth_dev->rx_pkt_burst = ð_ena_recv_pkts; 1765 eth_dev->tx_pkt_burst = ð_ena_xmit_pkts; 1766 eth_dev->tx_pkt_prepare = ð_ena_prep_pkts; 1767 1768 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1769 return 0; 1770 1771 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1772 1773 memset(adapter, 0, sizeof(struct ena_adapter)); 1774 ena_dev = &adapter->ena_dev; 1775 1776 adapter->rte_eth_dev_data = eth_dev->data; 1777 adapter->rte_dev = eth_dev; 1778 1779 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1780 adapter->pdev = pci_dev; 1781 1782 PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d", 1783 pci_dev->addr.domain, 1784 pci_dev->addr.bus, 1785 pci_dev->addr.devid, 1786 pci_dev->addr.function); 1787 1788 intr_handle = &pci_dev->intr_handle; 1789 1790 adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; 1791 adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; 1792 1793 if (!adapter->regs) { 1794 PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)", 1795 ENA_REGS_BAR); 1796 return -ENXIO; 1797 } 1798 1799 ena_dev->reg_bar = adapter->regs; 1800 ena_dev->dmadev = adapter->pdev; 1801 1802 adapter->id_number = adapters_found; 1803 1804 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", 1805 adapter->id_number); 1806 1807 rc = ena_parse_devargs(adapter, pci_dev->device.devargs); 1808 if (rc != 0) { 1809 PMD_INIT_LOG(CRIT, "Failed to parse devargs\n"); 1810 goto err; 1811 } 1812 1813 /* device specific initialization routine */ 1814 rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state); 1815 if (rc) { 1816 PMD_INIT_LOG(CRIT, "Failed to init ENA device"); 1817 goto err; 1818 } 1819 adapter->wd_state = wd_state; 1820 1821 set_default_llq_configurations(&llq_config, &get_feat_ctx.llq, 1822 adapter->use_large_llq_hdr); 1823 rc = ena_set_queues_placement_policy(adapter, ena_dev, 1824 &get_feat_ctx.llq, &llq_config); 1825 if (unlikely(rc)) { 1826 PMD_INIT_LOG(CRIT, "Failed to set placement policy"); 1827 return rc; 1828 } 1829 1830 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 1831 queue_type_str = "Regular"; 1832 else 1833 queue_type_str = "Low latency"; 1834 PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str); 1835 1836 calc_queue_ctx.ena_dev = ena_dev; 1837 calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 1838 1839 max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx); 1840 rc = ena_calc_io_queue_size(&calc_queue_ctx, 1841 adapter->use_large_llq_hdr); 1842 if (unlikely((rc != 0) || (max_num_io_queues == 0))) { 1843 rc = -EFAULT; 1844 goto err_device_destroy; 1845 } 1846 1847 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size; 1848 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size; 1849 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 1850 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 1851 adapter->max_num_io_queues = max_num_io_queues; 1852 1853 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1854 disable_meta_caching = 1855 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags & 1856 BIT(ENA_ADMIN_DISABLE_META_CACHING)); 1857 } else { 1858 disable_meta_caching = false; 1859 } 1860 1861 /* prepare ring structures */ 1862 ena_init_rings(adapter, disable_meta_caching); 1863 1864 ena_config_debug_area(adapter); 1865 1866 /* Set max MTU for this device */ 1867 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 1868 1869 /* set device support for offloads */ 1870 adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx & 1871 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0; 1872 adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx & 1873 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0; 1874 adapter->offloads.rx_csum_supported = 1875 (get_feat_ctx.offload.rx_supported & 1876 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0; 1877 1878 /* Copy MAC address and point DPDK to it */ 1879 eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr; 1880 rte_ether_addr_copy((struct rte_ether_addr *) 1881 get_feat_ctx.dev_attr.mac_addr, 1882 (struct rte_ether_addr *)adapter->mac_addr); 1883 1884 adapter->drv_stats = rte_zmalloc("adapter stats", 1885 sizeof(*adapter->drv_stats), 1886 RTE_CACHE_LINE_SIZE); 1887 if (!adapter->drv_stats) { 1888 PMD_DRV_LOG(ERR, "failed to alloc mem for adapter stats\n"); 1889 rc = -ENOMEM; 1890 goto err_delete_debug_area; 1891 } 1892 1893 rte_spinlock_init(&adapter->admin_lock); 1894 1895 rte_intr_callback_register(intr_handle, 1896 ena_interrupt_handler_rte, 1897 adapter); 1898 rte_intr_enable(intr_handle); 1899 ena_com_set_admin_polling_mode(ena_dev, false); 1900 ena_com_admin_aenq_enable(ena_dev); 1901 1902 if (adapters_found == 0) 1903 rte_timer_subsystem_init(); 1904 rte_timer_init(&adapter->timer_wd); 1905 1906 adapters_found++; 1907 adapter->state = ENA_ADAPTER_STATE_INIT; 1908 1909 return 0; 1910 1911 err_delete_debug_area: 1912 ena_com_delete_debug_area(ena_dev); 1913 1914 err_device_destroy: 1915 ena_com_delete_host_info(ena_dev); 1916 ena_com_admin_destroy(ena_dev); 1917 1918 err: 1919 return rc; 1920 } 1921 1922 static void ena_destroy_device(struct rte_eth_dev *eth_dev) 1923 { 1924 struct ena_adapter *adapter = eth_dev->data->dev_private; 1925 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1926 1927 if (adapter->state == ENA_ADAPTER_STATE_FREE) 1928 return; 1929 1930 ena_com_set_admin_running_state(ena_dev, false); 1931 1932 if (adapter->state != ENA_ADAPTER_STATE_CLOSED) 1933 ena_close(eth_dev); 1934 1935 ena_com_delete_debug_area(ena_dev); 1936 ena_com_delete_host_info(ena_dev); 1937 1938 ena_com_abort_admin_commands(ena_dev); 1939 ena_com_wait_for_abort_completion(ena_dev); 1940 ena_com_admin_destroy(ena_dev); 1941 ena_com_mmio_reg_read_request_destroy(ena_dev); 1942 1943 adapter->state = ENA_ADAPTER_STATE_FREE; 1944 } 1945 1946 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) 1947 { 1948 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1949 return 0; 1950 1951 ena_destroy_device(eth_dev); 1952 1953 return 0; 1954 } 1955 1956 static int ena_dev_configure(struct rte_eth_dev *dev) 1957 { 1958 struct ena_adapter *adapter = dev->data->dev_private; 1959 1960 adapter->state = ENA_ADAPTER_STATE_CONFIG; 1961 1962 adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads; 1963 adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads; 1964 return 0; 1965 } 1966 1967 static void ena_init_rings(struct ena_adapter *adapter, 1968 bool disable_meta_caching) 1969 { 1970 size_t i; 1971 1972 for (i = 0; i < adapter->max_num_io_queues; i++) { 1973 struct ena_ring *ring = &adapter->tx_ring[i]; 1974 1975 ring->configured = 0; 1976 ring->type = ENA_RING_TYPE_TX; 1977 ring->adapter = adapter; 1978 ring->id = i; 1979 ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 1980 ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 1981 ring->sgl_size = adapter->max_tx_sgl_size; 1982 ring->disable_meta_caching = disable_meta_caching; 1983 } 1984 1985 for (i = 0; i < adapter->max_num_io_queues; i++) { 1986 struct ena_ring *ring = &adapter->rx_ring[i]; 1987 1988 ring->configured = 0; 1989 ring->type = ENA_RING_TYPE_RX; 1990 ring->adapter = adapter; 1991 ring->id = i; 1992 ring->sgl_size = adapter->max_rx_sgl_size; 1993 } 1994 } 1995 1996 static int ena_infos_get(struct rte_eth_dev *dev, 1997 struct rte_eth_dev_info *dev_info) 1998 { 1999 struct ena_adapter *adapter; 2000 struct ena_com_dev *ena_dev; 2001 uint64_t rx_feat = 0, tx_feat = 0; 2002 2003 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 2004 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 2005 adapter = dev->data->dev_private; 2006 2007 ena_dev = &adapter->ena_dev; 2008 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 2009 2010 dev_info->speed_capa = 2011 ETH_LINK_SPEED_1G | 2012 ETH_LINK_SPEED_2_5G | 2013 ETH_LINK_SPEED_5G | 2014 ETH_LINK_SPEED_10G | 2015 ETH_LINK_SPEED_25G | 2016 ETH_LINK_SPEED_40G | 2017 ETH_LINK_SPEED_50G | 2018 ETH_LINK_SPEED_100G; 2019 2020 /* Set Tx & Rx features available for device */ 2021 if (adapter->offloads.tso4_supported) 2022 tx_feat |= DEV_TX_OFFLOAD_TCP_TSO; 2023 2024 if (adapter->offloads.tx_csum_supported) 2025 tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM | 2026 DEV_TX_OFFLOAD_UDP_CKSUM | 2027 DEV_TX_OFFLOAD_TCP_CKSUM; 2028 2029 if (adapter->offloads.rx_csum_supported) 2030 rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM | 2031 DEV_RX_OFFLOAD_UDP_CKSUM | 2032 DEV_RX_OFFLOAD_TCP_CKSUM; 2033 2034 rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME; 2035 2036 /* Inform framework about available features */ 2037 dev_info->rx_offload_capa = rx_feat; 2038 dev_info->rx_queue_offload_capa = rx_feat; 2039 dev_info->tx_offload_capa = tx_feat; 2040 dev_info->tx_queue_offload_capa = tx_feat; 2041 2042 dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP | 2043 ETH_RSS_UDP; 2044 2045 dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 2046 dev_info->max_rx_pktlen = adapter->max_mtu; 2047 dev_info->max_mac_addrs = 1; 2048 2049 dev_info->max_rx_queues = adapter->max_num_io_queues; 2050 dev_info->max_tx_queues = adapter->max_num_io_queues; 2051 dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 2052 2053 adapter->tx_supported_offloads = tx_feat; 2054 adapter->rx_supported_offloads = rx_feat; 2055 2056 dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size; 2057 dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2058 dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2059 adapter->max_rx_sgl_size); 2060 dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2061 adapter->max_rx_sgl_size); 2062 2063 dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size; 2064 dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2065 dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2066 adapter->max_tx_sgl_size); 2067 dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2068 adapter->max_tx_sgl_size); 2069 2070 return 0; 2071 } 2072 2073 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len) 2074 { 2075 mbuf->data_len = len; 2076 mbuf->data_off = RTE_PKTMBUF_HEADROOM; 2077 mbuf->refcnt = 1; 2078 mbuf->next = NULL; 2079 } 2080 2081 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, 2082 struct ena_com_rx_buf_info *ena_bufs, 2083 uint32_t descs, 2084 uint16_t *next_to_clean, 2085 uint8_t offset) 2086 { 2087 struct rte_mbuf *mbuf; 2088 struct rte_mbuf *mbuf_head; 2089 struct ena_rx_buffer *rx_info; 2090 int rc; 2091 uint16_t ntc, len, req_id, buf = 0; 2092 2093 if (unlikely(descs == 0)) 2094 return NULL; 2095 2096 ntc = *next_to_clean; 2097 2098 len = ena_bufs[buf].len; 2099 req_id = ena_bufs[buf].req_id; 2100 2101 rx_info = &rx_ring->rx_buffer_info[req_id]; 2102 2103 mbuf = rx_info->mbuf; 2104 RTE_ASSERT(mbuf != NULL); 2105 2106 ena_init_rx_mbuf(mbuf, len); 2107 2108 /* Fill the mbuf head with the data specific for 1st segment. */ 2109 mbuf_head = mbuf; 2110 mbuf_head->nb_segs = descs; 2111 mbuf_head->port = rx_ring->port_id; 2112 mbuf_head->pkt_len = len; 2113 mbuf_head->data_off += offset; 2114 2115 rx_info->mbuf = NULL; 2116 rx_ring->empty_rx_reqs[ntc] = req_id; 2117 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 2118 2119 while (--descs) { 2120 ++buf; 2121 len = ena_bufs[buf].len; 2122 req_id = ena_bufs[buf].req_id; 2123 2124 rx_info = &rx_ring->rx_buffer_info[req_id]; 2125 RTE_ASSERT(rx_info->mbuf != NULL); 2126 2127 if (unlikely(len == 0)) { 2128 /* 2129 * Some devices can pass descriptor with the length 0. 2130 * To avoid confusion, the PMD is simply putting the 2131 * descriptor back, as it was never used. We'll avoid 2132 * mbuf allocation that way. 2133 */ 2134 rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq, 2135 rx_info->mbuf, req_id); 2136 if (unlikely(rc != 0)) { 2137 /* Free the mbuf in case of an error. */ 2138 rte_mbuf_raw_free(rx_info->mbuf); 2139 } else { 2140 /* 2141 * If there was no error, just exit the loop as 2142 * 0 length descriptor is always the last one. 2143 */ 2144 break; 2145 } 2146 } else { 2147 /* Create an mbuf chain. */ 2148 mbuf->next = rx_info->mbuf; 2149 mbuf = mbuf->next; 2150 2151 ena_init_rx_mbuf(mbuf, len); 2152 mbuf_head->pkt_len += len; 2153 } 2154 2155 /* 2156 * Mark the descriptor as depleted and perform necessary 2157 * cleanup. 2158 * This code will execute in two cases: 2159 * 1. Descriptor len was greater than 0 - normal situation. 2160 * 2. Descriptor len was 0 and we failed to add the descriptor 2161 * to the device. In that situation, we should try to add 2162 * the mbuf again in the populate routine and mark the 2163 * descriptor as used up by the device. 2164 */ 2165 rx_info->mbuf = NULL; 2166 rx_ring->empty_rx_reqs[ntc] = req_id; 2167 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 2168 } 2169 2170 *next_to_clean = ntc; 2171 2172 return mbuf_head; 2173 } 2174 2175 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 2176 uint16_t nb_pkts) 2177 { 2178 struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 2179 unsigned int free_queue_entries; 2180 unsigned int refill_threshold; 2181 uint16_t next_to_clean = rx_ring->next_to_clean; 2182 uint16_t descs_in_use; 2183 struct rte_mbuf *mbuf; 2184 uint16_t completed; 2185 struct ena_com_rx_ctx ena_rx_ctx; 2186 int i, rc = 0; 2187 2188 /* Check adapter state */ 2189 if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 2190 PMD_DRV_LOG(ALERT, 2191 "Trying to receive pkts while device is NOT running\n"); 2192 return 0; 2193 } 2194 2195 descs_in_use = rx_ring->ring_size - 2196 ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1; 2197 nb_pkts = RTE_MIN(descs_in_use, nb_pkts); 2198 2199 for (completed = 0; completed < nb_pkts; completed++) { 2200 ena_rx_ctx.max_bufs = rx_ring->sgl_size; 2201 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 2202 ena_rx_ctx.descs = 0; 2203 ena_rx_ctx.pkt_offset = 0; 2204 /* receive packet context */ 2205 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 2206 rx_ring->ena_com_io_sq, 2207 &ena_rx_ctx); 2208 if (unlikely(rc)) { 2209 PMD_DRV_LOG(ERR, "ena_com_rx_pkt error %d\n", rc); 2210 if (rc == ENA_COM_NO_SPACE) { 2211 ++rx_ring->rx_stats.bad_desc_num; 2212 rx_ring->adapter->reset_reason = 2213 ENA_REGS_RESET_TOO_MANY_RX_DESCS; 2214 } else { 2215 ++rx_ring->rx_stats.bad_req_id; 2216 rx_ring->adapter->reset_reason = 2217 ENA_REGS_RESET_INV_RX_REQ_ID; 2218 } 2219 rx_ring->adapter->trigger_reset = true; 2220 return 0; 2221 } 2222 2223 mbuf = ena_rx_mbuf(rx_ring, 2224 ena_rx_ctx.ena_bufs, 2225 ena_rx_ctx.descs, 2226 &next_to_clean, 2227 ena_rx_ctx.pkt_offset); 2228 if (unlikely(mbuf == NULL)) { 2229 for (i = 0; i < ena_rx_ctx.descs; ++i) { 2230 rx_ring->empty_rx_reqs[next_to_clean] = 2231 rx_ring->ena_bufs[i].req_id; 2232 next_to_clean = ENA_IDX_NEXT_MASKED( 2233 next_to_clean, rx_ring->size_mask); 2234 } 2235 break; 2236 } 2237 2238 /* fill mbuf attributes if any */ 2239 ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx); 2240 2241 if (unlikely(mbuf->ol_flags & 2242 (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) { 2243 rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors); 2244 ++rx_ring->rx_stats.bad_csum; 2245 } 2246 2247 mbuf->hash.rss = ena_rx_ctx.hash; 2248 2249 rx_pkts[completed] = mbuf; 2250 rx_ring->rx_stats.bytes += mbuf->pkt_len; 2251 } 2252 2253 rx_ring->rx_stats.cnt += completed; 2254 rx_ring->next_to_clean = next_to_clean; 2255 2256 free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq); 2257 refill_threshold = 2258 RTE_MIN(rx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER, 2259 (unsigned int)ENA_REFILL_THRESH_PACKET); 2260 2261 /* Burst refill to save doorbells, memory barriers, const interval */ 2262 if (free_queue_entries > refill_threshold) { 2263 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); 2264 ena_populate_rx_queue(rx_ring, free_queue_entries); 2265 } 2266 2267 return completed; 2268 } 2269 2270 static uint16_t 2271 eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2272 uint16_t nb_pkts) 2273 { 2274 int32_t ret; 2275 uint32_t i; 2276 struct rte_mbuf *m; 2277 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 2278 struct rte_ipv4_hdr *ip_hdr; 2279 uint64_t ol_flags; 2280 uint16_t frag_field; 2281 2282 for (i = 0; i != nb_pkts; i++) { 2283 m = tx_pkts[i]; 2284 ol_flags = m->ol_flags; 2285 2286 if (!(ol_flags & PKT_TX_IPV4)) 2287 continue; 2288 2289 /* If there was not L2 header length specified, assume it is 2290 * length of the ethernet header. 2291 */ 2292 if (unlikely(m->l2_len == 0)) 2293 m->l2_len = sizeof(struct rte_ether_hdr); 2294 2295 ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, 2296 m->l2_len); 2297 frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset); 2298 2299 if ((frag_field & RTE_IPV4_HDR_DF_FLAG) != 0) { 2300 m->packet_type |= RTE_PTYPE_L4_NONFRAG; 2301 2302 /* If IPv4 header has DF flag enabled and TSO support is 2303 * disabled, partial chcecksum should not be calculated. 2304 */ 2305 if (!tx_ring->adapter->offloads.tso4_supported) 2306 continue; 2307 } 2308 2309 if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 || 2310 (ol_flags & PKT_TX_L4_MASK) == 2311 PKT_TX_SCTP_CKSUM) { 2312 rte_errno = ENOTSUP; 2313 return i; 2314 } 2315 2316 #ifdef RTE_LIBRTE_ETHDEV_DEBUG 2317 ret = rte_validate_tx_offload(m); 2318 if (ret != 0) { 2319 rte_errno = -ret; 2320 return i; 2321 } 2322 #endif 2323 2324 /* In case we are supposed to TSO and have DF not set (DF=0) 2325 * hardware must be provided with partial checksum, otherwise 2326 * it will take care of necessary calculations. 2327 */ 2328 2329 ret = rte_net_intel_cksum_flags_prepare(m, 2330 ol_flags & ~PKT_TX_TCP_SEG); 2331 if (ret != 0) { 2332 rte_errno = -ret; 2333 return i; 2334 } 2335 } 2336 2337 return i; 2338 } 2339 2340 static void ena_update_hints(struct ena_adapter *adapter, 2341 struct ena_admin_ena_hw_hints *hints) 2342 { 2343 if (hints->admin_completion_tx_timeout) 2344 adapter->ena_dev.admin_queue.completion_timeout = 2345 hints->admin_completion_tx_timeout * 1000; 2346 2347 if (hints->mmio_read_timeout) 2348 /* convert to usec */ 2349 adapter->ena_dev.mmio_read.reg_read_to = 2350 hints->mmio_read_timeout * 1000; 2351 2352 if (hints->driver_watchdog_timeout) { 2353 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2354 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2355 else 2356 // Convert msecs to ticks 2357 adapter->keep_alive_timeout = 2358 (hints->driver_watchdog_timeout * 2359 rte_get_timer_hz()) / 1000; 2360 } 2361 } 2362 2363 static int ena_check_space_and_linearize_mbuf(struct ena_ring *tx_ring, 2364 struct rte_mbuf *mbuf) 2365 { 2366 struct ena_com_dev *ena_dev; 2367 int num_segments, header_len, rc; 2368 2369 ena_dev = &tx_ring->adapter->ena_dev; 2370 num_segments = mbuf->nb_segs; 2371 header_len = mbuf->data_len; 2372 2373 if (likely(num_segments < tx_ring->sgl_size)) 2374 goto checkspace; 2375 2376 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && 2377 (num_segments == tx_ring->sgl_size) && 2378 (header_len < tx_ring->tx_max_header_size)) 2379 goto checkspace; 2380 2381 /* Checking for space for 2 additional metadata descriptors due to 2382 * possible header split and metadata descriptor. Linearization will 2383 * be needed so we reduce the segments number from num_segments to 1 2384 */ 2385 if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3)) { 2386 PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n"); 2387 return ENA_COM_NO_MEM; 2388 } 2389 ++tx_ring->tx_stats.linearize; 2390 rc = rte_pktmbuf_linearize(mbuf); 2391 if (unlikely(rc)) { 2392 PMD_DRV_LOG(WARNING, "Mbuf linearize failed\n"); 2393 rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); 2394 ++tx_ring->tx_stats.linearize_failed; 2395 return rc; 2396 } 2397 2398 return 0; 2399 2400 checkspace: 2401 /* Checking for space for 2 additional metadata descriptors due to 2402 * possible header split and metadata descriptor 2403 */ 2404 if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 2405 num_segments + 2)) { 2406 PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n"); 2407 return ENA_COM_NO_MEM; 2408 } 2409 2410 return 0; 2411 } 2412 2413 static void ena_tx_map_mbuf(struct ena_ring *tx_ring, 2414 struct ena_tx_buffer *tx_info, 2415 struct rte_mbuf *mbuf, 2416 void **push_header, 2417 uint16_t *header_len) 2418 { 2419 struct ena_com_buf *ena_buf; 2420 uint16_t delta, seg_len, push_len; 2421 2422 delta = 0; 2423 seg_len = mbuf->data_len; 2424 2425 tx_info->mbuf = mbuf; 2426 ena_buf = tx_info->bufs; 2427 2428 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2429 /* 2430 * Tx header might be (and will be in most cases) smaller than 2431 * tx_max_header_size. But it's not an issue to send more data 2432 * to the device, than actually needed if the mbuf size is 2433 * greater than tx_max_header_size. 2434 */ 2435 push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size); 2436 *header_len = push_len; 2437 2438 if (likely(push_len <= seg_len)) { 2439 /* If the push header is in the single segment, then 2440 * just point it to the 1st mbuf data. 2441 */ 2442 *push_header = rte_pktmbuf_mtod(mbuf, uint8_t *); 2443 } else { 2444 /* If the push header lays in the several segments, copy 2445 * it to the intermediate buffer. 2446 */ 2447 rte_pktmbuf_read(mbuf, 0, push_len, 2448 tx_ring->push_buf_intermediate_buf); 2449 *push_header = tx_ring->push_buf_intermediate_buf; 2450 delta = push_len - seg_len; 2451 } 2452 } else { 2453 *push_header = NULL; 2454 *header_len = 0; 2455 push_len = 0; 2456 } 2457 2458 /* Process first segment taking into consideration pushed header */ 2459 if (seg_len > push_len) { 2460 ena_buf->paddr = mbuf->buf_iova + 2461 mbuf->data_off + 2462 push_len; 2463 ena_buf->len = seg_len - push_len; 2464 ena_buf++; 2465 tx_info->num_of_bufs++; 2466 } 2467 2468 while ((mbuf = mbuf->next) != NULL) { 2469 seg_len = mbuf->data_len; 2470 2471 /* Skip mbufs if whole data is pushed as a header */ 2472 if (unlikely(delta > seg_len)) { 2473 delta -= seg_len; 2474 continue; 2475 } 2476 2477 ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta; 2478 ena_buf->len = seg_len - delta; 2479 ena_buf++; 2480 tx_info->num_of_bufs++; 2481 2482 delta = 0; 2483 } 2484 } 2485 2486 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) 2487 { 2488 struct ena_tx_buffer *tx_info; 2489 struct ena_com_tx_ctx ena_tx_ctx = { { 0 } }; 2490 uint16_t next_to_use; 2491 uint16_t header_len; 2492 uint16_t req_id; 2493 void *push_header; 2494 int nb_hw_desc; 2495 int rc; 2496 2497 rc = ena_check_space_and_linearize_mbuf(tx_ring, mbuf); 2498 if (unlikely(rc)) 2499 return rc; 2500 2501 next_to_use = tx_ring->next_to_use; 2502 2503 req_id = tx_ring->empty_tx_reqs[next_to_use]; 2504 tx_info = &tx_ring->tx_buffer_info[req_id]; 2505 tx_info->num_of_bufs = 0; 2506 2507 ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len); 2508 2509 ena_tx_ctx.ena_bufs = tx_info->bufs; 2510 ena_tx_ctx.push_header = push_header; 2511 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 2512 ena_tx_ctx.req_id = req_id; 2513 ena_tx_ctx.header_len = header_len; 2514 2515 /* Set Tx offloads flags, if applicable */ 2516 ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads, 2517 tx_ring->disable_meta_caching); 2518 2519 if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, 2520 &ena_tx_ctx))) { 2521 PMD_DRV_LOG(DEBUG, 2522 "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n", 2523 tx_ring->id); 2524 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2525 tx_ring->tx_stats.doorbells++; 2526 tx_ring->pkts_without_db = false; 2527 } 2528 2529 /* prepare the packet's descriptors to dma engine */ 2530 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, 2531 &nb_hw_desc); 2532 if (unlikely(rc)) { 2533 ++tx_ring->tx_stats.prepare_ctx_err; 2534 return rc; 2535 } 2536 2537 tx_info->tx_descs = nb_hw_desc; 2538 2539 tx_ring->tx_stats.cnt++; 2540 tx_ring->tx_stats.bytes += mbuf->pkt_len; 2541 2542 tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, 2543 tx_ring->size_mask); 2544 2545 return 0; 2546 } 2547 2548 static void ena_tx_cleanup(struct ena_ring *tx_ring) 2549 { 2550 unsigned int cleanup_budget; 2551 unsigned int total_tx_descs = 0; 2552 uint16_t next_to_clean = tx_ring->next_to_clean; 2553 2554 cleanup_budget = RTE_MIN(tx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER, 2555 (unsigned int)ENA_REFILL_THRESH_PACKET); 2556 2557 while (likely(total_tx_descs < cleanup_budget)) { 2558 struct rte_mbuf *mbuf; 2559 struct ena_tx_buffer *tx_info; 2560 uint16_t req_id; 2561 2562 if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0) 2563 break; 2564 2565 if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0)) 2566 break; 2567 2568 /* Get Tx info & store how many descs were processed */ 2569 tx_info = &tx_ring->tx_buffer_info[req_id]; 2570 2571 mbuf = tx_info->mbuf; 2572 rte_pktmbuf_free(mbuf); 2573 2574 tx_info->mbuf = NULL; 2575 tx_ring->empty_tx_reqs[next_to_clean] = req_id; 2576 2577 total_tx_descs += tx_info->tx_descs; 2578 2579 /* Put back descriptor to the ring for reuse */ 2580 next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean, 2581 tx_ring->size_mask); 2582 } 2583 2584 if (likely(total_tx_descs > 0)) { 2585 /* acknowledge completion of sent packets */ 2586 tx_ring->next_to_clean = next_to_clean; 2587 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); 2588 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); 2589 } 2590 } 2591 2592 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2593 uint16_t nb_pkts) 2594 { 2595 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 2596 uint16_t sent_idx = 0; 2597 2598 /* Check adapter state */ 2599 if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 2600 PMD_DRV_LOG(ALERT, 2601 "Trying to xmit pkts while device is NOT running\n"); 2602 return 0; 2603 } 2604 2605 for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { 2606 if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx])) 2607 break; 2608 tx_ring->pkts_without_db = true; 2609 rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4, 2610 tx_ring->size_mask)]); 2611 } 2612 2613 tx_ring->tx_stats.available_desc = 2614 ena_com_free_q_entries(tx_ring->ena_com_io_sq); 2615 2616 /* If there are ready packets to be xmitted... */ 2617 if (likely(tx_ring->pkts_without_db)) { 2618 /* ...let HW do its best :-) */ 2619 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2620 tx_ring->tx_stats.doorbells++; 2621 tx_ring->pkts_without_db = false; 2622 } 2623 2624 ena_tx_cleanup(tx_ring); 2625 2626 tx_ring->tx_stats.available_desc = 2627 ena_com_free_q_entries(tx_ring->ena_com_io_sq); 2628 tx_ring->tx_stats.tx_poll++; 2629 2630 return sent_idx; 2631 } 2632 2633 int ena_copy_eni_stats(struct ena_adapter *adapter) 2634 { 2635 struct ena_admin_eni_stats admin_eni_stats; 2636 int rc; 2637 2638 rte_spinlock_lock(&adapter->admin_lock); 2639 rc = ena_com_get_eni_stats(&adapter->ena_dev, &admin_eni_stats); 2640 rte_spinlock_unlock(&adapter->admin_lock); 2641 if (rc != 0) { 2642 if (rc == ENA_COM_UNSUPPORTED) { 2643 PMD_DRV_LOG(DEBUG, 2644 "Retrieving ENI metrics is not supported.\n"); 2645 } else { 2646 PMD_DRV_LOG(WARNING, 2647 "Failed to get ENI metrics: %d\n", rc); 2648 } 2649 return rc; 2650 } 2651 2652 rte_memcpy(&adapter->eni_stats, &admin_eni_stats, 2653 sizeof(struct ena_stats_eni)); 2654 2655 return 0; 2656 } 2657 2658 /** 2659 * DPDK callback to retrieve names of extended device statistics 2660 * 2661 * @param dev 2662 * Pointer to Ethernet device structure. 2663 * @param[out] xstats_names 2664 * Buffer to insert names into. 2665 * @param n 2666 * Number of names. 2667 * 2668 * @return 2669 * Number of xstats names. 2670 */ 2671 static int ena_xstats_get_names(struct rte_eth_dev *dev, 2672 struct rte_eth_xstat_name *xstats_names, 2673 unsigned int n) 2674 { 2675 unsigned int xstats_count = ena_xstats_calc_num(dev); 2676 unsigned int stat, i, count = 0; 2677 2678 if (n < xstats_count || !xstats_names) 2679 return xstats_count; 2680 2681 for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) 2682 strcpy(xstats_names[count].name, 2683 ena_stats_global_strings[stat].name); 2684 2685 for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) 2686 strcpy(xstats_names[count].name, 2687 ena_stats_eni_strings[stat].name); 2688 2689 for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) 2690 for (i = 0; i < dev->data->nb_rx_queues; i++, count++) 2691 snprintf(xstats_names[count].name, 2692 sizeof(xstats_names[count].name), 2693 "rx_q%d_%s", i, 2694 ena_stats_rx_strings[stat].name); 2695 2696 for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) 2697 for (i = 0; i < dev->data->nb_tx_queues; i++, count++) 2698 snprintf(xstats_names[count].name, 2699 sizeof(xstats_names[count].name), 2700 "tx_q%d_%s", i, 2701 ena_stats_tx_strings[stat].name); 2702 2703 return xstats_count; 2704 } 2705 2706 /** 2707 * DPDK callback to get extended device statistics. 2708 * 2709 * @param dev 2710 * Pointer to Ethernet device structure. 2711 * @param[out] stats 2712 * Stats table output buffer. 2713 * @param n 2714 * The size of the stats table. 2715 * 2716 * @return 2717 * Number of xstats on success, negative on failure. 2718 */ 2719 static int ena_xstats_get(struct rte_eth_dev *dev, 2720 struct rte_eth_xstat *xstats, 2721 unsigned int n) 2722 { 2723 struct ena_adapter *adapter = dev->data->dev_private; 2724 unsigned int xstats_count = ena_xstats_calc_num(dev); 2725 unsigned int stat, i, count = 0; 2726 int stat_offset; 2727 void *stats_begin; 2728 2729 if (n < xstats_count) 2730 return xstats_count; 2731 2732 if (!xstats) 2733 return 0; 2734 2735 for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) { 2736 stat_offset = ena_stats_global_strings[stat].stat_offset; 2737 stats_begin = &adapter->dev_stats; 2738 2739 xstats[count].id = count; 2740 xstats[count].value = *((uint64_t *) 2741 ((char *)stats_begin + stat_offset)); 2742 } 2743 2744 /* Even if the function below fails, we should copy previous (or initial 2745 * values) to keep structure of rte_eth_xstat consistent. 2746 */ 2747 ena_copy_eni_stats(adapter); 2748 for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) { 2749 stat_offset = ena_stats_eni_strings[stat].stat_offset; 2750 stats_begin = &adapter->eni_stats; 2751 2752 xstats[count].id = count; 2753 xstats[count].value = *((uint64_t *) 2754 ((char *)stats_begin + stat_offset)); 2755 } 2756 2757 for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) { 2758 for (i = 0; i < dev->data->nb_rx_queues; i++, count++) { 2759 stat_offset = ena_stats_rx_strings[stat].stat_offset; 2760 stats_begin = &adapter->rx_ring[i].rx_stats; 2761 2762 xstats[count].id = count; 2763 xstats[count].value = *((uint64_t *) 2764 ((char *)stats_begin + stat_offset)); 2765 } 2766 } 2767 2768 for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) { 2769 for (i = 0; i < dev->data->nb_tx_queues; i++, count++) { 2770 stat_offset = ena_stats_tx_strings[stat].stat_offset; 2771 stats_begin = &adapter->tx_ring[i].rx_stats; 2772 2773 xstats[count].id = count; 2774 xstats[count].value = *((uint64_t *) 2775 ((char *)stats_begin + stat_offset)); 2776 } 2777 } 2778 2779 return count; 2780 } 2781 2782 static int ena_xstats_get_by_id(struct rte_eth_dev *dev, 2783 const uint64_t *ids, 2784 uint64_t *values, 2785 unsigned int n) 2786 { 2787 struct ena_adapter *adapter = dev->data->dev_private; 2788 uint64_t id; 2789 uint64_t rx_entries, tx_entries; 2790 unsigned int i; 2791 int qid; 2792 int valid = 0; 2793 bool was_eni_copied = false; 2794 2795 for (i = 0; i < n; ++i) { 2796 id = ids[i]; 2797 /* Check if id belongs to global statistics */ 2798 if (id < ENA_STATS_ARRAY_GLOBAL) { 2799 values[i] = *((uint64_t *)&adapter->dev_stats + id); 2800 ++valid; 2801 continue; 2802 } 2803 2804 /* Check if id belongs to ENI statistics */ 2805 id -= ENA_STATS_ARRAY_GLOBAL; 2806 if (id < ENA_STATS_ARRAY_ENI) { 2807 /* Avoid reading ENI stats multiple times in a single 2808 * function call, as it requires communication with the 2809 * admin queue. 2810 */ 2811 if (!was_eni_copied) { 2812 was_eni_copied = true; 2813 ena_copy_eni_stats(adapter); 2814 } 2815 values[i] = *((uint64_t *)&adapter->eni_stats + id); 2816 ++valid; 2817 continue; 2818 } 2819 2820 /* Check if id belongs to rx queue statistics */ 2821 id -= ENA_STATS_ARRAY_ENI; 2822 rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues; 2823 if (id < rx_entries) { 2824 qid = id % dev->data->nb_rx_queues; 2825 id /= dev->data->nb_rx_queues; 2826 values[i] = *((uint64_t *) 2827 &adapter->rx_ring[qid].rx_stats + id); 2828 ++valid; 2829 continue; 2830 } 2831 /* Check if id belongs to rx queue statistics */ 2832 id -= rx_entries; 2833 tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues; 2834 if (id < tx_entries) { 2835 qid = id % dev->data->nb_tx_queues; 2836 id /= dev->data->nb_tx_queues; 2837 values[i] = *((uint64_t *) 2838 &adapter->tx_ring[qid].tx_stats + id); 2839 ++valid; 2840 continue; 2841 } 2842 } 2843 2844 return valid; 2845 } 2846 2847 static int ena_process_bool_devarg(const char *key, 2848 const char *value, 2849 void *opaque) 2850 { 2851 struct ena_adapter *adapter = opaque; 2852 bool bool_value; 2853 2854 /* Parse the value. */ 2855 if (strcmp(value, "1") == 0) { 2856 bool_value = true; 2857 } else if (strcmp(value, "0") == 0) { 2858 bool_value = false; 2859 } else { 2860 PMD_INIT_LOG(ERR, 2861 "Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n", 2862 value, key); 2863 return -EINVAL; 2864 } 2865 2866 /* Now, assign it to the proper adapter field. */ 2867 if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR)) 2868 adapter->use_large_llq_hdr = bool_value; 2869 2870 return 0; 2871 } 2872 2873 static int ena_parse_devargs(struct ena_adapter *adapter, 2874 struct rte_devargs *devargs) 2875 { 2876 static const char * const allowed_args[] = { 2877 ENA_DEVARG_LARGE_LLQ_HDR, 2878 }; 2879 struct rte_kvargs *kvlist; 2880 int rc; 2881 2882 if (devargs == NULL) 2883 return 0; 2884 2885 kvlist = rte_kvargs_parse(devargs->args, allowed_args); 2886 if (kvlist == NULL) { 2887 PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n", 2888 devargs->args); 2889 return -EINVAL; 2890 } 2891 2892 rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR, 2893 ena_process_bool_devarg, adapter); 2894 2895 rte_kvargs_free(kvlist); 2896 2897 return rc; 2898 } 2899 2900 /********************************************************************* 2901 * PMD configuration 2902 *********************************************************************/ 2903 static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2904 struct rte_pci_device *pci_dev) 2905 { 2906 return rte_eth_dev_pci_generic_probe(pci_dev, 2907 sizeof(struct ena_adapter), eth_ena_dev_init); 2908 } 2909 2910 static int eth_ena_pci_remove(struct rte_pci_device *pci_dev) 2911 { 2912 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit); 2913 } 2914 2915 static struct rte_pci_driver rte_ena_pmd = { 2916 .id_table = pci_id_ena_map, 2917 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 2918 RTE_PCI_DRV_WC_ACTIVATE, 2919 .probe = eth_ena_pci_probe, 2920 .remove = eth_ena_pci_remove, 2921 }; 2922 2923 RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd); 2924 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map); 2925 RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci"); 2926 RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>"); 2927 RTE_LOG_REGISTER(ena_logtype_init, pmd.net.ena.init, NOTICE); 2928 RTE_LOG_REGISTER(ena_logtype_driver, pmd.net.ena.driver, NOTICE); 2929 #ifdef RTE_LIBRTE_ENA_DEBUG_RX 2930 RTE_LOG_REGISTER(ena_logtype_rx, pmd.net.ena.rx, NOTICE); 2931 #endif 2932 #ifdef RTE_LIBRTE_ENA_DEBUG_TX 2933 RTE_LOG_REGISTER(ena_logtype_tx, pmd.net.ena.tx, NOTICE); 2934 #endif 2935 #ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE 2936 RTE_LOG_REGISTER(ena_logtype_tx_free, pmd.net.ena.tx_free, NOTICE); 2937 #endif 2938 #ifdef RTE_LIBRTE_ENA_COM_DEBUG 2939 RTE_LOG_REGISTER(ena_logtype_com, pmd.net.ena.com, NOTICE); 2940 #endif 2941 2942 /****************************************************************************** 2943 ******************************** AENQ Handlers ******************************* 2944 *****************************************************************************/ 2945 static void ena_update_on_link_change(void *adapter_data, 2946 struct ena_admin_aenq_entry *aenq_e) 2947 { 2948 struct rte_eth_dev *eth_dev; 2949 struct ena_adapter *adapter; 2950 struct ena_admin_aenq_link_change_desc *aenq_link_desc; 2951 uint32_t status; 2952 2953 adapter = adapter_data; 2954 aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 2955 eth_dev = adapter->rte_dev; 2956 2957 status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc); 2958 adapter->link_status = status; 2959 2960 ena_link_update(eth_dev, 0); 2961 rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); 2962 } 2963 2964 static void ena_notification(void *data, 2965 struct ena_admin_aenq_entry *aenq_e) 2966 { 2967 struct ena_adapter *adapter = data; 2968 struct ena_admin_ena_hw_hints *hints; 2969 2970 if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION) 2971 PMD_DRV_LOG(WARNING, "Invalid group(%x) expected %x\n", 2972 aenq_e->aenq_common_desc.group, 2973 ENA_ADMIN_NOTIFICATION); 2974 2975 switch (aenq_e->aenq_common_desc.syndrom) { 2976 case ENA_ADMIN_UPDATE_HINTS: 2977 hints = (struct ena_admin_ena_hw_hints *) 2978 (&aenq_e->inline_data_w4); 2979 ena_update_hints(adapter, hints); 2980 break; 2981 default: 2982 PMD_DRV_LOG(ERR, "Invalid aenq notification link state %d\n", 2983 aenq_e->aenq_common_desc.syndrom); 2984 } 2985 } 2986 2987 static void ena_keep_alive(void *adapter_data, 2988 __rte_unused struct ena_admin_aenq_entry *aenq_e) 2989 { 2990 struct ena_adapter *adapter = adapter_data; 2991 struct ena_admin_aenq_keep_alive_desc *desc; 2992 uint64_t rx_drops; 2993 uint64_t tx_drops; 2994 2995 adapter->timestamp_wd = rte_get_timer_cycles(); 2996 2997 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; 2998 rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low; 2999 tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low; 3000 3001 adapter->drv_stats->rx_drops = rx_drops; 3002 adapter->dev_stats.tx_drops = tx_drops; 3003 } 3004 3005 /** 3006 * This handler will called for unknown event group or unimplemented handlers 3007 **/ 3008 static void unimplemented_aenq_handler(__rte_unused void *data, 3009 __rte_unused struct ena_admin_aenq_entry *aenq_e) 3010 { 3011 PMD_DRV_LOG(ERR, "Unknown event was received or event with " 3012 "unimplemented handler\n"); 3013 } 3014 3015 static struct ena_aenq_handlers aenq_handlers = { 3016 .handlers = { 3017 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, 3018 [ENA_ADMIN_NOTIFICATION] = ena_notification, 3019 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive 3020 }, 3021 .unimplemented_handler = unimplemented_aenq_handler 3022 }; 3023