1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. 3 * All rights reserved. 4 */ 5 6 #include <rte_string_fns.h> 7 #include <rte_errno.h> 8 #include <rte_version.h> 9 #include <rte_net.h> 10 #include <rte_kvargs.h> 11 12 #include "ena_ethdev.h" 13 #include "ena_logs.h" 14 #include "ena_platform.h" 15 #include "ena_com.h" 16 #include "ena_eth_com.h" 17 18 #include <ena_common_defs.h> 19 #include <ena_regs_defs.h> 20 #include <ena_admin_defs.h> 21 #include <ena_eth_io_defs.h> 22 23 #define DRV_MODULE_VER_MAJOR 2 24 #define DRV_MODULE_VER_MINOR 5 25 #define DRV_MODULE_VER_SUBMINOR 0 26 27 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l) 28 29 #define GET_L4_HDR_LEN(mbuf) \ 30 ((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *, \ 31 mbuf->l3_len + mbuf->l2_len)->data_off) >> 4) 32 33 #define ETH_GSTRING_LEN 32 34 35 #define ARRAY_SIZE(x) RTE_DIM(x) 36 37 #define ENA_MIN_RING_DESC 128 38 39 #define ENA_PTYPE_HAS_HASH (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP) 40 41 enum ethtool_stringset { 42 ETH_SS_TEST = 0, 43 ETH_SS_STATS, 44 }; 45 46 struct ena_stats { 47 char name[ETH_GSTRING_LEN]; 48 int stat_offset; 49 }; 50 51 #define ENA_STAT_ENTRY(stat, stat_type) { \ 52 .name = #stat, \ 53 .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \ 54 } 55 56 #define ENA_STAT_RX_ENTRY(stat) \ 57 ENA_STAT_ENTRY(stat, rx) 58 59 #define ENA_STAT_TX_ENTRY(stat) \ 60 ENA_STAT_ENTRY(stat, tx) 61 62 #define ENA_STAT_ENI_ENTRY(stat) \ 63 ENA_STAT_ENTRY(stat, eni) 64 65 #define ENA_STAT_GLOBAL_ENTRY(stat) \ 66 ENA_STAT_ENTRY(stat, dev) 67 68 /* Device arguments */ 69 #define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr" 70 71 /* 72 * Each rte_memzone should have unique name. 73 * To satisfy it, count number of allocation and add it to name. 
74 */ 75 rte_atomic64_t ena_alloc_cnt; 76 77 static const struct ena_stats ena_stats_global_strings[] = { 78 ENA_STAT_GLOBAL_ENTRY(wd_expired), 79 ENA_STAT_GLOBAL_ENTRY(dev_start), 80 ENA_STAT_GLOBAL_ENTRY(dev_stop), 81 ENA_STAT_GLOBAL_ENTRY(tx_drops), 82 }; 83 84 static const struct ena_stats ena_stats_eni_strings[] = { 85 ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded), 86 ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded), 87 ENA_STAT_ENI_ENTRY(pps_allowance_exceeded), 88 ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded), 89 ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded), 90 }; 91 92 static const struct ena_stats ena_stats_tx_strings[] = { 93 ENA_STAT_TX_ENTRY(cnt), 94 ENA_STAT_TX_ENTRY(bytes), 95 ENA_STAT_TX_ENTRY(prepare_ctx_err), 96 ENA_STAT_TX_ENTRY(linearize), 97 ENA_STAT_TX_ENTRY(linearize_failed), 98 ENA_STAT_TX_ENTRY(tx_poll), 99 ENA_STAT_TX_ENTRY(doorbells), 100 ENA_STAT_TX_ENTRY(bad_req_id), 101 ENA_STAT_TX_ENTRY(available_desc), 102 ENA_STAT_TX_ENTRY(missed_tx), 103 }; 104 105 static const struct ena_stats ena_stats_rx_strings[] = { 106 ENA_STAT_RX_ENTRY(cnt), 107 ENA_STAT_RX_ENTRY(bytes), 108 ENA_STAT_RX_ENTRY(refill_partial), 109 ENA_STAT_RX_ENTRY(bad_csum), 110 ENA_STAT_RX_ENTRY(mbuf_alloc_fail), 111 ENA_STAT_RX_ENTRY(bad_desc_num), 112 ENA_STAT_RX_ENTRY(bad_req_id), 113 }; 114 115 #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) 116 #define ENA_STATS_ARRAY_ENI ARRAY_SIZE(ena_stats_eni_strings) 117 #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) 118 #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) 119 120 #define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\ 121 DEV_TX_OFFLOAD_UDP_CKSUM |\ 122 DEV_TX_OFFLOAD_IPV4_CKSUM |\ 123 DEV_TX_OFFLOAD_TCP_TSO) 124 #define MBUF_OFFLOADS (PKT_TX_L4_MASK |\ 125 PKT_TX_IP_CKSUM |\ 126 PKT_TX_TCP_SEG) 127 128 /** Vendor ID used by Amazon devices */ 129 #define PCI_VENDOR_ID_AMAZON 0x1D0F 130 /** Amazon devices */ 131 #define PCI_DEVICE_ID_ENA_VF 0xEC20 132 #define PCI_DEVICE_ID_ENA_VF_RSERV0 0xEC21 133 134 #define ENA_TX_OFFLOAD_MASK (\ 135 PKT_TX_L4_MASK | \ 136 PKT_TX_IPV6 | \ 137 PKT_TX_IPV4 | \ 138 PKT_TX_IP_CKSUM | \ 139 PKT_TX_TCP_SEG) 140 141 #define ENA_TX_OFFLOAD_NOTSUP_MASK \ 142 (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK) 143 144 /** HW specific offloads capabilities. */ 145 /* IPv4 checksum offload. */ 146 #define ENA_L3_IPV4_CSUM 0x0001 147 /* TCP/UDP checksum offload for IPv4 packets. */ 148 #define ENA_L4_IPV4_CSUM 0x0002 149 /* TCP/UDP checksum offload for IPv4 packets with pseudo header checksum. */ 150 #define ENA_L4_IPV4_CSUM_PARTIAL 0x0004 151 /* TCP/UDP checksum offload for IPv6 packets. */ 152 #define ENA_L4_IPV6_CSUM 0x0008 153 /* TCP/UDP checksum offload for IPv6 packets with pseudo header checksum. */ 154 #define ENA_L4_IPV6_CSUM_PARTIAL 0x0010 155 /* TSO support for IPv4 packets. */ 156 #define ENA_IPV4_TSO 0x0020 157 158 /* Device supports setting RSS hash. 
*/ 159 #define ENA_RX_RSS_HASH 0x0040 160 161 static const struct rte_pci_id pci_id_ena_map[] = { 162 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) }, 163 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) }, 164 { .device_id = 0 }, 165 }; 166 167 static struct ena_aenq_handlers aenq_handlers; 168 169 static int ena_device_init(struct ena_com_dev *ena_dev, 170 struct rte_pci_device *pdev, 171 struct ena_com_dev_get_features_ctx *get_feat_ctx, 172 bool *wd_state); 173 static int ena_dev_configure(struct rte_eth_dev *dev); 174 static void ena_tx_map_mbuf(struct ena_ring *tx_ring, 175 struct ena_tx_buffer *tx_info, 176 struct rte_mbuf *mbuf, 177 void **push_header, 178 uint16_t *header_len); 179 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf); 180 static void ena_tx_cleanup(struct ena_ring *tx_ring); 181 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 182 uint16_t nb_pkts); 183 static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 184 uint16_t nb_pkts); 185 static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 186 uint16_t nb_desc, unsigned int socket_id, 187 const struct rte_eth_txconf *tx_conf); 188 static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 189 uint16_t nb_desc, unsigned int socket_id, 190 const struct rte_eth_rxconf *rx_conf, 191 struct rte_mempool *mp); 192 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len); 193 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, 194 struct ena_com_rx_buf_info *ena_bufs, 195 uint32_t descs, 196 uint16_t *next_to_clean, 197 uint8_t offset); 198 static uint16_t eth_ena_recv_pkts(void *rx_queue, 199 struct rte_mbuf **rx_pkts, uint16_t nb_pkts); 200 static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq, 201 struct rte_mbuf *mbuf, uint16_t id); 202 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count); 203 static void ena_init_rings(struct ena_adapter *adapter, 204 bool disable_meta_caching); 205 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 206 static int ena_start(struct rte_eth_dev *dev); 207 static int ena_stop(struct rte_eth_dev *dev); 208 static int ena_close(struct rte_eth_dev *dev); 209 static int ena_dev_reset(struct rte_eth_dev *dev); 210 static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); 211 static void ena_rx_queue_release_all(struct rte_eth_dev *dev); 212 static void ena_tx_queue_release_all(struct rte_eth_dev *dev); 213 static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid); 214 static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid); 215 static void ena_rx_queue_release_bufs(struct ena_ring *ring); 216 static void ena_tx_queue_release_bufs(struct ena_ring *ring); 217 static int ena_link_update(struct rte_eth_dev *dev, 218 int wait_to_complete); 219 static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring); 220 static void ena_queue_stop(struct ena_ring *ring); 221 static void ena_queue_stop_all(struct rte_eth_dev *dev, 222 enum ena_ring_type ring_type); 223 static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring); 224 static int ena_queue_start_all(struct rte_eth_dev *dev, 225 enum ena_ring_type ring_type); 226 static void ena_stats_restart(struct rte_eth_dev *dev); 227 static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter); 228 static uint64_t ena_get_tx_port_offloads(struct ena_adapter 
*adapter); 229 static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter); 230 static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter); 231 static int ena_infos_get(struct rte_eth_dev *dev, 232 struct rte_eth_dev_info *dev_info); 233 static void ena_interrupt_handler_rte(void *cb_arg); 234 static void ena_timer_wd_callback(struct rte_timer *timer, void *arg); 235 static void ena_destroy_device(struct rte_eth_dev *eth_dev); 236 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev); 237 static int ena_xstats_get_names(struct rte_eth_dev *dev, 238 struct rte_eth_xstat_name *xstats_names, 239 unsigned int n); 240 static int ena_xstats_get(struct rte_eth_dev *dev, 241 struct rte_eth_xstat *stats, 242 unsigned int n); 243 static int ena_xstats_get_by_id(struct rte_eth_dev *dev, 244 const uint64_t *ids, 245 uint64_t *values, 246 unsigned int n); 247 static int ena_process_bool_devarg(const char *key, 248 const char *value, 249 void *opaque); 250 static int ena_parse_devargs(struct ena_adapter *adapter, 251 struct rte_devargs *devargs); 252 static int ena_copy_eni_stats(struct ena_adapter *adapter); 253 static int ena_setup_rx_intr(struct rte_eth_dev *dev); 254 static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev, 255 uint16_t queue_id); 256 static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev, 257 uint16_t queue_id); 258 259 static const struct eth_dev_ops ena_dev_ops = { 260 .dev_configure = ena_dev_configure, 261 .dev_infos_get = ena_infos_get, 262 .rx_queue_setup = ena_rx_queue_setup, 263 .tx_queue_setup = ena_tx_queue_setup, 264 .dev_start = ena_start, 265 .dev_stop = ena_stop, 266 .link_update = ena_link_update, 267 .stats_get = ena_stats_get, 268 .xstats_get_names = ena_xstats_get_names, 269 .xstats_get = ena_xstats_get, 270 .xstats_get_by_id = ena_xstats_get_by_id, 271 .mtu_set = ena_mtu_set, 272 .rx_queue_release = ena_rx_queue_release, 273 .tx_queue_release = ena_tx_queue_release, 274 .dev_close = ena_close, 275 .dev_reset = ena_dev_reset, 276 .reta_update = ena_rss_reta_update, 277 .reta_query = ena_rss_reta_query, 278 .rx_queue_intr_enable = ena_rx_queue_intr_enable, 279 .rx_queue_intr_disable = ena_rx_queue_intr_disable, 280 .rss_hash_update = ena_rss_hash_update, 281 .rss_hash_conf_get = ena_rss_hash_conf_get, 282 }; 283 284 static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, 285 struct ena_com_rx_ctx *ena_rx_ctx, 286 bool fill_hash) 287 { 288 uint64_t ol_flags = 0; 289 uint32_t packet_type = 0; 290 291 if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) 292 packet_type |= RTE_PTYPE_L4_TCP; 293 else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP) 294 packet_type |= RTE_PTYPE_L4_UDP; 295 296 if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) { 297 packet_type |= RTE_PTYPE_L3_IPV4; 298 if (unlikely(ena_rx_ctx->l3_csum_err)) 299 ol_flags |= PKT_RX_IP_CKSUM_BAD; 300 else 301 ol_flags |= PKT_RX_IP_CKSUM_GOOD; 302 } else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) { 303 packet_type |= RTE_PTYPE_L3_IPV6; 304 } 305 306 if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) 307 ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; 308 else 309 if (unlikely(ena_rx_ctx->l4_csum_err)) 310 ol_flags |= PKT_RX_L4_CKSUM_BAD; 311 else 312 ol_flags |= PKT_RX_L4_CKSUM_GOOD; 313 314 if (fill_hash && 315 likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) { 316 ol_flags |= PKT_RX_RSS_HASH; 317 mbuf->hash.rss = ena_rx_ctx->hash; 318 } 319 320 mbuf->ol_flags = ol_flags; 321 mbuf->packet_type = packet_type; 322 } 323 324 static inline void 
ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, 325 struct ena_com_tx_ctx *ena_tx_ctx, 326 uint64_t queue_offloads, 327 bool disable_meta_caching) 328 { 329 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; 330 331 if ((mbuf->ol_flags & MBUF_OFFLOADS) && 332 (queue_offloads & QUEUE_OFFLOADS)) { 333 /* check if TSO is required */ 334 if ((mbuf->ol_flags & PKT_TX_TCP_SEG) && 335 (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) { 336 ena_tx_ctx->tso_enable = true; 337 338 ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf); 339 } 340 341 /* check if L3 checksum is needed */ 342 if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) && 343 (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) 344 ena_tx_ctx->l3_csum_enable = true; 345 346 if (mbuf->ol_flags & PKT_TX_IPV6) { 347 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 348 } else { 349 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 350 351 /* set don't fragment (DF) flag */ 352 if (mbuf->packet_type & 353 (RTE_PTYPE_L4_NONFRAG 354 | RTE_PTYPE_INNER_L4_NONFRAG)) 355 ena_tx_ctx->df = true; 356 } 357 358 /* check if L4 checksum is needed */ 359 if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) && 360 (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) { 361 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 362 ena_tx_ctx->l4_csum_enable = true; 363 } else if (((mbuf->ol_flags & PKT_TX_L4_MASK) == 364 PKT_TX_UDP_CKSUM) && 365 (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) { 366 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 367 ena_tx_ctx->l4_csum_enable = true; 368 } else { 369 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; 370 ena_tx_ctx->l4_csum_enable = false; 371 } 372 373 ena_meta->mss = mbuf->tso_segsz; 374 ena_meta->l3_hdr_len = mbuf->l3_len; 375 ena_meta->l3_hdr_offset = mbuf->l2_len; 376 377 ena_tx_ctx->meta_valid = true; 378 } else if (disable_meta_caching) { 379 memset(ena_meta, 0, sizeof(*ena_meta)); 380 ena_tx_ctx->meta_valid = true; 381 } else { 382 ena_tx_ctx->meta_valid = false; 383 } 384 } 385 386 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) 387 { 388 struct ena_tx_buffer *tx_info = NULL; 389 390 if (likely(req_id < tx_ring->ring_size)) { 391 tx_info = &tx_ring->tx_buffer_info[req_id]; 392 if (likely(tx_info->mbuf)) 393 return 0; 394 } 395 396 if (tx_info) 397 PMD_TX_LOG(ERR, "tx_info doesn't have valid mbuf\n"); 398 else 399 PMD_TX_LOG(ERR, "Invalid req_id: %hu\n", req_id); 400 401 /* Trigger device reset */ 402 ++tx_ring->tx_stats.bad_req_id; 403 tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; 404 tx_ring->adapter->trigger_reset = true; 405 return -EFAULT; 406 } 407 408 static void ena_config_host_info(struct ena_com_dev *ena_dev) 409 { 410 struct ena_admin_host_info *host_info; 411 int rc; 412 413 /* Allocate only the host info */ 414 rc = ena_com_allocate_host_info(ena_dev); 415 if (rc) { 416 PMD_DRV_LOG(ERR, "Cannot allocate host info\n"); 417 return; 418 } 419 420 host_info = ena_dev->host_attr.host_info; 421 422 host_info->os_type = ENA_ADMIN_OS_DPDK; 423 host_info->kernel_ver = RTE_VERSION; 424 strlcpy((char *)host_info->kernel_ver_str, rte_version(), 425 sizeof(host_info->kernel_ver_str)); 426 host_info->os_dist = RTE_VERSION; 427 strlcpy((char *)host_info->os_dist_str, rte_version(), 428 sizeof(host_info->os_dist_str)); 429 host_info->driver_version = 430 (DRV_MODULE_VER_MAJOR) | 431 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 432 (DRV_MODULE_VER_SUBMINOR << 433 ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 434 host_info->num_cpus = rte_lcore_count(); 435 436 host_info->driver_supported_features 
= 437 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | 438 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; 439 440 rc = ena_com_set_host_attributes(ena_dev); 441 if (rc) { 442 if (rc == -ENA_COM_UNSUPPORTED) 443 PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); 444 else 445 PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); 446 447 goto err; 448 } 449 450 return; 451 452 err: 453 ena_com_delete_host_info(ena_dev); 454 } 455 456 /* This function calculates the number of xstats based on the current config */ 457 static unsigned int ena_xstats_calc_num(struct rte_eth_dev_data *data) 458 { 459 return ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENI + 460 (data->nb_tx_queues * ENA_STATS_ARRAY_TX) + 461 (data->nb_rx_queues * ENA_STATS_ARRAY_RX); 462 } 463 464 static void ena_config_debug_area(struct ena_adapter *adapter) 465 { 466 u32 debug_area_size; 467 int rc, ss_count; 468 469 ss_count = ena_xstats_calc_num(adapter->edev_data); 470 471 /* allocate 32 bytes for each string and 64bit for the value */ 472 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 473 474 rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); 475 if (rc) { 476 PMD_DRV_LOG(ERR, "Cannot allocate debug area\n"); 477 return; 478 } 479 480 rc = ena_com_set_host_attributes(&adapter->ena_dev); 481 if (rc) { 482 if (rc == -ENA_COM_UNSUPPORTED) 483 PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); 484 else 485 PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); 486 487 goto err; 488 } 489 490 return; 491 err: 492 ena_com_delete_debug_area(&adapter->ena_dev); 493 } 494 495 static int ena_close(struct rte_eth_dev *dev) 496 { 497 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 498 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 499 struct ena_adapter *adapter = dev->data->dev_private; 500 int ret = 0; 501 502 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 503 return 0; 504 505 if (adapter->state == ENA_ADAPTER_STATE_RUNNING) 506 ret = ena_stop(dev); 507 adapter->state = ENA_ADAPTER_STATE_CLOSED; 508 509 ena_rx_queue_release_all(dev); 510 ena_tx_queue_release_all(dev); 511 512 rte_free(adapter->drv_stats); 513 adapter->drv_stats = NULL; 514 515 rte_intr_disable(intr_handle); 516 rte_intr_callback_unregister(intr_handle, 517 ena_interrupt_handler_rte, 518 dev); 519 520 /* 521 * MAC is not allocated dynamically. Setting NULL should prevent from 522 * release of the resource in the rte_eth_dev_release_port(). 
523 */ 524 dev->data->mac_addrs = NULL; 525 526 return ret; 527 } 528 529 static int 530 ena_dev_reset(struct rte_eth_dev *dev) 531 { 532 int rc = 0; 533 534 /* Cannot release memory in secondary process */ 535 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 536 PMD_DRV_LOG(WARNING, "dev_reset not supported in secondary.\n"); 537 return -EPERM; 538 } 539 540 ena_destroy_device(dev); 541 rc = eth_ena_dev_init(dev); 542 if (rc) 543 PMD_INIT_LOG(CRIT, "Cannot initialize device\n"); 544 545 return rc; 546 } 547 548 static void ena_rx_queue_release_all(struct rte_eth_dev *dev) 549 { 550 int nb_queues = dev->data->nb_rx_queues; 551 int i; 552 553 for (i = 0; i < nb_queues; i++) 554 ena_rx_queue_release(dev, i); 555 } 556 557 static void ena_tx_queue_release_all(struct rte_eth_dev *dev) 558 { 559 int nb_queues = dev->data->nb_tx_queues; 560 int i; 561 562 for (i = 0; i < nb_queues; i++) 563 ena_tx_queue_release(dev, i); 564 } 565 566 static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 567 { 568 struct ena_ring *ring = dev->data->rx_queues[qid]; 569 570 /* Free ring resources */ 571 if (ring->rx_buffer_info) 572 rte_free(ring->rx_buffer_info); 573 ring->rx_buffer_info = NULL; 574 575 if (ring->rx_refill_buffer) 576 rte_free(ring->rx_refill_buffer); 577 ring->rx_refill_buffer = NULL; 578 579 if (ring->empty_rx_reqs) 580 rte_free(ring->empty_rx_reqs); 581 ring->empty_rx_reqs = NULL; 582 583 ring->configured = 0; 584 585 PMD_DRV_LOG(NOTICE, "Rx queue %d:%d released\n", 586 ring->port_id, ring->id); 587 } 588 589 static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 590 { 591 struct ena_ring *ring = dev->data->tx_queues[qid]; 592 593 /* Free ring resources */ 594 if (ring->push_buf_intermediate_buf) 595 rte_free(ring->push_buf_intermediate_buf); 596 597 if (ring->tx_buffer_info) 598 rte_free(ring->tx_buffer_info); 599 600 if (ring->empty_tx_reqs) 601 rte_free(ring->empty_tx_reqs); 602 603 ring->empty_tx_reqs = NULL; 604 ring->tx_buffer_info = NULL; 605 ring->push_buf_intermediate_buf = NULL; 606 607 ring->configured = 0; 608 609 PMD_DRV_LOG(NOTICE, "Tx queue %d:%d released\n", 610 ring->port_id, ring->id); 611 } 612 613 static void ena_rx_queue_release_bufs(struct ena_ring *ring) 614 { 615 unsigned int i; 616 617 for (i = 0; i < ring->ring_size; ++i) { 618 struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i]; 619 if (rx_info->mbuf) { 620 rte_mbuf_raw_free(rx_info->mbuf); 621 rx_info->mbuf = NULL; 622 } 623 } 624 } 625 626 static void ena_tx_queue_release_bufs(struct ena_ring *ring) 627 { 628 unsigned int i; 629 630 for (i = 0; i < ring->ring_size; ++i) { 631 struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i]; 632 633 if (tx_buf->mbuf) { 634 rte_pktmbuf_free(tx_buf->mbuf); 635 tx_buf->mbuf = NULL; 636 } 637 } 638 } 639 640 static int ena_link_update(struct rte_eth_dev *dev, 641 __rte_unused int wait_to_complete) 642 { 643 struct rte_eth_link *link = &dev->data->dev_link; 644 struct ena_adapter *adapter = dev->data->dev_private; 645 646 link->link_status = adapter->link_status ? 
ETH_LINK_UP : ETH_LINK_DOWN; 647 link->link_speed = ETH_SPEED_NUM_NONE; 648 link->link_duplex = ETH_LINK_FULL_DUPLEX; 649 650 return 0; 651 } 652 653 static int ena_queue_start_all(struct rte_eth_dev *dev, 654 enum ena_ring_type ring_type) 655 { 656 struct ena_adapter *adapter = dev->data->dev_private; 657 struct ena_ring *queues = NULL; 658 int nb_queues; 659 int i = 0; 660 int rc = 0; 661 662 if (ring_type == ENA_RING_TYPE_RX) { 663 queues = adapter->rx_ring; 664 nb_queues = dev->data->nb_rx_queues; 665 } else { 666 queues = adapter->tx_ring; 667 nb_queues = dev->data->nb_tx_queues; 668 } 669 for (i = 0; i < nb_queues; i++) { 670 if (queues[i].configured) { 671 if (ring_type == ENA_RING_TYPE_RX) { 672 ena_assert_msg( 673 dev->data->rx_queues[i] == &queues[i], 674 "Inconsistent state of Rx queues\n"); 675 } else { 676 ena_assert_msg( 677 dev->data->tx_queues[i] == &queues[i], 678 "Inconsistent state of Tx queues\n"); 679 } 680 681 rc = ena_queue_start(dev, &queues[i]); 682 683 if (rc) { 684 PMD_INIT_LOG(ERR, 685 "Failed to start queue[%d] of type(%d)\n", 686 i, ring_type); 687 goto err; 688 } 689 } 690 } 691 692 return 0; 693 694 err: 695 while (i--) 696 if (queues[i].configured) 697 ena_queue_stop(&queues[i]); 698 699 return rc; 700 } 701 702 static int ena_check_valid_conf(struct ena_adapter *adapter) 703 { 704 uint32_t mtu = adapter->edev_data->mtu; 705 706 if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) { 707 PMD_INIT_LOG(ERR, 708 "Unsupported MTU of %d. Max MTU: %d, min MTU: %d\n", 709 mtu, adapter->max_mtu, ENA_MIN_MTU); 710 return ENA_COM_UNSUPPORTED; 711 } 712 713 return 0; 714 } 715 716 static int 717 ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx, 718 bool use_large_llq_hdr) 719 { 720 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 721 struct ena_com_dev *ena_dev = ctx->ena_dev; 722 uint32_t max_tx_queue_size; 723 uint32_t max_rx_queue_size; 724 725 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 726 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 727 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 728 max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth, 729 max_queue_ext->max_rx_sq_depth); 730 max_tx_queue_size = max_queue_ext->max_tx_cq_depth; 731 732 if (ena_dev->tx_mem_queue_type == 733 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 734 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 735 llq->max_llq_depth); 736 } else { 737 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 738 max_queue_ext->max_tx_sq_depth); 739 } 740 741 ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 742 max_queue_ext->max_per_packet_rx_descs); 743 ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 744 max_queue_ext->max_per_packet_tx_descs); 745 } else { 746 struct ena_admin_queue_feature_desc *max_queues = 747 &ctx->get_feat_ctx->max_queues; 748 max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth, 749 max_queues->max_sq_depth); 750 max_tx_queue_size = max_queues->max_cq_depth; 751 752 if (ena_dev->tx_mem_queue_type == 753 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 754 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 755 llq->max_llq_depth); 756 } else { 757 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 758 max_queues->max_sq_depth); 759 } 760 761 ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 762 max_queues->max_packet_rx_descs); 763 ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 764 max_queues->max_packet_tx_descs); 765 } 766 767 /* Round down to the nearest power of 2 */ 768 max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size); 769 max_tx_queue_size 
= rte_align32prevpow2(max_tx_queue_size); 770 771 if (use_large_llq_hdr) { 772 if ((llq->entry_size_ctrl_supported & 773 ENA_ADMIN_LIST_ENTRY_SIZE_256B) && 774 (ena_dev->tx_mem_queue_type == 775 ENA_ADMIN_PLACEMENT_POLICY_DEV)) { 776 max_tx_queue_size /= 2; 777 PMD_INIT_LOG(INFO, 778 "Forcing large headers and decreasing maximum Tx queue size to %d\n", 779 max_tx_queue_size); 780 } else { 781 PMD_INIT_LOG(ERR, 782 "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); 783 } 784 } 785 786 if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) { 787 PMD_INIT_LOG(ERR, "Invalid queue size\n"); 788 return -EFAULT; 789 } 790 791 ctx->max_tx_queue_size = max_tx_queue_size; 792 ctx->max_rx_queue_size = max_rx_queue_size; 793 794 return 0; 795 } 796 797 static void ena_stats_restart(struct rte_eth_dev *dev) 798 { 799 struct ena_adapter *adapter = dev->data->dev_private; 800 801 rte_atomic64_init(&adapter->drv_stats->ierrors); 802 rte_atomic64_init(&adapter->drv_stats->oerrors); 803 rte_atomic64_init(&adapter->drv_stats->rx_nombuf); 804 adapter->drv_stats->rx_drops = 0; 805 } 806 807 static int ena_stats_get(struct rte_eth_dev *dev, 808 struct rte_eth_stats *stats) 809 { 810 struct ena_admin_basic_stats ena_stats; 811 struct ena_adapter *adapter = dev->data->dev_private; 812 struct ena_com_dev *ena_dev = &adapter->ena_dev; 813 int rc; 814 int i; 815 int max_rings_stats; 816 817 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 818 return -ENOTSUP; 819 820 memset(&ena_stats, 0, sizeof(ena_stats)); 821 822 rte_spinlock_lock(&adapter->admin_lock); 823 rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats); 824 rte_spinlock_unlock(&adapter->admin_lock); 825 if (unlikely(rc)) { 826 PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n"); 827 return rc; 828 } 829 830 /* Set of basic statistics from ENA */ 831 stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, 832 ena_stats.rx_pkts_low); 833 stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, 834 ena_stats.tx_pkts_low); 835 stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, 836 ena_stats.rx_bytes_low); 837 stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, 838 ena_stats.tx_bytes_low); 839 840 /* Driver related stats */ 841 stats->imissed = adapter->drv_stats->rx_drops; 842 stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); 843 stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); 844 stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); 845 846 max_rings_stats = RTE_MIN(dev->data->nb_rx_queues, 847 RTE_ETHDEV_QUEUE_STAT_CNTRS); 848 for (i = 0; i < max_rings_stats; ++i) { 849 struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats; 850 851 stats->q_ibytes[i] = rx_stats->bytes; 852 stats->q_ipackets[i] = rx_stats->cnt; 853 stats->q_errors[i] = rx_stats->bad_desc_num + 854 rx_stats->bad_req_id; 855 } 856 857 max_rings_stats = RTE_MIN(dev->data->nb_tx_queues, 858 RTE_ETHDEV_QUEUE_STAT_CNTRS); 859 for (i = 0; i < max_rings_stats; ++i) { 860 struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats; 861 862 stats->q_obytes[i] = tx_stats->bytes; 863 stats->q_opackets[i] = tx_stats->cnt; 864 } 865 866 return 0; 867 } 868 869 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 870 { 871 struct ena_adapter *adapter; 872 struct ena_com_dev *ena_dev; 873 int rc = 0; 874 875 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 876 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 877 adapter = 
dev->data->dev_private; 878 879 ena_dev = &adapter->ena_dev; 880 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 881 882 if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) { 883 PMD_DRV_LOG(ERR, 884 "Invalid MTU setting. New MTU: %d, max MTU: %d, min MTU: %d\n", 885 mtu, adapter->max_mtu, ENA_MIN_MTU); 886 return -EINVAL; 887 } 888 889 rc = ena_com_set_dev_mtu(ena_dev, mtu); 890 if (rc) 891 PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu); 892 else 893 PMD_DRV_LOG(NOTICE, "MTU set to: %d\n", mtu); 894 895 return rc; 896 } 897 898 static int ena_start(struct rte_eth_dev *dev) 899 { 900 struct ena_adapter *adapter = dev->data->dev_private; 901 uint64_t ticks; 902 int rc = 0; 903 904 /* Cannot allocate memory in secondary process */ 905 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 906 PMD_DRV_LOG(WARNING, "dev_start not supported in secondary.\n"); 907 return -EPERM; 908 } 909 910 rc = ena_check_valid_conf(adapter); 911 if (rc) 912 return rc; 913 914 rc = ena_setup_rx_intr(dev); 915 if (rc) 916 return rc; 917 918 rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX); 919 if (rc) 920 return rc; 921 922 rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX); 923 if (rc) 924 goto err_start_tx; 925 926 if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { 927 rc = ena_rss_configure(adapter); 928 if (rc) 929 goto err_rss_init; 930 } 931 932 ena_stats_restart(dev); 933 934 adapter->timestamp_wd = rte_get_timer_cycles(); 935 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 936 937 ticks = rte_get_timer_hz(); 938 rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), 939 ena_timer_wd_callback, dev); 940 941 ++adapter->dev_stats.dev_start; 942 adapter->state = ENA_ADAPTER_STATE_RUNNING; 943 944 return 0; 945 946 err_rss_init: 947 ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 948 err_start_tx: 949 ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 950 return rc; 951 } 952 953 static int ena_stop(struct rte_eth_dev *dev) 954 { 955 struct ena_adapter *adapter = dev->data->dev_private; 956 struct ena_com_dev *ena_dev = &adapter->ena_dev; 957 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 958 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 959 int rc; 960 961 /* Cannot free memory in secondary process */ 962 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 963 PMD_DRV_LOG(WARNING, "dev_stop not supported in secondary.\n"); 964 return -EPERM; 965 } 966 967 rte_timer_stop_sync(&adapter->timer_wd); 968 ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 969 ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 970 971 if (adapter->trigger_reset) { 972 rc = ena_com_dev_reset(ena_dev, adapter->reset_reason); 973 if (rc) 974 PMD_DRV_LOG(ERR, "Device reset failed, rc: %d\n", rc); 975 } 976 977 rte_intr_disable(intr_handle); 978 979 rte_intr_efd_disable(intr_handle); 980 if (intr_handle->intr_vec != NULL) { 981 rte_free(intr_handle->intr_vec); 982 intr_handle->intr_vec = NULL; 983 } 984 985 rte_intr_enable(intr_handle); 986 987 ++adapter->dev_stats.dev_stop; 988 adapter->state = ENA_ADAPTER_STATE_STOPPED; 989 dev->data->dev_started = 0; 990 991 return 0; 992 } 993 994 static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring) 995 { 996 struct ena_adapter *adapter = ring->adapter; 997 struct ena_com_dev *ena_dev = &adapter->ena_dev; 998 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 999 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1000 struct ena_com_create_io_ctx ctx = 1001 /* policy set to _HOST just to satisfy icc compiler */ 
1002 { ENA_ADMIN_PLACEMENT_POLICY_HOST, 1003 0, 0, 0, 0, 0 }; 1004 uint16_t ena_qid; 1005 unsigned int i; 1006 int rc; 1007 1008 ctx.msix_vector = -1; 1009 if (ring->type == ENA_RING_TYPE_TX) { 1010 ena_qid = ENA_IO_TXQ_IDX(ring->id); 1011 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1012 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1013 for (i = 0; i < ring->ring_size; i++) 1014 ring->empty_tx_reqs[i] = i; 1015 } else { 1016 ena_qid = ENA_IO_RXQ_IDX(ring->id); 1017 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 1018 if (rte_intr_dp_is_en(intr_handle)) 1019 ctx.msix_vector = intr_handle->intr_vec[ring->id]; 1020 for (i = 0; i < ring->ring_size; i++) 1021 ring->empty_rx_reqs[i] = i; 1022 } 1023 ctx.queue_size = ring->ring_size; 1024 ctx.qid = ena_qid; 1025 ctx.numa_node = ring->numa_socket_id; 1026 1027 rc = ena_com_create_io_queue(ena_dev, &ctx); 1028 if (rc) { 1029 PMD_DRV_LOG(ERR, 1030 "Failed to create IO queue[%d] (qid:%d), rc: %d\n", 1031 ring->id, ena_qid, rc); 1032 return rc; 1033 } 1034 1035 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1036 &ring->ena_com_io_sq, 1037 &ring->ena_com_io_cq); 1038 if (rc) { 1039 PMD_DRV_LOG(ERR, 1040 "Failed to get IO queue[%d] handlers, rc: %d\n", 1041 ring->id, rc); 1042 ena_com_destroy_io_queue(ena_dev, ena_qid); 1043 return rc; 1044 } 1045 1046 if (ring->type == ENA_RING_TYPE_TX) 1047 ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); 1048 1049 /* Start with Rx interrupts being masked. */ 1050 if (ring->type == ENA_RING_TYPE_RX && rte_intr_dp_is_en(intr_handle)) 1051 ena_rx_queue_intr_disable(dev, ring->id); 1052 1053 return 0; 1054 } 1055 1056 static void ena_queue_stop(struct ena_ring *ring) 1057 { 1058 struct ena_com_dev *ena_dev = &ring->adapter->ena_dev; 1059 1060 if (ring->type == ENA_RING_TYPE_RX) { 1061 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id)); 1062 ena_rx_queue_release_bufs(ring); 1063 } else { 1064 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id)); 1065 ena_tx_queue_release_bufs(ring); 1066 } 1067 } 1068 1069 static void ena_queue_stop_all(struct rte_eth_dev *dev, 1070 enum ena_ring_type ring_type) 1071 { 1072 struct ena_adapter *adapter = dev->data->dev_private; 1073 struct ena_ring *queues = NULL; 1074 uint16_t nb_queues, i; 1075 1076 if (ring_type == ENA_RING_TYPE_RX) { 1077 queues = adapter->rx_ring; 1078 nb_queues = dev->data->nb_rx_queues; 1079 } else { 1080 queues = adapter->tx_ring; 1081 nb_queues = dev->data->nb_tx_queues; 1082 } 1083 1084 for (i = 0; i < nb_queues; ++i) 1085 if (queues[i].configured) 1086 ena_queue_stop(&queues[i]); 1087 } 1088 1089 static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring) 1090 { 1091 int rc, bufs_num; 1092 1093 ena_assert_msg(ring->configured == 1, 1094 "Trying to start unconfigured queue\n"); 1095 1096 rc = ena_create_io_queue(dev, ring); 1097 if (rc) { 1098 PMD_INIT_LOG(ERR, "Failed to create IO queue\n"); 1099 return rc; 1100 } 1101 1102 ring->next_to_clean = 0; 1103 ring->next_to_use = 0; 1104 1105 if (ring->type == ENA_RING_TYPE_TX) { 1106 ring->tx_stats.available_desc = 1107 ena_com_free_q_entries(ring->ena_com_io_sq); 1108 return 0; 1109 } 1110 1111 bufs_num = ring->ring_size - 1; 1112 rc = ena_populate_rx_queue(ring, bufs_num); 1113 if (rc != bufs_num) { 1114 ena_com_destroy_io_queue(&ring->adapter->ena_dev, 1115 ENA_IO_RXQ_IDX(ring->id)); 1116 PMD_INIT_LOG(ERR, "Failed to populate Rx ring\n"); 1117 return ENA_COM_FAULT; 1118 } 1119 /* Flush per-core RX buffers pools cache as they can be used on other 1120 * cores as 
well. 1121 */ 1122 rte_mempool_cache_flush(NULL, ring->mb_pool); 1123 1124 return 0; 1125 } 1126 1127 static int ena_tx_queue_setup(struct rte_eth_dev *dev, 1128 uint16_t queue_idx, 1129 uint16_t nb_desc, 1130 unsigned int socket_id, 1131 const struct rte_eth_txconf *tx_conf) 1132 { 1133 struct ena_ring *txq = NULL; 1134 struct ena_adapter *adapter = dev->data->dev_private; 1135 unsigned int i; 1136 uint16_t dyn_thresh; 1137 1138 txq = &adapter->tx_ring[queue_idx]; 1139 1140 if (txq->configured) { 1141 PMD_DRV_LOG(CRIT, 1142 "API violation. Queue[%d] is already configured\n", 1143 queue_idx); 1144 return ENA_COM_FAULT; 1145 } 1146 1147 if (!rte_is_power_of_2(nb_desc)) { 1148 PMD_DRV_LOG(ERR, 1149 "Unsupported size of Tx queue: %d is not a power of 2.\n", 1150 nb_desc); 1151 return -EINVAL; 1152 } 1153 1154 if (nb_desc > adapter->max_tx_ring_size) { 1155 PMD_DRV_LOG(ERR, 1156 "Unsupported size of Tx queue (max size: %d)\n", 1157 adapter->max_tx_ring_size); 1158 return -EINVAL; 1159 } 1160 1161 txq->port_id = dev->data->port_id; 1162 txq->next_to_clean = 0; 1163 txq->next_to_use = 0; 1164 txq->ring_size = nb_desc; 1165 txq->size_mask = nb_desc - 1; 1166 txq->numa_socket_id = socket_id; 1167 txq->pkts_without_db = false; 1168 txq->last_cleanup_ticks = 0; 1169 1170 txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info", 1171 sizeof(struct ena_tx_buffer) * txq->ring_size, 1172 RTE_CACHE_LINE_SIZE, 1173 socket_id); 1174 if (!txq->tx_buffer_info) { 1175 PMD_DRV_LOG(ERR, 1176 "Failed to allocate memory for Tx buffer info\n"); 1177 return -ENOMEM; 1178 } 1179 1180 txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs", 1181 sizeof(uint16_t) * txq->ring_size, 1182 RTE_CACHE_LINE_SIZE, 1183 socket_id); 1184 if (!txq->empty_tx_reqs) { 1185 PMD_DRV_LOG(ERR, 1186 "Failed to allocate memory for empty Tx requests\n"); 1187 rte_free(txq->tx_buffer_info); 1188 return -ENOMEM; 1189 } 1190 1191 txq->push_buf_intermediate_buf = 1192 rte_zmalloc_socket("txq->push_buf_intermediate_buf", 1193 txq->tx_max_header_size, 1194 RTE_CACHE_LINE_SIZE, 1195 socket_id); 1196 if (!txq->push_buf_intermediate_buf) { 1197 PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n"); 1198 rte_free(txq->tx_buffer_info); 1199 rte_free(txq->empty_tx_reqs); 1200 return -ENOMEM; 1201 } 1202 1203 for (i = 0; i < txq->ring_size; i++) 1204 txq->empty_tx_reqs[i] = i; 1205 1206 txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 1207 1208 /* Check if caller provided the Tx cleanup threshold value. 
*/ 1209 if (tx_conf->tx_free_thresh != 0) { 1210 txq->tx_free_thresh = tx_conf->tx_free_thresh; 1211 } else { 1212 dyn_thresh = txq->ring_size - 1213 txq->ring_size / ENA_REFILL_THRESH_DIVIDER; 1214 txq->tx_free_thresh = RTE_MAX(dyn_thresh, 1215 txq->ring_size - ENA_REFILL_THRESH_PACKET); 1216 } 1217 1218 txq->missing_tx_completion_threshold = 1219 RTE_MIN(txq->ring_size / 2, ENA_DEFAULT_MISSING_COMP); 1220 1221 /* Store pointer to this queue in upper layer */ 1222 txq->configured = 1; 1223 dev->data->tx_queues[queue_idx] = txq; 1224 1225 return 0; 1226 } 1227 1228 static int ena_rx_queue_setup(struct rte_eth_dev *dev, 1229 uint16_t queue_idx, 1230 uint16_t nb_desc, 1231 unsigned int socket_id, 1232 const struct rte_eth_rxconf *rx_conf, 1233 struct rte_mempool *mp) 1234 { 1235 struct ena_adapter *adapter = dev->data->dev_private; 1236 struct ena_ring *rxq = NULL; 1237 size_t buffer_size; 1238 int i; 1239 uint16_t dyn_thresh; 1240 1241 rxq = &adapter->rx_ring[queue_idx]; 1242 if (rxq->configured) { 1243 PMD_DRV_LOG(CRIT, 1244 "API violation. Queue[%d] is already configured\n", 1245 queue_idx); 1246 return ENA_COM_FAULT; 1247 } 1248 1249 if (!rte_is_power_of_2(nb_desc)) { 1250 PMD_DRV_LOG(ERR, 1251 "Unsupported size of Rx queue: %d is not a power of 2.\n", 1252 nb_desc); 1253 return -EINVAL; 1254 } 1255 1256 if (nb_desc > adapter->max_rx_ring_size) { 1257 PMD_DRV_LOG(ERR, 1258 "Unsupported size of Rx queue (max size: %d)\n", 1259 adapter->max_rx_ring_size); 1260 return -EINVAL; 1261 } 1262 1263 /* ENA isn't supporting buffers smaller than 1400 bytes */ 1264 buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; 1265 if (buffer_size < ENA_RX_BUF_MIN_SIZE) { 1266 PMD_DRV_LOG(ERR, 1267 "Unsupported size of Rx buffer: %zu (min size: %d)\n", 1268 buffer_size, ENA_RX_BUF_MIN_SIZE); 1269 return -EINVAL; 1270 } 1271 1272 rxq->port_id = dev->data->port_id; 1273 rxq->next_to_clean = 0; 1274 rxq->next_to_use = 0; 1275 rxq->ring_size = nb_desc; 1276 rxq->size_mask = nb_desc - 1; 1277 rxq->numa_socket_id = socket_id; 1278 rxq->mb_pool = mp; 1279 1280 rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info", 1281 sizeof(struct ena_rx_buffer) * nb_desc, 1282 RTE_CACHE_LINE_SIZE, 1283 socket_id); 1284 if (!rxq->rx_buffer_info) { 1285 PMD_DRV_LOG(ERR, 1286 "Failed to allocate memory for Rx buffer info\n"); 1287 return -ENOMEM; 1288 } 1289 1290 rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer", 1291 sizeof(struct rte_mbuf *) * nb_desc, 1292 RTE_CACHE_LINE_SIZE, 1293 socket_id); 1294 if (!rxq->rx_refill_buffer) { 1295 PMD_DRV_LOG(ERR, 1296 "Failed to allocate memory for Rx refill buffer\n"); 1297 rte_free(rxq->rx_buffer_info); 1298 rxq->rx_buffer_info = NULL; 1299 return -ENOMEM; 1300 } 1301 1302 rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs", 1303 sizeof(uint16_t) * nb_desc, 1304 RTE_CACHE_LINE_SIZE, 1305 socket_id); 1306 if (!rxq->empty_rx_reqs) { 1307 PMD_DRV_LOG(ERR, 1308 "Failed to allocate memory for empty Rx requests\n"); 1309 rte_free(rxq->rx_buffer_info); 1310 rxq->rx_buffer_info = NULL; 1311 rte_free(rxq->rx_refill_buffer); 1312 rxq->rx_refill_buffer = NULL; 1313 return -ENOMEM; 1314 } 1315 1316 for (i = 0; i < nb_desc; i++) 1317 rxq->empty_rx_reqs[i] = i; 1318 1319 rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; 1320 1321 if (rx_conf->rx_free_thresh != 0) { 1322 rxq->rx_free_thresh = rx_conf->rx_free_thresh; 1323 } else { 1324 dyn_thresh = rxq->ring_size / ENA_REFILL_THRESH_DIVIDER; 1325 rxq->rx_free_thresh = 
RTE_MIN(dyn_thresh, 1326 (uint16_t)(ENA_REFILL_THRESH_PACKET)); 1327 } 1328 1329 /* Store pointer to this queue in upper layer */ 1330 rxq->configured = 1; 1331 dev->data->rx_queues[queue_idx] = rxq; 1332 1333 return 0; 1334 } 1335 1336 static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq, 1337 struct rte_mbuf *mbuf, uint16_t id) 1338 { 1339 struct ena_com_buf ebuf; 1340 int rc; 1341 1342 /* prepare physical address for DMA transaction */ 1343 ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; 1344 ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; 1345 1346 /* pass resource to device */ 1347 rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id); 1348 if (unlikely(rc != 0)) 1349 PMD_RX_LOG(WARNING, "Failed adding Rx desc\n"); 1350 1351 return rc; 1352 } 1353 1354 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) 1355 { 1356 unsigned int i; 1357 int rc; 1358 uint16_t next_to_use = rxq->next_to_use; 1359 uint16_t req_id; 1360 #ifdef RTE_ETHDEV_DEBUG_RX 1361 uint16_t in_use; 1362 #endif 1363 struct rte_mbuf **mbufs = rxq->rx_refill_buffer; 1364 1365 if (unlikely(!count)) 1366 return 0; 1367 1368 #ifdef RTE_ETHDEV_DEBUG_RX 1369 in_use = rxq->ring_size - 1 - 1370 ena_com_free_q_entries(rxq->ena_com_io_sq); 1371 if (unlikely((in_use + count) >= rxq->ring_size)) 1372 PMD_RX_LOG(ERR, "Bad Rx ring state\n"); 1373 #endif 1374 1375 /* get resources for incoming packets */ 1376 rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, count); 1377 if (unlikely(rc < 0)) { 1378 rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); 1379 ++rxq->rx_stats.mbuf_alloc_fail; 1380 PMD_RX_LOG(DEBUG, "There are not enough free buffers\n"); 1381 return 0; 1382 } 1383 1384 for (i = 0; i < count; i++) { 1385 struct rte_mbuf *mbuf = mbufs[i]; 1386 struct ena_rx_buffer *rx_info; 1387 1388 if (likely((i + 4) < count)) 1389 rte_prefetch0(mbufs[i + 4]); 1390 1391 req_id = rxq->empty_rx_reqs[next_to_use]; 1392 rx_info = &rxq->rx_buffer_info[req_id]; 1393 1394 rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id); 1395 if (unlikely(rc != 0)) 1396 break; 1397 1398 rx_info->mbuf = mbuf; 1399 next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask); 1400 } 1401 1402 if (unlikely(i < count)) { 1403 PMD_RX_LOG(WARNING, 1404 "Refilled Rx queue[%d] with only %d/%d buffers\n", 1405 rxq->id, i, count); 1406 rte_pktmbuf_free_bulk(&mbufs[i], count - i); 1407 ++rxq->rx_stats.refill_partial; 1408 } 1409 1410 /* When we submitted free recources to device... */ 1411 if (likely(i > 0)) { 1412 /* ...let HW know that it can fill buffers with data. */ 1413 ena_com_write_sq_doorbell(rxq->ena_com_io_sq); 1414 1415 rxq->next_to_use = next_to_use; 1416 } 1417 1418 return i; 1419 } 1420 1421 static int ena_device_init(struct ena_com_dev *ena_dev, 1422 struct rte_pci_device *pdev, 1423 struct ena_com_dev_get_features_ctx *get_feat_ctx, 1424 bool *wd_state) 1425 { 1426 uint32_t aenq_groups; 1427 int rc; 1428 bool readless_supported; 1429 1430 /* Initialize mmio registers */ 1431 rc = ena_com_mmio_reg_read_request_init(ena_dev); 1432 if (rc) { 1433 PMD_DRV_LOG(ERR, "Failed to init MMIO read less\n"); 1434 return rc; 1435 } 1436 1437 /* The PCIe configuration space revision id indicate if mmio reg 1438 * read is disabled. 
1439 */ 1440 readless_supported = !(pdev->id.class_id & ENA_MMIO_DISABLE_REG_READ); 1441 ena_com_set_mmio_read_mode(ena_dev, readless_supported); 1442 1443 /* reset device */ 1444 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 1445 if (rc) { 1446 PMD_DRV_LOG(ERR, "Cannot reset device\n"); 1447 goto err_mmio_read_less; 1448 } 1449 1450 /* check FW version */ 1451 rc = ena_com_validate_version(ena_dev); 1452 if (rc) { 1453 PMD_DRV_LOG(ERR, "Device version is too low\n"); 1454 goto err_mmio_read_less; 1455 } 1456 1457 ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); 1458 1459 /* ENA device administration layer init */ 1460 rc = ena_com_admin_init(ena_dev, &aenq_handlers); 1461 if (rc) { 1462 PMD_DRV_LOG(ERR, 1463 "Cannot initialize ENA admin queue\n"); 1464 goto err_mmio_read_less; 1465 } 1466 1467 /* To enable the msix interrupts the driver needs to know the number 1468 * of queues. So the driver uses polling mode to retrieve this 1469 * information. 1470 */ 1471 ena_com_set_admin_polling_mode(ena_dev, true); 1472 1473 ena_config_host_info(ena_dev); 1474 1475 /* Get Device Attributes and features */ 1476 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 1477 if (rc) { 1478 PMD_DRV_LOG(ERR, 1479 "Cannot get attribute for ENA device, rc: %d\n", rc); 1480 goto err_admin_init; 1481 } 1482 1483 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1484 BIT(ENA_ADMIN_NOTIFICATION) | 1485 BIT(ENA_ADMIN_KEEP_ALIVE) | 1486 BIT(ENA_ADMIN_FATAL_ERROR) | 1487 BIT(ENA_ADMIN_WARNING); 1488 1489 aenq_groups &= get_feat_ctx->aenq.supported_groups; 1490 rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 1491 if (rc) { 1492 PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc: %d\n", rc); 1493 goto err_admin_init; 1494 } 1495 1496 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 1497 1498 return 0; 1499 1500 err_admin_init: 1501 ena_com_admin_destroy(ena_dev); 1502 1503 err_mmio_read_less: 1504 ena_com_mmio_reg_read_request_destroy(ena_dev); 1505 1506 return rc; 1507 } 1508 1509 static void ena_interrupt_handler_rte(void *cb_arg) 1510 { 1511 struct rte_eth_dev *dev = cb_arg; 1512 struct ena_adapter *adapter = dev->data->dev_private; 1513 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1514 1515 ena_com_admin_q_comp_intr_handler(ena_dev); 1516 if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) 1517 ena_com_aenq_intr_handler(ena_dev, dev); 1518 } 1519 1520 static void check_for_missing_keep_alive(struct ena_adapter *adapter) 1521 { 1522 if (!adapter->wd_state) 1523 return; 1524 1525 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 1526 return; 1527 1528 if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 1529 adapter->keep_alive_timeout)) { 1530 PMD_DRV_LOG(ERR, "Keep alive timeout\n"); 1531 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 1532 adapter->trigger_reset = true; 1533 ++adapter->dev_stats.wd_expired; 1534 } 1535 } 1536 1537 /* Check if admin queue is enabled */ 1538 static void check_for_admin_com_state(struct ena_adapter *adapter) 1539 { 1540 if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 1541 PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n"); 1542 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 1543 adapter->trigger_reset = true; 1544 } 1545 } 1546 1547 static int check_for_tx_completion_in_queue(struct ena_adapter *adapter, 1548 struct ena_ring *tx_ring) 1549 { 1550 struct ena_tx_buffer *tx_buf; 1551 uint64_t timestamp; 1552 uint64_t completion_delay; 1553 uint32_t missed_tx = 0; 1554 unsigned int i; 1555 int 
rc = 0; 1556 1557 for (i = 0; i < tx_ring->ring_size; ++i) { 1558 tx_buf = &tx_ring->tx_buffer_info[i]; 1559 timestamp = tx_buf->timestamp; 1560 1561 if (timestamp == 0) 1562 continue; 1563 1564 completion_delay = rte_get_timer_cycles() - timestamp; 1565 if (completion_delay > adapter->missing_tx_completion_to) { 1566 if (unlikely(!tx_buf->print_once)) { 1567 PMD_TX_LOG(WARNING, 1568 "Found a Tx that wasn't completed on time, qid %d, index %d. " 1569 "Missing Tx outstanding for %" PRIu64 " msecs.\n", 1570 tx_ring->id, i, completion_delay / 1571 rte_get_timer_hz() * 1000); 1572 tx_buf->print_once = true; 1573 } 1574 ++missed_tx; 1575 } 1576 } 1577 1578 if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) { 1579 PMD_DRV_LOG(ERR, 1580 "The number of lost Tx completions is above the threshold (%d > %d). " 1581 "Trigger the device reset.\n", 1582 missed_tx, 1583 tx_ring->missing_tx_completion_threshold); 1584 adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; 1585 adapter->trigger_reset = true; 1586 rc = -EIO; 1587 } 1588 1589 tx_ring->tx_stats.missed_tx += missed_tx; 1590 1591 return rc; 1592 } 1593 1594 static void check_for_tx_completions(struct ena_adapter *adapter) 1595 { 1596 struct ena_ring *tx_ring; 1597 uint64_t tx_cleanup_delay; 1598 size_t qid; 1599 int budget; 1600 uint16_t nb_tx_queues = adapter->edev_data->nb_tx_queues; 1601 1602 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) 1603 return; 1604 1605 nb_tx_queues = adapter->edev_data->nb_tx_queues; 1606 budget = adapter->missing_tx_completion_budget; 1607 1608 qid = adapter->last_tx_comp_qid; 1609 while (budget-- > 0) { 1610 tx_ring = &adapter->tx_ring[qid]; 1611 1612 /* Tx cleanup is called only by the burst function and can be 1613 * called dynamically by the application. Also cleanup is 1614 * limited by the threshold. To avoid false detection of the 1615 * missing HW Tx completion, get the delay since last cleanup 1616 * function was called. 
1617 */ 1618 tx_cleanup_delay = rte_get_timer_cycles() - 1619 tx_ring->last_cleanup_ticks; 1620 if (tx_cleanup_delay < adapter->tx_cleanup_stall_delay) 1621 check_for_tx_completion_in_queue(adapter, tx_ring); 1622 qid = (qid + 1) % nb_tx_queues; 1623 } 1624 1625 adapter->last_tx_comp_qid = qid; 1626 } 1627 1628 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1629 void *arg) 1630 { 1631 struct rte_eth_dev *dev = arg; 1632 struct ena_adapter *adapter = dev->data->dev_private; 1633 1634 check_for_missing_keep_alive(adapter); 1635 check_for_admin_com_state(adapter); 1636 check_for_tx_completions(adapter); 1637 1638 if (unlikely(adapter->trigger_reset)) { 1639 PMD_DRV_LOG(ERR, "Trigger reset is on\n"); 1640 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 1641 NULL); 1642 } 1643 } 1644 1645 static inline void 1646 set_default_llq_configurations(struct ena_llq_configurations *llq_config, 1647 struct ena_admin_feature_llq_desc *llq, 1648 bool use_large_llq_hdr) 1649 { 1650 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 1651 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 1652 llq_config->llq_num_decs_before_header = 1653 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 1654 1655 if (use_large_llq_hdr && 1656 (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) { 1657 llq_config->llq_ring_entry_size = 1658 ENA_ADMIN_LIST_ENTRY_SIZE_256B; 1659 llq_config->llq_ring_entry_size_value = 256; 1660 } else { 1661 llq_config->llq_ring_entry_size = 1662 ENA_ADMIN_LIST_ENTRY_SIZE_128B; 1663 llq_config->llq_ring_entry_size_value = 128; 1664 } 1665 } 1666 1667 static int 1668 ena_set_queues_placement_policy(struct ena_adapter *adapter, 1669 struct ena_com_dev *ena_dev, 1670 struct ena_admin_feature_llq_desc *llq, 1671 struct ena_llq_configurations *llq_default_configurations) 1672 { 1673 int rc; 1674 u32 llq_feature_mask; 1675 1676 llq_feature_mask = 1 << ENA_ADMIN_LLQ; 1677 if (!(ena_dev->supported_features & llq_feature_mask)) { 1678 PMD_DRV_LOG(INFO, 1679 "LLQ is not supported. Fallback to host mode policy.\n"); 1680 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 1681 return 0; 1682 } 1683 1684 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 1685 if (unlikely(rc)) { 1686 PMD_INIT_LOG(WARNING, 1687 "Failed to config dev mode. Fallback to host mode policy.\n"); 1688 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 1689 return 0; 1690 } 1691 1692 /* Nothing to config, exit */ 1693 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 1694 return 0; 1695 1696 if (!adapter->dev_mem_base) { 1697 PMD_DRV_LOG(ERR, 1698 "Unable to access LLQ BAR resource. 
Fallback to host mode policy.\n"); 1699 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 1700 return 0; 1701 } 1702 1703 ena_dev->mem_bar = adapter->dev_mem_base; 1704 1705 return 0; 1706 } 1707 1708 static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev, 1709 struct ena_com_dev_get_features_ctx *get_feat_ctx) 1710 { 1711 uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; 1712 1713 /* Regular queues capabilities */ 1714 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 1715 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 1716 &get_feat_ctx->max_queue_ext.max_queue_ext; 1717 io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num, 1718 max_queue_ext->max_rx_cq_num); 1719 io_tx_sq_num = max_queue_ext->max_tx_sq_num; 1720 io_tx_cq_num = max_queue_ext->max_tx_cq_num; 1721 } else { 1722 struct ena_admin_queue_feature_desc *max_queues = 1723 &get_feat_ctx->max_queues; 1724 io_tx_sq_num = max_queues->max_sq_num; 1725 io_tx_cq_num = max_queues->max_cq_num; 1726 io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num); 1727 } 1728 1729 /* In case of LLQ use the llq number in the get feature cmd */ 1730 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 1731 io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 1732 1733 max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num); 1734 max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num); 1735 max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num); 1736 1737 if (unlikely(max_num_io_queues == 0)) { 1738 PMD_DRV_LOG(ERR, "Number of IO queues cannot not be 0\n"); 1739 return -EFAULT; 1740 } 1741 1742 return max_num_io_queues; 1743 } 1744 1745 static void 1746 ena_set_offloads(struct ena_offloads *offloads, 1747 struct ena_admin_feature_offload_desc *offload_desc) 1748 { 1749 if (offload_desc->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) 1750 offloads->tx_offloads |= ENA_IPV4_TSO; 1751 1752 /* Tx IPv4 checksum offloads */ 1753 if (offload_desc->tx & 1754 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) 1755 offloads->tx_offloads |= ENA_L3_IPV4_CSUM; 1756 if (offload_desc->tx & 1757 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) 1758 offloads->tx_offloads |= ENA_L4_IPV4_CSUM; 1759 if (offload_desc->tx & 1760 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) 1761 offloads->tx_offloads |= ENA_L4_IPV4_CSUM_PARTIAL; 1762 1763 /* Tx IPv6 checksum offloads */ 1764 if (offload_desc->tx & 1765 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) 1766 offloads->tx_offloads |= ENA_L4_IPV6_CSUM; 1767 if (offload_desc->tx & 1768 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) 1769 offloads->tx_offloads |= ENA_L4_IPV6_CSUM_PARTIAL; 1770 1771 /* Rx IPv4 checksum offloads */ 1772 if (offload_desc->rx_supported & 1773 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK) 1774 offloads->rx_offloads |= ENA_L3_IPV4_CSUM; 1775 if (offload_desc->rx_supported & 1776 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) 1777 offloads->rx_offloads |= ENA_L4_IPV4_CSUM; 1778 1779 /* Rx IPv6 checksum offloads */ 1780 if (offload_desc->rx_supported & 1781 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) 1782 offloads->rx_offloads |= ENA_L4_IPV6_CSUM; 1783 1784 if (offload_desc->rx_supported & 1785 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) 1786 offloads->rx_offloads |= ENA_RX_RSS_HASH; 1787 } 1788 1789 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) 1790 { 1791 struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; 1792 struct rte_pci_device 
*pci_dev; 1793 struct rte_intr_handle *intr_handle; 1794 struct ena_adapter *adapter = eth_dev->data->dev_private; 1795 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1796 struct ena_com_dev_get_features_ctx get_feat_ctx; 1797 struct ena_llq_configurations llq_config; 1798 const char *queue_type_str; 1799 uint32_t max_num_io_queues; 1800 int rc; 1801 static int adapters_found; 1802 bool disable_meta_caching; 1803 bool wd_state = false; 1804 1805 eth_dev->dev_ops = &ena_dev_ops; 1806 eth_dev->rx_pkt_burst = ð_ena_recv_pkts; 1807 eth_dev->tx_pkt_burst = ð_ena_xmit_pkts; 1808 eth_dev->tx_pkt_prepare = ð_ena_prep_pkts; 1809 1810 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1811 return 0; 1812 1813 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1814 1815 memset(adapter, 0, sizeof(struct ena_adapter)); 1816 ena_dev = &adapter->ena_dev; 1817 1818 adapter->edev_data = eth_dev->data; 1819 1820 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1821 1822 PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n", 1823 pci_dev->addr.domain, 1824 pci_dev->addr.bus, 1825 pci_dev->addr.devid, 1826 pci_dev->addr.function); 1827 1828 intr_handle = &pci_dev->intr_handle; 1829 1830 adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; 1831 adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; 1832 1833 if (!adapter->regs) { 1834 PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n", 1835 ENA_REGS_BAR); 1836 return -ENXIO; 1837 } 1838 1839 ena_dev->reg_bar = adapter->regs; 1840 /* This is a dummy pointer for ena_com functions. */ 1841 ena_dev->dmadev = adapter; 1842 1843 adapter->id_number = adapters_found; 1844 1845 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", 1846 adapter->id_number); 1847 1848 rc = ena_parse_devargs(adapter, pci_dev->device.devargs); 1849 if (rc != 0) { 1850 PMD_INIT_LOG(CRIT, "Failed to parse devargs\n"); 1851 goto err; 1852 } 1853 1854 /* device specific initialization routine */ 1855 rc = ena_device_init(ena_dev, pci_dev, &get_feat_ctx, &wd_state); 1856 if (rc) { 1857 PMD_INIT_LOG(CRIT, "Failed to init ENA device\n"); 1858 goto err; 1859 } 1860 adapter->wd_state = wd_state; 1861 1862 set_default_llq_configurations(&llq_config, &get_feat_ctx.llq, 1863 adapter->use_large_llq_hdr); 1864 rc = ena_set_queues_placement_policy(adapter, ena_dev, 1865 &get_feat_ctx.llq, &llq_config); 1866 if (unlikely(rc)) { 1867 PMD_INIT_LOG(CRIT, "Failed to set placement policy\n"); 1868 return rc; 1869 } 1870 1871 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 1872 queue_type_str = "Regular"; 1873 else 1874 queue_type_str = "Low latency"; 1875 PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str); 1876 1877 calc_queue_ctx.ena_dev = ena_dev; 1878 calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 1879 1880 max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx); 1881 rc = ena_calc_io_queue_size(&calc_queue_ctx, 1882 adapter->use_large_llq_hdr); 1883 if (unlikely((rc != 0) || (max_num_io_queues == 0))) { 1884 rc = -EFAULT; 1885 goto err_device_destroy; 1886 } 1887 1888 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size; 1889 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size; 1890 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 1891 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 1892 adapter->max_num_io_queues = max_num_io_queues; 1893 1894 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1895 disable_meta_caching = 1896 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags & 1897 
BIT(ENA_ADMIN_DISABLE_META_CACHING)); 1898 } else { 1899 disable_meta_caching = false; 1900 } 1901 1902 /* prepare ring structures */ 1903 ena_init_rings(adapter, disable_meta_caching); 1904 1905 ena_config_debug_area(adapter); 1906 1907 /* Set max MTU for this device */ 1908 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 1909 1910 ena_set_offloads(&adapter->offloads, &get_feat_ctx.offload); 1911 1912 /* Copy MAC address and point DPDK to it */ 1913 eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr; 1914 rte_ether_addr_copy((struct rte_ether_addr *) 1915 get_feat_ctx.dev_attr.mac_addr, 1916 (struct rte_ether_addr *)adapter->mac_addr); 1917 1918 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 1919 if (unlikely(rc != 0)) { 1920 PMD_DRV_LOG(ERR, "Failed to initialize RSS in ENA device\n"); 1921 goto err_delete_debug_area; 1922 } 1923 1924 adapter->drv_stats = rte_zmalloc("adapter stats", 1925 sizeof(*adapter->drv_stats), 1926 RTE_CACHE_LINE_SIZE); 1927 if (!adapter->drv_stats) { 1928 PMD_DRV_LOG(ERR, 1929 "Failed to allocate memory for adapter statistics\n"); 1930 rc = -ENOMEM; 1931 goto err_rss_destroy; 1932 } 1933 1934 rte_spinlock_init(&adapter->admin_lock); 1935 1936 rte_intr_callback_register(intr_handle, 1937 ena_interrupt_handler_rte, 1938 eth_dev); 1939 rte_intr_enable(intr_handle); 1940 ena_com_set_admin_polling_mode(ena_dev, false); 1941 ena_com_admin_aenq_enable(ena_dev); 1942 1943 if (adapters_found == 0) 1944 rte_timer_subsystem_init(); 1945 rte_timer_init(&adapter->timer_wd); 1946 1947 adapters_found++; 1948 adapter->state = ENA_ADAPTER_STATE_INIT; 1949 1950 return 0; 1951 1952 err_rss_destroy: 1953 ena_com_rss_destroy(ena_dev); 1954 err_delete_debug_area: 1955 ena_com_delete_debug_area(ena_dev); 1956 1957 err_device_destroy: 1958 ena_com_delete_host_info(ena_dev); 1959 ena_com_admin_destroy(ena_dev); 1960 1961 err: 1962 return rc; 1963 } 1964 1965 static void ena_destroy_device(struct rte_eth_dev *eth_dev) 1966 { 1967 struct ena_adapter *adapter = eth_dev->data->dev_private; 1968 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1969 1970 if (adapter->state == ENA_ADAPTER_STATE_FREE) 1971 return; 1972 1973 ena_com_set_admin_running_state(ena_dev, false); 1974 1975 if (adapter->state != ENA_ADAPTER_STATE_CLOSED) 1976 ena_close(eth_dev); 1977 1978 ena_com_rss_destroy(ena_dev); 1979 1980 ena_com_delete_debug_area(ena_dev); 1981 ena_com_delete_host_info(ena_dev); 1982 1983 ena_com_abort_admin_commands(ena_dev); 1984 ena_com_wait_for_abort_completion(ena_dev); 1985 ena_com_admin_destroy(ena_dev); 1986 ena_com_mmio_reg_read_request_destroy(ena_dev); 1987 1988 adapter->state = ENA_ADAPTER_STATE_FREE; 1989 } 1990 1991 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) 1992 { 1993 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1994 return 0; 1995 1996 ena_destroy_device(eth_dev); 1997 1998 return 0; 1999 } 2000 2001 static int ena_dev_configure(struct rte_eth_dev *dev) 2002 { 2003 struct ena_adapter *adapter = dev->data->dev_private; 2004 2005 adapter->state = ENA_ADAPTER_STATE_CONFIG; 2006 2007 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 2008 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 2009 dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; 2010 2011 /* Scattered Rx cannot be turned off in the HW, so this capability must 2012 * be forced. 
2013 */ 2014 dev->data->scattered_rx = 1; 2015 2016 adapter->last_tx_comp_qid = 0; 2017 2018 adapter->missing_tx_completion_budget = 2019 RTE_MIN(ENA_MONITORED_TX_QUEUES, dev->data->nb_tx_queues); 2020 2021 adapter->missing_tx_completion_to = ENA_TX_TIMEOUT; 2022 /* To avoid detection of the spurious Tx completion timeout due to 2023 * application not calling the Tx cleanup function, set timeout for the 2024 * Tx queue which should be half of the missing completion timeout for a 2025 * safety. If there will be a lot of missing Tx completions in the 2026 * queue, they will be detected sooner or later. 2027 */ 2028 adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2; 2029 2030 adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads; 2031 adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads; 2032 2033 return 0; 2034 } 2035 2036 static void ena_init_rings(struct ena_adapter *adapter, 2037 bool disable_meta_caching) 2038 { 2039 size_t i; 2040 2041 for (i = 0; i < adapter->max_num_io_queues; i++) { 2042 struct ena_ring *ring = &adapter->tx_ring[i]; 2043 2044 ring->configured = 0; 2045 ring->type = ENA_RING_TYPE_TX; 2046 ring->adapter = adapter; 2047 ring->id = i; 2048 ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 2049 ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 2050 ring->sgl_size = adapter->max_tx_sgl_size; 2051 ring->disable_meta_caching = disable_meta_caching; 2052 } 2053 2054 for (i = 0; i < adapter->max_num_io_queues; i++) { 2055 struct ena_ring *ring = &adapter->rx_ring[i]; 2056 2057 ring->configured = 0; 2058 ring->type = ENA_RING_TYPE_RX; 2059 ring->adapter = adapter; 2060 ring->id = i; 2061 ring->sgl_size = adapter->max_rx_sgl_size; 2062 } 2063 } 2064 2065 static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter) 2066 { 2067 uint64_t port_offloads = 0; 2068 2069 if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM) 2070 port_offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM; 2071 2072 if (adapter->offloads.rx_offloads & 2073 (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM)) 2074 port_offloads |= 2075 DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM; 2076 2077 if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH) 2078 port_offloads |= DEV_RX_OFFLOAD_RSS_HASH; 2079 2080 port_offloads |= DEV_RX_OFFLOAD_SCATTER; 2081 2082 return port_offloads; 2083 } 2084 2085 static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter) 2086 { 2087 uint64_t port_offloads = 0; 2088 2089 if (adapter->offloads.tx_offloads & ENA_IPV4_TSO) 2090 port_offloads |= DEV_TX_OFFLOAD_TCP_TSO; 2091 2092 if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM) 2093 port_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM; 2094 if (adapter->offloads.tx_offloads & 2095 (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM | 2096 ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL)) 2097 port_offloads |= 2098 DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM; 2099 2100 port_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; 2101 2102 return port_offloads; 2103 } 2104 2105 static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter) 2106 { 2107 RTE_SET_USED(adapter); 2108 2109 return 0; 2110 } 2111 2112 static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter) 2113 { 2114 RTE_SET_USED(adapter); 2115 2116 return 0; 2117 } 2118 2119 static int ena_infos_get(struct rte_eth_dev *dev, 2120 struct rte_eth_dev_info *dev_info) 2121 { 2122 struct ena_adapter *adapter; 2123 struct ena_com_dev *ena_dev; 2124 2125 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 
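	/* These assertions are defensive only: dev->data and dev_private are
	 * allocated by the ethdev/PCI probe path before any dev_ops callback
	 * can be invoked, so they are not expected to fire at runtime.
	 */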
2126 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 2127 adapter = dev->data->dev_private; 2128 2129 ena_dev = &adapter->ena_dev; 2130 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 2131 2132 dev_info->speed_capa = 2133 ETH_LINK_SPEED_1G | 2134 ETH_LINK_SPEED_2_5G | 2135 ETH_LINK_SPEED_5G | 2136 ETH_LINK_SPEED_10G | 2137 ETH_LINK_SPEED_25G | 2138 ETH_LINK_SPEED_40G | 2139 ETH_LINK_SPEED_50G | 2140 ETH_LINK_SPEED_100G; 2141 2142 /* Inform framework about available features */ 2143 dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter); 2144 dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter); 2145 dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter); 2146 dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter); 2147 2148 dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF; 2149 dev_info->hash_key_size = ENA_HASH_KEY_SIZE; 2150 2151 dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 2152 dev_info->max_rx_pktlen = adapter->max_mtu + RTE_ETHER_HDR_LEN + 2153 RTE_ETHER_CRC_LEN; 2154 dev_info->min_mtu = ENA_MIN_MTU; 2155 dev_info->max_mtu = adapter->max_mtu; 2156 dev_info->max_mac_addrs = 1; 2157 2158 dev_info->max_rx_queues = adapter->max_num_io_queues; 2159 dev_info->max_tx_queues = adapter->max_num_io_queues; 2160 dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 2161 2162 dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size; 2163 dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2164 dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2165 adapter->max_rx_sgl_size); 2166 dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2167 adapter->max_rx_sgl_size); 2168 2169 dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size; 2170 dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2171 dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2172 adapter->max_tx_sgl_size); 2173 dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2174 adapter->max_tx_sgl_size); 2175 2176 dev_info->default_rxportconf.ring_size = ENA_DEFAULT_RING_SIZE; 2177 dev_info->default_txportconf.ring_size = ENA_DEFAULT_RING_SIZE; 2178 2179 return 0; 2180 } 2181 2182 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len) 2183 { 2184 mbuf->data_len = len; 2185 mbuf->data_off = RTE_PKTMBUF_HEADROOM; 2186 mbuf->refcnt = 1; 2187 mbuf->next = NULL; 2188 } 2189 2190 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, 2191 struct ena_com_rx_buf_info *ena_bufs, 2192 uint32_t descs, 2193 uint16_t *next_to_clean, 2194 uint8_t offset) 2195 { 2196 struct rte_mbuf *mbuf; 2197 struct rte_mbuf *mbuf_head; 2198 struct ena_rx_buffer *rx_info; 2199 int rc; 2200 uint16_t ntc, len, req_id, buf = 0; 2201 2202 if (unlikely(descs == 0)) 2203 return NULL; 2204 2205 ntc = *next_to_clean; 2206 2207 len = ena_bufs[buf].len; 2208 req_id = ena_bufs[buf].req_id; 2209 2210 rx_info = &rx_ring->rx_buffer_info[req_id]; 2211 2212 mbuf = rx_info->mbuf; 2213 RTE_ASSERT(mbuf != NULL); 2214 2215 ena_init_rx_mbuf(mbuf, len); 2216 2217 /* Fill the mbuf head with the data specific for 1st segment. 
*/ 2218 mbuf_head = mbuf; 2219 mbuf_head->nb_segs = descs; 2220 mbuf_head->port = rx_ring->port_id; 2221 mbuf_head->pkt_len = len; 2222 mbuf_head->data_off += offset; 2223 2224 rx_info->mbuf = NULL; 2225 rx_ring->empty_rx_reqs[ntc] = req_id; 2226 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 2227 2228 while (--descs) { 2229 ++buf; 2230 len = ena_bufs[buf].len; 2231 req_id = ena_bufs[buf].req_id; 2232 2233 rx_info = &rx_ring->rx_buffer_info[req_id]; 2234 RTE_ASSERT(rx_info->mbuf != NULL); 2235 2236 if (unlikely(len == 0)) { 2237 /* 2238 * Some devices can pass descriptor with the length 0. 2239 * To avoid confusion, the PMD is simply putting the 2240 * descriptor back, as it was never used. We'll avoid 2241 * mbuf allocation that way. 2242 */ 2243 rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq, 2244 rx_info->mbuf, req_id); 2245 if (unlikely(rc != 0)) { 2246 /* Free the mbuf in case of an error. */ 2247 rte_mbuf_raw_free(rx_info->mbuf); 2248 } else { 2249 /* 2250 * If there was no error, just exit the loop as 2251 * 0 length descriptor is always the last one. 2252 */ 2253 break; 2254 } 2255 } else { 2256 /* Create an mbuf chain. */ 2257 mbuf->next = rx_info->mbuf; 2258 mbuf = mbuf->next; 2259 2260 ena_init_rx_mbuf(mbuf, len); 2261 mbuf_head->pkt_len += len; 2262 } 2263 2264 /* 2265 * Mark the descriptor as depleted and perform necessary 2266 * cleanup. 2267 * This code will execute in two cases: 2268 * 1. Descriptor len was greater than 0 - normal situation. 2269 * 2. Descriptor len was 0 and we failed to add the descriptor 2270 * to the device. In that situation, we should try to add 2271 * the mbuf again in the populate routine and mark the 2272 * descriptor as used up by the device. 2273 */ 2274 rx_info->mbuf = NULL; 2275 rx_ring->empty_rx_reqs[ntc] = req_id; 2276 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 2277 } 2278 2279 *next_to_clean = ntc; 2280 2281 return mbuf_head; 2282 } 2283 2284 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 2285 uint16_t nb_pkts) 2286 { 2287 struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 2288 unsigned int free_queue_entries; 2289 uint16_t next_to_clean = rx_ring->next_to_clean; 2290 uint16_t descs_in_use; 2291 struct rte_mbuf *mbuf; 2292 uint16_t completed; 2293 struct ena_com_rx_ctx ena_rx_ctx; 2294 int i, rc = 0; 2295 bool fill_hash; 2296 2297 #ifdef RTE_ETHDEV_DEBUG_RX 2298 /* Check adapter state */ 2299 if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 2300 PMD_RX_LOG(ALERT, 2301 "Trying to receive pkts while device is NOT running\n"); 2302 return 0; 2303 } 2304 #endif 2305 2306 fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH; 2307 2308 descs_in_use = rx_ring->ring_size - 2309 ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1; 2310 nb_pkts = RTE_MIN(descs_in_use, nb_pkts); 2311 2312 for (completed = 0; completed < nb_pkts; completed++) { 2313 ena_rx_ctx.max_bufs = rx_ring->sgl_size; 2314 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 2315 ena_rx_ctx.descs = 0; 2316 ena_rx_ctx.pkt_offset = 0; 2317 /* receive packet context */ 2318 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 2319 rx_ring->ena_com_io_sq, 2320 &ena_rx_ctx); 2321 if (unlikely(rc)) { 2322 PMD_RX_LOG(ERR, 2323 "Failed to get the packet from the device, rc: %d\n", 2324 rc); 2325 if (rc == ENA_COM_NO_SPACE) { 2326 ++rx_ring->rx_stats.bad_desc_num; 2327 rx_ring->adapter->reset_reason = 2328 ENA_REGS_RESET_TOO_MANY_RX_DESCS; 2329 } else { 2330 ++rx_ring->rx_stats.bad_req_id; 2331 rx_ring->adapter->reset_reason = 2332 
ENA_REGS_RESET_INV_RX_REQ_ID; 2333 } 2334 rx_ring->adapter->trigger_reset = true; 2335 return 0; 2336 } 2337 2338 mbuf = ena_rx_mbuf(rx_ring, 2339 ena_rx_ctx.ena_bufs, 2340 ena_rx_ctx.descs, 2341 &next_to_clean, 2342 ena_rx_ctx.pkt_offset); 2343 if (unlikely(mbuf == NULL)) { 2344 for (i = 0; i < ena_rx_ctx.descs; ++i) { 2345 rx_ring->empty_rx_reqs[next_to_clean] = 2346 rx_ring->ena_bufs[i].req_id; 2347 next_to_clean = ENA_IDX_NEXT_MASKED( 2348 next_to_clean, rx_ring->size_mask); 2349 } 2350 break; 2351 } 2352 2353 /* fill mbuf attributes if any */ 2354 ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx, fill_hash); 2355 2356 if (unlikely(mbuf->ol_flags & 2357 (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) { 2358 rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors); 2359 ++rx_ring->rx_stats.bad_csum; 2360 } 2361 2362 rx_pkts[completed] = mbuf; 2363 rx_ring->rx_stats.bytes += mbuf->pkt_len; 2364 } 2365 2366 rx_ring->rx_stats.cnt += completed; 2367 rx_ring->next_to_clean = next_to_clean; 2368 2369 free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq); 2370 2371 /* Burst refill to save doorbells, memory barriers, const interval */ 2372 if (free_queue_entries >= rx_ring->rx_free_thresh) { 2373 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); 2374 ena_populate_rx_queue(rx_ring, free_queue_entries); 2375 } 2376 2377 return completed; 2378 } 2379 2380 static uint16_t 2381 eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2382 uint16_t nb_pkts) 2383 { 2384 int32_t ret; 2385 uint32_t i; 2386 struct rte_mbuf *m; 2387 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 2388 struct ena_adapter *adapter = tx_ring->adapter; 2389 struct rte_ipv4_hdr *ip_hdr; 2390 uint64_t ol_flags; 2391 uint64_t l4_csum_flag; 2392 uint64_t dev_offload_capa; 2393 uint16_t frag_field; 2394 bool need_pseudo_csum; 2395 2396 dev_offload_capa = adapter->offloads.tx_offloads; 2397 for (i = 0; i != nb_pkts; i++) { 2398 m = tx_pkts[i]; 2399 ol_flags = m->ol_flags; 2400 2401 /* Check if any offload flag was set */ 2402 if (ol_flags == 0) 2403 continue; 2404 2405 l4_csum_flag = ol_flags & PKT_TX_L4_MASK; 2406 /* SCTP checksum offload is not supported by the ENA. */ 2407 if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) || 2408 l4_csum_flag == PKT_TX_SCTP_CKSUM) { 2409 PMD_TX_LOG(DEBUG, 2410 "mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64 "\n", 2411 i, ol_flags); 2412 rte_errno = ENOTSUP; 2413 return i; 2414 } 2415 2416 #ifdef RTE_LIBRTE_ETHDEV_DEBUG 2417 /* Check if requested offload is also enabled for the queue */ 2418 if ((ol_flags & PKT_TX_IP_CKSUM && 2419 !(tx_ring->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) || 2420 (l4_csum_flag == PKT_TX_TCP_CKSUM && 2421 !(tx_ring->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) || 2422 (l4_csum_flag == PKT_TX_UDP_CKSUM && 2423 !(tx_ring->offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) { 2424 PMD_TX_LOG(DEBUG, 2425 "mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n", 2426 i, m->nb_segs, tx_ring->id); 2427 rte_errno = EINVAL; 2428 return i; 2429 } 2430 2431 /* The caller is obligated to set l2 and l3 len if any cksum 2432 * offload is enabled. 
2433 */ 2434 if (unlikely(ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK) && 2435 (m->l2_len == 0 || m->l3_len == 0))) { 2436 PMD_TX_LOG(DEBUG, 2437 "mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n", 2438 i); 2439 rte_errno = EINVAL; 2440 return i; 2441 } 2442 ret = rte_validate_tx_offload(m); 2443 if (ret != 0) { 2444 rte_errno = -ret; 2445 return i; 2446 } 2447 #endif 2448 2449 /* Verify HW support for requested offloads and determine if 2450 * pseudo header checksum is needed. 2451 */ 2452 need_pseudo_csum = false; 2453 if (ol_flags & PKT_TX_IPV4) { 2454 if (ol_flags & PKT_TX_IP_CKSUM && 2455 !(dev_offload_capa & ENA_L3_IPV4_CSUM)) { 2456 rte_errno = ENOTSUP; 2457 return i; 2458 } 2459 2460 if (ol_flags & PKT_TX_TCP_SEG && 2461 !(dev_offload_capa & ENA_IPV4_TSO)) { 2462 rte_errno = ENOTSUP; 2463 return i; 2464 } 2465 2466 /* Check HW capabilities and if pseudo csum is needed 2467 * for L4 offloads. 2468 */ 2469 if (l4_csum_flag != PKT_TX_L4_NO_CKSUM && 2470 !(dev_offload_capa & ENA_L4_IPV4_CSUM)) { 2471 if (dev_offload_capa & 2472 ENA_L4_IPV4_CSUM_PARTIAL) { 2473 need_pseudo_csum = true; 2474 } else { 2475 rte_errno = ENOTSUP; 2476 return i; 2477 } 2478 } 2479 2480 /* Parse the DF flag */ 2481 ip_hdr = rte_pktmbuf_mtod_offset(m, 2482 struct rte_ipv4_hdr *, m->l2_len); 2483 frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset); 2484 if (frag_field & RTE_IPV4_HDR_DF_FLAG) { 2485 m->packet_type |= RTE_PTYPE_L4_NONFRAG; 2486 } else if (ol_flags & PKT_TX_TCP_SEG) { 2487 /* In case we are supposed to TSO and have DF 2488 * not set (DF=0) hardware must be provided with 2489 * partial checksum. 2490 */ 2491 need_pseudo_csum = true; 2492 } 2493 } else if (ol_flags & PKT_TX_IPV6) { 2494 /* There is no support for IPv6 TSO as for now. 
*/ 2495 if (ol_flags & PKT_TX_TCP_SEG) { 2496 rte_errno = ENOTSUP; 2497 return i; 2498 } 2499 2500 /* Check HW capabilities and if pseudo csum is needed */ 2501 if (l4_csum_flag != PKT_TX_L4_NO_CKSUM && 2502 !(dev_offload_capa & ENA_L4_IPV6_CSUM)) { 2503 if (dev_offload_capa & 2504 ENA_L4_IPV6_CSUM_PARTIAL) { 2505 need_pseudo_csum = true; 2506 } else { 2507 rte_errno = ENOTSUP; 2508 return i; 2509 } 2510 } 2511 } 2512 2513 if (need_pseudo_csum) { 2514 ret = rte_net_intel_cksum_flags_prepare(m, ol_flags); 2515 if (ret != 0) { 2516 rte_errno = -ret; 2517 return i; 2518 } 2519 } 2520 } 2521 2522 return i; 2523 } 2524 2525 static void ena_update_hints(struct ena_adapter *adapter, 2526 struct ena_admin_ena_hw_hints *hints) 2527 { 2528 if (hints->admin_completion_tx_timeout) 2529 adapter->ena_dev.admin_queue.completion_timeout = 2530 hints->admin_completion_tx_timeout * 1000; 2531 2532 if (hints->mmio_read_timeout) 2533 /* convert to usec */ 2534 adapter->ena_dev.mmio_read.reg_read_to = 2535 hints->mmio_read_timeout * 1000; 2536 2537 if (hints->missing_tx_completion_timeout) { 2538 if (hints->missing_tx_completion_timeout == 2539 ENA_HW_HINTS_NO_TIMEOUT) { 2540 adapter->missing_tx_completion_to = 2541 ENA_HW_HINTS_NO_TIMEOUT; 2542 } else { 2543 /* Convert from msecs to ticks */ 2544 adapter->missing_tx_completion_to = rte_get_timer_hz() * 2545 hints->missing_tx_completion_timeout / 1000; 2546 adapter->tx_cleanup_stall_delay = 2547 adapter->missing_tx_completion_to / 2; 2548 } 2549 } 2550 2551 if (hints->driver_watchdog_timeout) { 2552 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2553 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2554 else 2555 // Convert msecs to ticks 2556 adapter->keep_alive_timeout = 2557 (hints->driver_watchdog_timeout * 2558 rte_get_timer_hz()) / 1000; 2559 } 2560 } 2561 2562 static int ena_check_space_and_linearize_mbuf(struct ena_ring *tx_ring, 2563 struct rte_mbuf *mbuf) 2564 { 2565 struct ena_com_dev *ena_dev; 2566 int num_segments, header_len, rc; 2567 2568 ena_dev = &tx_ring->adapter->ena_dev; 2569 num_segments = mbuf->nb_segs; 2570 header_len = mbuf->data_len; 2571 2572 if (likely(num_segments < tx_ring->sgl_size)) 2573 goto checkspace; 2574 2575 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && 2576 (num_segments == tx_ring->sgl_size) && 2577 (header_len < tx_ring->tx_max_header_size)) 2578 goto checkspace; 2579 2580 /* Checking for space for 2 additional metadata descriptors due to 2581 * possible header split and metadata descriptor. 
Linearization will 2582 * be needed so we reduce the segments number from num_segments to 1 2583 */ 2584 if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3)) { 2585 PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n"); 2586 return ENA_COM_NO_MEM; 2587 } 2588 ++tx_ring->tx_stats.linearize; 2589 rc = rte_pktmbuf_linearize(mbuf); 2590 if (unlikely(rc)) { 2591 PMD_TX_LOG(WARNING, "Mbuf linearize failed\n"); 2592 rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); 2593 ++tx_ring->tx_stats.linearize_failed; 2594 return rc; 2595 } 2596 2597 return 0; 2598 2599 checkspace: 2600 /* Checking for space for 2 additional metadata descriptors due to 2601 * possible header split and metadata descriptor 2602 */ 2603 if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 2604 num_segments + 2)) { 2605 PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n"); 2606 return ENA_COM_NO_MEM; 2607 } 2608 2609 return 0; 2610 } 2611 2612 static void ena_tx_map_mbuf(struct ena_ring *tx_ring, 2613 struct ena_tx_buffer *tx_info, 2614 struct rte_mbuf *mbuf, 2615 void **push_header, 2616 uint16_t *header_len) 2617 { 2618 struct ena_com_buf *ena_buf; 2619 uint16_t delta, seg_len, push_len; 2620 2621 delta = 0; 2622 seg_len = mbuf->data_len; 2623 2624 tx_info->mbuf = mbuf; 2625 ena_buf = tx_info->bufs; 2626 2627 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2628 /* 2629 * Tx header might be (and will be in most cases) smaller than 2630 * tx_max_header_size. But it's not an issue to send more data 2631 * to the device, than actually needed if the mbuf size is 2632 * greater than tx_max_header_size. 2633 */ 2634 push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size); 2635 *header_len = push_len; 2636 2637 if (likely(push_len <= seg_len)) { 2638 /* If the push header is in the single segment, then 2639 * just point it to the 1st mbuf data. 2640 */ 2641 *push_header = rte_pktmbuf_mtod(mbuf, uint8_t *); 2642 } else { 2643 /* If the push header lays in the several segments, copy 2644 * it to the intermediate buffer. 
2645 */ 2646 rte_pktmbuf_read(mbuf, 0, push_len, 2647 tx_ring->push_buf_intermediate_buf); 2648 *push_header = tx_ring->push_buf_intermediate_buf; 2649 delta = push_len - seg_len; 2650 } 2651 } else { 2652 *push_header = NULL; 2653 *header_len = 0; 2654 push_len = 0; 2655 } 2656 2657 /* Process first segment taking into consideration pushed header */ 2658 if (seg_len > push_len) { 2659 ena_buf->paddr = mbuf->buf_iova + 2660 mbuf->data_off + 2661 push_len; 2662 ena_buf->len = seg_len - push_len; 2663 ena_buf++; 2664 tx_info->num_of_bufs++; 2665 } 2666 2667 while ((mbuf = mbuf->next) != NULL) { 2668 seg_len = mbuf->data_len; 2669 2670 /* Skip mbufs if whole data is pushed as a header */ 2671 if (unlikely(delta > seg_len)) { 2672 delta -= seg_len; 2673 continue; 2674 } 2675 2676 ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta; 2677 ena_buf->len = seg_len - delta; 2678 ena_buf++; 2679 tx_info->num_of_bufs++; 2680 2681 delta = 0; 2682 } 2683 } 2684 2685 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) 2686 { 2687 struct ena_tx_buffer *tx_info; 2688 struct ena_com_tx_ctx ena_tx_ctx = { { 0 } }; 2689 uint16_t next_to_use; 2690 uint16_t header_len; 2691 uint16_t req_id; 2692 void *push_header; 2693 int nb_hw_desc; 2694 int rc; 2695 2696 rc = ena_check_space_and_linearize_mbuf(tx_ring, mbuf); 2697 if (unlikely(rc)) 2698 return rc; 2699 2700 next_to_use = tx_ring->next_to_use; 2701 2702 req_id = tx_ring->empty_tx_reqs[next_to_use]; 2703 tx_info = &tx_ring->tx_buffer_info[req_id]; 2704 tx_info->num_of_bufs = 0; 2705 2706 ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len); 2707 2708 ena_tx_ctx.ena_bufs = tx_info->bufs; 2709 ena_tx_ctx.push_header = push_header; 2710 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 2711 ena_tx_ctx.req_id = req_id; 2712 ena_tx_ctx.header_len = header_len; 2713 2714 /* Set Tx offloads flags, if applicable */ 2715 ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads, 2716 tx_ring->disable_meta_caching); 2717 2718 if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, 2719 &ena_tx_ctx))) { 2720 PMD_TX_LOG(DEBUG, 2721 "LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n", 2722 tx_ring->id); 2723 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2724 tx_ring->tx_stats.doorbells++; 2725 tx_ring->pkts_without_db = false; 2726 } 2727 2728 /* prepare the packet's descriptors to dma engine */ 2729 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, 2730 &nb_hw_desc); 2731 if (unlikely(rc)) { 2732 PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc); 2733 ++tx_ring->tx_stats.prepare_ctx_err; 2734 tx_ring->adapter->reset_reason = 2735 ENA_REGS_RESET_DRIVER_INVALID_STATE; 2736 tx_ring->adapter->trigger_reset = true; 2737 return rc; 2738 } 2739 2740 tx_info->tx_descs = nb_hw_desc; 2741 tx_info->timestamp = rte_get_timer_cycles(); 2742 2743 tx_ring->tx_stats.cnt++; 2744 tx_ring->tx_stats.bytes += mbuf->pkt_len; 2745 2746 tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, 2747 tx_ring->size_mask); 2748 2749 return 0; 2750 } 2751 2752 static void ena_tx_cleanup(struct ena_ring *tx_ring) 2753 { 2754 unsigned int total_tx_descs = 0; 2755 uint16_t cleanup_budget; 2756 uint16_t next_to_clean = tx_ring->next_to_clean; 2757 2758 /* Attempt to release all Tx descriptors (ring_size - 1 -> size_mask) */ 2759 cleanup_budget = tx_ring->size_mask; 2760 2761 while (likely(total_tx_descs < cleanup_budget)) { 2762 struct rte_mbuf *mbuf; 2763 struct ena_tx_buffer *tx_info; 2764 uint16_t req_id; 
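		/* Each iteration pops one completed request ID from the Tx
		 * completion queue, frees the mbuf chain that was mapped for
		 * it in ena_xmit_mbuf() and returns the request ID to
		 * empty_tx_reqs so the descriptor slot can be reused.
		 */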
2765 2766 if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0) 2767 break; 2768 2769 if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0)) 2770 break; 2771 2772 /* Get Tx info & store how many descs were processed */ 2773 tx_info = &tx_ring->tx_buffer_info[req_id]; 2774 tx_info->timestamp = 0; 2775 2776 mbuf = tx_info->mbuf; 2777 rte_pktmbuf_free(mbuf); 2778 2779 tx_info->mbuf = NULL; 2780 tx_ring->empty_tx_reqs[next_to_clean] = req_id; 2781 2782 total_tx_descs += tx_info->tx_descs; 2783 2784 /* Put back descriptor to the ring for reuse */ 2785 next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean, 2786 tx_ring->size_mask); 2787 } 2788 2789 if (likely(total_tx_descs > 0)) { 2790 /* acknowledge completion of sent packets */ 2791 tx_ring->next_to_clean = next_to_clean; 2792 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); 2793 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); 2794 } 2795 2796 /* Notify completion handler that the cleanup was just called */ 2797 tx_ring->last_cleanup_ticks = rte_get_timer_cycles(); 2798 } 2799 2800 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2801 uint16_t nb_pkts) 2802 { 2803 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 2804 int available_desc; 2805 uint16_t sent_idx = 0; 2806 2807 #ifdef RTE_ETHDEV_DEBUG_TX 2808 /* Check adapter state */ 2809 if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 2810 PMD_TX_LOG(ALERT, 2811 "Trying to xmit pkts while device is NOT running\n"); 2812 return 0; 2813 } 2814 #endif 2815 2816 for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { 2817 if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx])) 2818 break; 2819 tx_ring->pkts_without_db = true; 2820 rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4, 2821 tx_ring->size_mask)]); 2822 } 2823 2824 available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq); 2825 tx_ring->tx_stats.available_desc = available_desc; 2826 2827 /* If there are ready packets to be xmitted... */ 2828 if (likely(tx_ring->pkts_without_db)) { 2829 /* ...let HW do its best :-) */ 2830 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2831 tx_ring->tx_stats.doorbells++; 2832 tx_ring->pkts_without_db = false; 2833 } 2834 2835 if (available_desc < tx_ring->tx_free_thresh) 2836 ena_tx_cleanup(tx_ring); 2837 2838 tx_ring->tx_stats.available_desc = 2839 ena_com_free_q_entries(tx_ring->ena_com_io_sq); 2840 tx_ring->tx_stats.tx_poll++; 2841 2842 return sent_idx; 2843 } 2844 2845 int ena_copy_eni_stats(struct ena_adapter *adapter) 2846 { 2847 struct ena_admin_eni_stats admin_eni_stats; 2848 int rc; 2849 2850 rte_spinlock_lock(&adapter->admin_lock); 2851 rc = ena_com_get_eni_stats(&adapter->ena_dev, &admin_eni_stats); 2852 rte_spinlock_unlock(&adapter->admin_lock); 2853 if (rc != 0) { 2854 if (rc == ENA_COM_UNSUPPORTED) { 2855 PMD_DRV_LOG(DEBUG, 2856 "Retrieving ENI metrics is not supported\n"); 2857 } else { 2858 PMD_DRV_LOG(WARNING, 2859 "Failed to get ENI metrics, rc: %d\n", rc); 2860 } 2861 return rc; 2862 } 2863 2864 rte_memcpy(&adapter->eni_stats, &admin_eni_stats, 2865 sizeof(struct ena_stats_eni)); 2866 2867 return 0; 2868 } 2869 2870 /** 2871 * DPDK callback to retrieve names of extended device statistics 2872 * 2873 * @param dev 2874 * Pointer to Ethernet device structure. 2875 * @param[out] xstats_names 2876 * Buffer to insert names into. 2877 * @param n 2878 * Number of names. 2879 * 2880 * @return 2881 * Number of xstats names. 
2882 */ 2883 static int ena_xstats_get_names(struct rte_eth_dev *dev, 2884 struct rte_eth_xstat_name *xstats_names, 2885 unsigned int n) 2886 { 2887 unsigned int xstats_count = ena_xstats_calc_num(dev->data); 2888 unsigned int stat, i, count = 0; 2889 2890 if (n < xstats_count || !xstats_names) 2891 return xstats_count; 2892 2893 for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) 2894 strcpy(xstats_names[count].name, 2895 ena_stats_global_strings[stat].name); 2896 2897 for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) 2898 strcpy(xstats_names[count].name, 2899 ena_stats_eni_strings[stat].name); 2900 2901 for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) 2902 for (i = 0; i < dev->data->nb_rx_queues; i++, count++) 2903 snprintf(xstats_names[count].name, 2904 sizeof(xstats_names[count].name), 2905 "rx_q%d_%s", i, 2906 ena_stats_rx_strings[stat].name); 2907 2908 for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) 2909 for (i = 0; i < dev->data->nb_tx_queues; i++, count++) 2910 snprintf(xstats_names[count].name, 2911 sizeof(xstats_names[count].name), 2912 "tx_q%d_%s", i, 2913 ena_stats_tx_strings[stat].name); 2914 2915 return xstats_count; 2916 } 2917 2918 /** 2919 * DPDK callback to get extended device statistics. 2920 * 2921 * @param dev 2922 * Pointer to Ethernet device structure. 2923 * @param[out] stats 2924 * Stats table output buffer. 2925 * @param n 2926 * The size of the stats table. 2927 * 2928 * @return 2929 * Number of xstats on success, negative on failure. 2930 */ 2931 static int ena_xstats_get(struct rte_eth_dev *dev, 2932 struct rte_eth_xstat *xstats, 2933 unsigned int n) 2934 { 2935 struct ena_adapter *adapter = dev->data->dev_private; 2936 unsigned int xstats_count = ena_xstats_calc_num(dev->data); 2937 unsigned int stat, i, count = 0; 2938 int stat_offset; 2939 void *stats_begin; 2940 2941 if (n < xstats_count) 2942 return xstats_count; 2943 2944 if (!xstats) 2945 return 0; 2946 2947 for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) { 2948 stat_offset = ena_stats_global_strings[stat].stat_offset; 2949 stats_begin = &adapter->dev_stats; 2950 2951 xstats[count].id = count; 2952 xstats[count].value = *((uint64_t *) 2953 ((char *)stats_begin + stat_offset)); 2954 } 2955 2956 /* Even if the function below fails, we should copy previous (or initial 2957 * values) to keep structure of rte_eth_xstat consistent. 
 */
	ena_copy_eni_stats(adapter);
	for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) {
		stat_offset = ena_stats_eni_strings[stat].stat_offset;
		stats_begin = &adapter->eni_stats;

		xstats[count].id = count;
		xstats[count].value = *((uint64_t *)
			((char *)stats_begin + stat_offset));
	}

	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
			stat_offset = ena_stats_rx_strings[stat].stat_offset;
			stats_begin = &adapter->rx_ring[i].rx_stats;

			xstats[count].id = count;
			xstats[count].value = *((uint64_t *)
				((char *)stats_begin + stat_offset));
		}
	}

	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
			stat_offset = ena_stats_tx_strings[stat].stat_offset;
			stats_begin = &adapter->tx_ring[i].tx_stats;

			xstats[count].id = count;
			xstats[count].value = *((uint64_t *)
				((char *)stats_begin + stat_offset));
		}
	}

	return count;
}

static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	uint64_t id;
	uint64_t rx_entries, tx_entries;
	unsigned int i;
	int qid;
	int valid = 0;
	bool was_eni_copied = false;

	for (i = 0; i < n; ++i) {
		id = ids[i];
		/* Check if id belongs to global statistics */
		if (id < ENA_STATS_ARRAY_GLOBAL) {
			values[i] = *((uint64_t *)&adapter->dev_stats + id);
			++valid;
			continue;
		}

		/* Check if id belongs to ENI statistics */
		id -= ENA_STATS_ARRAY_GLOBAL;
		if (id < ENA_STATS_ARRAY_ENI) {
			/* Avoid reading ENI stats multiple times in a single
			 * function call, as it requires communication with the
			 * admin queue.
			 */
			if (!was_eni_copied) {
				was_eni_copied = true;
				ena_copy_eni_stats(adapter);
			}
			values[i] = *((uint64_t *)&adapter->eni_stats + id);
			++valid;
			continue;
		}

		/* Check if id belongs to rx queue statistics */
		id -= ENA_STATS_ARRAY_ENI;
		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
		if (id < rx_entries) {
			qid = id % dev->data->nb_rx_queues;
			id /= dev->data->nb_rx_queues;
			values[i] = *((uint64_t *)
				&adapter->rx_ring[qid].rx_stats + id);
			++valid;
			continue;
		}
		/* Check if id belongs to tx queue statistics */
		id -= rx_entries;
		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
		if (id < tx_entries) {
			qid = id % dev->data->nb_tx_queues;
			id /= dev->data->nb_tx_queues;
			values[i] = *((uint64_t *)
				&adapter->tx_ring[qid].tx_stats + id);
			++valid;
			continue;
		}
	}

	return valid;
}

static int ena_process_bool_devarg(const char *key,
				   const char *value,
				   void *opaque)
{
	struct ena_adapter *adapter = opaque;
	bool bool_value;

	/* Parse the value. */
	if (strcmp(value, "1") == 0) {
		bool_value = true;
	} else if (strcmp(value, "0") == 0) {
		bool_value = false;
	} else {
		PMD_INIT_LOG(ERR,
			"Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
			value, key);
		return -EINVAL;
	}

	/* Now, assign it to the proper adapter field.
*/ 3079 if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0) 3080 adapter->use_large_llq_hdr = bool_value; 3081 3082 return 0; 3083 } 3084 3085 static int ena_parse_devargs(struct ena_adapter *adapter, 3086 struct rte_devargs *devargs) 3087 { 3088 static const char * const allowed_args[] = { 3089 ENA_DEVARG_LARGE_LLQ_HDR, 3090 NULL, 3091 }; 3092 struct rte_kvargs *kvlist; 3093 int rc; 3094 3095 if (devargs == NULL) 3096 return 0; 3097 3098 kvlist = rte_kvargs_parse(devargs->args, allowed_args); 3099 if (kvlist == NULL) { 3100 PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n", 3101 devargs->args); 3102 return -EINVAL; 3103 } 3104 3105 rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR, 3106 ena_process_bool_devarg, adapter); 3107 3108 rte_kvargs_free(kvlist); 3109 3110 return rc; 3111 } 3112 3113 static int ena_setup_rx_intr(struct rte_eth_dev *dev) 3114 { 3115 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3116 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3117 int rc; 3118 uint16_t vectors_nb, i; 3119 bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq; 3120 3121 if (!rx_intr_requested) 3122 return 0; 3123 3124 if (!rte_intr_cap_multiple(intr_handle)) { 3125 PMD_DRV_LOG(ERR, 3126 "Rx interrupt requested, but it isn't supported by the PCI driver\n"); 3127 return -ENOTSUP; 3128 } 3129 3130 /* Disable interrupt mapping before the configuration starts. */ 3131 rte_intr_disable(intr_handle); 3132 3133 /* Verify if there are enough vectors available. */ 3134 vectors_nb = dev->data->nb_rx_queues; 3135 if (vectors_nb > RTE_MAX_RXTX_INTR_VEC_ID) { 3136 PMD_DRV_LOG(ERR, 3137 "Too many Rx interrupts requested, maximum number: %d\n", 3138 RTE_MAX_RXTX_INTR_VEC_ID); 3139 rc = -ENOTSUP; 3140 goto enable_intr; 3141 } 3142 3143 intr_handle->intr_vec = rte_zmalloc("intr_vec", 3144 dev->data->nb_rx_queues * sizeof(*intr_handle->intr_vec), 0); 3145 if (intr_handle->intr_vec == NULL) { 3146 PMD_DRV_LOG(ERR, 3147 "Failed to allocate interrupt vector for %d queues\n", 3148 dev->data->nb_rx_queues); 3149 rc = -ENOMEM; 3150 goto enable_intr; 3151 } 3152 3153 rc = rte_intr_efd_enable(intr_handle, vectors_nb); 3154 if (rc != 0) 3155 goto free_intr_vec; 3156 3157 if (!rte_intr_allow_others(intr_handle)) { 3158 PMD_DRV_LOG(ERR, 3159 "Not enough interrupts available to use both ENA Admin and Rx interrupts\n"); 3160 goto disable_intr_efd; 3161 } 3162 3163 for (i = 0; i < vectors_nb; ++i) 3164 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i; 3165 3166 rte_intr_enable(intr_handle); 3167 return 0; 3168 3169 disable_intr_efd: 3170 rte_intr_efd_disable(intr_handle); 3171 free_intr_vec: 3172 rte_free(intr_handle->intr_vec); 3173 intr_handle->intr_vec = NULL; 3174 enable_intr: 3175 rte_intr_enable(intr_handle); 3176 return rc; 3177 } 3178 3179 static void ena_rx_queue_intr_set(struct rte_eth_dev *dev, 3180 uint16_t queue_id, 3181 bool unmask) 3182 { 3183 struct ena_adapter *adapter = dev->data->dev_private; 3184 struct ena_ring *rxq = &adapter->rx_ring[queue_id]; 3185 struct ena_eth_io_intr_reg intr_reg; 3186 3187 ena_com_update_intr_reg(&intr_reg, 0, 0, unmask); 3188 ena_com_unmask_intr(rxq->ena_com_io_cq, &intr_reg); 3189 } 3190 3191 static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev, 3192 uint16_t queue_id) 3193 { 3194 ena_rx_queue_intr_set(dev, queue_id, true); 3195 3196 return 0; 3197 } 3198 3199 static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev, 3200 uint16_t queue_id) 3201 { 3202 ena_rx_queue_intr_set(dev, queue_id, false); 3203 3204 return 0; 3205 } 3206 
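/*
 * Illustrative application-side sketch of how the Rx interrupt callbacks
 * above are meant to be used together with ena_setup_rx_intr(). This is not
 * part of the driver; "port_id", "queue_id" and "timeout_ms" are hypothetical
 * application variables:
 *
 *	struct rte_epoll_event event;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, timeout_ms);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *	// Poll with rte_eth_rx_burst() until the queue is drained, then
 *	// re-enable the interrupt and wait again.
 */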
3207 /********************************************************************* 3208 * PMD configuration 3209 *********************************************************************/ 3210 static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 3211 struct rte_pci_device *pci_dev) 3212 { 3213 return rte_eth_dev_pci_generic_probe(pci_dev, 3214 sizeof(struct ena_adapter), eth_ena_dev_init); 3215 } 3216 3217 static int eth_ena_pci_remove(struct rte_pci_device *pci_dev) 3218 { 3219 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit); 3220 } 3221 3222 static struct rte_pci_driver rte_ena_pmd = { 3223 .id_table = pci_id_ena_map, 3224 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 3225 RTE_PCI_DRV_WC_ACTIVATE, 3226 .probe = eth_ena_pci_probe, 3227 .remove = eth_ena_pci_remove, 3228 }; 3229 3230 RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd); 3231 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map); 3232 RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci"); 3233 RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>"); 3234 RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE); 3235 RTE_LOG_REGISTER_SUFFIX(ena_logtype_driver, driver, NOTICE); 3236 #ifdef RTE_ETHDEV_DEBUG_RX 3237 RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, DEBUG); 3238 #endif 3239 #ifdef RTE_ETHDEV_DEBUG_TX 3240 RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, DEBUG); 3241 #endif 3242 RTE_LOG_REGISTER_SUFFIX(ena_logtype_com, com, WARNING); 3243 3244 /****************************************************************************** 3245 ******************************** AENQ Handlers ******************************* 3246 *****************************************************************************/ 3247 static void ena_update_on_link_change(void *adapter_data, 3248 struct ena_admin_aenq_entry *aenq_e) 3249 { 3250 struct rte_eth_dev *eth_dev = adapter_data; 3251 struct ena_adapter *adapter = eth_dev->data->dev_private; 3252 struct ena_admin_aenq_link_change_desc *aenq_link_desc; 3253 uint32_t status; 3254 3255 aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 3256 3257 status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc); 3258 adapter->link_status = status; 3259 3260 ena_link_update(eth_dev, 0); 3261 rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); 3262 } 3263 3264 static void ena_notification(void *adapter_data, 3265 struct ena_admin_aenq_entry *aenq_e) 3266 { 3267 struct rte_eth_dev *eth_dev = adapter_data; 3268 struct ena_adapter *adapter = eth_dev->data->dev_private; 3269 struct ena_admin_ena_hw_hints *hints; 3270 3271 if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION) 3272 PMD_DRV_LOG(WARNING, "Invalid AENQ group: %x. 
Expected: %x\n",
			aenq_e->aenq_common_desc.group,
			ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrome) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid AENQ notification syndrome: %d\n",
			aenq_e->aenq_common_desc.syndrome);
	}
}

static void ena_keep_alive(void *adapter_data,
			   struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev = adapter_data;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_admin_aenq_keep_alive_desc *desc;
	uint64_t rx_drops;
	uint64_t tx_drops;

	adapter->timestamp_wd = rte_get_timer_cycles();

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;

	adapter->drv_stats->rx_drops = rx_drops;
	adapter->dev_stats.tx_drops = tx_drops;
}

/**
 * This handler will be called for an unknown event group or for events with
 * unimplemented handlers.
 */
static void unimplemented_aenq_handler(__rte_unused void *data,
				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	PMD_DRV_LOG(ERR,
		"Unknown event was received or event with unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
	},
	.unimplemented_handler = unimplemented_aenq_handler
};
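/*
 * Devargs usage sketch (illustrative only; the PCI address below is just an
 * example). The large_llq_hdr devarg parsed by ena_parse_devargs() is passed
 * per device, either on the EAL command line:
 *
 *	dpdk-testpmd -a 00:06.0,large_llq_hdr=1 -- -i
 *
 * or programmatically via hotplug:
 *
 *	rte_eal_hotplug_add("pci", "00:06.0", "large_llq_hdr=1");
 */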