/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_net.h>
#include <rte_kvargs.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	5
#define DRV_MODULE_VER_SUBMINOR	0

#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)

#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)

#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) RTE_DIM(x)

#define ENA_MIN_RING_DESC	128

#define ENA_PTYPE_HAS_HASH	(RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)

enum ethtool_stringset {
	ETH_SS_TEST = 0,
	ETH_SS_STATS,
};

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_ENI_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, eni)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

/* Device arguments */
#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"

/*
 * Each rte_memzone should have a unique name.
 * To satisfy this, the allocation counter below is appended to the name.
 */
rte_atomic64_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(dev_start),
	ENA_STAT_GLOBAL_ENTRY(dev_stop),
	ENA_STAT_GLOBAL_ENTRY(tx_drops),
};

static const struct ena_stats ena_stats_eni_strings[] = {
	ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(available_desc),
	ENA_STAT_TX_ENTRY(missed_tx),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refill_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_ENI	ARRAY_SIZE(ena_stats_eni_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)

#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
			RTE_ETH_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\
		       RTE_MBUF_F_TX_IP_CKSUM |\
		       RTE_MBUF_F_TX_TCP_SEG)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON	0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF		0xEC20
#define PCI_DEVICE_ID_ENA_VF_RSERV0	0xEC21

#define ENA_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_L4_MASK | \
	RTE_MBUF_F_TX_IPV6 | \
	RTE_MBUF_F_TX_IPV4 | \
	RTE_MBUF_F_TX_IP_CKSUM | \
	RTE_MBUF_F_TX_TCP_SEG)

#define ENA_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)

/** HW specific offloads capabilities. */
/* IPv4 checksum offload. */
#define ENA_L3_IPV4_CSUM		0x0001
/* TCP/UDP checksum offload for IPv4 packets. */
#define ENA_L4_IPV4_CSUM		0x0002
/* TCP/UDP checksum offload for IPv4 packets with pseudo header checksum. */
#define ENA_L4_IPV4_CSUM_PARTIAL	0x0004
/* TCP/UDP checksum offload for IPv6 packets. */
#define ENA_L4_IPV6_CSUM		0x0008
/* TCP/UDP checksum offload for IPv6 packets with pseudo header checksum. */
#define ENA_L4_IPV6_CSUM_PARTIAL	0x0010
/* TSO support for IPv4 packets. */
#define ENA_IPV4_TSO			0x0020

/* Device supports setting RSS hash. */
#define ENA_RX_RSS_HASH			0x0040

static const struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) },
	{ .device_id = 0 },
};

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct rte_pci_device *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state);
static int ena_dev_configure(struct rte_eth_dev *dev);
static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
	struct ena_tx_buffer *tx_info,
	struct rte_mbuf *mbuf,
	void **push_header,
	uint16_t *header_len);
static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
static void ena_tx_cleanup(struct ena_ring *tx_ring);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
				    struct ena_com_rx_buf_info *ena_bufs,
				    uint32_t descs,
				    uint16_t *next_to_clean,
				    uint8_t offset);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
				  struct rte_mbuf *mbuf, uint16_t id);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter,
			   bool disable_meta_caching);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static int ena_stop(struct rte_eth_dev *dev);
static int ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring);
static void ena_queue_stop(struct ena_ring *ring);
static void ena_queue_stop_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring);
static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter);
static uint64_t ena_get_tx_port_offloads(struct ena_adapter
*adapter); 228 static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter); 229 static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter); 230 static int ena_infos_get(struct rte_eth_dev *dev, 231 struct rte_eth_dev_info *dev_info); 232 static void ena_interrupt_handler_rte(void *cb_arg); 233 static void ena_timer_wd_callback(struct rte_timer *timer, void *arg); 234 static void ena_destroy_device(struct rte_eth_dev *eth_dev); 235 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev); 236 static int ena_xstats_get_names(struct rte_eth_dev *dev, 237 struct rte_eth_xstat_name *xstats_names, 238 unsigned int n); 239 static int ena_xstats_get(struct rte_eth_dev *dev, 240 struct rte_eth_xstat *stats, 241 unsigned int n); 242 static int ena_xstats_get_by_id(struct rte_eth_dev *dev, 243 const uint64_t *ids, 244 uint64_t *values, 245 unsigned int n); 246 static int ena_process_bool_devarg(const char *key, 247 const char *value, 248 void *opaque); 249 static int ena_parse_devargs(struct ena_adapter *adapter, 250 struct rte_devargs *devargs); 251 static int ena_copy_eni_stats(struct ena_adapter *adapter); 252 static int ena_setup_rx_intr(struct rte_eth_dev *dev); 253 static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev, 254 uint16_t queue_id); 255 static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev, 256 uint16_t queue_id); 257 258 static const struct eth_dev_ops ena_dev_ops = { 259 .dev_configure = ena_dev_configure, 260 .dev_infos_get = ena_infos_get, 261 .rx_queue_setup = ena_rx_queue_setup, 262 .tx_queue_setup = ena_tx_queue_setup, 263 .dev_start = ena_start, 264 .dev_stop = ena_stop, 265 .link_update = ena_link_update, 266 .stats_get = ena_stats_get, 267 .xstats_get_names = ena_xstats_get_names, 268 .xstats_get = ena_xstats_get, 269 .xstats_get_by_id = ena_xstats_get_by_id, 270 .mtu_set = ena_mtu_set, 271 .rx_queue_release = ena_rx_queue_release, 272 .tx_queue_release = ena_tx_queue_release, 273 .dev_close = ena_close, 274 .dev_reset = ena_dev_reset, 275 .reta_update = ena_rss_reta_update, 276 .reta_query = ena_rss_reta_query, 277 .rx_queue_intr_enable = ena_rx_queue_intr_enable, 278 .rx_queue_intr_disable = ena_rx_queue_intr_disable, 279 .rss_hash_update = ena_rss_hash_update, 280 .rss_hash_conf_get = ena_rss_hash_conf_get, 281 }; 282 283 static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, 284 struct ena_com_rx_ctx *ena_rx_ctx, 285 bool fill_hash) 286 { 287 uint64_t ol_flags = 0; 288 uint32_t packet_type = 0; 289 290 if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) 291 packet_type |= RTE_PTYPE_L4_TCP; 292 else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP) 293 packet_type |= RTE_PTYPE_L4_UDP; 294 295 if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) { 296 packet_type |= RTE_PTYPE_L3_IPV4; 297 if (unlikely(ena_rx_ctx->l3_csum_err)) 298 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; 299 else 300 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; 301 } else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) { 302 packet_type |= RTE_PTYPE_L3_IPV6; 303 } 304 305 if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) 306 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; 307 else 308 if (unlikely(ena_rx_ctx->l4_csum_err)) 309 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; 310 else 311 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; 312 313 if (fill_hash && 314 likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) { 315 ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 316 mbuf->hash.rss = ena_rx_ctx->hash; 317 } 318 319 mbuf->ol_flags = ol_flags; 320 mbuf->packet_type = 
packet_type; 321 } 322 323 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, 324 struct ena_com_tx_ctx *ena_tx_ctx, 325 uint64_t queue_offloads, 326 bool disable_meta_caching) 327 { 328 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; 329 330 if ((mbuf->ol_flags & MBUF_OFFLOADS) && 331 (queue_offloads & QUEUE_OFFLOADS)) { 332 /* check if TSO is required */ 333 if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) && 334 (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) { 335 ena_tx_ctx->tso_enable = true; 336 337 ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf); 338 } 339 340 /* check if L3 checksum is needed */ 341 if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) && 342 (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) 343 ena_tx_ctx->l3_csum_enable = true; 344 345 if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) { 346 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 347 } else { 348 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 349 350 /* set don't fragment (DF) flag */ 351 if (mbuf->packet_type & 352 (RTE_PTYPE_L4_NONFRAG 353 | RTE_PTYPE_INNER_L4_NONFRAG)) 354 ena_tx_ctx->df = true; 355 } 356 357 /* check if L4 checksum is needed */ 358 if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) && 359 (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) { 360 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 361 ena_tx_ctx->l4_csum_enable = true; 362 } else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == 363 RTE_MBUF_F_TX_UDP_CKSUM) && 364 (queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) { 365 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 366 ena_tx_ctx->l4_csum_enable = true; 367 } else { 368 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; 369 ena_tx_ctx->l4_csum_enable = false; 370 } 371 372 ena_meta->mss = mbuf->tso_segsz; 373 ena_meta->l3_hdr_len = mbuf->l3_len; 374 ena_meta->l3_hdr_offset = mbuf->l2_len; 375 376 ena_tx_ctx->meta_valid = true; 377 } else if (disable_meta_caching) { 378 memset(ena_meta, 0, sizeof(*ena_meta)); 379 ena_tx_ctx->meta_valid = true; 380 } else { 381 ena_tx_ctx->meta_valid = false; 382 } 383 } 384 385 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) 386 { 387 struct ena_tx_buffer *tx_info = NULL; 388 389 if (likely(req_id < tx_ring->ring_size)) { 390 tx_info = &tx_ring->tx_buffer_info[req_id]; 391 if (likely(tx_info->mbuf)) 392 return 0; 393 } 394 395 if (tx_info) 396 PMD_TX_LOG(ERR, "tx_info doesn't have valid mbuf\n"); 397 else 398 PMD_TX_LOG(ERR, "Invalid req_id: %hu\n", req_id); 399 400 /* Trigger device reset */ 401 ++tx_ring->tx_stats.bad_req_id; 402 tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; 403 tx_ring->adapter->trigger_reset = true; 404 return -EFAULT; 405 } 406 407 static void ena_config_host_info(struct ena_com_dev *ena_dev) 408 { 409 struct ena_admin_host_info *host_info; 410 int rc; 411 412 /* Allocate only the host info */ 413 rc = ena_com_allocate_host_info(ena_dev); 414 if (rc) { 415 PMD_DRV_LOG(ERR, "Cannot allocate host info\n"); 416 return; 417 } 418 419 host_info = ena_dev->host_attr.host_info; 420 421 host_info->os_type = ENA_ADMIN_OS_DPDK; 422 host_info->kernel_ver = RTE_VERSION; 423 strlcpy((char *)host_info->kernel_ver_str, rte_version(), 424 sizeof(host_info->kernel_ver_str)); 425 host_info->os_dist = RTE_VERSION; 426 strlcpy((char *)host_info->os_dist_str, rte_version(), 427 sizeof(host_info->os_dist_str)); 428 host_info->driver_version = 429 (DRV_MODULE_VER_MAJOR) | 430 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 431 (DRV_MODULE_VER_SUBMINOR << 432 
ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 433 host_info->num_cpus = rte_lcore_count(); 434 435 host_info->driver_supported_features = 436 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | 437 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; 438 439 rc = ena_com_set_host_attributes(ena_dev); 440 if (rc) { 441 if (rc == -ENA_COM_UNSUPPORTED) 442 PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); 443 else 444 PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); 445 446 goto err; 447 } 448 449 return; 450 451 err: 452 ena_com_delete_host_info(ena_dev); 453 } 454 455 /* This function calculates the number of xstats based on the current config */ 456 static unsigned int ena_xstats_calc_num(struct rte_eth_dev_data *data) 457 { 458 return ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENI + 459 (data->nb_tx_queues * ENA_STATS_ARRAY_TX) + 460 (data->nb_rx_queues * ENA_STATS_ARRAY_RX); 461 } 462 463 static void ena_config_debug_area(struct ena_adapter *adapter) 464 { 465 u32 debug_area_size; 466 int rc, ss_count; 467 468 ss_count = ena_xstats_calc_num(adapter->edev_data); 469 470 /* allocate 32 bytes for each string and 64bit for the value */ 471 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 472 473 rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); 474 if (rc) { 475 PMD_DRV_LOG(ERR, "Cannot allocate debug area\n"); 476 return; 477 } 478 479 rc = ena_com_set_host_attributes(&adapter->ena_dev); 480 if (rc) { 481 if (rc == -ENA_COM_UNSUPPORTED) 482 PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); 483 else 484 PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); 485 486 goto err; 487 } 488 489 return; 490 err: 491 ena_com_delete_debug_area(&adapter->ena_dev); 492 } 493 494 static int ena_close(struct rte_eth_dev *dev) 495 { 496 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 497 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 498 struct ena_adapter *adapter = dev->data->dev_private; 499 int ret = 0; 500 501 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 502 return 0; 503 504 if (adapter->state == ENA_ADAPTER_STATE_RUNNING) 505 ret = ena_stop(dev); 506 adapter->state = ENA_ADAPTER_STATE_CLOSED; 507 508 ena_rx_queue_release_all(dev); 509 ena_tx_queue_release_all(dev); 510 511 rte_free(adapter->drv_stats); 512 adapter->drv_stats = NULL; 513 514 rte_intr_disable(intr_handle); 515 rte_intr_callback_unregister(intr_handle, 516 ena_interrupt_handler_rte, 517 dev); 518 519 /* 520 * MAC is not allocated dynamically. Setting NULL should prevent from 521 * release of the resource in the rte_eth_dev_release_port(). 
522 */ 523 dev->data->mac_addrs = NULL; 524 525 return ret; 526 } 527 528 static int 529 ena_dev_reset(struct rte_eth_dev *dev) 530 { 531 int rc = 0; 532 533 /* Cannot release memory in secondary process */ 534 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 535 PMD_DRV_LOG(WARNING, "dev_reset not supported in secondary.\n"); 536 return -EPERM; 537 } 538 539 ena_destroy_device(dev); 540 rc = eth_ena_dev_init(dev); 541 if (rc) 542 PMD_INIT_LOG(CRIT, "Cannot initialize device\n"); 543 544 return rc; 545 } 546 547 static void ena_rx_queue_release_all(struct rte_eth_dev *dev) 548 { 549 int nb_queues = dev->data->nb_rx_queues; 550 int i; 551 552 for (i = 0; i < nb_queues; i++) 553 ena_rx_queue_release(dev, i); 554 } 555 556 static void ena_tx_queue_release_all(struct rte_eth_dev *dev) 557 { 558 int nb_queues = dev->data->nb_tx_queues; 559 int i; 560 561 for (i = 0; i < nb_queues; i++) 562 ena_tx_queue_release(dev, i); 563 } 564 565 static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 566 { 567 struct ena_ring *ring = dev->data->rx_queues[qid]; 568 569 /* Free ring resources */ 570 rte_free(ring->rx_buffer_info); 571 ring->rx_buffer_info = NULL; 572 573 rte_free(ring->rx_refill_buffer); 574 ring->rx_refill_buffer = NULL; 575 576 rte_free(ring->empty_rx_reqs); 577 ring->empty_rx_reqs = NULL; 578 579 ring->configured = 0; 580 581 PMD_DRV_LOG(NOTICE, "Rx queue %d:%d released\n", 582 ring->port_id, ring->id); 583 } 584 585 static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 586 { 587 struct ena_ring *ring = dev->data->tx_queues[qid]; 588 589 /* Free ring resources */ 590 rte_free(ring->push_buf_intermediate_buf); 591 592 rte_free(ring->tx_buffer_info); 593 594 rte_free(ring->empty_tx_reqs); 595 596 ring->empty_tx_reqs = NULL; 597 ring->tx_buffer_info = NULL; 598 ring->push_buf_intermediate_buf = NULL; 599 600 ring->configured = 0; 601 602 PMD_DRV_LOG(NOTICE, "Tx queue %d:%d released\n", 603 ring->port_id, ring->id); 604 } 605 606 static void ena_rx_queue_release_bufs(struct ena_ring *ring) 607 { 608 unsigned int i; 609 610 for (i = 0; i < ring->ring_size; ++i) { 611 struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i]; 612 if (rx_info->mbuf) { 613 rte_mbuf_raw_free(rx_info->mbuf); 614 rx_info->mbuf = NULL; 615 } 616 } 617 } 618 619 static void ena_tx_queue_release_bufs(struct ena_ring *ring) 620 { 621 unsigned int i; 622 623 for (i = 0; i < ring->ring_size; ++i) { 624 struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i]; 625 626 if (tx_buf->mbuf) { 627 rte_pktmbuf_free(tx_buf->mbuf); 628 tx_buf->mbuf = NULL; 629 } 630 } 631 } 632 633 static int ena_link_update(struct rte_eth_dev *dev, 634 __rte_unused int wait_to_complete) 635 { 636 struct rte_eth_link *link = &dev->data->dev_link; 637 struct ena_adapter *adapter = dev->data->dev_private; 638 639 link->link_status = adapter->link_status ? 
RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN; 640 link->link_speed = RTE_ETH_SPEED_NUM_NONE; 641 link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 642 643 return 0; 644 } 645 646 static int ena_queue_start_all(struct rte_eth_dev *dev, 647 enum ena_ring_type ring_type) 648 { 649 struct ena_adapter *adapter = dev->data->dev_private; 650 struct ena_ring *queues = NULL; 651 int nb_queues; 652 int i = 0; 653 int rc = 0; 654 655 if (ring_type == ENA_RING_TYPE_RX) { 656 queues = adapter->rx_ring; 657 nb_queues = dev->data->nb_rx_queues; 658 } else { 659 queues = adapter->tx_ring; 660 nb_queues = dev->data->nb_tx_queues; 661 } 662 for (i = 0; i < nb_queues; i++) { 663 if (queues[i].configured) { 664 if (ring_type == ENA_RING_TYPE_RX) { 665 ena_assert_msg( 666 dev->data->rx_queues[i] == &queues[i], 667 "Inconsistent state of Rx queues\n"); 668 } else { 669 ena_assert_msg( 670 dev->data->tx_queues[i] == &queues[i], 671 "Inconsistent state of Tx queues\n"); 672 } 673 674 rc = ena_queue_start(dev, &queues[i]); 675 676 if (rc) { 677 PMD_INIT_LOG(ERR, 678 "Failed to start queue[%d] of type(%d)\n", 679 i, ring_type); 680 goto err; 681 } 682 } 683 } 684 685 return 0; 686 687 err: 688 while (i--) 689 if (queues[i].configured) 690 ena_queue_stop(&queues[i]); 691 692 return rc; 693 } 694 695 static int ena_check_valid_conf(struct ena_adapter *adapter) 696 { 697 uint32_t mtu = adapter->edev_data->mtu; 698 699 if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) { 700 PMD_INIT_LOG(ERR, 701 "Unsupported MTU of %d. Max MTU: %d, min MTU: %d\n", 702 mtu, adapter->max_mtu, ENA_MIN_MTU); 703 return ENA_COM_UNSUPPORTED; 704 } 705 706 return 0; 707 } 708 709 static int 710 ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx, 711 bool use_large_llq_hdr) 712 { 713 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 714 struct ena_com_dev *ena_dev = ctx->ena_dev; 715 uint32_t max_tx_queue_size; 716 uint32_t max_rx_queue_size; 717 718 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 719 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 720 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 721 max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth, 722 max_queue_ext->max_rx_sq_depth); 723 max_tx_queue_size = max_queue_ext->max_tx_cq_depth; 724 725 if (ena_dev->tx_mem_queue_type == 726 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 727 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 728 llq->max_llq_depth); 729 } else { 730 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 731 max_queue_ext->max_tx_sq_depth); 732 } 733 734 ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 735 max_queue_ext->max_per_packet_rx_descs); 736 ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 737 max_queue_ext->max_per_packet_tx_descs); 738 } else { 739 struct ena_admin_queue_feature_desc *max_queues = 740 &ctx->get_feat_ctx->max_queues; 741 max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth, 742 max_queues->max_sq_depth); 743 max_tx_queue_size = max_queues->max_cq_depth; 744 745 if (ena_dev->tx_mem_queue_type == 746 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 747 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 748 llq->max_llq_depth); 749 } else { 750 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 751 max_queues->max_sq_depth); 752 } 753 754 ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 755 max_queues->max_packet_rx_descs); 756 ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 757 max_queues->max_packet_tx_descs); 758 } 759 760 /* Round down to the nearest power of 2 */ 761 max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size); 762 
max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size); 763 764 if (use_large_llq_hdr) { 765 if ((llq->entry_size_ctrl_supported & 766 ENA_ADMIN_LIST_ENTRY_SIZE_256B) && 767 (ena_dev->tx_mem_queue_type == 768 ENA_ADMIN_PLACEMENT_POLICY_DEV)) { 769 max_tx_queue_size /= 2; 770 PMD_INIT_LOG(INFO, 771 "Forcing large headers and decreasing maximum Tx queue size to %d\n", 772 max_tx_queue_size); 773 } else { 774 PMD_INIT_LOG(ERR, 775 "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); 776 } 777 } 778 779 if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) { 780 PMD_INIT_LOG(ERR, "Invalid queue size\n"); 781 return -EFAULT; 782 } 783 784 ctx->max_tx_queue_size = max_tx_queue_size; 785 ctx->max_rx_queue_size = max_rx_queue_size; 786 787 return 0; 788 } 789 790 static void ena_stats_restart(struct rte_eth_dev *dev) 791 { 792 struct ena_adapter *adapter = dev->data->dev_private; 793 794 rte_atomic64_init(&adapter->drv_stats->ierrors); 795 rte_atomic64_init(&adapter->drv_stats->oerrors); 796 rte_atomic64_init(&adapter->drv_stats->rx_nombuf); 797 adapter->drv_stats->rx_drops = 0; 798 } 799 800 static int ena_stats_get(struct rte_eth_dev *dev, 801 struct rte_eth_stats *stats) 802 { 803 struct ena_admin_basic_stats ena_stats; 804 struct ena_adapter *adapter = dev->data->dev_private; 805 struct ena_com_dev *ena_dev = &adapter->ena_dev; 806 int rc; 807 int i; 808 int max_rings_stats; 809 810 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 811 return -ENOTSUP; 812 813 memset(&ena_stats, 0, sizeof(ena_stats)); 814 815 rte_spinlock_lock(&adapter->admin_lock); 816 rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats); 817 rte_spinlock_unlock(&adapter->admin_lock); 818 if (unlikely(rc)) { 819 PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n"); 820 return rc; 821 } 822 823 /* Set of basic statistics from ENA */ 824 stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, 825 ena_stats.rx_pkts_low); 826 stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, 827 ena_stats.tx_pkts_low); 828 stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, 829 ena_stats.rx_bytes_low); 830 stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, 831 ena_stats.tx_bytes_low); 832 833 /* Driver related stats */ 834 stats->imissed = adapter->drv_stats->rx_drops; 835 stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); 836 stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); 837 stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); 838 839 max_rings_stats = RTE_MIN(dev->data->nb_rx_queues, 840 RTE_ETHDEV_QUEUE_STAT_CNTRS); 841 for (i = 0; i < max_rings_stats; ++i) { 842 struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats; 843 844 stats->q_ibytes[i] = rx_stats->bytes; 845 stats->q_ipackets[i] = rx_stats->cnt; 846 stats->q_errors[i] = rx_stats->bad_desc_num + 847 rx_stats->bad_req_id; 848 } 849 850 max_rings_stats = RTE_MIN(dev->data->nb_tx_queues, 851 RTE_ETHDEV_QUEUE_STAT_CNTRS); 852 for (i = 0; i < max_rings_stats; ++i) { 853 struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats; 854 855 stats->q_obytes[i] = tx_stats->bytes; 856 stats->q_opackets[i] = tx_stats->cnt; 857 } 858 859 return 0; 860 } 861 862 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 863 { 864 struct ena_adapter *adapter; 865 struct ena_com_dev *ena_dev; 866 int rc = 0; 867 868 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 869 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 870 
adapter = dev->data->dev_private; 871 872 ena_dev = &adapter->ena_dev; 873 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 874 875 if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) { 876 PMD_DRV_LOG(ERR, 877 "Invalid MTU setting. New MTU: %d, max MTU: %d, min MTU: %d\n", 878 mtu, adapter->max_mtu, ENA_MIN_MTU); 879 return -EINVAL; 880 } 881 882 rc = ena_com_set_dev_mtu(ena_dev, mtu); 883 if (rc) 884 PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu); 885 else 886 PMD_DRV_LOG(NOTICE, "MTU set to: %d\n", mtu); 887 888 return rc; 889 } 890 891 static int ena_start(struct rte_eth_dev *dev) 892 { 893 struct ena_adapter *adapter = dev->data->dev_private; 894 uint64_t ticks; 895 int rc = 0; 896 897 /* Cannot allocate memory in secondary process */ 898 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 899 PMD_DRV_LOG(WARNING, "dev_start not supported in secondary.\n"); 900 return -EPERM; 901 } 902 903 rc = ena_check_valid_conf(adapter); 904 if (rc) 905 return rc; 906 907 rc = ena_setup_rx_intr(dev); 908 if (rc) 909 return rc; 910 911 rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX); 912 if (rc) 913 return rc; 914 915 rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX); 916 if (rc) 917 goto err_start_tx; 918 919 if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 920 rc = ena_rss_configure(adapter); 921 if (rc) 922 goto err_rss_init; 923 } 924 925 ena_stats_restart(dev); 926 927 adapter->timestamp_wd = rte_get_timer_cycles(); 928 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 929 930 ticks = rte_get_timer_hz(); 931 rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), 932 ena_timer_wd_callback, dev); 933 934 ++adapter->dev_stats.dev_start; 935 adapter->state = ENA_ADAPTER_STATE_RUNNING; 936 937 return 0; 938 939 err_rss_init: 940 ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 941 err_start_tx: 942 ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 943 return rc; 944 } 945 946 static int ena_stop(struct rte_eth_dev *dev) 947 { 948 struct ena_adapter *adapter = dev->data->dev_private; 949 struct ena_com_dev *ena_dev = &adapter->ena_dev; 950 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 951 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 952 int rc; 953 954 /* Cannot free memory in secondary process */ 955 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 956 PMD_DRV_LOG(WARNING, "dev_stop not supported in secondary.\n"); 957 return -EPERM; 958 } 959 960 rte_timer_stop_sync(&adapter->timer_wd); 961 ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 962 ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 963 964 if (adapter->trigger_reset) { 965 rc = ena_com_dev_reset(ena_dev, adapter->reset_reason); 966 if (rc) 967 PMD_DRV_LOG(ERR, "Device reset failed, rc: %d\n", rc); 968 } 969 970 rte_intr_disable(intr_handle); 971 972 rte_intr_efd_disable(intr_handle); 973 974 /* Cleanup vector list */ 975 rte_intr_vec_list_free(intr_handle); 976 977 rte_intr_enable(intr_handle); 978 979 ++adapter->dev_stats.dev_stop; 980 adapter->state = ENA_ADAPTER_STATE_STOPPED; 981 dev->data->dev_started = 0; 982 983 return 0; 984 } 985 986 static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring) 987 { 988 struct ena_adapter *adapter = ring->adapter; 989 struct ena_com_dev *ena_dev = &adapter->ena_dev; 990 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 991 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 992 struct ena_com_create_io_ctx ctx = 993 /* policy set to _HOST just to satisfy icc compiler */ 994 { 
ENA_ADMIN_PLACEMENT_POLICY_HOST, 995 0, 0, 0, 0, 0 }; 996 uint16_t ena_qid; 997 unsigned int i; 998 int rc; 999 1000 ctx.msix_vector = -1; 1001 if (ring->type == ENA_RING_TYPE_TX) { 1002 ena_qid = ENA_IO_TXQ_IDX(ring->id); 1003 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1004 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1005 for (i = 0; i < ring->ring_size; i++) 1006 ring->empty_tx_reqs[i] = i; 1007 } else { 1008 ena_qid = ENA_IO_RXQ_IDX(ring->id); 1009 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 1010 if (rte_intr_dp_is_en(intr_handle)) 1011 ctx.msix_vector = 1012 rte_intr_vec_list_index_get(intr_handle, 1013 ring->id); 1014 1015 for (i = 0; i < ring->ring_size; i++) 1016 ring->empty_rx_reqs[i] = i; 1017 } 1018 ctx.queue_size = ring->ring_size; 1019 ctx.qid = ena_qid; 1020 ctx.numa_node = ring->numa_socket_id; 1021 1022 rc = ena_com_create_io_queue(ena_dev, &ctx); 1023 if (rc) { 1024 PMD_DRV_LOG(ERR, 1025 "Failed to create IO queue[%d] (qid:%d), rc: %d\n", 1026 ring->id, ena_qid, rc); 1027 return rc; 1028 } 1029 1030 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1031 &ring->ena_com_io_sq, 1032 &ring->ena_com_io_cq); 1033 if (rc) { 1034 PMD_DRV_LOG(ERR, 1035 "Failed to get IO queue[%d] handlers, rc: %d\n", 1036 ring->id, rc); 1037 ena_com_destroy_io_queue(ena_dev, ena_qid); 1038 return rc; 1039 } 1040 1041 if (ring->type == ENA_RING_TYPE_TX) 1042 ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); 1043 1044 /* Start with Rx interrupts being masked. */ 1045 if (ring->type == ENA_RING_TYPE_RX && rte_intr_dp_is_en(intr_handle)) 1046 ena_rx_queue_intr_disable(dev, ring->id); 1047 1048 return 0; 1049 } 1050 1051 static void ena_queue_stop(struct ena_ring *ring) 1052 { 1053 struct ena_com_dev *ena_dev = &ring->adapter->ena_dev; 1054 1055 if (ring->type == ENA_RING_TYPE_RX) { 1056 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id)); 1057 ena_rx_queue_release_bufs(ring); 1058 } else { 1059 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id)); 1060 ena_tx_queue_release_bufs(ring); 1061 } 1062 } 1063 1064 static void ena_queue_stop_all(struct rte_eth_dev *dev, 1065 enum ena_ring_type ring_type) 1066 { 1067 struct ena_adapter *adapter = dev->data->dev_private; 1068 struct ena_ring *queues = NULL; 1069 uint16_t nb_queues, i; 1070 1071 if (ring_type == ENA_RING_TYPE_RX) { 1072 queues = adapter->rx_ring; 1073 nb_queues = dev->data->nb_rx_queues; 1074 } else { 1075 queues = adapter->tx_ring; 1076 nb_queues = dev->data->nb_tx_queues; 1077 } 1078 1079 for (i = 0; i < nb_queues; ++i) 1080 if (queues[i].configured) 1081 ena_queue_stop(&queues[i]); 1082 } 1083 1084 static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring) 1085 { 1086 int rc, bufs_num; 1087 1088 ena_assert_msg(ring->configured == 1, 1089 "Trying to start unconfigured queue\n"); 1090 1091 rc = ena_create_io_queue(dev, ring); 1092 if (rc) { 1093 PMD_INIT_LOG(ERR, "Failed to create IO queue\n"); 1094 return rc; 1095 } 1096 1097 ring->next_to_clean = 0; 1098 ring->next_to_use = 0; 1099 1100 if (ring->type == ENA_RING_TYPE_TX) { 1101 ring->tx_stats.available_desc = 1102 ena_com_free_q_entries(ring->ena_com_io_sq); 1103 return 0; 1104 } 1105 1106 bufs_num = ring->ring_size - 1; 1107 rc = ena_populate_rx_queue(ring, bufs_num); 1108 if (rc != bufs_num) { 1109 ena_com_destroy_io_queue(&ring->adapter->ena_dev, 1110 ENA_IO_RXQ_IDX(ring->id)); 1111 PMD_INIT_LOG(ERR, "Failed to populate Rx ring\n"); 1112 return ENA_COM_FAULT; 1113 } 1114 /* Flush per-core RX buffers pools cache as they can be used on 
other 1115 * cores as well. 1116 */ 1117 rte_mempool_cache_flush(NULL, ring->mb_pool); 1118 1119 return 0; 1120 } 1121 1122 static int ena_tx_queue_setup(struct rte_eth_dev *dev, 1123 uint16_t queue_idx, 1124 uint16_t nb_desc, 1125 unsigned int socket_id, 1126 const struct rte_eth_txconf *tx_conf) 1127 { 1128 struct ena_ring *txq = NULL; 1129 struct ena_adapter *adapter = dev->data->dev_private; 1130 unsigned int i; 1131 uint16_t dyn_thresh; 1132 1133 txq = &adapter->tx_ring[queue_idx]; 1134 1135 if (txq->configured) { 1136 PMD_DRV_LOG(CRIT, 1137 "API violation. Queue[%d] is already configured\n", 1138 queue_idx); 1139 return ENA_COM_FAULT; 1140 } 1141 1142 if (!rte_is_power_of_2(nb_desc)) { 1143 PMD_DRV_LOG(ERR, 1144 "Unsupported size of Tx queue: %d is not a power of 2.\n", 1145 nb_desc); 1146 return -EINVAL; 1147 } 1148 1149 if (nb_desc > adapter->max_tx_ring_size) { 1150 PMD_DRV_LOG(ERR, 1151 "Unsupported size of Tx queue (max size: %d)\n", 1152 adapter->max_tx_ring_size); 1153 return -EINVAL; 1154 } 1155 1156 txq->port_id = dev->data->port_id; 1157 txq->next_to_clean = 0; 1158 txq->next_to_use = 0; 1159 txq->ring_size = nb_desc; 1160 txq->size_mask = nb_desc - 1; 1161 txq->numa_socket_id = socket_id; 1162 txq->pkts_without_db = false; 1163 txq->last_cleanup_ticks = 0; 1164 1165 txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info", 1166 sizeof(struct ena_tx_buffer) * txq->ring_size, 1167 RTE_CACHE_LINE_SIZE, 1168 socket_id); 1169 if (!txq->tx_buffer_info) { 1170 PMD_DRV_LOG(ERR, 1171 "Failed to allocate memory for Tx buffer info\n"); 1172 return -ENOMEM; 1173 } 1174 1175 txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs", 1176 sizeof(uint16_t) * txq->ring_size, 1177 RTE_CACHE_LINE_SIZE, 1178 socket_id); 1179 if (!txq->empty_tx_reqs) { 1180 PMD_DRV_LOG(ERR, 1181 "Failed to allocate memory for empty Tx requests\n"); 1182 rte_free(txq->tx_buffer_info); 1183 return -ENOMEM; 1184 } 1185 1186 txq->push_buf_intermediate_buf = 1187 rte_zmalloc_socket("txq->push_buf_intermediate_buf", 1188 txq->tx_max_header_size, 1189 RTE_CACHE_LINE_SIZE, 1190 socket_id); 1191 if (!txq->push_buf_intermediate_buf) { 1192 PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n"); 1193 rte_free(txq->tx_buffer_info); 1194 rte_free(txq->empty_tx_reqs); 1195 return -ENOMEM; 1196 } 1197 1198 for (i = 0; i < txq->ring_size; i++) 1199 txq->empty_tx_reqs[i] = i; 1200 1201 txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 1202 1203 /* Check if caller provided the Tx cleanup threshold value. 
*/ 1204 if (tx_conf->tx_free_thresh != 0) { 1205 txq->tx_free_thresh = tx_conf->tx_free_thresh; 1206 } else { 1207 dyn_thresh = txq->ring_size - 1208 txq->ring_size / ENA_REFILL_THRESH_DIVIDER; 1209 txq->tx_free_thresh = RTE_MAX(dyn_thresh, 1210 txq->ring_size - ENA_REFILL_THRESH_PACKET); 1211 } 1212 1213 txq->missing_tx_completion_threshold = 1214 RTE_MIN(txq->ring_size / 2, ENA_DEFAULT_MISSING_COMP); 1215 1216 /* Store pointer to this queue in upper layer */ 1217 txq->configured = 1; 1218 dev->data->tx_queues[queue_idx] = txq; 1219 1220 return 0; 1221 } 1222 1223 static int ena_rx_queue_setup(struct rte_eth_dev *dev, 1224 uint16_t queue_idx, 1225 uint16_t nb_desc, 1226 unsigned int socket_id, 1227 const struct rte_eth_rxconf *rx_conf, 1228 struct rte_mempool *mp) 1229 { 1230 struct ena_adapter *adapter = dev->data->dev_private; 1231 struct ena_ring *rxq = NULL; 1232 size_t buffer_size; 1233 int i; 1234 uint16_t dyn_thresh; 1235 1236 rxq = &adapter->rx_ring[queue_idx]; 1237 if (rxq->configured) { 1238 PMD_DRV_LOG(CRIT, 1239 "API violation. Queue[%d] is already configured\n", 1240 queue_idx); 1241 return ENA_COM_FAULT; 1242 } 1243 1244 if (!rte_is_power_of_2(nb_desc)) { 1245 PMD_DRV_LOG(ERR, 1246 "Unsupported size of Rx queue: %d is not a power of 2.\n", 1247 nb_desc); 1248 return -EINVAL; 1249 } 1250 1251 if (nb_desc > adapter->max_rx_ring_size) { 1252 PMD_DRV_LOG(ERR, 1253 "Unsupported size of Rx queue (max size: %d)\n", 1254 adapter->max_rx_ring_size); 1255 return -EINVAL; 1256 } 1257 1258 /* ENA isn't supporting buffers smaller than 1400 bytes */ 1259 buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; 1260 if (buffer_size < ENA_RX_BUF_MIN_SIZE) { 1261 PMD_DRV_LOG(ERR, 1262 "Unsupported size of Rx buffer: %zu (min size: %d)\n", 1263 buffer_size, ENA_RX_BUF_MIN_SIZE); 1264 return -EINVAL; 1265 } 1266 1267 rxq->port_id = dev->data->port_id; 1268 rxq->next_to_clean = 0; 1269 rxq->next_to_use = 0; 1270 rxq->ring_size = nb_desc; 1271 rxq->size_mask = nb_desc - 1; 1272 rxq->numa_socket_id = socket_id; 1273 rxq->mb_pool = mp; 1274 1275 rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info", 1276 sizeof(struct ena_rx_buffer) * nb_desc, 1277 RTE_CACHE_LINE_SIZE, 1278 socket_id); 1279 if (!rxq->rx_buffer_info) { 1280 PMD_DRV_LOG(ERR, 1281 "Failed to allocate memory for Rx buffer info\n"); 1282 return -ENOMEM; 1283 } 1284 1285 rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer", 1286 sizeof(struct rte_mbuf *) * nb_desc, 1287 RTE_CACHE_LINE_SIZE, 1288 socket_id); 1289 if (!rxq->rx_refill_buffer) { 1290 PMD_DRV_LOG(ERR, 1291 "Failed to allocate memory for Rx refill buffer\n"); 1292 rte_free(rxq->rx_buffer_info); 1293 rxq->rx_buffer_info = NULL; 1294 return -ENOMEM; 1295 } 1296 1297 rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs", 1298 sizeof(uint16_t) * nb_desc, 1299 RTE_CACHE_LINE_SIZE, 1300 socket_id); 1301 if (!rxq->empty_rx_reqs) { 1302 PMD_DRV_LOG(ERR, 1303 "Failed to allocate memory for empty Rx requests\n"); 1304 rte_free(rxq->rx_buffer_info); 1305 rxq->rx_buffer_info = NULL; 1306 rte_free(rxq->rx_refill_buffer); 1307 rxq->rx_refill_buffer = NULL; 1308 return -ENOMEM; 1309 } 1310 1311 for (i = 0; i < nb_desc; i++) 1312 rxq->empty_rx_reqs[i] = i; 1313 1314 rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; 1315 1316 if (rx_conf->rx_free_thresh != 0) { 1317 rxq->rx_free_thresh = rx_conf->rx_free_thresh; 1318 } else { 1319 dyn_thresh = rxq->ring_size / ENA_REFILL_THRESH_DIVIDER; 1320 rxq->rx_free_thresh = 
RTE_MIN(dyn_thresh, 1321 (uint16_t)(ENA_REFILL_THRESH_PACKET)); 1322 } 1323 1324 /* Store pointer to this queue in upper layer */ 1325 rxq->configured = 1; 1326 dev->data->rx_queues[queue_idx] = rxq; 1327 1328 return 0; 1329 } 1330 1331 static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq, 1332 struct rte_mbuf *mbuf, uint16_t id) 1333 { 1334 struct ena_com_buf ebuf; 1335 int rc; 1336 1337 /* prepare physical address for DMA transaction */ 1338 ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; 1339 ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; 1340 1341 /* pass resource to device */ 1342 rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id); 1343 if (unlikely(rc != 0)) 1344 PMD_RX_LOG(WARNING, "Failed adding Rx desc\n"); 1345 1346 return rc; 1347 } 1348 1349 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) 1350 { 1351 unsigned int i; 1352 int rc; 1353 uint16_t next_to_use = rxq->next_to_use; 1354 uint16_t req_id; 1355 #ifdef RTE_ETHDEV_DEBUG_RX 1356 uint16_t in_use; 1357 #endif 1358 struct rte_mbuf **mbufs = rxq->rx_refill_buffer; 1359 1360 if (unlikely(!count)) 1361 return 0; 1362 1363 #ifdef RTE_ETHDEV_DEBUG_RX 1364 in_use = rxq->ring_size - 1 - 1365 ena_com_free_q_entries(rxq->ena_com_io_sq); 1366 if (unlikely((in_use + count) >= rxq->ring_size)) 1367 PMD_RX_LOG(ERR, "Bad Rx ring state\n"); 1368 #endif 1369 1370 /* get resources for incoming packets */ 1371 rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, count); 1372 if (unlikely(rc < 0)) { 1373 rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); 1374 ++rxq->rx_stats.mbuf_alloc_fail; 1375 PMD_RX_LOG(DEBUG, "There are not enough free buffers\n"); 1376 return 0; 1377 } 1378 1379 for (i = 0; i < count; i++) { 1380 struct rte_mbuf *mbuf = mbufs[i]; 1381 struct ena_rx_buffer *rx_info; 1382 1383 if (likely((i + 4) < count)) 1384 rte_prefetch0(mbufs[i + 4]); 1385 1386 req_id = rxq->empty_rx_reqs[next_to_use]; 1387 rx_info = &rxq->rx_buffer_info[req_id]; 1388 1389 rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id); 1390 if (unlikely(rc != 0)) 1391 break; 1392 1393 rx_info->mbuf = mbuf; 1394 next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask); 1395 } 1396 1397 if (unlikely(i < count)) { 1398 PMD_RX_LOG(WARNING, 1399 "Refilled Rx queue[%d] with only %d/%d buffers\n", 1400 rxq->id, i, count); 1401 rte_pktmbuf_free_bulk(&mbufs[i], count - i); 1402 ++rxq->rx_stats.refill_partial; 1403 } 1404 1405 /* When we submitted free resources to device... */ 1406 if (likely(i > 0)) { 1407 /* ...let HW know that it can fill buffers with data. */ 1408 ena_com_write_sq_doorbell(rxq->ena_com_io_sq); 1409 1410 rxq->next_to_use = next_to_use; 1411 } 1412 1413 return i; 1414 } 1415 1416 static int ena_device_init(struct ena_com_dev *ena_dev, 1417 struct rte_pci_device *pdev, 1418 struct ena_com_dev_get_features_ctx *get_feat_ctx, 1419 bool *wd_state) 1420 { 1421 uint32_t aenq_groups; 1422 int rc; 1423 bool readless_supported; 1424 1425 /* Initialize mmio registers */ 1426 rc = ena_com_mmio_reg_read_request_init(ena_dev); 1427 if (rc) { 1428 PMD_DRV_LOG(ERR, "Failed to init MMIO read less\n"); 1429 return rc; 1430 } 1431 1432 /* The PCIe configuration space revision id indicate if mmio reg 1433 * read is disabled. 
1434 */ 1435 readless_supported = !(pdev->id.class_id & ENA_MMIO_DISABLE_REG_READ); 1436 ena_com_set_mmio_read_mode(ena_dev, readless_supported); 1437 1438 /* reset device */ 1439 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 1440 if (rc) { 1441 PMD_DRV_LOG(ERR, "Cannot reset device\n"); 1442 goto err_mmio_read_less; 1443 } 1444 1445 /* check FW version */ 1446 rc = ena_com_validate_version(ena_dev); 1447 if (rc) { 1448 PMD_DRV_LOG(ERR, "Device version is too low\n"); 1449 goto err_mmio_read_less; 1450 } 1451 1452 ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); 1453 1454 /* ENA device administration layer init */ 1455 rc = ena_com_admin_init(ena_dev, &aenq_handlers); 1456 if (rc) { 1457 PMD_DRV_LOG(ERR, 1458 "Cannot initialize ENA admin queue\n"); 1459 goto err_mmio_read_less; 1460 } 1461 1462 /* To enable the msix interrupts the driver needs to know the number 1463 * of queues. So the driver uses polling mode to retrieve this 1464 * information. 1465 */ 1466 ena_com_set_admin_polling_mode(ena_dev, true); 1467 1468 ena_config_host_info(ena_dev); 1469 1470 /* Get Device Attributes and features */ 1471 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 1472 if (rc) { 1473 PMD_DRV_LOG(ERR, 1474 "Cannot get attribute for ENA device, rc: %d\n", rc); 1475 goto err_admin_init; 1476 } 1477 1478 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1479 BIT(ENA_ADMIN_NOTIFICATION) | 1480 BIT(ENA_ADMIN_KEEP_ALIVE) | 1481 BIT(ENA_ADMIN_FATAL_ERROR) | 1482 BIT(ENA_ADMIN_WARNING); 1483 1484 aenq_groups &= get_feat_ctx->aenq.supported_groups; 1485 rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 1486 if (rc) { 1487 PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc: %d\n", rc); 1488 goto err_admin_init; 1489 } 1490 1491 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 1492 1493 return 0; 1494 1495 err_admin_init: 1496 ena_com_admin_destroy(ena_dev); 1497 1498 err_mmio_read_less: 1499 ena_com_mmio_reg_read_request_destroy(ena_dev); 1500 1501 return rc; 1502 } 1503 1504 static void ena_interrupt_handler_rte(void *cb_arg) 1505 { 1506 struct rte_eth_dev *dev = cb_arg; 1507 struct ena_adapter *adapter = dev->data->dev_private; 1508 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1509 1510 ena_com_admin_q_comp_intr_handler(ena_dev); 1511 if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) 1512 ena_com_aenq_intr_handler(ena_dev, dev); 1513 } 1514 1515 static void check_for_missing_keep_alive(struct ena_adapter *adapter) 1516 { 1517 if (!adapter->wd_state) 1518 return; 1519 1520 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 1521 return; 1522 1523 if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 1524 adapter->keep_alive_timeout)) { 1525 PMD_DRV_LOG(ERR, "Keep alive timeout\n"); 1526 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 1527 adapter->trigger_reset = true; 1528 ++adapter->dev_stats.wd_expired; 1529 } 1530 } 1531 1532 /* Check if admin queue is enabled */ 1533 static void check_for_admin_com_state(struct ena_adapter *adapter) 1534 { 1535 if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 1536 PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n"); 1537 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 1538 adapter->trigger_reset = true; 1539 } 1540 } 1541 1542 static int check_for_tx_completion_in_queue(struct ena_adapter *adapter, 1543 struct ena_ring *tx_ring) 1544 { 1545 struct ena_tx_buffer *tx_buf; 1546 uint64_t timestamp; 1547 uint64_t completion_delay; 1548 uint32_t missed_tx = 0; 1549 unsigned int i; 1550 int 
rc = 0;

	for (i = 0; i < tx_ring->ring_size; ++i) {
		tx_buf = &tx_ring->tx_buffer_info[i];
		timestamp = tx_buf->timestamp;

		if (timestamp == 0)
			continue;

		completion_delay = rte_get_timer_cycles() - timestamp;
		if (completion_delay > adapter->missing_tx_completion_to) {
			if (unlikely(!tx_buf->print_once)) {
				/* Convert ticks to msecs before dividing by the
				 * timer frequency to avoid truncating
				 * sub-second delays to 0.
				 */
				PMD_TX_LOG(WARNING,
					"Found a Tx that wasn't completed on time, qid %d, index %d. "
					"Missing Tx outstanding for %" PRIu64 " msecs.\n",
					tx_ring->id, i,
					completion_delay * 1000 /
					rte_get_timer_hz());
				tx_buf->print_once = true;
			}
			++missed_tx;
		}
	}

	if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) {
		PMD_DRV_LOG(ERR,
			"The number of lost Tx completions is above the threshold (%d > %d). "
			"Triggering the device reset.\n",
			missed_tx,
			tx_ring->missing_tx_completion_threshold);
		adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
		adapter->trigger_reset = true;
		rc = -EIO;
	}

	tx_ring->tx_stats.missed_tx += missed_tx;

	return rc;
}

static void check_for_tx_completions(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	uint64_t tx_cleanup_delay;
	size_t qid;
	int budget;
	uint16_t nb_tx_queues = adapter->edev_data->nb_tx_queues;

	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	budget = adapter->missing_tx_completion_budget;

	qid = adapter->last_tx_comp_qid;
	while (budget-- > 0) {
		tx_ring = &adapter->tx_ring[qid];

		/* Tx cleanup is called only by the burst function and can be
		 * called dynamically by the application. Also cleanup is
		 * limited by the threshold. To avoid false detection of the
		 * missing HW Tx completion, get the delay since last cleanup
		 * function was called.
1612 */ 1613 tx_cleanup_delay = rte_get_timer_cycles() - 1614 tx_ring->last_cleanup_ticks; 1615 if (tx_cleanup_delay < adapter->tx_cleanup_stall_delay) 1616 check_for_tx_completion_in_queue(adapter, tx_ring); 1617 qid = (qid + 1) % nb_tx_queues; 1618 } 1619 1620 adapter->last_tx_comp_qid = qid; 1621 } 1622 1623 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1624 void *arg) 1625 { 1626 struct rte_eth_dev *dev = arg; 1627 struct ena_adapter *adapter = dev->data->dev_private; 1628 1629 check_for_missing_keep_alive(adapter); 1630 check_for_admin_com_state(adapter); 1631 check_for_tx_completions(adapter); 1632 1633 if (unlikely(adapter->trigger_reset)) { 1634 PMD_DRV_LOG(ERR, "Trigger reset is on\n"); 1635 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 1636 NULL); 1637 } 1638 } 1639 1640 static inline void 1641 set_default_llq_configurations(struct ena_llq_configurations *llq_config, 1642 struct ena_admin_feature_llq_desc *llq, 1643 bool use_large_llq_hdr) 1644 { 1645 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 1646 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 1647 llq_config->llq_num_decs_before_header = 1648 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 1649 1650 if (use_large_llq_hdr && 1651 (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) { 1652 llq_config->llq_ring_entry_size = 1653 ENA_ADMIN_LIST_ENTRY_SIZE_256B; 1654 llq_config->llq_ring_entry_size_value = 256; 1655 } else { 1656 llq_config->llq_ring_entry_size = 1657 ENA_ADMIN_LIST_ENTRY_SIZE_128B; 1658 llq_config->llq_ring_entry_size_value = 128; 1659 } 1660 } 1661 1662 static int 1663 ena_set_queues_placement_policy(struct ena_adapter *adapter, 1664 struct ena_com_dev *ena_dev, 1665 struct ena_admin_feature_llq_desc *llq, 1666 struct ena_llq_configurations *llq_default_configurations) 1667 { 1668 int rc; 1669 u32 llq_feature_mask; 1670 1671 llq_feature_mask = 1 << ENA_ADMIN_LLQ; 1672 if (!(ena_dev->supported_features & llq_feature_mask)) { 1673 PMD_DRV_LOG(INFO, 1674 "LLQ is not supported. Fallback to host mode policy.\n"); 1675 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 1676 return 0; 1677 } 1678 1679 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 1680 if (unlikely(rc)) { 1681 PMD_INIT_LOG(WARNING, 1682 "Failed to config dev mode. Fallback to host mode policy.\n"); 1683 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 1684 return 0; 1685 } 1686 1687 /* Nothing to config, exit */ 1688 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 1689 return 0; 1690 1691 if (!adapter->dev_mem_base) { 1692 PMD_DRV_LOG(ERR, 1693 "Unable to access LLQ BAR resource. 
Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	ena_dev->mem_bar = adapter->dev_mem_base;

	return 0;
}

static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
	struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;

	/* Regular queues capabilities */
	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&get_feat_ctx->max_queue_ext.max_queue_ext;
		io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,
				    max_queue_ext->max_rx_cq_num);
		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&get_feat_ctx->max_queues;
		io_tx_sq_num = max_queues->max_sq_num;
		io_tx_cq_num = max_queues->max_cq_num;
		io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);
	}

	/* In case of LLQ use the llq number in the get feature cmd */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;

	max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);

	if (unlikely(max_num_io_queues == 0)) {
		PMD_DRV_LOG(ERR, "Number of IO queues cannot be 0\n");
		/* Return 0 so the caller's max_num_io_queues == 0 check
		 * catches the error; a negative value would wrap around in
		 * the unsigned return type and be missed.
		 */
		return 0;
	}

	return max_num_io_queues;
}

static void
ena_set_offloads(struct ena_offloads *offloads,
		 struct ena_admin_feature_offload_desc *offload_desc)
{
	if (offload_desc->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
		offloads->tx_offloads |= ENA_IPV4_TSO;

	/* Tx IPv4 checksum offloads */
	if (offload_desc->tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)
		offloads->tx_offloads |= ENA_L3_IPV4_CSUM;
	if (offload_desc->tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK)
		offloads->tx_offloads |= ENA_L4_IPV4_CSUM;
	if (offload_desc->tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
		offloads->tx_offloads |= ENA_L4_IPV4_CSUM_PARTIAL;

	/* Tx IPv6 checksum offloads */
	if (offload_desc->tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK)
		offloads->tx_offloads |= ENA_L4_IPV6_CSUM;
	if (offload_desc->tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
		offloads->tx_offloads |= ENA_L4_IPV6_CSUM_PARTIAL;

	/* Rx IPv4 checksum offloads */
	if (offload_desc->rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)
		offloads->rx_offloads |= ENA_L3_IPV4_CSUM;
	if (offload_desc->rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
		offloads->rx_offloads |= ENA_L4_IPV4_CSUM;

	/* Rx IPv6 checksum offloads */
	if (offload_desc->rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
		offloads->rx_offloads |= ENA_L4_IPV6_CSUM;

	if (offload_desc->rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
		offloads->rx_offloads |= ENA_RX_RSS_HASH;
}

static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
{
	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_llq_configurations llq_config;
	const char *queue_type_str;
	uint32_t max_num_io_queues;
	int rc;
	static int adapters_found;
	bool disable_meta_caching;
	bool wd_state = false;

	eth_dev->dev_ops = &ena_dev_ops;
	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	memset(adapter, 0, sizeof(struct ena_adapter));
	ena_dev = &adapter->ena_dev;

	adapter->edev_data = eth_dev->data;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
		     pci_dev->addr.domain,
		     pci_dev->addr.bus,
		     pci_dev->addr.devid,
		     pci_dev->addr.function);

	intr_handle = pci_dev->intr_handle;

	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;

	if (!adapter->regs) {
		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
			     ENA_REGS_BAR);
		return -ENXIO;
	}

	ena_dev->reg_bar = adapter->regs;
	/* This is a dummy pointer for ena_com functions. */
	ena_dev->dmadev = adapter;

	adapter->id_number = adapters_found;

	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
		 adapter->id_number);

	rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
	if (rc != 0) {
		PMD_INIT_LOG(CRIT, "Failed to parse devargs\n");
		goto err;
	}

	/* device specific initialization routine */
	rc = ena_device_init(ena_dev, pci_dev, &get_feat_ctx, &wd_state);
	if (rc) {
		PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
		goto err;
	}
	adapter->wd_state = wd_state;

	set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
		adapter->use_large_llq_hdr);
	rc = ena_set_queues_placement_policy(adapter, ena_dev,
					     &get_feat_ctx.llq, &llq_config);
	if (unlikely(rc)) {
		PMD_INIT_LOG(CRIT, "Failed to set placement policy\n");
		return rc;
	}

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		queue_type_str = "Regular";
	else
		queue_type_str = "Low latency";
	PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str);

	calc_queue_ctx.ena_dev = ena_dev;
	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;

	max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
	rc = ena_calc_io_queue_size(&calc_queue_ctx,
		adapter->use_large_llq_hdr);
	if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
		rc = -EFAULT;
		goto err_device_destroy;
	}

	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
	adapter->max_num_io_queues = max_num_io_queues;

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		disable_meta_caching =
			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
BIT(ENA_ADMIN_DISABLE_META_CACHING)); 1893 } else { 1894 disable_meta_caching = false; 1895 } 1896 1897 /* prepare ring structures */ 1898 ena_init_rings(adapter, disable_meta_caching); 1899 1900 ena_config_debug_area(adapter); 1901 1902 /* Set max MTU for this device */ 1903 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 1904 1905 ena_set_offloads(&adapter->offloads, &get_feat_ctx.offload); 1906 1907 /* Copy MAC address and point DPDK to it */ 1908 eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr; 1909 rte_ether_addr_copy((struct rte_ether_addr *) 1910 get_feat_ctx.dev_attr.mac_addr, 1911 (struct rte_ether_addr *)adapter->mac_addr); 1912 1913 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 1914 if (unlikely(rc != 0)) { 1915 PMD_DRV_LOG(ERR, "Failed to initialize RSS in ENA device\n"); 1916 goto err_delete_debug_area; 1917 } 1918 1919 adapter->drv_stats = rte_zmalloc("adapter stats", 1920 sizeof(*adapter->drv_stats), 1921 RTE_CACHE_LINE_SIZE); 1922 if (!adapter->drv_stats) { 1923 PMD_DRV_LOG(ERR, 1924 "Failed to allocate memory for adapter statistics\n"); 1925 rc = -ENOMEM; 1926 goto err_rss_destroy; 1927 } 1928 1929 rte_spinlock_init(&adapter->admin_lock); 1930 1931 rte_intr_callback_register(intr_handle, 1932 ena_interrupt_handler_rte, 1933 eth_dev); 1934 rte_intr_enable(intr_handle); 1935 ena_com_set_admin_polling_mode(ena_dev, false); 1936 ena_com_admin_aenq_enable(ena_dev); 1937 1938 if (adapters_found == 0) 1939 rte_timer_subsystem_init(); 1940 rte_timer_init(&adapter->timer_wd); 1941 1942 adapters_found++; 1943 adapter->state = ENA_ADAPTER_STATE_INIT; 1944 1945 return 0; 1946 1947 err_rss_destroy: 1948 ena_com_rss_destroy(ena_dev); 1949 err_delete_debug_area: 1950 ena_com_delete_debug_area(ena_dev); 1951 1952 err_device_destroy: 1953 ena_com_delete_host_info(ena_dev); 1954 ena_com_admin_destroy(ena_dev); 1955 1956 err: 1957 return rc; 1958 } 1959 1960 static void ena_destroy_device(struct rte_eth_dev *eth_dev) 1961 { 1962 struct ena_adapter *adapter = eth_dev->data->dev_private; 1963 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1964 1965 if (adapter->state == ENA_ADAPTER_STATE_FREE) 1966 return; 1967 1968 ena_com_set_admin_running_state(ena_dev, false); 1969 1970 if (adapter->state != ENA_ADAPTER_STATE_CLOSED) 1971 ena_close(eth_dev); 1972 1973 ena_com_rss_destroy(ena_dev); 1974 1975 ena_com_delete_debug_area(ena_dev); 1976 ena_com_delete_host_info(ena_dev); 1977 1978 ena_com_abort_admin_commands(ena_dev); 1979 ena_com_wait_for_abort_completion(ena_dev); 1980 ena_com_admin_destroy(ena_dev); 1981 ena_com_mmio_reg_read_request_destroy(ena_dev); 1982 1983 adapter->state = ENA_ADAPTER_STATE_FREE; 1984 } 1985 1986 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) 1987 { 1988 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1989 return 0; 1990 1991 ena_destroy_device(eth_dev); 1992 1993 return 0; 1994 } 1995 1996 static int ena_dev_configure(struct rte_eth_dev *dev) 1997 { 1998 struct ena_adapter *adapter = dev->data->dev_private; 1999 2000 adapter->state = ENA_ADAPTER_STATE_CONFIG; 2001 2002 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 2003 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2004 dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; 2005 2006 /* Scattered Rx cannot be turned off in the HW, so this capability must 2007 * be forced. 
2008 */ 2009 dev->data->scattered_rx = 1; 2010 2011 adapter->last_tx_comp_qid = 0; 2012 2013 adapter->missing_tx_completion_budget = 2014 RTE_MIN(ENA_MONITORED_TX_QUEUES, dev->data->nb_tx_queues); 2015 2016 adapter->missing_tx_completion_to = ENA_TX_TIMEOUT; 2017 /* To avoid detection of the spurious Tx completion timeout due to 2018 * application not calling the Tx cleanup function, set timeout for the 2019 * Tx queue which should be half of the missing completion timeout for a 2020 * safety. If there will be a lot of missing Tx completions in the 2021 * queue, they will be detected sooner or later. 2022 */ 2023 adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2; 2024 2025 adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads; 2026 adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads; 2027 2028 return 0; 2029 } 2030 2031 static void ena_init_rings(struct ena_adapter *adapter, 2032 bool disable_meta_caching) 2033 { 2034 size_t i; 2035 2036 for (i = 0; i < adapter->max_num_io_queues; i++) { 2037 struct ena_ring *ring = &adapter->tx_ring[i]; 2038 2039 ring->configured = 0; 2040 ring->type = ENA_RING_TYPE_TX; 2041 ring->adapter = adapter; 2042 ring->id = i; 2043 ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 2044 ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 2045 ring->sgl_size = adapter->max_tx_sgl_size; 2046 ring->disable_meta_caching = disable_meta_caching; 2047 } 2048 2049 for (i = 0; i < adapter->max_num_io_queues; i++) { 2050 struct ena_ring *ring = &adapter->rx_ring[i]; 2051 2052 ring->configured = 0; 2053 ring->type = ENA_RING_TYPE_RX; 2054 ring->adapter = adapter; 2055 ring->id = i; 2056 ring->sgl_size = adapter->max_rx_sgl_size; 2057 } 2058 } 2059 2060 static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter) 2061 { 2062 uint64_t port_offloads = 0; 2063 2064 if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM) 2065 port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM; 2066 2067 if (adapter->offloads.rx_offloads & 2068 (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM)) 2069 port_offloads |= 2070 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM; 2071 2072 if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH) 2073 port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2074 2075 port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER; 2076 2077 return port_offloads; 2078 } 2079 2080 static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter) 2081 { 2082 uint64_t port_offloads = 0; 2083 2084 if (adapter->offloads.tx_offloads & ENA_IPV4_TSO) 2085 port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO; 2086 2087 if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM) 2088 port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; 2089 if (adapter->offloads.tx_offloads & 2090 (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM | 2091 ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL)) 2092 port_offloads |= 2093 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM; 2094 2095 port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; 2096 2097 return port_offloads; 2098 } 2099 2100 static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter) 2101 { 2102 RTE_SET_USED(adapter); 2103 2104 return 0; 2105 } 2106 2107 static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter) 2108 { 2109 RTE_SET_USED(adapter); 2110 2111 return 0; 2112 } 2113 2114 static int ena_infos_get(struct rte_eth_dev *dev, 2115 struct rte_eth_dev_info *dev_info) 2116 { 2117 struct ena_adapter *adapter; 2118 struct ena_com_dev *ena_dev; 2119 2120 
ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 2121 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 2122 adapter = dev->data->dev_private; 2123 2124 ena_dev = &adapter->ena_dev; 2125 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 2126 2127 dev_info->speed_capa = 2128 RTE_ETH_LINK_SPEED_1G | 2129 RTE_ETH_LINK_SPEED_2_5G | 2130 RTE_ETH_LINK_SPEED_5G | 2131 RTE_ETH_LINK_SPEED_10G | 2132 RTE_ETH_LINK_SPEED_25G | 2133 RTE_ETH_LINK_SPEED_40G | 2134 RTE_ETH_LINK_SPEED_50G | 2135 RTE_ETH_LINK_SPEED_100G; 2136 2137 /* Inform framework about available features */ 2138 dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter); 2139 dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter); 2140 dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter); 2141 dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter); 2142 2143 dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF; 2144 dev_info->hash_key_size = ENA_HASH_KEY_SIZE; 2145 2146 dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 2147 dev_info->max_rx_pktlen = adapter->max_mtu + RTE_ETHER_HDR_LEN + 2148 RTE_ETHER_CRC_LEN; 2149 dev_info->min_mtu = ENA_MIN_MTU; 2150 dev_info->max_mtu = adapter->max_mtu; 2151 dev_info->max_mac_addrs = 1; 2152 2153 dev_info->max_rx_queues = adapter->max_num_io_queues; 2154 dev_info->max_tx_queues = adapter->max_num_io_queues; 2155 dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 2156 2157 dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size; 2158 dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2159 dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2160 adapter->max_rx_sgl_size); 2161 dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2162 adapter->max_rx_sgl_size); 2163 2164 dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size; 2165 dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2166 dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2167 adapter->max_tx_sgl_size); 2168 dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2169 adapter->max_tx_sgl_size); 2170 2171 dev_info->default_rxportconf.ring_size = ENA_DEFAULT_RING_SIZE; 2172 dev_info->default_txportconf.ring_size = ENA_DEFAULT_RING_SIZE; 2173 2174 return 0; 2175 } 2176 2177 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len) 2178 { 2179 mbuf->data_len = len; 2180 mbuf->data_off = RTE_PKTMBUF_HEADROOM; 2181 mbuf->refcnt = 1; 2182 mbuf->next = NULL; 2183 } 2184 2185 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, 2186 struct ena_com_rx_buf_info *ena_bufs, 2187 uint32_t descs, 2188 uint16_t *next_to_clean, 2189 uint8_t offset) 2190 { 2191 struct rte_mbuf *mbuf; 2192 struct rte_mbuf *mbuf_head; 2193 struct ena_rx_buffer *rx_info; 2194 int rc; 2195 uint16_t ntc, len, req_id, buf = 0; 2196 2197 if (unlikely(descs == 0)) 2198 return NULL; 2199 2200 ntc = *next_to_clean; 2201 2202 len = ena_bufs[buf].len; 2203 req_id = ena_bufs[buf].req_id; 2204 2205 rx_info = &rx_ring->rx_buffer_info[req_id]; 2206 2207 mbuf = rx_info->mbuf; 2208 RTE_ASSERT(mbuf != NULL); 2209 2210 ena_init_rx_mbuf(mbuf, len); 2211 2212 /* Fill the mbuf head with the data specific for 1st segment. 
*/ 2213 mbuf_head = mbuf; 2214 mbuf_head->nb_segs = descs; 2215 mbuf_head->port = rx_ring->port_id; 2216 mbuf_head->pkt_len = len; 2217 mbuf_head->data_off += offset; 2218 2219 rx_info->mbuf = NULL; 2220 rx_ring->empty_rx_reqs[ntc] = req_id; 2221 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 2222 2223 while (--descs) { 2224 ++buf; 2225 len = ena_bufs[buf].len; 2226 req_id = ena_bufs[buf].req_id; 2227 2228 rx_info = &rx_ring->rx_buffer_info[req_id]; 2229 RTE_ASSERT(rx_info->mbuf != NULL); 2230 2231 if (unlikely(len == 0)) { 2232 /* 2233 * Some devices can pass descriptor with the length 0. 2234 * To avoid confusion, the PMD is simply putting the 2235 * descriptor back, as it was never used. We'll avoid 2236 * mbuf allocation that way. 2237 */ 2238 rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq, 2239 rx_info->mbuf, req_id); 2240 if (unlikely(rc != 0)) { 2241 /* Free the mbuf in case of an error. */ 2242 rte_mbuf_raw_free(rx_info->mbuf); 2243 } else { 2244 /* 2245 * If there was no error, just exit the loop as 2246 * 0 length descriptor is always the last one. 2247 */ 2248 break; 2249 } 2250 } else { 2251 /* Create an mbuf chain. */ 2252 mbuf->next = rx_info->mbuf; 2253 mbuf = mbuf->next; 2254 2255 ena_init_rx_mbuf(mbuf, len); 2256 mbuf_head->pkt_len += len; 2257 } 2258 2259 /* 2260 * Mark the descriptor as depleted and perform necessary 2261 * cleanup. 2262 * This code will execute in two cases: 2263 * 1. Descriptor len was greater than 0 - normal situation. 2264 * 2. Descriptor len was 0 and we failed to add the descriptor 2265 * to the device. In that situation, we should try to add 2266 * the mbuf again in the populate routine and mark the 2267 * descriptor as used up by the device. 2268 */ 2269 rx_info->mbuf = NULL; 2270 rx_ring->empty_rx_reqs[ntc] = req_id; 2271 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 2272 } 2273 2274 *next_to_clean = ntc; 2275 2276 return mbuf_head; 2277 } 2278 2279 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 2280 uint16_t nb_pkts) 2281 { 2282 struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 2283 unsigned int free_queue_entries; 2284 uint16_t next_to_clean = rx_ring->next_to_clean; 2285 uint16_t descs_in_use; 2286 struct rte_mbuf *mbuf; 2287 uint16_t completed; 2288 struct ena_com_rx_ctx ena_rx_ctx; 2289 int i, rc = 0; 2290 bool fill_hash; 2291 2292 #ifdef RTE_ETHDEV_DEBUG_RX 2293 /* Check adapter state */ 2294 if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 2295 PMD_RX_LOG(ALERT, 2296 "Trying to receive pkts while device is NOT running\n"); 2297 return 0; 2298 } 2299 #endif 2300 2301 fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH; 2302 2303 descs_in_use = rx_ring->ring_size - 2304 ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1; 2305 nb_pkts = RTE_MIN(descs_in_use, nb_pkts); 2306 2307 for (completed = 0; completed < nb_pkts; completed++) { 2308 ena_rx_ctx.max_bufs = rx_ring->sgl_size; 2309 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 2310 ena_rx_ctx.descs = 0; 2311 ena_rx_ctx.pkt_offset = 0; 2312 /* receive packet context */ 2313 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 2314 rx_ring->ena_com_io_sq, 2315 &ena_rx_ctx); 2316 if (unlikely(rc)) { 2317 PMD_RX_LOG(ERR, 2318 "Failed to get the packet from the device, rc: %d\n", 2319 rc); 2320 if (rc == ENA_COM_NO_SPACE) { 2321 ++rx_ring->rx_stats.bad_desc_num; 2322 rx_ring->adapter->reset_reason = 2323 ENA_REGS_RESET_TOO_MANY_RX_DESCS; 2324 } else { 2325 ++rx_ring->rx_stats.bad_req_id; 2326 rx_ring->adapter->reset_reason = 
2327 ENA_REGS_RESET_INV_RX_REQ_ID; 2328 } 2329 rx_ring->adapter->trigger_reset = true; 2330 return 0; 2331 } 2332 2333 mbuf = ena_rx_mbuf(rx_ring, 2334 ena_rx_ctx.ena_bufs, 2335 ena_rx_ctx.descs, 2336 &next_to_clean, 2337 ena_rx_ctx.pkt_offset); 2338 if (unlikely(mbuf == NULL)) { 2339 for (i = 0; i < ena_rx_ctx.descs; ++i) { 2340 rx_ring->empty_rx_reqs[next_to_clean] = 2341 rx_ring->ena_bufs[i].req_id; 2342 next_to_clean = ENA_IDX_NEXT_MASKED( 2343 next_to_clean, rx_ring->size_mask); 2344 } 2345 break; 2346 } 2347 2348 /* fill mbuf attributes if any */ 2349 ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx, fill_hash); 2350 2351 if (unlikely(mbuf->ol_flags & 2352 (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD))) { 2353 rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors); 2354 ++rx_ring->rx_stats.bad_csum; 2355 } 2356 2357 rx_pkts[completed] = mbuf; 2358 rx_ring->rx_stats.bytes += mbuf->pkt_len; 2359 } 2360 2361 rx_ring->rx_stats.cnt += completed; 2362 rx_ring->next_to_clean = next_to_clean; 2363 2364 free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq); 2365 2366 /* Burst refill to save doorbells, memory barriers, const interval */ 2367 if (free_queue_entries >= rx_ring->rx_free_thresh) { 2368 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); 2369 ena_populate_rx_queue(rx_ring, free_queue_entries); 2370 } 2371 2372 return completed; 2373 } 2374 2375 static uint16_t 2376 eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2377 uint16_t nb_pkts) 2378 { 2379 int32_t ret; 2380 uint32_t i; 2381 struct rte_mbuf *m; 2382 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 2383 struct ena_adapter *adapter = tx_ring->adapter; 2384 struct rte_ipv4_hdr *ip_hdr; 2385 uint64_t ol_flags; 2386 uint64_t l4_csum_flag; 2387 uint64_t dev_offload_capa; 2388 uint16_t frag_field; 2389 bool need_pseudo_csum; 2390 2391 dev_offload_capa = adapter->offloads.tx_offloads; 2392 for (i = 0; i != nb_pkts; i++) { 2393 m = tx_pkts[i]; 2394 ol_flags = m->ol_flags; 2395 2396 /* Check if any offload flag was set */ 2397 if (ol_flags == 0) 2398 continue; 2399 2400 l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK; 2401 /* SCTP checksum offload is not supported by the ENA. */ 2402 if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) || 2403 l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) { 2404 PMD_TX_LOG(DEBUG, 2405 "mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64 "\n", 2406 i, ol_flags); 2407 rte_errno = ENOTSUP; 2408 return i; 2409 } 2410 2411 #ifdef RTE_LIBRTE_ETHDEV_DEBUG 2412 /* Check if requested offload is also enabled for the queue */ 2413 if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM && 2414 !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) || 2415 (l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM && 2416 !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) || 2417 (l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM && 2418 !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) { 2419 PMD_TX_LOG(DEBUG, 2420 "mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n", 2421 i, m->nb_segs, tx_ring->id); 2422 rte_errno = EINVAL; 2423 return i; 2424 } 2425 2426 /* The caller is obligated to set l2 and l3 len if any cksum 2427 * offload is enabled. 
2428 */ 2429 if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) && 2430 (m->l2_len == 0 || m->l3_len == 0))) { 2431 PMD_TX_LOG(DEBUG, 2432 "mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n", 2433 i); 2434 rte_errno = EINVAL; 2435 return i; 2436 } 2437 ret = rte_validate_tx_offload(m); 2438 if (ret != 0) { 2439 rte_errno = -ret; 2440 return i; 2441 } 2442 #endif 2443 2444 /* Verify HW support for requested offloads and determine if 2445 * pseudo header checksum is needed. 2446 */ 2447 need_pseudo_csum = false; 2448 if (ol_flags & RTE_MBUF_F_TX_IPV4) { 2449 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM && 2450 !(dev_offload_capa & ENA_L3_IPV4_CSUM)) { 2451 rte_errno = ENOTSUP; 2452 return i; 2453 } 2454 2455 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG && 2456 !(dev_offload_capa & ENA_IPV4_TSO)) { 2457 rte_errno = ENOTSUP; 2458 return i; 2459 } 2460 2461 /* Check HW capabilities and if pseudo csum is needed 2462 * for L4 offloads. 2463 */ 2464 if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM && 2465 !(dev_offload_capa & ENA_L4_IPV4_CSUM)) { 2466 if (dev_offload_capa & 2467 ENA_L4_IPV4_CSUM_PARTIAL) { 2468 need_pseudo_csum = true; 2469 } else { 2470 rte_errno = ENOTSUP; 2471 return i; 2472 } 2473 } 2474 2475 /* Parse the DF flag */ 2476 ip_hdr = rte_pktmbuf_mtod_offset(m, 2477 struct rte_ipv4_hdr *, m->l2_len); 2478 frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset); 2479 if (frag_field & RTE_IPV4_HDR_DF_FLAG) { 2480 m->packet_type |= RTE_PTYPE_L4_NONFRAG; 2481 } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 2482 /* In case we are supposed to TSO and have DF 2483 * not set (DF=0) hardware must be provided with 2484 * partial checksum. 2485 */ 2486 need_pseudo_csum = true; 2487 } 2488 } else if (ol_flags & RTE_MBUF_F_TX_IPV6) { 2489 /* There is no support for IPv6 TSO as for now. 
*/ 2490 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 2491 rte_errno = ENOTSUP; 2492 return i; 2493 } 2494 2495 /* Check HW capabilities and if pseudo csum is needed */ 2496 if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM && 2497 !(dev_offload_capa & ENA_L4_IPV6_CSUM)) { 2498 if (dev_offload_capa & 2499 ENA_L4_IPV6_CSUM_PARTIAL) { 2500 need_pseudo_csum = true; 2501 } else { 2502 rte_errno = ENOTSUP; 2503 return i; 2504 } 2505 } 2506 } 2507 2508 if (need_pseudo_csum) { 2509 ret = rte_net_intel_cksum_flags_prepare(m, ol_flags); 2510 if (ret != 0) { 2511 rte_errno = -ret; 2512 return i; 2513 } 2514 } 2515 } 2516 2517 return i; 2518 } 2519 2520 static void ena_update_hints(struct ena_adapter *adapter, 2521 struct ena_admin_ena_hw_hints *hints) 2522 { 2523 if (hints->admin_completion_tx_timeout) 2524 adapter->ena_dev.admin_queue.completion_timeout = 2525 hints->admin_completion_tx_timeout * 1000; 2526 2527 if (hints->mmio_read_timeout) 2528 /* convert to usec */ 2529 adapter->ena_dev.mmio_read.reg_read_to = 2530 hints->mmio_read_timeout * 1000; 2531 2532 if (hints->missing_tx_completion_timeout) { 2533 if (hints->missing_tx_completion_timeout == 2534 ENA_HW_HINTS_NO_TIMEOUT) { 2535 adapter->missing_tx_completion_to = 2536 ENA_HW_HINTS_NO_TIMEOUT; 2537 } else { 2538 /* Convert from msecs to ticks */ 2539 adapter->missing_tx_completion_to = rte_get_timer_hz() * 2540 hints->missing_tx_completion_timeout / 1000; 2541 adapter->tx_cleanup_stall_delay = 2542 adapter->missing_tx_completion_to / 2; 2543 } 2544 } 2545 2546 if (hints->driver_watchdog_timeout) { 2547 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2548 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2549 else 2550 // Convert msecs to ticks 2551 adapter->keep_alive_timeout = 2552 (hints->driver_watchdog_timeout * 2553 rte_get_timer_hz()) / 1000; 2554 } 2555 } 2556 2557 static int ena_check_space_and_linearize_mbuf(struct ena_ring *tx_ring, 2558 struct rte_mbuf *mbuf) 2559 { 2560 struct ena_com_dev *ena_dev; 2561 int num_segments, header_len, rc; 2562 2563 ena_dev = &tx_ring->adapter->ena_dev; 2564 num_segments = mbuf->nb_segs; 2565 header_len = mbuf->data_len; 2566 2567 if (likely(num_segments < tx_ring->sgl_size)) 2568 goto checkspace; 2569 2570 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && 2571 (num_segments == tx_ring->sgl_size) && 2572 (header_len < tx_ring->tx_max_header_size)) 2573 goto checkspace; 2574 2575 /* Checking for space for 2 additional metadata descriptors due to 2576 * possible header split and metadata descriptor. 
Linearization will 2577 * be needed so we reduce the segments number from num_segments to 1 2578 */ 2579 if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3)) { 2580 PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n"); 2581 return ENA_COM_NO_MEM; 2582 } 2583 ++tx_ring->tx_stats.linearize; 2584 rc = rte_pktmbuf_linearize(mbuf); 2585 if (unlikely(rc)) { 2586 PMD_TX_LOG(WARNING, "Mbuf linearize failed\n"); 2587 rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); 2588 ++tx_ring->tx_stats.linearize_failed; 2589 return rc; 2590 } 2591 2592 return 0; 2593 2594 checkspace: 2595 /* Checking for space for 2 additional metadata descriptors due to 2596 * possible header split and metadata descriptor 2597 */ 2598 if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 2599 num_segments + 2)) { 2600 PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n"); 2601 return ENA_COM_NO_MEM; 2602 } 2603 2604 return 0; 2605 } 2606 2607 static void ena_tx_map_mbuf(struct ena_ring *tx_ring, 2608 struct ena_tx_buffer *tx_info, 2609 struct rte_mbuf *mbuf, 2610 void **push_header, 2611 uint16_t *header_len) 2612 { 2613 struct ena_com_buf *ena_buf; 2614 uint16_t delta, seg_len, push_len; 2615 2616 delta = 0; 2617 seg_len = mbuf->data_len; 2618 2619 tx_info->mbuf = mbuf; 2620 ena_buf = tx_info->bufs; 2621 2622 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2623 /* 2624 * Tx header might be (and will be in most cases) smaller than 2625 * tx_max_header_size. But it's not an issue to send more data 2626 * to the device, than actually needed if the mbuf size is 2627 * greater than tx_max_header_size. 2628 */ 2629 push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size); 2630 *header_len = push_len; 2631 2632 if (likely(push_len <= seg_len)) { 2633 /* If the push header is in the single segment, then 2634 * just point it to the 1st mbuf data. 2635 */ 2636 *push_header = rte_pktmbuf_mtod(mbuf, uint8_t *); 2637 } else { 2638 /* If the push header lays in the several segments, copy 2639 * it to the intermediate buffer. 
2640 */ 2641 rte_pktmbuf_read(mbuf, 0, push_len, 2642 tx_ring->push_buf_intermediate_buf); 2643 *push_header = tx_ring->push_buf_intermediate_buf; 2644 delta = push_len - seg_len; 2645 } 2646 } else { 2647 *push_header = NULL; 2648 *header_len = 0; 2649 push_len = 0; 2650 } 2651 2652 /* Process first segment taking into consideration pushed header */ 2653 if (seg_len > push_len) { 2654 ena_buf->paddr = mbuf->buf_iova + 2655 mbuf->data_off + 2656 push_len; 2657 ena_buf->len = seg_len - push_len; 2658 ena_buf++; 2659 tx_info->num_of_bufs++; 2660 } 2661 2662 while ((mbuf = mbuf->next) != NULL) { 2663 seg_len = mbuf->data_len; 2664 2665 /* Skip mbufs if whole data is pushed as a header */ 2666 if (unlikely(delta > seg_len)) { 2667 delta -= seg_len; 2668 continue; 2669 } 2670 2671 ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta; 2672 ena_buf->len = seg_len - delta; 2673 ena_buf++; 2674 tx_info->num_of_bufs++; 2675 2676 delta = 0; 2677 } 2678 } 2679 2680 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) 2681 { 2682 struct ena_tx_buffer *tx_info; 2683 struct ena_com_tx_ctx ena_tx_ctx = { { 0 } }; 2684 uint16_t next_to_use; 2685 uint16_t header_len; 2686 uint16_t req_id; 2687 void *push_header; 2688 int nb_hw_desc; 2689 int rc; 2690 2691 rc = ena_check_space_and_linearize_mbuf(tx_ring, mbuf); 2692 if (unlikely(rc)) 2693 return rc; 2694 2695 next_to_use = tx_ring->next_to_use; 2696 2697 req_id = tx_ring->empty_tx_reqs[next_to_use]; 2698 tx_info = &tx_ring->tx_buffer_info[req_id]; 2699 tx_info->num_of_bufs = 0; 2700 2701 ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len); 2702 2703 ena_tx_ctx.ena_bufs = tx_info->bufs; 2704 ena_tx_ctx.push_header = push_header; 2705 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 2706 ena_tx_ctx.req_id = req_id; 2707 ena_tx_ctx.header_len = header_len; 2708 2709 /* Set Tx offloads flags, if applicable */ 2710 ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads, 2711 tx_ring->disable_meta_caching); 2712 2713 if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, 2714 &ena_tx_ctx))) { 2715 PMD_TX_LOG(DEBUG, 2716 "LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n", 2717 tx_ring->id); 2718 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2719 tx_ring->tx_stats.doorbells++; 2720 tx_ring->pkts_without_db = false; 2721 } 2722 2723 /* prepare the packet's descriptors to dma engine */ 2724 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, 2725 &nb_hw_desc); 2726 if (unlikely(rc)) { 2727 PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc); 2728 ++tx_ring->tx_stats.prepare_ctx_err; 2729 tx_ring->adapter->reset_reason = 2730 ENA_REGS_RESET_DRIVER_INVALID_STATE; 2731 tx_ring->adapter->trigger_reset = true; 2732 return rc; 2733 } 2734 2735 tx_info->tx_descs = nb_hw_desc; 2736 tx_info->timestamp = rte_get_timer_cycles(); 2737 2738 tx_ring->tx_stats.cnt++; 2739 tx_ring->tx_stats.bytes += mbuf->pkt_len; 2740 2741 tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, 2742 tx_ring->size_mask); 2743 2744 return 0; 2745 } 2746 2747 static void ena_tx_cleanup(struct ena_ring *tx_ring) 2748 { 2749 unsigned int total_tx_descs = 0; 2750 uint16_t cleanup_budget; 2751 uint16_t next_to_clean = tx_ring->next_to_clean; 2752 2753 /* Attempt to release all Tx descriptors (ring_size - 1 -> size_mask) */ 2754 cleanup_budget = tx_ring->size_mask; 2755 2756 while (likely(total_tx_descs < cleanup_budget)) { 2757 struct rte_mbuf *mbuf; 2758 struct ena_tx_buffer *tx_info; 2759 uint16_t req_id; 
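		/* Each iteration reclaims one completed Tx packet: fetch the
		 * completed request id from the completion queue, validate it,
		 * free the attached mbuf and return the req_id to the ring's
		 * free list so ena_xmit_mbuf() can reuse it.
		 */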
2760 2761 if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0) 2762 break; 2763 2764 if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0)) 2765 break; 2766 2767 /* Get Tx info & store how many descs were processed */ 2768 tx_info = &tx_ring->tx_buffer_info[req_id]; 2769 tx_info->timestamp = 0; 2770 2771 mbuf = tx_info->mbuf; 2772 rte_pktmbuf_free(mbuf); 2773 2774 tx_info->mbuf = NULL; 2775 tx_ring->empty_tx_reqs[next_to_clean] = req_id; 2776 2777 total_tx_descs += tx_info->tx_descs; 2778 2779 /* Put back descriptor to the ring for reuse */ 2780 next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean, 2781 tx_ring->size_mask); 2782 } 2783 2784 if (likely(total_tx_descs > 0)) { 2785 /* acknowledge completion of sent packets */ 2786 tx_ring->next_to_clean = next_to_clean; 2787 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); 2788 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); 2789 } 2790 2791 /* Notify completion handler that the cleanup was just called */ 2792 tx_ring->last_cleanup_ticks = rte_get_timer_cycles(); 2793 } 2794 2795 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2796 uint16_t nb_pkts) 2797 { 2798 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 2799 int available_desc; 2800 uint16_t sent_idx = 0; 2801 2802 #ifdef RTE_ETHDEV_DEBUG_TX 2803 /* Check adapter state */ 2804 if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 2805 PMD_TX_LOG(ALERT, 2806 "Trying to xmit pkts while device is NOT running\n"); 2807 return 0; 2808 } 2809 #endif 2810 2811 for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { 2812 if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx])) 2813 break; 2814 tx_ring->pkts_without_db = true; 2815 rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4, 2816 tx_ring->size_mask)]); 2817 } 2818 2819 available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq); 2820 tx_ring->tx_stats.available_desc = available_desc; 2821 2822 /* If there are ready packets to be xmitted... */ 2823 if (likely(tx_ring->pkts_without_db)) { 2824 /* ...let HW do its best :-) */ 2825 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2826 tx_ring->tx_stats.doorbells++; 2827 tx_ring->pkts_without_db = false; 2828 } 2829 2830 if (available_desc < tx_ring->tx_free_thresh) 2831 ena_tx_cleanup(tx_ring); 2832 2833 tx_ring->tx_stats.available_desc = 2834 ena_com_free_q_entries(tx_ring->ena_com_io_sq); 2835 tx_ring->tx_stats.tx_poll++; 2836 2837 return sent_idx; 2838 } 2839 2840 int ena_copy_eni_stats(struct ena_adapter *adapter) 2841 { 2842 struct ena_admin_eni_stats admin_eni_stats; 2843 int rc; 2844 2845 rte_spinlock_lock(&adapter->admin_lock); 2846 rc = ena_com_get_eni_stats(&adapter->ena_dev, &admin_eni_stats); 2847 rte_spinlock_unlock(&adapter->admin_lock); 2848 if (rc != 0) { 2849 if (rc == ENA_COM_UNSUPPORTED) { 2850 PMD_DRV_LOG(DEBUG, 2851 "Retrieving ENI metrics is not supported\n"); 2852 } else { 2853 PMD_DRV_LOG(WARNING, 2854 "Failed to get ENI metrics, rc: %d\n", rc); 2855 } 2856 return rc; 2857 } 2858 2859 rte_memcpy(&adapter->eni_stats, &admin_eni_stats, 2860 sizeof(struct ena_stats_eni)); 2861 2862 return 0; 2863 } 2864 2865 /** 2866 * DPDK callback to retrieve names of extended device statistics 2867 * 2868 * @param dev 2869 * Pointer to Ethernet device structure. 2870 * @param[out] xstats_names 2871 * Buffer to insert names into. 2872 * @param n 2873 * Number of names. 2874 * 2875 * @return 2876 * Number of xstats names. 
2877 */ 2878 static int ena_xstats_get_names(struct rte_eth_dev *dev, 2879 struct rte_eth_xstat_name *xstats_names, 2880 unsigned int n) 2881 { 2882 unsigned int xstats_count = ena_xstats_calc_num(dev->data); 2883 unsigned int stat, i, count = 0; 2884 2885 if (n < xstats_count || !xstats_names) 2886 return xstats_count; 2887 2888 for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) 2889 strcpy(xstats_names[count].name, 2890 ena_stats_global_strings[stat].name); 2891 2892 for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) 2893 strcpy(xstats_names[count].name, 2894 ena_stats_eni_strings[stat].name); 2895 2896 for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) 2897 for (i = 0; i < dev->data->nb_rx_queues; i++, count++) 2898 snprintf(xstats_names[count].name, 2899 sizeof(xstats_names[count].name), 2900 "rx_q%d_%s", i, 2901 ena_stats_rx_strings[stat].name); 2902 2903 for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) 2904 for (i = 0; i < dev->data->nb_tx_queues; i++, count++) 2905 snprintf(xstats_names[count].name, 2906 sizeof(xstats_names[count].name), 2907 "tx_q%d_%s", i, 2908 ena_stats_tx_strings[stat].name); 2909 2910 return xstats_count; 2911 } 2912 2913 /** 2914 * DPDK callback to get extended device statistics. 2915 * 2916 * @param dev 2917 * Pointer to Ethernet device structure. 2918 * @param[out] stats 2919 * Stats table output buffer. 2920 * @param n 2921 * The size of the stats table. 2922 * 2923 * @return 2924 * Number of xstats on success, negative on failure. 2925 */ 2926 static int ena_xstats_get(struct rte_eth_dev *dev, 2927 struct rte_eth_xstat *xstats, 2928 unsigned int n) 2929 { 2930 struct ena_adapter *adapter = dev->data->dev_private; 2931 unsigned int xstats_count = ena_xstats_calc_num(dev->data); 2932 unsigned int stat, i, count = 0; 2933 int stat_offset; 2934 void *stats_begin; 2935 2936 if (n < xstats_count) 2937 return xstats_count; 2938 2939 if (!xstats) 2940 return 0; 2941 2942 for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) { 2943 stat_offset = ena_stats_global_strings[stat].stat_offset; 2944 stats_begin = &adapter->dev_stats; 2945 2946 xstats[count].id = count; 2947 xstats[count].value = *((uint64_t *) 2948 ((char *)stats_begin + stat_offset)); 2949 } 2950 2951 /* Even if the function below fails, we should copy previous (or initial 2952 * values) to keep structure of rte_eth_xstat consistent. 
 */
	ena_copy_eni_stats(adapter);
	for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) {
		stat_offset = ena_stats_eni_strings[stat].stat_offset;
		stats_begin = &adapter->eni_stats;

		xstats[count].id = count;
		xstats[count].value = *((uint64_t *)
			((char *)stats_begin + stat_offset));
	}

	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
			stat_offset = ena_stats_rx_strings[stat].stat_offset;
			stats_begin = &adapter->rx_ring[i].rx_stats;

			xstats[count].id = count;
			xstats[count].value = *((uint64_t *)
				((char *)stats_begin + stat_offset));
		}
	}

	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
			stat_offset = ena_stats_tx_strings[stat].stat_offset;
			stats_begin = &adapter->tx_ring[i].tx_stats;

			xstats[count].id = count;
			xstats[count].value = *((uint64_t *)
				((char *)stats_begin + stat_offset));
		}
	}

	return count;
}

static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	uint64_t id;
	uint64_t rx_entries, tx_entries;
	unsigned int i;
	int qid;
	int valid = 0;
	bool was_eni_copied = false;

	for (i = 0; i < n; ++i) {
		id = ids[i];
		/* Check if id belongs to global statistics */
		if (id < ENA_STATS_ARRAY_GLOBAL) {
			values[i] = *((uint64_t *)&adapter->dev_stats + id);
			++valid;
			continue;
		}

		/* Check if id belongs to ENI statistics */
		id -= ENA_STATS_ARRAY_GLOBAL;
		if (id < ENA_STATS_ARRAY_ENI) {
			/* Avoid reading ENI stats multiple times in a single
			 * function call, as it requires communication with the
			 * admin queue.
			 */
			if (!was_eni_copied) {
				was_eni_copied = true;
				ena_copy_eni_stats(adapter);
			}
			values[i] = *((uint64_t *)&adapter->eni_stats + id);
			++valid;
			continue;
		}

		/* Check if id belongs to rx queue statistics */
		id -= ENA_STATS_ARRAY_ENI;
		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
		if (id < rx_entries) {
			qid = id % dev->data->nb_rx_queues;
			id /= dev->data->nb_rx_queues;
			values[i] = *((uint64_t *)
				&adapter->rx_ring[qid].rx_stats + id);
			++valid;
			continue;
		}
		/* Check if id belongs to tx queue statistics */
		id -= rx_entries;
		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
		if (id < tx_entries) {
			qid = id % dev->data->nb_tx_queues;
			id /= dev->data->nb_tx_queues;
			values[i] = *((uint64_t *)
				&adapter->tx_ring[qid].tx_stats + id);
			++valid;
			continue;
		}
	}

	return valid;
}

static int ena_process_bool_devarg(const char *key,
				   const char *value,
				   void *opaque)
{
	struct ena_adapter *adapter = opaque;
	bool bool_value;

	/* Parse the value. */
	if (strcmp(value, "1") == 0) {
		bool_value = true;
	} else if (strcmp(value, "0") == 0) {
		bool_value = false;
	} else {
		PMD_INIT_LOG(ERR,
			"Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
			value, key);
		return -EINVAL;
	}

	/* Now, assign it to the proper adapter field.
*/ 3074 if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0) 3075 adapter->use_large_llq_hdr = bool_value; 3076 3077 return 0; 3078 } 3079 3080 static int ena_parse_devargs(struct ena_adapter *adapter, 3081 struct rte_devargs *devargs) 3082 { 3083 static const char * const allowed_args[] = { 3084 ENA_DEVARG_LARGE_LLQ_HDR, 3085 NULL, 3086 }; 3087 struct rte_kvargs *kvlist; 3088 int rc; 3089 3090 if (devargs == NULL) 3091 return 0; 3092 3093 kvlist = rte_kvargs_parse(devargs->args, allowed_args); 3094 if (kvlist == NULL) { 3095 PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n", 3096 devargs->args); 3097 return -EINVAL; 3098 } 3099 3100 rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR, 3101 ena_process_bool_devarg, adapter); 3102 3103 rte_kvargs_free(kvlist); 3104 3105 return rc; 3106 } 3107 3108 static int ena_setup_rx_intr(struct rte_eth_dev *dev) 3109 { 3110 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3111 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 3112 int rc; 3113 uint16_t vectors_nb, i; 3114 bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq; 3115 3116 if (!rx_intr_requested) 3117 return 0; 3118 3119 if (!rte_intr_cap_multiple(intr_handle)) { 3120 PMD_DRV_LOG(ERR, 3121 "Rx interrupt requested, but it isn't supported by the PCI driver\n"); 3122 return -ENOTSUP; 3123 } 3124 3125 /* Disable interrupt mapping before the configuration starts. */ 3126 rte_intr_disable(intr_handle); 3127 3128 /* Verify if there are enough vectors available. */ 3129 vectors_nb = dev->data->nb_rx_queues; 3130 if (vectors_nb > RTE_MAX_RXTX_INTR_VEC_ID) { 3131 PMD_DRV_LOG(ERR, 3132 "Too many Rx interrupts requested, maximum number: %d\n", 3133 RTE_MAX_RXTX_INTR_VEC_ID); 3134 rc = -ENOTSUP; 3135 goto enable_intr; 3136 } 3137 3138 /* Allocate the vector list */ 3139 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 3140 dev->data->nb_rx_queues)) { 3141 PMD_DRV_LOG(ERR, 3142 "Failed to allocate interrupt vector for %d queues\n", 3143 dev->data->nb_rx_queues); 3144 rc = -ENOMEM; 3145 goto enable_intr; 3146 } 3147 3148 rc = rte_intr_efd_enable(intr_handle, vectors_nb); 3149 if (rc != 0) 3150 goto free_intr_vec; 3151 3152 if (!rte_intr_allow_others(intr_handle)) { 3153 PMD_DRV_LOG(ERR, 3154 "Not enough interrupts available to use both ENA Admin and Rx interrupts\n"); 3155 goto disable_intr_efd; 3156 } 3157 3158 for (i = 0; i < vectors_nb; ++i) 3159 if (rte_intr_vec_list_index_set(intr_handle, i, 3160 RTE_INTR_VEC_RXTX_OFFSET + i)) 3161 goto disable_intr_efd; 3162 3163 rte_intr_enable(intr_handle); 3164 return 0; 3165 3166 disable_intr_efd: 3167 rte_intr_efd_disable(intr_handle); 3168 free_intr_vec: 3169 rte_intr_vec_list_free(intr_handle); 3170 enable_intr: 3171 rte_intr_enable(intr_handle); 3172 return rc; 3173 } 3174 3175 static void ena_rx_queue_intr_set(struct rte_eth_dev *dev, 3176 uint16_t queue_id, 3177 bool unmask) 3178 { 3179 struct ena_adapter *adapter = dev->data->dev_private; 3180 struct ena_ring *rxq = &adapter->rx_ring[queue_id]; 3181 struct ena_eth_io_intr_reg intr_reg; 3182 3183 ena_com_update_intr_reg(&intr_reg, 0, 0, unmask); 3184 ena_com_unmask_intr(rxq->ena_com_io_cq, &intr_reg); 3185 } 3186 3187 static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev, 3188 uint16_t queue_id) 3189 { 3190 ena_rx_queue_intr_set(dev, queue_id, true); 3191 3192 return 0; 3193 } 3194 3195 static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev, 3196 uint16_t queue_id) 3197 { 3198 ena_rx_queue_intr_set(dev, queue_id, false); 3199 3200 return 0; 3201 } 3202 3203 
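/* Usage note (illustrative only, not compiled logic): the devarg handled by
 * ena_parse_devargs() above is passed together with the device on the EAL
 * allow list. For example, with a hypothetical PCI address 00:06.0, large LLQ
 * headers could be requested as "dpdk-testpmd -a 00:06.0,large_llq_hdr=1 -- -i";
 * any value other than '0' or '1' is rejected by ena_process_bool_devarg().
 */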
/********************************************************************* 3204 * PMD configuration 3205 *********************************************************************/ 3206 static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 3207 struct rte_pci_device *pci_dev) 3208 { 3209 return rte_eth_dev_pci_generic_probe(pci_dev, 3210 sizeof(struct ena_adapter), eth_ena_dev_init); 3211 } 3212 3213 static int eth_ena_pci_remove(struct rte_pci_device *pci_dev) 3214 { 3215 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit); 3216 } 3217 3218 static struct rte_pci_driver rte_ena_pmd = { 3219 .id_table = pci_id_ena_map, 3220 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 3221 RTE_PCI_DRV_WC_ACTIVATE, 3222 .probe = eth_ena_pci_probe, 3223 .remove = eth_ena_pci_remove, 3224 }; 3225 3226 RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd); 3227 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map); 3228 RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci"); 3229 RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>"); 3230 RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE); 3231 RTE_LOG_REGISTER_SUFFIX(ena_logtype_driver, driver, NOTICE); 3232 #ifdef RTE_ETHDEV_DEBUG_RX 3233 RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, DEBUG); 3234 #endif 3235 #ifdef RTE_ETHDEV_DEBUG_TX 3236 RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, DEBUG); 3237 #endif 3238 RTE_LOG_REGISTER_SUFFIX(ena_logtype_com, com, WARNING); 3239 3240 /****************************************************************************** 3241 ******************************** AENQ Handlers ******************************* 3242 *****************************************************************************/ 3243 static void ena_update_on_link_change(void *adapter_data, 3244 struct ena_admin_aenq_entry *aenq_e) 3245 { 3246 struct rte_eth_dev *eth_dev = adapter_data; 3247 struct ena_adapter *adapter = eth_dev->data->dev_private; 3248 struct ena_admin_aenq_link_change_desc *aenq_link_desc; 3249 uint32_t status; 3250 3251 aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; 3252 3253 status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc); 3254 adapter->link_status = status; 3255 3256 ena_link_update(eth_dev, 0); 3257 rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); 3258 } 3259 3260 static void ena_notification(void *adapter_data, 3261 struct ena_admin_aenq_entry *aenq_e) 3262 { 3263 struct rte_eth_dev *eth_dev = adapter_data; 3264 struct ena_adapter *adapter = eth_dev->data->dev_private; 3265 struct ena_admin_ena_hw_hints *hints; 3266 3267 if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION) 3268 PMD_DRV_LOG(WARNING, "Invalid AENQ group: %x. 
Expected: %x\n",
			aenq_e->aenq_common_desc.group,
			ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrome) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid AENQ notification syndrome: %d\n",
			aenq_e->aenq_common_desc.syndrome);
	}
}

static void ena_keep_alive(void *adapter_data,
			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev = adapter_data;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_admin_aenq_keep_alive_desc *desc;
	uint64_t rx_drops;
	uint64_t tx_drops;

	adapter->timestamp_wd = rte_get_timer_cycles();

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;

	adapter->drv_stats->rx_drops = rx_drops;
	adapter->dev_stats.tx_drops = tx_drops;
}

/**
 * This handler will be called for an unknown event group or for events with
 * unimplemented handlers.
 **/
static void unimplemented_aenq_handler(__rte_unused void *data,
				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	PMD_DRV_LOG(ERR,
		"Unknown event was received or event with unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
	},
	.unimplemented_handler = unimplemented_aenq_handler
};
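/* Illustrative sketch (application side, not part of this driver): the
 * extended statistics exposed by ena_xstats_get_names()/ena_xstats_get() are
 * consumed through the generic ethdev API, e.g.:
 *
 *	int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = calloc(nb, sizeof(*names));
 *	struct rte_eth_xstat *vals = calloc(nb, sizeof(*vals));
 *	rte_eth_xstats_get_names(port_id, names, nb);
 *	rte_eth_xstats_get(port_id, vals, nb);
 *	// names[i].name is e.g. "rx_q0_cnt"; vals[i].value holds the counter.
 */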