/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_eal_memconfig.h>
#include <rte_net.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	1
#define DRV_MODULE_VER_MINOR	0
#define DRV_MODULE_VER_SUBMINOR	0

#define ENA_IO_TXQ_IDX(q)	(2 * (q))
#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
/* reverse version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)	((q - 1) / 2)

/* While processing submitted and completed descriptors (rx and tx path
 * respectively) in a loop it is desired to:
 *  - perform batch submissions while populating the submission queue
 *  - avoid blocking transmission of other packets during cleanup phase
 * Hence the utilization ratio of 1/8 of a queue size.
 */
#define ENA_RING_DESCS_RATIO(ring_size)	(ring_size / 8)

#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))

#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)

#define ENA_RX_RSS_TABLE_LOG_SIZE	7
#define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)
#define ENA_HASH_KEY_SIZE	40
#define ENA_ETH_SS_STATS	0xFF
#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

enum ethtool_stringset {
	ETH_SS_TEST = 0,
	ETH_SS_STATS,
};

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENA_COM_ENTRY(stat) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_com_stats_admin, stat) \
}

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(tx_timeout),
	ENA_STAT_GLOBAL_ENTRY(io_suspend),
	ENA_STAT_GLOBAL_ENTRY(io_resume),
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(interface_up),
	ENA_STAT_GLOBAL_ENTRY(interface_down),
	ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(queue_stop),
	ENA_STAT_TX_ENTRY(queue_wakeup),
	ENA_STAT_TX_ENTRY(dma_mapping_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(missing_tx_comp),
	ENA_STAT_TX_ENTRY(bad_req_id),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refil_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(page_alloc_fail),
	ENA_STAT_RX_ENTRY(skb_alloc_fail),
	ENA_STAT_RX_ENTRY(dma_mapping_err),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(small_copy_len_pkt),
};

static const struct ena_stats ena_stats_ena_com_strings[] = {
	ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
	ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
	ENA_STAT_ENA_COM_ENTRY(completed_cmd),
	ENA_STAT_ENA_COM_ENTRY(out_of_space),
	ENA_STAT_ENA_COM_ENTRY(no_completion),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM	ARRAY_SIZE(ena_stats_ena_com_strings)

#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
			DEV_TX_OFFLOAD_UDP_CKSUM |\
			DEV_TX_OFFLOAD_IPV4_CKSUM |\
			DEV_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
		       PKT_TX_IP_CKSUM |\
		       PKT_TX_TCP_SEG)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON	0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF	0xEC20
#define PCI_DEVICE_ID_ENA_LLQ_VF	0xEC21

#define ENA_TX_OFFLOAD_MASK	(\
	PKT_TX_L4_MASK |	\
	PKT_TX_IP_CKSUM |	\
	PKT_TX_TCP_SEG)

#define ENA_TX_OFFLOAD_NOTSUP_MASK	\
	(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)

int ena_logtype_init;
int ena_logtype_driver;

static const struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
	{ .device_id = 0 },
};

static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx);
static int ena_dev_configure(struct rte_eth_dev *dev);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static void ena_close(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(void *queue);
static void ena_tx_queue_release(void *queue);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ena_queue_restart(struct ena_ring *ring);
static int ena_queue_restart_all(struct rte_eth_dev *dev,
				 enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static void ena_infos_get(struct rte_eth_dev *dev,
			  struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
					      uint64_t offloads);
static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
					      uint64_t offloads);

static const struct eth_dev_ops ena_dev_ops = {
	.dev_configure = ena_dev_configure,
	.dev_infos_get = ena_infos_get,
	.rx_queue_setup = ena_rx_queue_setup,
	.tx_queue_setup = ena_tx_queue_setup,
	.dev_start = ena_start,
	.link_update = ena_link_update,
	.stats_get = ena_stats_get,
	.mtu_set = ena_mtu_set,
	.rx_queue_release = ena_rx_queue_release,
	.tx_queue_release = ena_tx_queue_release,
	.dev_close = ena_close,
	.reta_update = ena_rss_reta_update,
	.reta_query = ena_rss_reta_query,
};

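/* Added note (not part of the original sources): ena_cpu_to_node() below
 * derives a NUMA node for an I/O queue by indexing the EAL memzone table
 * with the queue id; out-of-range ids fall back to NUMA_NO_NODE
 * (SOCKET_ID_ANY), i.e. the queue memory is not pinned to a NUMA node.
 */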
#define NUMA_NO_NODE	SOCKET_ID_ANY

static inline int ena_cpu_to_node(int cpu)
{
	struct rte_config *config = rte_eal_get_configuration();

	if (likely(cpu < RTE_MAX_MEMZONE))
		return config->mem_config->memzone[cpu].socket_id;

	return NUMA_NO_NODE;
}

static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx)
{
	uint64_t ol_flags = 0;

	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		ol_flags |= PKT_TX_TCP_CKSUM;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		ol_flags |= PKT_TX_UDP_CKSUM;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
		ol_flags |= PKT_TX_IPV4;
	else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
		ol_flags |= PKT_TX_IPV6;

	if (unlikely(ena_rx_ctx->l4_csum_err))
		ol_flags |= PKT_RX_L4_CKSUM_BAD;
	if (unlikely(ena_rx_ctx->l3_csum_err))
		ol_flags |= PKT_RX_IP_CKSUM_BAD;

	mbuf->ol_flags = ol_flags;
}

static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_tx_ctx *ena_tx_ctx,
				       uint64_t queue_offloads)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
	    (queue_offloads & QUEUE_OFFLOADS)) {
		/* check if TSO is required */
		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
			ena_tx_ctx->tso_enable = true;

			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
		}

		/* check if L3 checksum is needed */
		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
			ena_tx_ctx->l3_csum_enable = true;

		if (mbuf->ol_flags & PKT_TX_IPV6) {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
		} else {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

			/* set don't fragment (DF) flag */
			if (mbuf->packet_type &
				(RTE_PTYPE_L4_NONFRAG
				 | RTE_PTYPE_INNER_L4_NONFRAG))
				ena_tx_ctx->df = true;
		}

		/* check if L4 checksum is needed */
		if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
			ena_tx_ctx->l4_csum_enable = true;
		} else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) &&
			   (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
			ena_tx_ctx->l4_csum_enable = true;
		} else {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
			ena_tx_ctx->l4_csum_enable = false;
		}

		ena_meta->mss = mbuf->tso_segsz;
		ena_meta->l3_hdr_len = mbuf->l3_len;
		ena_meta->l3_hdr_offset = mbuf->l2_len;
		/* this param needed only for TSO */
		ena_meta->l3_outer_hdr_len = 0;
		ena_meta->l3_outer_hdr_offset = 0;

		ena_tx_ctx->meta_valid = true;
	} else {
		ena_tx_ctx->meta_valid = false;
	}
}

static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		RTE_LOG(ERR, PMD, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_DPDK;
	host_info->kernel_ver = RTE_VERSION;
	snprintf((char *)host_info->kernel_ver_str,
		 sizeof(host_info->kernel_ver_str),
		 "%s", rte_version());
	host_info->os_dist = RTE_VERSION;
	snprintf((char *)host_info->os_dist_str,
		 sizeof(host_info->os_dist_str),
		 "%s", rte_version());
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR <<
			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
		if (rc != -EPERM)
			goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

static int
ena_get_sset_count(struct rte_eth_dev *dev, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	/* Workaround for clang:
	 * touch internal structures to prevent
	 * compiler error
	 */
	ENA_TOUCH(ena_stats_global_strings);
	ENA_TOUCH(ena_stats_tx_strings);
	ENA_TOUCH(ena_stats_rx_strings);
	ENA_TOUCH(ena_stats_ena_com_strings);

	return dev->data->nb_tx_queues *
		(ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) +
		ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}

static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS);
	if (ss_count <= 0) {
		RTE_LOG(ERR, PMD, "SS count is negative\n");
		return;
	}

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
	if (rc) {
		RTE_LOG(ERR, PMD, "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(&adapter->ena_dev);
	if (rc) {
		RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
		if (rc != -EPERM)
			goto err;
	}

	return;
err:
	ena_com_delete_debug_area(&adapter->ena_dev);
}

static void ena_close(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);

	adapter->state = ENA_ADAPTER_STATE_STOPPED;

	ena_rx_queue_release_all(dev);
	ena_tx_queue_release_all(dev);
}

static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int ret, i;
	u16 entry_value;
	int conf_idx;
	int idx;

	if ((reta_size == 0) || (reta_conf == NULL))
		return -EINVAL;

	if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
		RTE_LOG(WARNING, PMD,
			"indirection table %d is bigger than supported (%d)\n",
			reta_size, ENA_RX_RSS_TABLE_SIZE);
		ret = -EINVAL;
		goto err;
	}

	for (i = 0 ; i < reta_size ; i++) {
		/* each reta_conf is for 64 entries.
		 * to support 128 we use 2 conf of 64
		 */
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
			entry_value =
				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
			ret = ena_com_indirect_table_fill_entry(ena_dev,
								i,
								entry_value);
			if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
				RTE_LOG(ERR, PMD,
					"Cannot fill indirect table\n");
				ret = -ENOTSUP;
				goto err;
			}
		}
	}

	ret = ena_com_indirect_table_set(ena_dev);
	if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
		RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
		ret = -ENOTSUP;
		goto err;
	}

	RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
		__func__, reta_size, adapter->rte_dev->data->port_id);
err:
	return ret;
}

/* Query redirection table. */
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int ret;
	int i;
	u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
	int reta_conf_idx;
	int reta_idx;

	if (reta_size == 0 || reta_conf == NULL ||
	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
		return -EINVAL;

	ret = ena_com_indirect_table_get(ena_dev, indirect_table);
	if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
		RTE_LOG(ERR, PMD, "cannot get indirect table\n");
		ret = -ENOTSUP;
		goto err;
	}

	for (i = 0 ; i < reta_size ; i++) {
		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
			reta_conf[reta_conf_idx].reta[reta_idx] =
				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
	}
err:
	return ret;
}

static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues;
	int rc, i;
	u32 val;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		RTE_LOG(ERR, PMD, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = i % nb_rx_queues;
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
			RTE_LOG(ERR, PMD, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
		RTE_LOG(INFO, PMD, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
		RTE_LOG(INFO, PMD, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
		RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
		goto err_fill_indir;
	}
	RTE_LOG(DEBUG, PMD, "RSS configured for port %d\n",
		adapter->rte_dev->data->port_id);

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}

static void
ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
	int nb_queues = dev->data->nb_rx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_rx_queue_release(queues[i]);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
	int nb_queues = dev->data->nb_tx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_tx_queue_release(queues[i]);
}

static void ena_rx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;
	struct ena_adapter *adapter = ring->adapter;
	int ena_qid;

	ena_assert_msg(ring->configured,
		       "API violation - releasing not configured queue");
	ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
		       "API violation");

	/* Destroy HW queue */
	ena_qid = ENA_IO_RXQ_IDX(ring->id);
	ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);

	/* Free all bufs */
	ena_rx_queue_release_bufs(ring);

	/* Free ring resources */
	if (ring->rx_buffer_info)
		rte_free(ring->rx_buffer_info);
	ring->rx_buffer_info = NULL;

	ring->configured = 0;

	RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_tx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;
	struct ena_adapter *adapter = ring->adapter;
	int ena_qid;

	ena_assert_msg(ring->configured,
		       "API violation. Releasing not configured queue");
	ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
		       "API violation");

	/* Destroy HW queue */
	ena_qid = ENA_IO_TXQ_IDX(ring->id);
	ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);

	/* Free all bufs */
	ena_tx_queue_release_bufs(ring);

	/* Free ring resources */
	if (ring->tx_buffer_info)
		rte_free(ring->tx_buffer_info);

	if (ring->empty_tx_reqs)
		rte_free(ring->empty_tx_reqs);

	ring->empty_tx_reqs = NULL;
	ring->tx_buffer_info = NULL;

	ring->configured = 0;

	RTE_LOG(NOTICE, PMD, "TX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int ring_mask = ring->ring_size - 1;

	while (ring->next_to_clean != ring->next_to_use) {
		struct rte_mbuf *m =
			ring->rx_buffer_info[ring->next_to_clean & ring_mask];

		if (m)
			rte_mbuf_raw_free(m);

		ring->next_to_clean++;
	}
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

		if (tx_buf->mbuf)
			rte_pktmbuf_free(tx_buf->mbuf);

		ring->next_to_clean++;
	}
}

static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete)
{
	struct rte_eth_link *link = &dev->data->dev_link;

	link->link_status = 1;
	link->link_speed = ETH_SPEED_NUM_10G;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;

	return 0;
}

static int ena_queue_restart_all(struct rte_eth_dev *dev,
				 enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_ring *queues = NULL;
	int i = 0;
	int rc = 0;

	queues = (ring_type ==
		  ENA_RING_TYPE_RX) ?
		  adapter->rx_ring : adapter->tx_ring;

	for (i = 0; i < adapter->num_queues; i++) {
		if (queues[i].configured) {
			if (ring_type == ENA_RING_TYPE_RX) {
				ena_assert_msg(
					dev->data->rx_queues[i] == &queues[i],
					"Inconsistent state of rx queues\n");
			} else {
				ena_assert_msg(
					dev->data->tx_queues[i] == &queues[i],
					"Inconsistent state of tx queues\n");
			}

			rc = ena_queue_restart(&queues[i]);

			if (rc) {
				PMD_INIT_LOG(ERR,
					     "failed to restart queue %d type(%d)",
					     i, ring_type);
				return -1;
			}
		}
	}

	return 0;
}

static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = adapter->max_mtu;

	if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_JUMBO_FRAME)
		max_frame_len =
			adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;

	return max_frame_len;
}

static int ena_check_valid_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = ena_get_mtu_conf(adapter);

	if (max_frame_len > adapter->max_mtu) {
		PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len);
		return -1;
	}

	return 0;
}

static int
ena_calc_queue_size(struct ena_com_dev *ena_dev,
		    struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	uint32_t queue_size = ENA_DEFAULT_RING_SIZE;

	queue_size = RTE_MIN(queue_size,
			     get_feat_ctx->max_queues.max_cq_depth);
	queue_size = RTE_MIN(queue_size,
			     get_feat_ctx->max_queues.max_sq_depth);

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		queue_size = RTE_MIN(queue_size,
				     get_feat_ctx->max_queues.max_llq_depth);

	/* Round down to power of 2 */
	if (!rte_is_power_of_2(queue_size))
		queue_size = rte_align32pow2(queue_size >> 1);

	if (queue_size == 0) {
		PMD_INIT_LOG(ERR, "Invalid queue size");
		return -EFAULT;
	}

	return queue_size;
}

static void ena_stats_restart(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);

	rte_atomic64_init(&adapter->drv_stats->ierrors);
	rte_atomic64_init(&adapter->drv_stats->oerrors);
	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
}

static int ena_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats)
{
	struct ena_admin_basic_stats ena_stats;
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -ENOTSUP;

	memset(&ena_stats, 0, sizeof(ena_stats));
	rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
	if (unlikely(rc)) {
		RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA");
		return rc;
	}

	/* Set of basic statistics from ENA */
	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
					  ena_stats.rx_pkts_low);
	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
					  ena_stats.tx_pkts_low);
	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
					ena_stats.rx_bytes_low);
	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
					ena_stats.tx_bytes_low);
	stats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high,
					 ena_stats.rx_drops_low);

	/* Driver related stats */
	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
	stats->oerrors =
		rte_atomic64_read(&adapter->drv_stats->oerrors);
	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
	return 0;
}

static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	int rc = 0;

	ena_assert_msg(dev->data != NULL, "Uninitialized device");
	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device");
	adapter = (struct ena_adapter *)(dev->data->dev_private);

	ena_dev = &adapter->ena_dev;
	ena_assert_msg(ena_dev != NULL, "Uninitialized device");

	if (mtu > ena_get_mtu_conf(adapter)) {
		RTE_LOG(ERR, PMD,
			"Given MTU (%d) exceeds maximum MTU supported (%d)\n",
			mtu, ena_get_mtu_conf(adapter));
		rc = -EINVAL;
		goto err;
	}

	rc = ena_com_set_dev_mtu(ena_dev, mtu);
	if (rc)
		RTE_LOG(ERR, PMD, "Could not set MTU: %d\n", mtu);
	else
		RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu);

err:
	return rc;
}

static int ena_start(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	int rc = 0;

	if (!(adapter->state == ENA_ADAPTER_STATE_CONFIG ||
	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
		PMD_INIT_LOG(ERR, "API violation");
		return -1;
	}

	rc = ena_check_valid_conf(adapter);
	if (rc)
		return rc;

	rc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX);
	if (rc)
		return rc;

	rc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX);
	if (rc)
		return rc;

	if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
	    ETH_MQ_RX_RSS_FLAG) {
		rc = ena_rss_init_default(adapter);
		if (rc)
			return rc;
	}

	ena_stats_restart(dev);

	adapter->state = ENA_ADAPTER_STATE_RUNNING;

	return 0;
}

static int ena_queue_restart(struct ena_ring *ring)
{
	int rc, bufs_num;

	ena_assert_msg(ring->configured == 1,
		       "Trying to restart unconfigured queue\n");

	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	if (ring->type == ENA_RING_TYPE_TX)
		return 0;

	bufs_num = ring->ring_size - 1;
	rc = ena_populate_rx_queue(ring, bufs_num);
	if (rc != bufs_num) {
		PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
		return (-1);
	}

	return 0;
}

static int ena_tx_queue_setup(struct rte_eth_dev *dev,
			      uint16_t queue_idx,
			      uint16_t nb_desc,
			      __rte_unused unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf)
{
	struct ena_com_create_io_ctx ctx =
		/* policy set to _HOST just to satisfy icc compiler */
		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
		  ENA_COM_IO_QUEUE_DIRECTION_TX, 0, 0, 0, 0 };
	struct ena_ring *txq = NULL;
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	unsigned int i;
	int ena_qid;
	int rc;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;

	txq = &adapter->tx_ring[queue_idx];

	if (txq->configured) {
		RTE_LOG(CRIT, PMD,
			"API violation. Queue %d is already configured\n",
			queue_idx);
		return -1;
	}

	if (!rte_is_power_of_2(nb_desc)) {
		RTE_LOG(ERR, PMD,
			"Unsupported size of TX queue: %d is not a power of 2.",
			nb_desc);
		return -EINVAL;
	}

	if (nb_desc > adapter->tx_ring_size) {
		RTE_LOG(ERR, PMD,
			"Unsupported size of TX queue (max size: %d)\n",
			adapter->tx_ring_size);
		return -EINVAL;
	}

	if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
	    !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
		return -EINVAL;
	}

	ena_qid = ENA_IO_TXQ_IDX(queue_idx);

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.msix_vector = -1; /* admin interrupts not used */
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.queue_size = adapter->tx_ring_size;
	ctx.numa_node = ena_cpu_to_node(queue_idx);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"failed to create io TX queue #%d (qid:%d) rc: %d\n",
			queue_idx, ena_qid, rc);
	}
	txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
	txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &txq->ena_com_io_sq,
				     &txq->ena_com_io_cq);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			queue_idx, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		goto err;
	}

	txq->port_id = dev->data->port_id;
	txq->next_to_clean = 0;
	txq->next_to_use = 0;
	txq->ring_size = nb_desc;

	txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
					  sizeof(struct ena_tx_buffer) *
					  txq->ring_size,
					  RTE_CACHE_LINE_SIZE);
	if (!txq->tx_buffer_info) {
		RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n");
		return -ENOMEM;
	}

	txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
					 sizeof(u16) * txq->ring_size,
					 RTE_CACHE_LINE_SIZE);
	if (!txq->empty_tx_reqs) {
		RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n");
		rte_free(txq->tx_buffer_info);
		return -ENOMEM;
	}
	for (i = 0; i < txq->ring_size; i++)
		txq->empty_tx_reqs[i] = i;

	txq->offloads = tx_conf->offloads;

	/* Store pointer to this queue in upper layer */
	txq->configured = 1;
	dev->data->tx_queues[queue_idx] = txq;
err:
	return rc;
}

static int ena_rx_queue_setup(struct rte_eth_dev *dev,
			      uint16_t queue_idx,
			      uint16_t nb_desc,
			      __rte_unused unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp)
{
	struct ena_com_create_io_ctx ctx =
		/* policy set to _HOST just to satisfy icc compiler */
		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
		  ENA_COM_IO_QUEUE_DIRECTION_RX, 0, 0, 0, 0 };
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_ring *rxq = NULL;
	uint16_t ena_qid = 0;
	int rc = 0;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;

	rxq = &adapter->rx_ring[queue_idx];
	if (rxq->configured) {
		RTE_LOG(CRIT, PMD,
			"API violation. Queue %d is already configured\n",
			queue_idx);
		return -1;
	}

	if (!rte_is_power_of_2(nb_desc)) {
		RTE_LOG(ERR, PMD,
			"Unsupported size of RX queue: %d is not a power of 2.",
			nb_desc);
		return -EINVAL;
	}

	if (nb_desc > adapter->rx_ring_size) {
		RTE_LOG(ERR, PMD,
			"Unsupported size of RX queue (max size: %d)\n",
			adapter->rx_ring_size);
		return -EINVAL;
	}

	if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
		RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
		return -EINVAL;
	}

	ena_qid = ENA_IO_RXQ_IDX(queue_idx);

	ctx.qid = ena_qid;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = -1; /* admin interrupts not used */
	ctx.queue_size = adapter->rx_ring_size;
	ctx.numa_node = ena_cpu_to_node(queue_idx);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc)
		RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n",
			queue_idx, rc);

	rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
	rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &rxq->ena_com_io_sq,
				     &rxq->ena_com_io_cq);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Failed to get RX queue handlers. RX queue num %d rc: %d\n",
			queue_idx, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
	}

	rxq->port_id = dev->data->port_id;
	rxq->next_to_clean = 0;
	rxq->next_to_use = 0;
	rxq->ring_size = nb_desc;
	rxq->mb_pool = mp;

	rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
					  sizeof(struct rte_mbuf *) * nb_desc,
					  RTE_CACHE_LINE_SIZE);
	if (!rxq->rx_buffer_info) {
		RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n");
		return -ENOMEM;
	}

	/* Store pointer to this queue in upper layer */
	rxq->configured = 1;
	dev->data->rx_queues[queue_idx] = rxq;

	return rc;
}

static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
{
	unsigned int i;
	int rc;
	uint16_t ring_size = rxq->ring_size;
	uint16_t ring_mask = ring_size - 1;
	uint16_t next_to_use = rxq->next_to_use;
	uint16_t in_use;
	struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];

	if (unlikely(!count))
		return 0;

	in_use = rxq->next_to_use - rxq->next_to_clean;
	ena_assert_msg(((in_use + count) < ring_size), "bad ring state");

	count = RTE_MIN(count,
			(uint16_t)(ring_size - (next_to_use & ring_mask)));

	/* get resources for incoming packets */
	rc = rte_mempool_get_bulk(rxq->mb_pool,
				  (void **)(&mbufs[next_to_use & ring_mask]),
				  count);
	if (unlikely(rc < 0)) {
		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
		PMD_RX_LOG(DEBUG, "there are not enough free buffers");
		return 0;
	}

	for (i = 0; i < count; i++) {
		uint16_t next_to_use_masked = next_to_use & ring_mask;
		struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
		struct ena_com_buf ebuf;

		rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
		/* prepare physical address for DMA transaction */
		ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
		ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
		/* pass resource to device */
		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
						&ebuf, next_to_use_masked);
		if (unlikely(rc)) {
			rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
					     count - i);
			RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
			break;
		}
		next_to_use++;
	}

	/* When we submitted free resources to device... */
	if (i > 0) {
		/* ...let HW know that it can fill buffers with data */
		rte_wmb();
		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);

		rxq->next_to_use = next_to_use;
	}

	return i;
}

static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	int rc;
	bool readless_supported;

	/* Initialize mmio registers */
	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc) {
		RTE_LOG(ERR, PMD, "failed to init mmio read less\n");
		return rc;
	}

	/* The PCIe configuration space revision id indicates whether mmio
	 * register read is disabled.
	 */
	readless_supported =
		!(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
		  & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	/* reset device */
	rc = ena_com_dev_reset(ena_dev);
	if (rc) {
		RTE_LOG(ERR, PMD, "cannot reset device\n");
		goto err_mmio_read_less;
	}

	/* check FW version */
	rc = ena_com_validate_version(ena_dev);
	if (rc) {
		RTE_LOG(ERR, PMD, "device version is too low\n");
		goto err_mmio_read_less;
	}

	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);

	/* ENA device administration layer init */
	rc = ena_com_admin_init(ena_dev, NULL, true);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"cannot initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	/* To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information.
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	ena_config_host_info(ena_dev);

	/* Get Device Attributes and features */
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"cannot get attribute for ena device rc= %d\n", rc);
		goto err_admin_init;
	}

	return 0;

err_admin_init:
	ena_com_admin_destroy(ena_dev);

err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return rc;
}

static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct ena_adapter *adapter =
		(struct ena_adapter *)(eth_dev->data->dev_private);
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	int queue_size, rc;

	static int adapters_found;

	memset(adapter, 0, sizeof(struct ena_adapter));
	ena_dev = &adapter->ena_dev;

	eth_dev->dev_ops = &ena_dev_ops;
	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
	adapter->rte_eth_dev_data = eth_dev->data;
	adapter->rte_dev = eth_dev;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	adapter->pdev = pci_dev;

	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
		     pci_dev->addr.domain,
		     pci_dev->addr.bus,
		     pci_dev->addr.devid,
		     pci_dev->addr.function);

	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;

	/* Present ENA_MEM_BAR indicates available LLQ mode.
	 * Use corresponding policy
	 */
	if (adapter->dev_mem_base)
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
	else if (adapter->regs)
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	else
		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
			     ENA_REGS_BAR);

	ena_dev->reg_bar = adapter->regs;
	ena_dev->dmadev = adapter->pdev;

	adapter->id_number = adapters_found;

	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
		 adapter->id_number);

	/* device specific initialization routine */
	rc = ena_device_init(ena_dev, &get_feat_ctx);
	if (rc) {
		PMD_INIT_LOG(CRIT, "Failed to init ENA device");
		return -1;
	}

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		if (get_feat_ctx.max_queues.max_llq_num == 0) {
			PMD_INIT_LOG(ERR,
				     "Trying to use LLQ but llq_num is 0.\n"
				     "Falling back to regular queues.");
			ena_dev->tx_mem_queue_type =
				ENA_ADMIN_PLACEMENT_POLICY_HOST;
			adapter->num_queues =
				get_feat_ctx.max_queues.max_sq_num;
		} else {
			adapter->num_queues =
				get_feat_ctx.max_queues.max_llq_num;
		}
	} else {
		adapter->num_queues = get_feat_ctx.max_queues.max_sq_num;
	}

	queue_size = ena_calc_queue_size(ena_dev, &get_feat_ctx);
	if ((queue_size <= 0) || (adapter->num_queues <= 0))
		return -EFAULT;

	adapter->tx_ring_size = queue_size;
	adapter->rx_ring_size = queue_size;

	/* prepare ring structures */
	ena_init_rings(adapter);

	ena_config_debug_area(adapter);

	/* Set max MTU for this device */
	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;

	/* set device support for TSO */
	adapter->tso4_supported = get_feat_ctx.offload.tx &
				  ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;

	/* Copy MAC address and point DPDK to it */
	eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr;
	ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr,
			(struct ether_addr *)adapter->mac_addr);

	adapter->drv_stats = rte_zmalloc("adapter stats",
					 sizeof(*adapter->drv_stats),
					 RTE_CACHE_LINE_SIZE);
	if (!adapter->drv_stats) {
		RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n");
		return -ENOMEM;
	}

	adapters_found++;
	adapter->state = ENA_ADAPTER_STATE_INIT;

	return 0;
}

static int ena_dev_configure(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;

	if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
		RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
			"requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
			tx_offloads, adapter->tx_supported_offloads);
		return -ENOTSUP;
	}

	if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
		RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
			"requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
			rx_offloads, adapter->rx_supported_offloads);
		return -ENOTSUP;
	}

	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
		PMD_INIT_LOG(ERR, "Illegal adapter state: %d",
			     adapter->state);
		return -1;
	}

	switch (adapter->state) {
	case ENA_ADAPTER_STATE_INIT:
	case ENA_ADAPTER_STATE_STOPPED:
		adapter->state = ENA_ADAPTER_STATE_CONFIG;
		break;
	case ENA_ADAPTER_STATE_CONFIG:
		RTE_LOG(WARNING, PMD,
			"Invalid driver state while trying to configure device\n");
		break;
	default:
		break;
	}

	adapter->tx_selected_offloads = tx_offloads;
	adapter->rx_selected_offloads = rx_offloads;
	return 0;
}

static void ena_init_rings(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		struct ena_ring *ring = &adapter->tx_ring[i];

		ring->configured = 0;
		ring->type = ENA_RING_TYPE_TX;
		ring->adapter = adapter;
		ring->id = i;
		ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
		ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
	}

	for (i = 0; i < adapter->num_queues; i++) {
		struct ena_ring *ring = &adapter->rx_ring[i];

		ring->configured = 0;
		ring->type = ENA_RING_TYPE_RX;
		ring->adapter = adapter;
		ring->id = i;
	}
}

static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
					      uint64_t offloads)
{
	uint64_t port_offloads = adapter->tx_selected_offloads;

	/* Check if port supports all requested offloads.
	 * True if all offloads selected for queue are set for port.
	 */
	if ((offloads & port_offloads) != offloads)
		return false;
	return true;
}

static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
					      uint64_t offloads)
{
	uint64_t port_offloads = adapter->rx_selected_offloads;

	/* Check if port supports all requested offloads.
	 * True if all offloads selected for queue are set for port.
	 */
	if ((offloads & port_offloads) != offloads)
		return false;
	return true;
}

static void ena_infos_get(struct rte_eth_dev *dev,
			  struct rte_eth_dev_info *dev_info)
{
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	struct ena_com_dev_get_features_ctx feat;
	uint64_t rx_feat = 0, tx_feat = 0;
	int rc = 0;

	ena_assert_msg(dev->data != NULL, "Uninitialized device");
	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device");
	adapter = (struct ena_adapter *)(dev->data->dev_private);

	ena_dev = &adapter->ena_dev;
	ena_assert_msg(ena_dev != NULL, "Uninitialized device");

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->speed_capa =
			ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_5G |
			ETH_LINK_SPEED_10G |
			ETH_LINK_SPEED_25G |
			ETH_LINK_SPEED_40G |
			ETH_LINK_SPEED_50G |
			ETH_LINK_SPEED_100G;

	/* Get supported features from HW */
	rc = ena_com_get_dev_attr_feat(ena_dev, &feat);
	if (unlikely(rc)) {
		RTE_LOG(ERR, PMD,
			"Cannot get attribute for ena device rc= %d\n", rc);
		return;
	}

	/* Set Tx & Rx features available for device */
	if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
		tx_feat |= DEV_TX_OFFLOAD_TCP_TSO;

	if (feat.offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
			DEV_TX_OFFLOAD_UDP_CKSUM |
			DEV_TX_OFFLOAD_TCP_CKSUM;

	if (feat.offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
			DEV_RX_OFFLOAD_UDP_CKSUM |
			DEV_RX_OFFLOAD_TCP_CKSUM;

	/* Inform framework about available features */
	dev_info->rx_offload_capa = rx_feat;
	dev_info->rx_queue_offload_capa = rx_feat;
	dev_info->tx_offload_capa = tx_feat;
	dev_info->tx_queue_offload_capa = tx_feat;

	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
	dev_info->max_rx_pktlen = adapter->max_mtu;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_queues = adapter->num_queues;
	dev_info->max_tx_queues = adapter->num_queues;
	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;

	adapter->tx_supported_offloads = tx_feat;
	adapter->rx_supported_offloads = rx_feat;
}

static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
	unsigned int ring_size = rx_ring->ring_size;
	unsigned int ring_mask = ring_size - 1;
	uint16_t next_to_clean = rx_ring->next_to_clean;
	uint16_t desc_in_use = 0;
	unsigned int recv_idx = 0;
	struct rte_mbuf *mbuf = NULL;
	struct rte_mbuf *mbuf_head = NULL;
	struct rte_mbuf *mbuf_prev = NULL;
	struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info;
	unsigned int completed;

	struct ena_com_rx_ctx ena_rx_ctx;
	int rc = 0;

	/* Check adapter state */
	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
		RTE_LOG(ALERT, PMD,
			"Trying to receive pkts while device is NOT running\n");
		return 0;
	}

	desc_in_use = rx_ring->next_to_use - next_to_clean;
	if (unlikely(nb_pkts > desc_in_use))
		nb_pkts = desc_in_use;

	for (completed = 0; completed < nb_pkts; completed++) {
		int segments = 0;

		ena_rx_ctx.max_bufs = rx_ring->ring_size;
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.descs = 0;
		/* receive packet context */
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc)) {
			RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
			return 0;
		}

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		while (segments < ena_rx_ctx.descs) {
			mbuf = rx_buff_info[next_to_clean & ring_mask];
			mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
			mbuf->refcnt = 1;
			mbuf->next = NULL;
			if (segments == 0) {
				mbuf->nb_segs = ena_rx_ctx.descs;
				mbuf->port = rx_ring->port_id;
				mbuf->pkt_len = 0;
				mbuf_head = mbuf;
			} else {
				/* for multi-segment pkts create mbuf chain */
				mbuf_prev->next = mbuf;
			}
			mbuf_head->pkt_len += mbuf->data_len;

			mbuf_prev = mbuf;
			segments++;
			next_to_clean++;
		}

		/* fill mbuf attributes if any */
		ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
		mbuf_head->hash.rss = (uint32_t)rx_ring->id;

		/* pass to DPDK application head mbuf */
		rx_pkts[recv_idx] = mbuf_head;
		recv_idx++;
	}

	rx_ring->next_to_clean = next_to_clean;

	desc_in_use = desc_in_use - completed + 1;
	/* Burst refill to save doorbells, memory barriers, const interval */
	if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
		ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);

	return recv_idx;
}

static uint16_t
eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	int32_t ret;
	uint32_t i;
	struct rte_mbuf *m;
	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
	struct ipv4_hdr *ip_hdr;
	uint64_t ol_flags;
	uint16_t frag_field;

	for (i = 0; i != nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;

		if (!(ol_flags & PKT_TX_IPV4))
			continue;

		/* If no L2 header length was specified, assume it is the
		 * length of the ethernet header.
		 */
		if (unlikely(m->l2_len == 0))
			m->l2_len = sizeof(struct ether_hdr);

		ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
						 m->l2_len);
		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);

		if ((frag_field & IPV4_HDR_DF_FLAG) != 0) {
			m->packet_type |= RTE_PTYPE_L4_NONFRAG;

			/* If IPv4 header has DF flag enabled and TSO support
			 * is disabled, partial checksum should not be
			 * calculated.
			 */
			if (!tx_ring->adapter->tso4_supported)
				continue;
		}

		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
				(ol_flags & PKT_TX_L4_MASK) ==
				PKT_TX_SCTP_CKSUM) {
			rte_errno = -ENOTSUP;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = ret;
			return i;
		}
#endif

		/* In case we are supposed to TSO and have DF not set (DF=0)
		 * hardware must be provided with partial checksum, otherwise
		 * it will take care of necessary calculations.
		 */

		ret = rte_net_intel_cksum_flags_prepare(m,
			ol_flags & ~PKT_TX_TCP_SEG);
		if (ret != 0) {
			rte_errno = ret;
			return i;
		}
	}

	return i;
}

static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts)
{
	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
	uint16_t next_to_use = tx_ring->next_to_use;
	uint16_t next_to_clean = tx_ring->next_to_clean;
	struct rte_mbuf *mbuf;
	unsigned int ring_size = tx_ring->ring_size;
	unsigned int ring_mask = ring_size - 1;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_tx_buffer *tx_info;
	struct ena_com_buf *ebuf;
	uint16_t rc, req_id, total_tx_descs = 0;
	uint16_t sent_idx = 0, empty_tx_reqs;
	int nb_hw_desc;

	/* Check adapter state */
	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
		RTE_LOG(ALERT, PMD,
			"Trying to xmit pkts while device is NOT running\n");
		return 0;
	}

	empty_tx_reqs = ring_size - (next_to_use - next_to_clean);
	if (nb_pkts > empty_tx_reqs)
		nb_pkts = empty_tx_reqs;

	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
		mbuf = tx_pkts[sent_idx];

		req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
		tx_info = &tx_ring->tx_buffer_info[req_id];
		tx_info->mbuf = mbuf;
		tx_info->num_of_bufs = 0;
		ebuf = tx_info->bufs;

		/* Prepare TX context */
		memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
		memset(&ena_tx_ctx.ena_meta, 0x0,
		       sizeof(struct ena_com_tx_meta));
		ena_tx_ctx.ena_bufs = ebuf;
		ena_tx_ctx.req_id = req_id;
		if (tx_ring->tx_mem_queue_type ==
				ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			/* prepare the push buffer with
			 * virtual address of the data
			 */
			ena_tx_ctx.header_len =
				RTE_MIN(mbuf->data_len,
					tx_ring->tx_max_header_size);
			ena_tx_ctx.push_header =
				(void *)((char *)mbuf->buf_addr +
					 mbuf->data_off);
		} /* there's no else as we take advantage of memset zeroing */

		/* Set TX offloads flags, if applicable */
		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);

		if (unlikely(mbuf->ol_flags &
			     (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
			rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);

		rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]);

		/* Process first segment taking into
		 * consideration pushed header
		 */
		if (mbuf->data_len > ena_tx_ctx.header_len) {
			ebuf->paddr = mbuf->buf_iova +
				      mbuf->data_off +
				      ena_tx_ctx.header_len;
			ebuf->len = mbuf->data_len - ena_tx_ctx.header_len;
			ebuf++;
			tx_info->num_of_bufs++;
		}

		while ((mbuf = mbuf->next) != NULL) {
			ebuf->paddr = mbuf->buf_iova + mbuf->data_off;
			ebuf->len = mbuf->data_len;
			ebuf++;
			tx_info->num_of_bufs++;
		}

		ena_tx_ctx.num_bufs = tx_info->num_of_bufs;

		/* Write data to device */
		rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
					&ena_tx_ctx, &nb_hw_desc);
		if (unlikely(rc))
			break;

		tx_info->tx_descs = nb_hw_desc;

		next_to_use++;
	}

	/* If there are ready packets to be xmitted... */
	if (sent_idx > 0) {
		/* ...let HW do its best :-) */
		rte_wmb();
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);

		tx_ring->next_to_use = next_to_use;
	}

	/* Clear complete packets */
	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
		/* Get Tx info & store how many descs were processed */
		tx_info = &tx_ring->tx_buffer_info[req_id];
		total_tx_descs += tx_info->tx_descs;

		/* Free whole mbuf chain */
		mbuf = tx_info->mbuf;
		rte_pktmbuf_free(mbuf);
		tx_info->mbuf = NULL;

		/* Put back descriptor to the ring for reuse */
		tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
		next_to_clean++;

		/* If too many descs to clean, leave it for another run */
		if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
			break;
	}

	if (total_tx_descs > 0) {
		/* acknowledge completion of sent packets */
		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
		tx_ring->next_to_clean = next_to_clean;
	}

	return sent_idx;
}

static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct ena_adapter), eth_ena_dev_init);
}

static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_ena_pmd = {
	.id_table = pci_id_ena_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_ena_pci_probe,
	.remove = eth_ena_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(ena_init_log);
static void
ena_init_log(void)
{
	ena_logtype_init = rte_log_register("pmd.ena.init");
	if (ena_logtype_init >= 0)
		rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE);
	ena_logtype_driver = rte_log_register("pmd.ena.driver");
	if (ena_logtype_driver >= 0)
		rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);
}