1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of copyright holder nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <rte_ether.h> 35 #include <rte_ethdev.h> 36 #include <rte_ethdev_pci.h> 37 #include <rte_tcp.h> 38 #include <rte_atomic.h> 39 #include <rte_dev.h> 40 #include <rte_errno.h> 41 #include <rte_version.h> 42 #include <rte_eal_memconfig.h> 43 #include <rte_net.h> 44 45 #include "ena_ethdev.h" 46 #include "ena_logs.h" 47 #include "ena_platform.h" 48 #include "ena_com.h" 49 #include "ena_eth_com.h" 50 51 #include <ena_common_defs.h> 52 #include <ena_regs_defs.h> 53 #include <ena_admin_defs.h> 54 #include <ena_eth_io_defs.h> 55 56 #define DRV_MODULE_VER_MAJOR 1 57 #define DRV_MODULE_VER_MINOR 0 58 #define DRV_MODULE_VER_SUBMINOR 0 59 60 #define ENA_IO_TXQ_IDX(q) (2 * (q)) 61 #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1) 62 /*reverse version of ENA_IO_RXQ_IDX*/ 63 #define ENA_IO_RXQ_IDX_REV(q) ((q - 1) / 2) 64 65 /* While processing submitted and completed descriptors (rx and tx path 66 * respectively) in a loop it is desired to: 67 * - perform batch submissions while populating sumbissmion queue 68 * - avoid blocking transmission of other packets during cleanup phase 69 * Hence the utilization ratio of 1/8 of a queue size. 70 */ 71 #define ENA_RING_DESCS_RATIO(ring_size) (ring_size / 8) 72 73 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l) 74 #define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift)) 75 76 #define GET_L4_HDR_LEN(mbuf) \ 77 ((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, \ 78 mbuf->l3_len + mbuf->l2_len)->data_off) >> 4) 79 80 #define ENA_RX_RSS_TABLE_LOG_SIZE 7 81 #define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE) 82 #define ENA_HASH_KEY_SIZE 40 83 #define ENA_ETH_SS_STATS 0xFF 84 #define ETH_GSTRING_LEN 32 85 86 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 87 88 enum ethtool_stringset { 89 ETH_SS_TEST = 0, 90 ETH_SS_STATS, 91 }; 92 93 struct ena_stats { 94 char name[ETH_GSTRING_LEN]; 95 int stat_offset; 96 }; 97 98 #define ENA_STAT_ENA_COM_ENTRY(stat) { \ 99 .name = #stat, \ 100 .stat_offset = offsetof(struct ena_com_stats_admin, stat) \ 101 } 102 103 #define ENA_STAT_ENTRY(stat, stat_type) { \ 104 .name = #stat, \ 105 .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \ 106 } 107 108 #define ENA_STAT_RX_ENTRY(stat) \ 109 ENA_STAT_ENTRY(stat, rx) 110 111 #define ENA_STAT_TX_ENTRY(stat) \ 112 ENA_STAT_ENTRY(stat, tx) 113 114 #define ENA_STAT_GLOBAL_ENTRY(stat) \ 115 ENA_STAT_ENTRY(stat, dev) 116 117 static const struct ena_stats ena_stats_global_strings[] = { 118 ENA_STAT_GLOBAL_ENTRY(tx_timeout), 119 ENA_STAT_GLOBAL_ENTRY(io_suspend), 120 ENA_STAT_GLOBAL_ENTRY(io_resume), 121 ENA_STAT_GLOBAL_ENTRY(wd_expired), 122 ENA_STAT_GLOBAL_ENTRY(interface_up), 123 ENA_STAT_GLOBAL_ENTRY(interface_down), 124 ENA_STAT_GLOBAL_ENTRY(admin_q_pause), 125 }; 126 127 static const struct ena_stats ena_stats_tx_strings[] = { 128 ENA_STAT_TX_ENTRY(cnt), 129 ENA_STAT_TX_ENTRY(bytes), 130 ENA_STAT_TX_ENTRY(queue_stop), 131 ENA_STAT_TX_ENTRY(queue_wakeup), 132 ENA_STAT_TX_ENTRY(dma_mapping_err), 133 ENA_STAT_TX_ENTRY(linearize), 134 ENA_STAT_TX_ENTRY(linearize_failed), 135 ENA_STAT_TX_ENTRY(tx_poll), 136 ENA_STAT_TX_ENTRY(doorbells), 137 ENA_STAT_TX_ENTRY(prepare_ctx_err), 138 ENA_STAT_TX_ENTRY(missing_tx_comp), 139 ENA_STAT_TX_ENTRY(bad_req_id), 140 }; 141 142 static const struct ena_stats ena_stats_rx_strings[] = { 143 ENA_STAT_RX_ENTRY(cnt), 144 ENA_STAT_RX_ENTRY(bytes), 145 ENA_STAT_RX_ENTRY(refil_partial), 146 ENA_STAT_RX_ENTRY(bad_csum), 147 ENA_STAT_RX_ENTRY(page_alloc_fail), 148 ENA_STAT_RX_ENTRY(skb_alloc_fail), 149 ENA_STAT_RX_ENTRY(dma_mapping_err), 150 ENA_STAT_RX_ENTRY(bad_desc_num), 151 ENA_STAT_RX_ENTRY(small_copy_len_pkt), 152 }; 153 154 static const struct ena_stats ena_stats_ena_com_strings[] = { 155 ENA_STAT_ENA_COM_ENTRY(aborted_cmd), 156 ENA_STAT_ENA_COM_ENTRY(submitted_cmd), 157 ENA_STAT_ENA_COM_ENTRY(completed_cmd), 158 ENA_STAT_ENA_COM_ENTRY(out_of_space), 159 ENA_STAT_ENA_COM_ENTRY(no_completion), 160 }; 161 162 #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) 163 #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) 164 #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) 165 #define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings) 166 167 /** Vendor ID used by Amazon devices */ 168 #define PCI_VENDOR_ID_AMAZON 0x1D0F 169 /** Amazon devices */ 170 #define PCI_DEVICE_ID_ENA_VF 0xEC20 171 #define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21 172 173 #define ENA_TX_OFFLOAD_MASK (\ 174 PKT_TX_L4_MASK | \ 175 PKT_TX_IP_CKSUM | \ 176 PKT_TX_TCP_SEG) 177 178 #define ENA_TX_OFFLOAD_NOTSUP_MASK \ 179 (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK) 180 181 int ena_logtype_init; 182 int ena_logtype_driver; 183 184 static const struct rte_pci_id pci_id_ena_map[] = { 185 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) }, 186 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) }, 187 { .device_id = 0 }, 188 }; 189 190 static int ena_device_init(struct ena_com_dev *ena_dev, 191 struct ena_com_dev_get_features_ctx *get_feat_ctx); 192 static int ena_dev_configure(struct rte_eth_dev *dev); 193 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 194 uint16_t nb_pkts); 195 static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 196 uint16_t nb_pkts); 197 static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 198 uint16_t nb_desc, unsigned int socket_id, 199 const struct rte_eth_txconf *tx_conf); 200 static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 201 uint16_t nb_desc, unsigned int socket_id, 202 const struct rte_eth_rxconf *rx_conf, 203 struct rte_mempool *mp); 204 static uint16_t eth_ena_recv_pkts(void *rx_queue, 205 struct rte_mbuf **rx_pkts, uint16_t nb_pkts); 206 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count); 207 static void ena_init_rings(struct ena_adapter *adapter); 208 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 209 static int ena_start(struct rte_eth_dev *dev); 210 static void ena_close(struct rte_eth_dev *dev); 211 static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); 212 static void ena_rx_queue_release_all(struct rte_eth_dev *dev); 213 static void ena_tx_queue_release_all(struct rte_eth_dev *dev); 214 static void ena_rx_queue_release(void *queue); 215 static void ena_tx_queue_release(void *queue); 216 static void ena_rx_queue_release_bufs(struct ena_ring *ring); 217 static void ena_tx_queue_release_bufs(struct ena_ring *ring); 218 static int ena_link_update(struct rte_eth_dev *dev, 219 int wait_to_complete); 220 static int ena_queue_restart(struct ena_ring *ring); 221 static int ena_queue_restart_all(struct rte_eth_dev *dev, 222 enum ena_ring_type ring_type); 223 static void ena_stats_restart(struct rte_eth_dev *dev); 224 static void ena_infos_get(struct rte_eth_dev *dev, 225 struct rte_eth_dev_info *dev_info); 226 static int ena_rss_reta_update(struct rte_eth_dev *dev, 227 struct rte_eth_rss_reta_entry64 *reta_conf, 228 uint16_t reta_size); 229 static int ena_rss_reta_query(struct rte_eth_dev *dev, 230 struct rte_eth_rss_reta_entry64 *reta_conf, 231 uint16_t reta_size); 232 static int ena_get_sset_count(struct rte_eth_dev *dev, int sset); 233 234 static const struct eth_dev_ops ena_dev_ops = { 235 .dev_configure = ena_dev_configure, 236 .dev_infos_get = ena_infos_get, 237 .rx_queue_setup = ena_rx_queue_setup, 238 .tx_queue_setup = ena_tx_queue_setup, 239 .dev_start = ena_start, 240 .link_update = ena_link_update, 241 .stats_get = ena_stats_get, 242 .mtu_set = ena_mtu_set, 243 .rx_queue_release = ena_rx_queue_release, 244 .tx_queue_release = ena_tx_queue_release, 245 .dev_close = ena_close, 246 .reta_update = ena_rss_reta_update, 247 .reta_query = ena_rss_reta_query, 248 }; 249 250 #define NUMA_NO_NODE SOCKET_ID_ANY 251 252 static inline int ena_cpu_to_node(int cpu) 253 { 254 struct rte_config *config = rte_eal_get_configuration(); 255 256 if (likely(cpu < RTE_MAX_MEMZONE)) 257 return config->mem_config->memzone[cpu].socket_id; 258 259 return NUMA_NO_NODE; 260 } 261 262 static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, 263 struct ena_com_rx_ctx *ena_rx_ctx) 264 { 265 uint64_t ol_flags = 0; 266 267 if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) 268 ol_flags |= PKT_TX_TCP_CKSUM; 269 else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP) 270 ol_flags |= PKT_TX_UDP_CKSUM; 271 272 if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) 273 ol_flags |= PKT_TX_IPV4; 274 else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) 275 ol_flags |= PKT_TX_IPV6; 276 277 if (unlikely(ena_rx_ctx->l4_csum_err)) 278 ol_flags |= PKT_RX_L4_CKSUM_BAD; 279 if (unlikely(ena_rx_ctx->l3_csum_err)) 280 ol_flags |= PKT_RX_IP_CKSUM_BAD; 281 282 mbuf->ol_flags = ol_flags; 283 } 284 285 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, 286 struct ena_com_tx_ctx *ena_tx_ctx) 287 { 288 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; 289 290 if (mbuf->ol_flags & 291 (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) { 292 /* check if TSO is required */ 293 if (mbuf->ol_flags & PKT_TX_TCP_SEG) { 294 ena_tx_ctx->tso_enable = true; 295 296 ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf); 297 } 298 299 /* check if L3 checksum is needed */ 300 if (mbuf->ol_flags & PKT_TX_IP_CKSUM) 301 ena_tx_ctx->l3_csum_enable = true; 302 303 if (mbuf->ol_flags & PKT_TX_IPV6) { 304 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 305 } else { 306 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 307 308 /* set don't fragment (DF) flag */ 309 if (mbuf->packet_type & 310 (RTE_PTYPE_L4_NONFRAG 311 | RTE_PTYPE_INNER_L4_NONFRAG)) 312 ena_tx_ctx->df = true; 313 } 314 315 /* check if L4 checksum is needed */ 316 switch (mbuf->ol_flags & PKT_TX_L4_MASK) { 317 case PKT_TX_TCP_CKSUM: 318 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 319 ena_tx_ctx->l4_csum_enable = true; 320 break; 321 case PKT_TX_UDP_CKSUM: 322 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 323 ena_tx_ctx->l4_csum_enable = true; 324 break; 325 default: 326 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; 327 ena_tx_ctx->l4_csum_enable = false; 328 break; 329 } 330 331 ena_meta->mss = mbuf->tso_segsz; 332 ena_meta->l3_hdr_len = mbuf->l3_len; 333 ena_meta->l3_hdr_offset = mbuf->l2_len; 334 /* this param needed only for TSO */ 335 ena_meta->l3_outer_hdr_len = 0; 336 ena_meta->l3_outer_hdr_offset = 0; 337 338 ena_tx_ctx->meta_valid = true; 339 } else { 340 ena_tx_ctx->meta_valid = false; 341 } 342 } 343 344 static void ena_config_host_info(struct ena_com_dev *ena_dev) 345 { 346 struct ena_admin_host_info *host_info; 347 int rc; 348 349 /* Allocate only the host info */ 350 rc = ena_com_allocate_host_info(ena_dev); 351 if (rc) { 352 RTE_LOG(ERR, PMD, "Cannot allocate host info\n"); 353 return; 354 } 355 356 host_info = ena_dev->host_attr.host_info; 357 358 host_info->os_type = ENA_ADMIN_OS_DPDK; 359 host_info->kernel_ver = RTE_VERSION; 360 snprintf((char *)host_info->kernel_ver_str, 361 sizeof(host_info->kernel_ver_str), 362 "%s", rte_version()); 363 host_info->os_dist = RTE_VERSION; 364 snprintf((char *)host_info->os_dist_str, 365 sizeof(host_info->os_dist_str), 366 "%s", rte_version()); 367 host_info->driver_version = 368 (DRV_MODULE_VER_MAJOR) | 369 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 370 (DRV_MODULE_VER_SUBMINOR << 371 ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 372 373 rc = ena_com_set_host_attributes(ena_dev); 374 if (rc) { 375 RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); 376 if (rc != -EPERM) 377 goto err; 378 } 379 380 return; 381 382 err: 383 ena_com_delete_host_info(ena_dev); 384 } 385 386 static int 387 ena_get_sset_count(struct rte_eth_dev *dev, int sset) 388 { 389 if (sset != ETH_SS_STATS) 390 return -EOPNOTSUPP; 391 392 /* Workaround for clang: 393 * touch internal structures to prevent 394 * compiler error 395 */ 396 ENA_TOUCH(ena_stats_global_strings); 397 ENA_TOUCH(ena_stats_tx_strings); 398 ENA_TOUCH(ena_stats_rx_strings); 399 ENA_TOUCH(ena_stats_ena_com_strings); 400 401 return dev->data->nb_tx_queues * 402 (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) + 403 ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM; 404 } 405 406 static void ena_config_debug_area(struct ena_adapter *adapter) 407 { 408 u32 debug_area_size; 409 int rc, ss_count; 410 411 ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS); 412 if (ss_count <= 0) { 413 RTE_LOG(ERR, PMD, "SS count is negative\n"); 414 return; 415 } 416 417 /* allocate 32 bytes for each string and 64bit for the value */ 418 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 419 420 rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); 421 if (rc) { 422 RTE_LOG(ERR, PMD, "Cannot allocate debug area\n"); 423 return; 424 } 425 426 rc = ena_com_set_host_attributes(&adapter->ena_dev); 427 if (rc) { 428 RTE_LOG(WARNING, PMD, "Cannot set host attributes\n"); 429 if (rc != -EPERM) 430 goto err; 431 } 432 433 return; 434 err: 435 ena_com_delete_debug_area(&adapter->ena_dev); 436 } 437 438 static void ena_close(struct rte_eth_dev *dev) 439 { 440 struct ena_adapter *adapter = 441 (struct ena_adapter *)(dev->data->dev_private); 442 443 adapter->state = ENA_ADAPTER_STATE_STOPPED; 444 445 ena_rx_queue_release_all(dev); 446 ena_tx_queue_release_all(dev); 447 } 448 449 static int ena_rss_reta_update(struct rte_eth_dev *dev, 450 struct rte_eth_rss_reta_entry64 *reta_conf, 451 uint16_t reta_size) 452 { 453 struct ena_adapter *adapter = 454 (struct ena_adapter *)(dev->data->dev_private); 455 struct ena_com_dev *ena_dev = &adapter->ena_dev; 456 int ret, i; 457 u16 entry_value; 458 int conf_idx; 459 int idx; 460 461 if ((reta_size == 0) || (reta_conf == NULL)) 462 return -EINVAL; 463 464 if (reta_size > ENA_RX_RSS_TABLE_SIZE) { 465 RTE_LOG(WARNING, PMD, 466 "indirection table %d is bigger than supported (%d)\n", 467 reta_size, ENA_RX_RSS_TABLE_SIZE); 468 ret = -EINVAL; 469 goto err; 470 } 471 472 for (i = 0 ; i < reta_size ; i++) { 473 /* each reta_conf is for 64 entries. 474 * to support 128 we use 2 conf of 64 475 */ 476 conf_idx = i / RTE_RETA_GROUP_SIZE; 477 idx = i % RTE_RETA_GROUP_SIZE; 478 if (TEST_BIT(reta_conf[conf_idx].mask, idx)) { 479 entry_value = 480 ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]); 481 ret = ena_com_indirect_table_fill_entry(ena_dev, 482 i, 483 entry_value); 484 if (unlikely(ret && (ret != ENA_COM_PERMISSION))) { 485 RTE_LOG(ERR, PMD, 486 "Cannot fill indirect table\n"); 487 ret = -ENOTSUP; 488 goto err; 489 } 490 } 491 } 492 493 ret = ena_com_indirect_table_set(ena_dev); 494 if (unlikely(ret && (ret != ENA_COM_PERMISSION))) { 495 RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n"); 496 ret = -ENOTSUP; 497 goto err; 498 } 499 500 RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n", 501 __func__, reta_size, adapter->rte_dev->data->port_id); 502 err: 503 return ret; 504 } 505 506 /* Query redirection table. */ 507 static int ena_rss_reta_query(struct rte_eth_dev *dev, 508 struct rte_eth_rss_reta_entry64 *reta_conf, 509 uint16_t reta_size) 510 { 511 struct ena_adapter *adapter = 512 (struct ena_adapter *)(dev->data->dev_private); 513 struct ena_com_dev *ena_dev = &adapter->ena_dev; 514 int ret; 515 int i; 516 u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0}; 517 int reta_conf_idx; 518 int reta_idx; 519 520 if (reta_size == 0 || reta_conf == NULL || 521 (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL))) 522 return -EINVAL; 523 524 ret = ena_com_indirect_table_get(ena_dev, indirect_table); 525 if (unlikely(ret && (ret != ENA_COM_PERMISSION))) { 526 RTE_LOG(ERR, PMD, "cannot get indirect table\n"); 527 ret = -ENOTSUP; 528 goto err; 529 } 530 531 for (i = 0 ; i < reta_size ; i++) { 532 reta_conf_idx = i / RTE_RETA_GROUP_SIZE; 533 reta_idx = i % RTE_RETA_GROUP_SIZE; 534 if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx)) 535 reta_conf[reta_conf_idx].reta[reta_idx] = 536 ENA_IO_RXQ_IDX_REV(indirect_table[i]); 537 } 538 err: 539 return ret; 540 } 541 542 static int ena_rss_init_default(struct ena_adapter *adapter) 543 { 544 struct ena_com_dev *ena_dev = &adapter->ena_dev; 545 uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues; 546 int rc, i; 547 u32 val; 548 549 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 550 if (unlikely(rc)) { 551 RTE_LOG(ERR, PMD, "Cannot init indirect table\n"); 552 goto err_rss_init; 553 } 554 555 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 556 val = i % nb_rx_queues; 557 rc = ena_com_indirect_table_fill_entry(ena_dev, i, 558 ENA_IO_RXQ_IDX(val)); 559 if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { 560 RTE_LOG(ERR, PMD, "Cannot fill indirect table\n"); 561 goto err_fill_indir; 562 } 563 } 564 565 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 566 ENA_HASH_KEY_SIZE, 0xFFFFFFFF); 567 if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { 568 RTE_LOG(INFO, PMD, "Cannot fill hash function\n"); 569 goto err_fill_indir; 570 } 571 572 rc = ena_com_set_default_hash_ctrl(ena_dev); 573 if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { 574 RTE_LOG(INFO, PMD, "Cannot fill hash control\n"); 575 goto err_fill_indir; 576 } 577 578 rc = ena_com_indirect_table_set(ena_dev); 579 if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { 580 RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n"); 581 goto err_fill_indir; 582 } 583 RTE_LOG(DEBUG, PMD, "RSS configured for port %d\n", 584 adapter->rte_dev->data->port_id); 585 586 return 0; 587 588 err_fill_indir: 589 ena_com_rss_destroy(ena_dev); 590 err_rss_init: 591 592 return rc; 593 } 594 595 static void ena_rx_queue_release_all(struct rte_eth_dev *dev) 596 { 597 struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues; 598 int nb_queues = dev->data->nb_rx_queues; 599 int i; 600 601 for (i = 0; i < nb_queues; i++) 602 ena_rx_queue_release(queues[i]); 603 } 604 605 static void ena_tx_queue_release_all(struct rte_eth_dev *dev) 606 { 607 struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues; 608 int nb_queues = dev->data->nb_tx_queues; 609 int i; 610 611 for (i = 0; i < nb_queues; i++) 612 ena_tx_queue_release(queues[i]); 613 } 614 615 static void ena_rx_queue_release(void *queue) 616 { 617 struct ena_ring *ring = (struct ena_ring *)queue; 618 struct ena_adapter *adapter = ring->adapter; 619 int ena_qid; 620 621 ena_assert_msg(ring->configured, 622 "API violation - releasing not configured queue"); 623 ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING, 624 "API violation"); 625 626 /* Destroy HW queue */ 627 ena_qid = ENA_IO_RXQ_IDX(ring->id); 628 ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid); 629 630 /* Free all bufs */ 631 ena_rx_queue_release_bufs(ring); 632 633 /* Free ring resources */ 634 if (ring->rx_buffer_info) 635 rte_free(ring->rx_buffer_info); 636 ring->rx_buffer_info = NULL; 637 638 ring->configured = 0; 639 640 RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n", 641 ring->port_id, ring->id); 642 } 643 644 static void ena_tx_queue_release(void *queue) 645 { 646 struct ena_ring *ring = (struct ena_ring *)queue; 647 struct ena_adapter *adapter = ring->adapter; 648 int ena_qid; 649 650 ena_assert_msg(ring->configured, 651 "API violation. Releasing not configured queue"); 652 ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING, 653 "API violation"); 654 655 /* Destroy HW queue */ 656 ena_qid = ENA_IO_TXQ_IDX(ring->id); 657 ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid); 658 659 /* Free all bufs */ 660 ena_tx_queue_release_bufs(ring); 661 662 /* Free ring resources */ 663 if (ring->tx_buffer_info) 664 rte_free(ring->tx_buffer_info); 665 666 if (ring->empty_tx_reqs) 667 rte_free(ring->empty_tx_reqs); 668 669 ring->empty_tx_reqs = NULL; 670 ring->tx_buffer_info = NULL; 671 672 ring->configured = 0; 673 674 RTE_LOG(NOTICE, PMD, "TX Queue %d:%d released\n", 675 ring->port_id, ring->id); 676 } 677 678 static void ena_rx_queue_release_bufs(struct ena_ring *ring) 679 { 680 unsigned int ring_mask = ring->ring_size - 1; 681 682 while (ring->next_to_clean != ring->next_to_use) { 683 struct rte_mbuf *m = 684 ring->rx_buffer_info[ring->next_to_clean & ring_mask]; 685 686 if (m) 687 rte_mbuf_raw_free(m); 688 689 ring->next_to_clean++; 690 } 691 } 692 693 static void ena_tx_queue_release_bufs(struct ena_ring *ring) 694 { 695 unsigned int i; 696 697 for (i = 0; i < ring->ring_size; ++i) { 698 struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i]; 699 700 if (tx_buf->mbuf) 701 rte_pktmbuf_free(tx_buf->mbuf); 702 703 ring->next_to_clean++; 704 } 705 } 706 707 static int ena_link_update(struct rte_eth_dev *dev, 708 __rte_unused int wait_to_complete) 709 { 710 struct rte_eth_link *link = &dev->data->dev_link; 711 712 link->link_status = 1; 713 link->link_speed = ETH_SPEED_NUM_10G; 714 link->link_duplex = ETH_LINK_FULL_DUPLEX; 715 716 return 0; 717 } 718 719 static int ena_queue_restart_all(struct rte_eth_dev *dev, 720 enum ena_ring_type ring_type) 721 { 722 struct ena_adapter *adapter = 723 (struct ena_adapter *)(dev->data->dev_private); 724 struct ena_ring *queues = NULL; 725 int i = 0; 726 int rc = 0; 727 728 queues = (ring_type == ENA_RING_TYPE_RX) ? 729 adapter->rx_ring : adapter->tx_ring; 730 731 for (i = 0; i < adapter->num_queues; i++) { 732 if (queues[i].configured) { 733 if (ring_type == ENA_RING_TYPE_RX) { 734 ena_assert_msg( 735 dev->data->rx_queues[i] == &queues[i], 736 "Inconsistent state of rx queues\n"); 737 } else { 738 ena_assert_msg( 739 dev->data->tx_queues[i] == &queues[i], 740 "Inconsistent state of tx queues\n"); 741 } 742 743 rc = ena_queue_restart(&queues[i]); 744 745 if (rc) { 746 PMD_INIT_LOG(ERR, 747 "failed to restart queue %d type(%d)", 748 i, ring_type); 749 return -1; 750 } 751 } 752 } 753 754 return 0; 755 } 756 757 static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter) 758 { 759 uint32_t max_frame_len = adapter->max_mtu; 760 761 if (adapter->rte_eth_dev_data->dev_conf.rxmode.jumbo_frame == 1) 762 max_frame_len = 763 adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len; 764 765 return max_frame_len; 766 } 767 768 static int ena_check_valid_conf(struct ena_adapter *adapter) 769 { 770 uint32_t max_frame_len = ena_get_mtu_conf(adapter); 771 772 if (max_frame_len > adapter->max_mtu) { 773 PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len); 774 return -1; 775 } 776 777 return 0; 778 } 779 780 static int 781 ena_calc_queue_size(struct ena_com_dev *ena_dev, 782 struct ena_com_dev_get_features_ctx *get_feat_ctx) 783 { 784 uint32_t queue_size = ENA_DEFAULT_RING_SIZE; 785 786 queue_size = RTE_MIN(queue_size, 787 get_feat_ctx->max_queues.max_cq_depth); 788 queue_size = RTE_MIN(queue_size, 789 get_feat_ctx->max_queues.max_sq_depth); 790 791 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 792 queue_size = RTE_MIN(queue_size, 793 get_feat_ctx->max_queues.max_llq_depth); 794 795 /* Round down to power of 2 */ 796 if (!rte_is_power_of_2(queue_size)) 797 queue_size = rte_align32pow2(queue_size >> 1); 798 799 if (queue_size == 0) { 800 PMD_INIT_LOG(ERR, "Invalid queue size"); 801 return -EFAULT; 802 } 803 804 return queue_size; 805 } 806 807 static void ena_stats_restart(struct rte_eth_dev *dev) 808 { 809 struct ena_adapter *adapter = 810 (struct ena_adapter *)(dev->data->dev_private); 811 812 rte_atomic64_init(&adapter->drv_stats->ierrors); 813 rte_atomic64_init(&adapter->drv_stats->oerrors); 814 rte_atomic64_init(&adapter->drv_stats->rx_nombuf); 815 } 816 817 static int ena_stats_get(struct rte_eth_dev *dev, 818 struct rte_eth_stats *stats) 819 { 820 struct ena_admin_basic_stats ena_stats; 821 struct ena_adapter *adapter = 822 (struct ena_adapter *)(dev->data->dev_private); 823 struct ena_com_dev *ena_dev = &adapter->ena_dev; 824 int rc; 825 826 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 827 return -ENOTSUP; 828 829 memset(&ena_stats, 0, sizeof(ena_stats)); 830 rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats); 831 if (unlikely(rc)) { 832 RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA"); 833 return rc; 834 } 835 836 /* Set of basic statistics from ENA */ 837 stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, 838 ena_stats.rx_pkts_low); 839 stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, 840 ena_stats.tx_pkts_low); 841 stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, 842 ena_stats.rx_bytes_low); 843 stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, 844 ena_stats.tx_bytes_low); 845 stats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high, 846 ena_stats.rx_drops_low); 847 848 /* Driver related stats */ 849 stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); 850 stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); 851 stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); 852 return 0; 853 } 854 855 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 856 { 857 struct ena_adapter *adapter; 858 struct ena_com_dev *ena_dev; 859 int rc = 0; 860 861 ena_assert_msg(dev->data != NULL, "Uninitialized device"); 862 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device"); 863 adapter = (struct ena_adapter *)(dev->data->dev_private); 864 865 ena_dev = &adapter->ena_dev; 866 ena_assert_msg(ena_dev != NULL, "Uninitialized device"); 867 868 if (mtu > ena_get_mtu_conf(adapter)) { 869 RTE_LOG(ERR, PMD, 870 "Given MTU (%d) exceeds maximum MTU supported (%d)\n", 871 mtu, ena_get_mtu_conf(adapter)); 872 rc = -EINVAL; 873 goto err; 874 } 875 876 rc = ena_com_set_dev_mtu(ena_dev, mtu); 877 if (rc) 878 RTE_LOG(ERR, PMD, "Could not set MTU: %d\n", mtu); 879 else 880 RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu); 881 882 err: 883 return rc; 884 } 885 886 static int ena_start(struct rte_eth_dev *dev) 887 { 888 struct ena_adapter *adapter = 889 (struct ena_adapter *)(dev->data->dev_private); 890 int rc = 0; 891 892 if (!(adapter->state == ENA_ADAPTER_STATE_CONFIG || 893 adapter->state == ENA_ADAPTER_STATE_STOPPED)) { 894 PMD_INIT_LOG(ERR, "API violation"); 895 return -1; 896 } 897 898 rc = ena_check_valid_conf(adapter); 899 if (rc) 900 return rc; 901 902 rc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX); 903 if (rc) 904 return rc; 905 906 rc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX); 907 if (rc) 908 return rc; 909 910 if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode & 911 ETH_MQ_RX_RSS_FLAG) { 912 rc = ena_rss_init_default(adapter); 913 if (rc) 914 return rc; 915 } 916 917 ena_stats_restart(dev); 918 919 adapter->state = ENA_ADAPTER_STATE_RUNNING; 920 921 return 0; 922 } 923 924 static int ena_queue_restart(struct ena_ring *ring) 925 { 926 int rc, bufs_num; 927 928 ena_assert_msg(ring->configured == 1, 929 "Trying to restart unconfigured queue\n"); 930 931 ring->next_to_clean = 0; 932 ring->next_to_use = 0; 933 934 if (ring->type == ENA_RING_TYPE_TX) 935 return 0; 936 937 bufs_num = ring->ring_size - 1; 938 rc = ena_populate_rx_queue(ring, bufs_num); 939 if (rc != bufs_num) { 940 PMD_INIT_LOG(ERR, "Failed to populate rx ring !"); 941 return (-1); 942 } 943 944 return 0; 945 } 946 947 static int ena_tx_queue_setup(struct rte_eth_dev *dev, 948 uint16_t queue_idx, 949 uint16_t nb_desc, 950 __rte_unused unsigned int socket_id, 951 __rte_unused const struct rte_eth_txconf *tx_conf) 952 { 953 struct ena_com_create_io_ctx ctx = 954 /* policy set to _HOST just to satisfy icc compiler */ 955 { ENA_ADMIN_PLACEMENT_POLICY_HOST, 956 ENA_COM_IO_QUEUE_DIRECTION_TX, 0, 0, 0, 0 }; 957 struct ena_ring *txq = NULL; 958 struct ena_adapter *adapter = 959 (struct ena_adapter *)(dev->data->dev_private); 960 unsigned int i; 961 int ena_qid; 962 int rc; 963 struct ena_com_dev *ena_dev = &adapter->ena_dev; 964 965 txq = &adapter->tx_ring[queue_idx]; 966 967 if (txq->configured) { 968 RTE_LOG(CRIT, PMD, 969 "API violation. Queue %d is already configured\n", 970 queue_idx); 971 return -1; 972 } 973 974 if (!rte_is_power_of_2(nb_desc)) { 975 RTE_LOG(ERR, PMD, 976 "Unsupported size of RX queue: %d is not a power of 2.", 977 nb_desc); 978 return -EINVAL; 979 } 980 981 if (nb_desc > adapter->tx_ring_size) { 982 RTE_LOG(ERR, PMD, 983 "Unsupported size of TX queue (max size: %d)\n", 984 adapter->tx_ring_size); 985 return -EINVAL; 986 } 987 988 ena_qid = ENA_IO_TXQ_IDX(queue_idx); 989 990 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 991 ctx.qid = ena_qid; 992 ctx.msix_vector = -1; /* admin interrupts not used */ 993 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 994 ctx.queue_size = adapter->tx_ring_size; 995 ctx.numa_node = ena_cpu_to_node(queue_idx); 996 997 rc = ena_com_create_io_queue(ena_dev, &ctx); 998 if (rc) { 999 RTE_LOG(ERR, PMD, 1000 "failed to create io TX queue #%d (qid:%d) rc: %d\n", 1001 queue_idx, ena_qid, rc); 1002 } 1003 txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid]; 1004 txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid]; 1005 1006 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1007 &txq->ena_com_io_sq, 1008 &txq->ena_com_io_cq); 1009 if (rc) { 1010 RTE_LOG(ERR, PMD, 1011 "Failed to get TX queue handlers. TX queue num %d rc: %d\n", 1012 queue_idx, rc); 1013 ena_com_destroy_io_queue(ena_dev, ena_qid); 1014 goto err; 1015 } 1016 1017 txq->port_id = dev->data->port_id; 1018 txq->next_to_clean = 0; 1019 txq->next_to_use = 0; 1020 txq->ring_size = nb_desc; 1021 1022 txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info", 1023 sizeof(struct ena_tx_buffer) * 1024 txq->ring_size, 1025 RTE_CACHE_LINE_SIZE); 1026 if (!txq->tx_buffer_info) { 1027 RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n"); 1028 return -ENOMEM; 1029 } 1030 1031 txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs", 1032 sizeof(u16) * txq->ring_size, 1033 RTE_CACHE_LINE_SIZE); 1034 if (!txq->empty_tx_reqs) { 1035 RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n"); 1036 rte_free(txq->tx_buffer_info); 1037 return -ENOMEM; 1038 } 1039 for (i = 0; i < txq->ring_size; i++) 1040 txq->empty_tx_reqs[i] = i; 1041 1042 /* Store pointer to this queue in upper layer */ 1043 txq->configured = 1; 1044 dev->data->tx_queues[queue_idx] = txq; 1045 err: 1046 return rc; 1047 } 1048 1049 static int ena_rx_queue_setup(struct rte_eth_dev *dev, 1050 uint16_t queue_idx, 1051 uint16_t nb_desc, 1052 __rte_unused unsigned int socket_id, 1053 __rte_unused const struct rte_eth_rxconf *rx_conf, 1054 struct rte_mempool *mp) 1055 { 1056 struct ena_com_create_io_ctx ctx = 1057 /* policy set to _HOST just to satisfy icc compiler */ 1058 { ENA_ADMIN_PLACEMENT_POLICY_HOST, 1059 ENA_COM_IO_QUEUE_DIRECTION_RX, 0, 0, 0, 0 }; 1060 struct ena_adapter *adapter = 1061 (struct ena_adapter *)(dev->data->dev_private); 1062 struct ena_ring *rxq = NULL; 1063 uint16_t ena_qid = 0; 1064 int rc = 0; 1065 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1066 1067 rxq = &adapter->rx_ring[queue_idx]; 1068 if (rxq->configured) { 1069 RTE_LOG(CRIT, PMD, 1070 "API violation. Queue %d is already configured\n", 1071 queue_idx); 1072 return -1; 1073 } 1074 1075 if (!rte_is_power_of_2(nb_desc)) { 1076 RTE_LOG(ERR, PMD, 1077 "Unsupported size of TX queue: %d is not a power of 2.", 1078 nb_desc); 1079 return -EINVAL; 1080 } 1081 1082 if (nb_desc > adapter->rx_ring_size) { 1083 RTE_LOG(ERR, PMD, 1084 "Unsupported size of RX queue (max size: %d)\n", 1085 adapter->rx_ring_size); 1086 return -EINVAL; 1087 } 1088 1089 ena_qid = ENA_IO_RXQ_IDX(queue_idx); 1090 1091 ctx.qid = ena_qid; 1092 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 1093 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 1094 ctx.msix_vector = -1; /* admin interrupts not used */ 1095 ctx.queue_size = adapter->rx_ring_size; 1096 ctx.numa_node = ena_cpu_to_node(queue_idx); 1097 1098 rc = ena_com_create_io_queue(ena_dev, &ctx); 1099 if (rc) 1100 RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n", 1101 queue_idx, rc); 1102 1103 rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid]; 1104 rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid]; 1105 1106 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1107 &rxq->ena_com_io_sq, 1108 &rxq->ena_com_io_cq); 1109 if (rc) { 1110 RTE_LOG(ERR, PMD, 1111 "Failed to get RX queue handlers. RX queue num %d rc: %d\n", 1112 queue_idx, rc); 1113 ena_com_destroy_io_queue(ena_dev, ena_qid); 1114 } 1115 1116 rxq->port_id = dev->data->port_id; 1117 rxq->next_to_clean = 0; 1118 rxq->next_to_use = 0; 1119 rxq->ring_size = nb_desc; 1120 rxq->mb_pool = mp; 1121 1122 rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info", 1123 sizeof(struct rte_mbuf *) * nb_desc, 1124 RTE_CACHE_LINE_SIZE); 1125 if (!rxq->rx_buffer_info) { 1126 RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n"); 1127 return -ENOMEM; 1128 } 1129 1130 /* Store pointer to this queue in upper layer */ 1131 rxq->configured = 1; 1132 dev->data->rx_queues[queue_idx] = rxq; 1133 1134 return rc; 1135 } 1136 1137 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) 1138 { 1139 unsigned int i; 1140 int rc; 1141 uint16_t ring_size = rxq->ring_size; 1142 uint16_t ring_mask = ring_size - 1; 1143 uint16_t next_to_use = rxq->next_to_use; 1144 uint16_t in_use; 1145 struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0]; 1146 1147 if (unlikely(!count)) 1148 return 0; 1149 1150 in_use = rxq->next_to_use - rxq->next_to_clean; 1151 ena_assert_msg(((in_use + count) < ring_size), "bad ring state"); 1152 1153 count = RTE_MIN(count, 1154 (uint16_t)(ring_size - (next_to_use & ring_mask))); 1155 1156 /* get resources for incoming packets */ 1157 rc = rte_mempool_get_bulk(rxq->mb_pool, 1158 (void **)(&mbufs[next_to_use & ring_mask]), 1159 count); 1160 if (unlikely(rc < 0)) { 1161 rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); 1162 PMD_RX_LOG(DEBUG, "there are no enough free buffers"); 1163 return 0; 1164 } 1165 1166 for (i = 0; i < count; i++) { 1167 uint16_t next_to_use_masked = next_to_use & ring_mask; 1168 struct rte_mbuf *mbuf = mbufs[next_to_use_masked]; 1169 struct ena_com_buf ebuf; 1170 1171 rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]); 1172 /* prepare physical address for DMA transaction */ 1173 ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; 1174 ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; 1175 /* pass resource to device */ 1176 rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq, 1177 &ebuf, next_to_use_masked); 1178 if (unlikely(rc)) { 1179 rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf), 1180 count - i); 1181 RTE_LOG(WARNING, PMD, "failed adding rx desc\n"); 1182 break; 1183 } 1184 next_to_use++; 1185 } 1186 1187 /* When we submitted free recources to device... */ 1188 if (i > 0) { 1189 /* ...let HW know that it can fill buffers with data */ 1190 rte_wmb(); 1191 ena_com_write_sq_doorbell(rxq->ena_com_io_sq); 1192 1193 rxq->next_to_use = next_to_use; 1194 } 1195 1196 return i; 1197 } 1198 1199 static int ena_device_init(struct ena_com_dev *ena_dev, 1200 struct ena_com_dev_get_features_ctx *get_feat_ctx) 1201 { 1202 int rc; 1203 bool readless_supported; 1204 1205 /* Initialize mmio registers */ 1206 rc = ena_com_mmio_reg_read_request_init(ena_dev); 1207 if (rc) { 1208 RTE_LOG(ERR, PMD, "failed to init mmio read less\n"); 1209 return rc; 1210 } 1211 1212 /* The PCIe configuration space revision id indicate if mmio reg 1213 * read is disabled. 1214 */ 1215 readless_supported = 1216 !(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id 1217 & ENA_MMIO_DISABLE_REG_READ); 1218 ena_com_set_mmio_read_mode(ena_dev, readless_supported); 1219 1220 /* reset device */ 1221 rc = ena_com_dev_reset(ena_dev); 1222 if (rc) { 1223 RTE_LOG(ERR, PMD, "cannot reset device\n"); 1224 goto err_mmio_read_less; 1225 } 1226 1227 /* check FW version */ 1228 rc = ena_com_validate_version(ena_dev); 1229 if (rc) { 1230 RTE_LOG(ERR, PMD, "device version is too low\n"); 1231 goto err_mmio_read_less; 1232 } 1233 1234 ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); 1235 1236 /* ENA device administration layer init */ 1237 rc = ena_com_admin_init(ena_dev, NULL, true); 1238 if (rc) { 1239 RTE_LOG(ERR, PMD, 1240 "cannot initialize ena admin queue with device\n"); 1241 goto err_mmio_read_less; 1242 } 1243 1244 /* To enable the msix interrupts the driver needs to know the number 1245 * of queues. So the driver uses polling mode to retrieve this 1246 * information. 1247 */ 1248 ena_com_set_admin_polling_mode(ena_dev, true); 1249 1250 ena_config_host_info(ena_dev); 1251 1252 /* Get Device Attributes and features */ 1253 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 1254 if (rc) { 1255 RTE_LOG(ERR, PMD, 1256 "cannot get attribute for ena device rc= %d\n", rc); 1257 goto err_admin_init; 1258 } 1259 1260 return 0; 1261 1262 err_admin_init: 1263 ena_com_admin_destroy(ena_dev); 1264 1265 err_mmio_read_less: 1266 ena_com_mmio_reg_read_request_destroy(ena_dev); 1267 1268 return rc; 1269 } 1270 1271 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) 1272 { 1273 struct rte_pci_device *pci_dev; 1274 struct ena_adapter *adapter = 1275 (struct ena_adapter *)(eth_dev->data->dev_private); 1276 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1277 struct ena_com_dev_get_features_ctx get_feat_ctx; 1278 int queue_size, rc; 1279 1280 static int adapters_found; 1281 1282 memset(adapter, 0, sizeof(struct ena_adapter)); 1283 ena_dev = &adapter->ena_dev; 1284 1285 eth_dev->dev_ops = &ena_dev_ops; 1286 eth_dev->rx_pkt_burst = ð_ena_recv_pkts; 1287 eth_dev->tx_pkt_burst = ð_ena_xmit_pkts; 1288 eth_dev->tx_pkt_prepare = ð_ena_prep_pkts; 1289 adapter->rte_eth_dev_data = eth_dev->data; 1290 adapter->rte_dev = eth_dev; 1291 1292 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1293 return 0; 1294 1295 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1296 adapter->pdev = pci_dev; 1297 1298 PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d", 1299 pci_dev->addr.domain, 1300 pci_dev->addr.bus, 1301 pci_dev->addr.devid, 1302 pci_dev->addr.function); 1303 1304 adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; 1305 adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; 1306 1307 /* Present ENA_MEM_BAR indicates available LLQ mode. 1308 * Use corresponding policy 1309 */ 1310 if (adapter->dev_mem_base) 1311 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; 1312 else if (adapter->regs) 1313 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 1314 else 1315 PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)", 1316 ENA_REGS_BAR); 1317 1318 ena_dev->reg_bar = adapter->regs; 1319 ena_dev->dmadev = adapter->pdev; 1320 1321 adapter->id_number = adapters_found; 1322 1323 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", 1324 adapter->id_number); 1325 1326 /* device specific initialization routine */ 1327 rc = ena_device_init(ena_dev, &get_feat_ctx); 1328 if (rc) { 1329 PMD_INIT_LOG(CRIT, "Failed to init ENA device"); 1330 return -1; 1331 } 1332 1333 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1334 if (get_feat_ctx.max_queues.max_llq_num == 0) { 1335 PMD_INIT_LOG(ERR, 1336 "Trying to use LLQ but llq_num is 0.\n" 1337 "Fall back into regular queues."); 1338 ena_dev->tx_mem_queue_type = 1339 ENA_ADMIN_PLACEMENT_POLICY_HOST; 1340 adapter->num_queues = 1341 get_feat_ctx.max_queues.max_sq_num; 1342 } else { 1343 adapter->num_queues = 1344 get_feat_ctx.max_queues.max_llq_num; 1345 } 1346 } else { 1347 adapter->num_queues = get_feat_ctx.max_queues.max_sq_num; 1348 } 1349 1350 queue_size = ena_calc_queue_size(ena_dev, &get_feat_ctx); 1351 if ((queue_size <= 0) || (adapter->num_queues <= 0)) 1352 return -EFAULT; 1353 1354 adapter->tx_ring_size = queue_size; 1355 adapter->rx_ring_size = queue_size; 1356 1357 /* prepare ring structures */ 1358 ena_init_rings(adapter); 1359 1360 ena_config_debug_area(adapter); 1361 1362 /* Set max MTU for this device */ 1363 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 1364 1365 /* set device support for TSO */ 1366 adapter->tso4_supported = get_feat_ctx.offload.tx & 1367 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK; 1368 1369 /* Copy MAC address and point DPDK to it */ 1370 eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr; 1371 ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr, 1372 (struct ether_addr *)adapter->mac_addr); 1373 1374 adapter->drv_stats = rte_zmalloc("adapter stats", 1375 sizeof(*adapter->drv_stats), 1376 RTE_CACHE_LINE_SIZE); 1377 if (!adapter->drv_stats) { 1378 RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n"); 1379 return -ENOMEM; 1380 } 1381 1382 adapters_found++; 1383 adapter->state = ENA_ADAPTER_STATE_INIT; 1384 1385 return 0; 1386 } 1387 1388 static int ena_dev_configure(struct rte_eth_dev *dev) 1389 { 1390 struct ena_adapter *adapter = 1391 (struct ena_adapter *)(dev->data->dev_private); 1392 1393 if (!(adapter->state == ENA_ADAPTER_STATE_INIT || 1394 adapter->state == ENA_ADAPTER_STATE_STOPPED)) { 1395 PMD_INIT_LOG(ERR, "Illegal adapter state: %d", 1396 adapter->state); 1397 return -1; 1398 } 1399 1400 switch (adapter->state) { 1401 case ENA_ADAPTER_STATE_INIT: 1402 case ENA_ADAPTER_STATE_STOPPED: 1403 adapter->state = ENA_ADAPTER_STATE_CONFIG; 1404 break; 1405 case ENA_ADAPTER_STATE_CONFIG: 1406 RTE_LOG(WARNING, PMD, 1407 "Ivalid driver state while trying to configure device\n"); 1408 break; 1409 default: 1410 break; 1411 } 1412 1413 return 0; 1414 } 1415 1416 static void ena_init_rings(struct ena_adapter *adapter) 1417 { 1418 int i; 1419 1420 for (i = 0; i < adapter->num_queues; i++) { 1421 struct ena_ring *ring = &adapter->tx_ring[i]; 1422 1423 ring->configured = 0; 1424 ring->type = ENA_RING_TYPE_TX; 1425 ring->adapter = adapter; 1426 ring->id = i; 1427 ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 1428 ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 1429 } 1430 1431 for (i = 0; i < adapter->num_queues; i++) { 1432 struct ena_ring *ring = &adapter->rx_ring[i]; 1433 1434 ring->configured = 0; 1435 ring->type = ENA_RING_TYPE_RX; 1436 ring->adapter = adapter; 1437 ring->id = i; 1438 } 1439 } 1440 1441 static void ena_infos_get(struct rte_eth_dev *dev, 1442 struct rte_eth_dev_info *dev_info) 1443 { 1444 struct ena_adapter *adapter; 1445 struct ena_com_dev *ena_dev; 1446 struct ena_com_dev_get_features_ctx feat; 1447 uint32_t rx_feat = 0, tx_feat = 0; 1448 int rc = 0; 1449 1450 ena_assert_msg(dev->data != NULL, "Uninitialized device"); 1451 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device"); 1452 adapter = (struct ena_adapter *)(dev->data->dev_private); 1453 1454 ena_dev = &adapter->ena_dev; 1455 ena_assert_msg(ena_dev != NULL, "Uninitialized device"); 1456 1457 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1458 1459 dev_info->speed_capa = 1460 ETH_LINK_SPEED_1G | 1461 ETH_LINK_SPEED_2_5G | 1462 ETH_LINK_SPEED_5G | 1463 ETH_LINK_SPEED_10G | 1464 ETH_LINK_SPEED_25G | 1465 ETH_LINK_SPEED_40G | 1466 ETH_LINK_SPEED_50G | 1467 ETH_LINK_SPEED_100G; 1468 1469 /* Get supported features from HW */ 1470 rc = ena_com_get_dev_attr_feat(ena_dev, &feat); 1471 if (unlikely(rc)) { 1472 RTE_LOG(ERR, PMD, 1473 "Cannot get attribute for ena device rc= %d\n", rc); 1474 return; 1475 } 1476 1477 /* Set Tx & Rx features available for device */ 1478 if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) 1479 tx_feat |= DEV_TX_OFFLOAD_TCP_TSO; 1480 1481 if (feat.offload.tx & 1482 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) 1483 tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM | 1484 DEV_TX_OFFLOAD_UDP_CKSUM | 1485 DEV_TX_OFFLOAD_TCP_CKSUM; 1486 1487 if (feat.offload.rx_supported & 1488 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) 1489 rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM | 1490 DEV_RX_OFFLOAD_UDP_CKSUM | 1491 DEV_RX_OFFLOAD_TCP_CKSUM; 1492 1493 /* Inform framework about available features */ 1494 dev_info->rx_offload_capa = rx_feat; 1495 dev_info->tx_offload_capa = tx_feat; 1496 1497 dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 1498 dev_info->max_rx_pktlen = adapter->max_mtu; 1499 dev_info->max_mac_addrs = 1; 1500 1501 dev_info->max_rx_queues = adapter->num_queues; 1502 dev_info->max_tx_queues = adapter->num_queues; 1503 dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 1504 } 1505 1506 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 1507 uint16_t nb_pkts) 1508 { 1509 struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 1510 unsigned int ring_size = rx_ring->ring_size; 1511 unsigned int ring_mask = ring_size - 1; 1512 uint16_t next_to_clean = rx_ring->next_to_clean; 1513 uint16_t desc_in_use = 0; 1514 unsigned int recv_idx = 0; 1515 struct rte_mbuf *mbuf = NULL; 1516 struct rte_mbuf *mbuf_head = NULL; 1517 struct rte_mbuf *mbuf_prev = NULL; 1518 struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info; 1519 unsigned int completed; 1520 1521 struct ena_com_rx_ctx ena_rx_ctx; 1522 int rc = 0; 1523 1524 /* Check adapter state */ 1525 if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 1526 RTE_LOG(ALERT, PMD, 1527 "Trying to receive pkts while device is NOT running\n"); 1528 return 0; 1529 } 1530 1531 desc_in_use = rx_ring->next_to_use - next_to_clean; 1532 if (unlikely(nb_pkts > desc_in_use)) 1533 nb_pkts = desc_in_use; 1534 1535 for (completed = 0; completed < nb_pkts; completed++) { 1536 int segments = 0; 1537 1538 ena_rx_ctx.max_bufs = rx_ring->ring_size; 1539 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 1540 ena_rx_ctx.descs = 0; 1541 /* receive packet context */ 1542 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 1543 rx_ring->ena_com_io_sq, 1544 &ena_rx_ctx); 1545 if (unlikely(rc)) { 1546 RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc); 1547 return 0; 1548 } 1549 1550 if (unlikely(ena_rx_ctx.descs == 0)) 1551 break; 1552 1553 while (segments < ena_rx_ctx.descs) { 1554 mbuf = rx_buff_info[next_to_clean & ring_mask]; 1555 mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len; 1556 mbuf->data_off = RTE_PKTMBUF_HEADROOM; 1557 mbuf->refcnt = 1; 1558 mbuf->next = NULL; 1559 if (segments == 0) { 1560 mbuf->nb_segs = ena_rx_ctx.descs; 1561 mbuf->port = rx_ring->port_id; 1562 mbuf->pkt_len = 0; 1563 mbuf_head = mbuf; 1564 } else { 1565 /* for multi-segment pkts create mbuf chain */ 1566 mbuf_prev->next = mbuf; 1567 } 1568 mbuf_head->pkt_len += mbuf->data_len; 1569 1570 mbuf_prev = mbuf; 1571 segments++; 1572 next_to_clean++; 1573 } 1574 1575 /* fill mbuf attributes if any */ 1576 ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx); 1577 mbuf_head->hash.rss = (uint32_t)rx_ring->id; 1578 1579 /* pass to DPDK application head mbuf */ 1580 rx_pkts[recv_idx] = mbuf_head; 1581 recv_idx++; 1582 } 1583 1584 rx_ring->next_to_clean = next_to_clean; 1585 1586 desc_in_use = desc_in_use - completed + 1; 1587 /* Burst refill to save doorbells, memory barriers, const interval */ 1588 if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size)) 1589 ena_populate_rx_queue(rx_ring, ring_size - desc_in_use); 1590 1591 return recv_idx; 1592 } 1593 1594 static uint16_t 1595 eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 1596 uint16_t nb_pkts) 1597 { 1598 int32_t ret; 1599 uint32_t i; 1600 struct rte_mbuf *m; 1601 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 1602 struct ipv4_hdr *ip_hdr; 1603 uint64_t ol_flags; 1604 uint16_t frag_field; 1605 1606 for (i = 0; i != nb_pkts; i++) { 1607 m = tx_pkts[i]; 1608 ol_flags = m->ol_flags; 1609 1610 if (!(ol_flags & PKT_TX_IPV4)) 1611 continue; 1612 1613 /* If there was not L2 header length specified, assume it is 1614 * length of the ethernet header. 1615 */ 1616 if (unlikely(m->l2_len == 0)) 1617 m->l2_len = sizeof(struct ether_hdr); 1618 1619 ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, 1620 m->l2_len); 1621 frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset); 1622 1623 if ((frag_field & IPV4_HDR_DF_FLAG) != 0) { 1624 m->packet_type |= RTE_PTYPE_L4_NONFRAG; 1625 1626 /* If IPv4 header has DF flag enabled and TSO support is 1627 * disabled, partial chcecksum should not be calculated. 1628 */ 1629 if (!tx_ring->adapter->tso4_supported) 1630 continue; 1631 } 1632 1633 if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 || 1634 (ol_flags & PKT_TX_L4_MASK) == 1635 PKT_TX_SCTP_CKSUM) { 1636 rte_errno = -ENOTSUP; 1637 return i; 1638 } 1639 1640 #ifdef RTE_LIBRTE_ETHDEV_DEBUG 1641 ret = rte_validate_tx_offload(m); 1642 if (ret != 0) { 1643 rte_errno = ret; 1644 return i; 1645 } 1646 #endif 1647 1648 /* In case we are supposed to TSO and have DF not set (DF=0) 1649 * hardware must be provided with partial checksum, otherwise 1650 * it will take care of necessary calculations. 1651 */ 1652 1653 ret = rte_net_intel_cksum_flags_prepare(m, 1654 ol_flags & ~PKT_TX_TCP_SEG); 1655 if (ret != 0) { 1656 rte_errno = ret; 1657 return i; 1658 } 1659 } 1660 1661 return i; 1662 } 1663 1664 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 1665 uint16_t nb_pkts) 1666 { 1667 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 1668 uint16_t next_to_use = tx_ring->next_to_use; 1669 uint16_t next_to_clean = tx_ring->next_to_clean; 1670 struct rte_mbuf *mbuf; 1671 unsigned int ring_size = tx_ring->ring_size; 1672 unsigned int ring_mask = ring_size - 1; 1673 struct ena_com_tx_ctx ena_tx_ctx; 1674 struct ena_tx_buffer *tx_info; 1675 struct ena_com_buf *ebuf; 1676 uint16_t rc, req_id, total_tx_descs = 0; 1677 uint16_t sent_idx = 0, empty_tx_reqs; 1678 int nb_hw_desc; 1679 1680 /* Check adapter state */ 1681 if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 1682 RTE_LOG(ALERT, PMD, 1683 "Trying to xmit pkts while device is NOT running\n"); 1684 return 0; 1685 } 1686 1687 empty_tx_reqs = ring_size - (next_to_use - next_to_clean); 1688 if (nb_pkts > empty_tx_reqs) 1689 nb_pkts = empty_tx_reqs; 1690 1691 for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { 1692 mbuf = tx_pkts[sent_idx]; 1693 1694 req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask]; 1695 tx_info = &tx_ring->tx_buffer_info[req_id]; 1696 tx_info->mbuf = mbuf; 1697 tx_info->num_of_bufs = 0; 1698 ebuf = tx_info->bufs; 1699 1700 /* Prepare TX context */ 1701 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); 1702 memset(&ena_tx_ctx.ena_meta, 0x0, 1703 sizeof(struct ena_com_tx_meta)); 1704 ena_tx_ctx.ena_bufs = ebuf; 1705 ena_tx_ctx.req_id = req_id; 1706 if (tx_ring->tx_mem_queue_type == 1707 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1708 /* prepare the push buffer with 1709 * virtual address of the data 1710 */ 1711 ena_tx_ctx.header_len = 1712 RTE_MIN(mbuf->data_len, 1713 tx_ring->tx_max_header_size); 1714 ena_tx_ctx.push_header = 1715 (void *)((char *)mbuf->buf_addr + 1716 mbuf->data_off); 1717 } /* there's no else as we take advantage of memset zeroing */ 1718 1719 /* Set TX offloads flags, if applicable */ 1720 ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx); 1721 1722 if (unlikely(mbuf->ol_flags & 1723 (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD))) 1724 rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); 1725 1726 rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]); 1727 1728 /* Process first segment taking into 1729 * consideration pushed header 1730 */ 1731 if (mbuf->data_len > ena_tx_ctx.header_len) { 1732 ebuf->paddr = mbuf->buf_iova + 1733 mbuf->data_off + 1734 ena_tx_ctx.header_len; 1735 ebuf->len = mbuf->data_len - ena_tx_ctx.header_len; 1736 ebuf++; 1737 tx_info->num_of_bufs++; 1738 } 1739 1740 while ((mbuf = mbuf->next) != NULL) { 1741 ebuf->paddr = mbuf->buf_iova + mbuf->data_off; 1742 ebuf->len = mbuf->data_len; 1743 ebuf++; 1744 tx_info->num_of_bufs++; 1745 } 1746 1747 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 1748 1749 /* Write data to device */ 1750 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, 1751 &ena_tx_ctx, &nb_hw_desc); 1752 if (unlikely(rc)) 1753 break; 1754 1755 tx_info->tx_descs = nb_hw_desc; 1756 1757 next_to_use++; 1758 } 1759 1760 /* If there are ready packets to be xmitted... */ 1761 if (sent_idx > 0) { 1762 /* ...let HW do its best :-) */ 1763 rte_wmb(); 1764 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 1765 1766 tx_ring->next_to_use = next_to_use; 1767 } 1768 1769 /* Clear complete packets */ 1770 while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) { 1771 /* Get Tx info & store how many descs were processed */ 1772 tx_info = &tx_ring->tx_buffer_info[req_id]; 1773 total_tx_descs += tx_info->tx_descs; 1774 1775 /* Free whole mbuf chain */ 1776 mbuf = tx_info->mbuf; 1777 rte_pktmbuf_free(mbuf); 1778 tx_info->mbuf = NULL; 1779 1780 /* Put back descriptor to the ring for reuse */ 1781 tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id; 1782 next_to_clean++; 1783 1784 /* If too many descs to clean, leave it for another run */ 1785 if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size))) 1786 break; 1787 } 1788 1789 if (total_tx_descs > 0) { 1790 /* acknowledge completion of sent packets */ 1791 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); 1792 tx_ring->next_to_clean = next_to_clean; 1793 } 1794 1795 return sent_idx; 1796 } 1797 1798 static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1799 struct rte_pci_device *pci_dev) 1800 { 1801 return rte_eth_dev_pci_generic_probe(pci_dev, 1802 sizeof(struct ena_adapter), eth_ena_dev_init); 1803 } 1804 1805 static int eth_ena_pci_remove(struct rte_pci_device *pci_dev) 1806 { 1807 return rte_eth_dev_pci_generic_remove(pci_dev, NULL); 1808 } 1809 1810 static struct rte_pci_driver rte_ena_pmd = { 1811 .id_table = pci_id_ena_map, 1812 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 1813 .probe = eth_ena_pci_probe, 1814 .remove = eth_ena_pci_remove, 1815 }; 1816 1817 RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd); 1818 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map); 1819 RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci"); 1820 1821 RTE_INIT(ena_init_log); 1822 static void 1823 ena_init_log(void) 1824 { 1825 ena_logtype_init = rte_log_register("pmd.ena.init"); 1826 if (ena_logtype_init >= 0) 1827 rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE); 1828 ena_logtype_driver = rte_log_register("pmd.ena.driver"); 1829 if (ena_logtype_driver >= 0) 1830 rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE); 1831 } 1832