1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. 3 * All rights reserved. 4 */ 5 6 #include <rte_string_fns.h> 7 #include <rte_errno.h> 8 #include <rte_version.h> 9 #include <rte_net.h> 10 #include <rte_kvargs.h> 11 12 #include "ena_ethdev.h" 13 #include "ena_logs.h" 14 #include "ena_platform.h" 15 #include "ena_com.h" 16 #include "ena_eth_com.h" 17 18 #include <ena_common_defs.h> 19 #include <ena_regs_defs.h> 20 #include <ena_admin_defs.h> 21 #include <ena_eth_io_defs.h> 22 23 #define DRV_MODULE_VER_MAJOR 2 24 #define DRV_MODULE_VER_MINOR 8 25 #define DRV_MODULE_VER_SUBMINOR 0 26 27 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l) 28 29 #define GET_L4_HDR_LEN(mbuf) \ 30 ((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *, \ 31 mbuf->l3_len + mbuf->l2_len)->data_off) >> 4) 32 33 #define ETH_GSTRING_LEN 32 34 35 #define ARRAY_SIZE(x) RTE_DIM(x) 36 37 #define ENA_MIN_RING_DESC 128 38 39 #define BITS_PER_BYTE 8 40 41 #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) 42 43 #define DECIMAL_BASE 10 44 45 /* 46 * We should try to keep ENA_CLEANUP_BUF_SIZE lower than 47 * RTE_MEMPOOL_CACHE_MAX_SIZE, so we can fit this in mempool local cache. 48 */ 49 #define ENA_CLEANUP_BUF_SIZE 256 50 51 #define ENA_PTYPE_HAS_HASH (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP) 52 53 struct ena_stats { 54 char name[ETH_GSTRING_LEN]; 55 int stat_offset; 56 }; 57 58 #define ENA_STAT_ENTRY(stat, stat_type) { \ 59 .name = #stat, \ 60 .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \ 61 } 62 63 #define ENA_STAT_RX_ENTRY(stat) \ 64 ENA_STAT_ENTRY(stat, rx) 65 66 #define ENA_STAT_TX_ENTRY(stat) \ 67 ENA_STAT_ENTRY(stat, tx) 68 69 #define ENA_STAT_METRICS_ENTRY(stat) \ 70 ENA_STAT_ENTRY(stat, metrics) 71 72 #define ENA_STAT_GLOBAL_ENTRY(stat) \ 73 ENA_STAT_ENTRY(stat, dev) 74 75 #define ENA_STAT_ENA_SRD_ENTRY(stat) \ 76 ENA_STAT_ENTRY(stat, srd) 77 78 /* Device arguments */ 79 #define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr" 80 #define ENA_DEVARG_NORMAL_LLQ_HDR "normal_llq_hdr" 81 /* Timeout in seconds after which a single uncompleted Tx packet should be 82 * considered as a missing. 83 */ 84 #define ENA_DEVARG_MISS_TXC_TO "miss_txc_to" 85 /* 86 * Controls whether LLQ should be used (if available). Enabled by default. 87 * NOTE: It's highly not recommended to disable the LLQ, as it may lead to a 88 * huge performance degradation on 6th generation AWS instances. 89 */ 90 #define ENA_DEVARG_ENABLE_LLQ "enable_llq" 91 92 /* 93 * Each rte_memzone should have unique name. 94 * To satisfy it, count number of allocation and add it to name. 95 */ 96 rte_atomic64_t ena_alloc_cnt; 97 98 static const struct ena_stats ena_stats_global_strings[] = { 99 ENA_STAT_GLOBAL_ENTRY(wd_expired), 100 ENA_STAT_GLOBAL_ENTRY(dev_start), 101 ENA_STAT_GLOBAL_ENTRY(dev_stop), 102 ENA_STAT_GLOBAL_ENTRY(tx_drops), 103 }; 104 105 /* 106 * The legacy metrics (also known as eni stats) consisted of 5 stats, while the reworked 107 * metrics (also known as customer metrics) support an additional stat. 
108 */ 109 static struct ena_stats ena_stats_metrics_strings[] = { 110 ENA_STAT_METRICS_ENTRY(bw_in_allowance_exceeded), 111 ENA_STAT_METRICS_ENTRY(bw_out_allowance_exceeded), 112 ENA_STAT_METRICS_ENTRY(pps_allowance_exceeded), 113 ENA_STAT_METRICS_ENTRY(conntrack_allowance_exceeded), 114 ENA_STAT_METRICS_ENTRY(linklocal_allowance_exceeded), 115 ENA_STAT_METRICS_ENTRY(conntrack_allowance_available), 116 }; 117 118 static const struct ena_stats ena_stats_srd_strings[] = { 119 ENA_STAT_ENA_SRD_ENTRY(ena_srd_mode), 120 ENA_STAT_ENA_SRD_ENTRY(ena_srd_tx_pkts), 121 ENA_STAT_ENA_SRD_ENTRY(ena_srd_eligible_tx_pkts), 122 ENA_STAT_ENA_SRD_ENTRY(ena_srd_rx_pkts), 123 ENA_STAT_ENA_SRD_ENTRY(ena_srd_resource_utilization), 124 }; 125 126 static const struct ena_stats ena_stats_tx_strings[] = { 127 ENA_STAT_TX_ENTRY(cnt), 128 ENA_STAT_TX_ENTRY(bytes), 129 ENA_STAT_TX_ENTRY(prepare_ctx_err), 130 ENA_STAT_TX_ENTRY(tx_poll), 131 ENA_STAT_TX_ENTRY(doorbells), 132 ENA_STAT_TX_ENTRY(bad_req_id), 133 ENA_STAT_TX_ENTRY(available_desc), 134 ENA_STAT_TX_ENTRY(missed_tx), 135 }; 136 137 static const struct ena_stats ena_stats_rx_strings[] = { 138 ENA_STAT_RX_ENTRY(cnt), 139 ENA_STAT_RX_ENTRY(bytes), 140 ENA_STAT_RX_ENTRY(refill_partial), 141 ENA_STAT_RX_ENTRY(l3_csum_bad), 142 ENA_STAT_RX_ENTRY(l4_csum_bad), 143 ENA_STAT_RX_ENTRY(l4_csum_good), 144 ENA_STAT_RX_ENTRY(mbuf_alloc_fail), 145 ENA_STAT_RX_ENTRY(bad_desc_num), 146 ENA_STAT_RX_ENTRY(bad_req_id), 147 }; 148 149 #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) 150 #define ENA_STATS_ARRAY_METRICS ARRAY_SIZE(ena_stats_metrics_strings) 151 #define ENA_STATS_ARRAY_METRICS_LEGACY (ENA_STATS_ARRAY_METRICS - 1) 152 #define ENA_STATS_ARRAY_ENA_SRD ARRAY_SIZE(ena_stats_srd_strings) 153 #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) 154 #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) 155 156 #define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\ 157 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\ 158 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\ 159 RTE_ETH_TX_OFFLOAD_TCP_TSO) 160 #define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\ 161 RTE_MBUF_F_TX_IP_CKSUM |\ 162 RTE_MBUF_F_TX_TCP_SEG) 163 164 /** Vendor ID used by Amazon devices */ 165 #define PCI_VENDOR_ID_AMAZON 0x1D0F 166 /** Amazon devices */ 167 #define PCI_DEVICE_ID_ENA_VF 0xEC20 168 #define PCI_DEVICE_ID_ENA_VF_RSERV0 0xEC21 169 170 #define ENA_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_L4_MASK | \ 171 RTE_MBUF_F_TX_IPV6 | \ 172 RTE_MBUF_F_TX_IPV4 | \ 173 RTE_MBUF_F_TX_IP_CKSUM | \ 174 RTE_MBUF_F_TX_TCP_SEG) 175 176 #define ENA_TX_OFFLOAD_NOTSUP_MASK \ 177 (RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK) 178 179 /** HW specific offloads capabilities. */ 180 /* IPv4 checksum offload. */ 181 #define ENA_L3_IPV4_CSUM 0x0001 182 /* TCP/UDP checksum offload for IPv4 packets. */ 183 #define ENA_L4_IPV4_CSUM 0x0002 184 /* TCP/UDP checksum offload for IPv4 packets with pseudo header checksum. */ 185 #define ENA_L4_IPV4_CSUM_PARTIAL 0x0004 186 /* TCP/UDP checksum offload for IPv6 packets. */ 187 #define ENA_L4_IPV6_CSUM 0x0008 188 /* TCP/UDP checksum offload for IPv6 packets with pseudo header checksum. */ 189 #define ENA_L4_IPV6_CSUM_PARTIAL 0x0010 190 /* TSO support for IPv4 packets. */ 191 #define ENA_IPV4_TSO 0x0020 192 193 /* Device supports setting RSS hash. 
*/ 194 #define ENA_RX_RSS_HASH 0x0040 195 196 static const struct rte_pci_id pci_id_ena_map[] = { 197 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) }, 198 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) }, 199 { .device_id = 0 }, 200 }; 201 202 static struct ena_aenq_handlers aenq_handlers; 203 204 static int ena_device_init(struct ena_adapter *adapter, 205 struct rte_pci_device *pdev, 206 struct ena_com_dev_get_features_ctx *get_feat_ctx); 207 static int ena_dev_configure(struct rte_eth_dev *dev); 208 static void ena_tx_map_mbuf(struct ena_ring *tx_ring, 209 struct ena_tx_buffer *tx_info, 210 struct rte_mbuf *mbuf, 211 void **push_header, 212 uint16_t *header_len); 213 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf); 214 static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt); 215 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 216 uint16_t nb_pkts); 217 static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 218 uint16_t nb_pkts); 219 static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 220 uint16_t nb_desc, unsigned int socket_id, 221 const struct rte_eth_txconf *tx_conf); 222 static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 223 uint16_t nb_desc, unsigned int socket_id, 224 const struct rte_eth_rxconf *rx_conf, 225 struct rte_mempool *mp); 226 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len); 227 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, 228 struct ena_com_rx_buf_info *ena_bufs, 229 uint32_t descs, 230 uint16_t *next_to_clean, 231 uint8_t offset); 232 static uint16_t eth_ena_recv_pkts(void *rx_queue, 233 struct rte_mbuf **rx_pkts, uint16_t nb_pkts); 234 static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq, 235 struct rte_mbuf *mbuf, uint16_t id); 236 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count); 237 static void ena_init_rings(struct ena_adapter *adapter, 238 bool disable_meta_caching); 239 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 240 static int ena_start(struct rte_eth_dev *dev); 241 static int ena_stop(struct rte_eth_dev *dev); 242 static int ena_close(struct rte_eth_dev *dev); 243 static int ena_dev_reset(struct rte_eth_dev *dev); 244 static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); 245 static void ena_rx_queue_release_all(struct rte_eth_dev *dev); 246 static void ena_tx_queue_release_all(struct rte_eth_dev *dev); 247 static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid); 248 static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid); 249 static void ena_rx_queue_release_bufs(struct ena_ring *ring); 250 static void ena_tx_queue_release_bufs(struct ena_ring *ring); 251 static int ena_link_update(struct rte_eth_dev *dev, 252 int wait_to_complete); 253 static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring); 254 static void ena_queue_stop(struct ena_ring *ring); 255 static void ena_queue_stop_all(struct rte_eth_dev *dev, 256 enum ena_ring_type ring_type); 257 static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring); 258 static int ena_queue_start_all(struct rte_eth_dev *dev, 259 enum ena_ring_type ring_type); 260 static void ena_stats_restart(struct rte_eth_dev *dev); 261 static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter); 262 static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter); 263 
static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter); 264 static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter); 265 static int ena_infos_get(struct rte_eth_dev *dev, 266 struct rte_eth_dev_info *dev_info); 267 static void ena_interrupt_handler_rte(void *cb_arg); 268 static void ena_timer_wd_callback(struct rte_timer *timer, void *arg); 269 static void ena_destroy_device(struct rte_eth_dev *eth_dev); 270 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev); 271 static int ena_xstats_get_names(struct rte_eth_dev *dev, 272 struct rte_eth_xstat_name *xstats_names, 273 unsigned int n); 274 static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev, 275 const uint64_t *ids, 276 struct rte_eth_xstat_name *xstats_names, 277 unsigned int size); 278 static int ena_xstats_get(struct rte_eth_dev *dev, 279 struct rte_eth_xstat *stats, 280 unsigned int n); 281 static int ena_xstats_get_by_id(struct rte_eth_dev *dev, 282 const uint64_t *ids, 283 uint64_t *values, 284 unsigned int n); 285 static int ena_process_bool_devarg(const char *key, 286 const char *value, 287 void *opaque); 288 static int ena_parse_devargs(struct ena_adapter *adapter, 289 struct rte_devargs *devargs); 290 static void ena_copy_customer_metrics(struct ena_adapter *adapter, 291 uint64_t *buf, 292 size_t buf_size); 293 static void ena_copy_ena_srd_info(struct ena_adapter *adapter, 294 struct ena_stats_srd *srd_info); 295 static int ena_setup_rx_intr(struct rte_eth_dev *dev); 296 static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev, 297 uint16_t queue_id); 298 static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev, 299 uint16_t queue_id); 300 static int ena_configure_aenq(struct ena_adapter *adapter); 301 static int ena_mp_primary_handle(const struct rte_mp_msg *mp_msg, 302 const void *peer); 303 static ena_llq_policy ena_define_llq_hdr_policy(struct ena_adapter *adapter); 304 static bool ena_use_large_llq_hdr(struct ena_adapter *adapter, uint8_t recommended_entry_size); 305 306 static const struct eth_dev_ops ena_dev_ops = { 307 .dev_configure = ena_dev_configure, 308 .dev_infos_get = ena_infos_get, 309 .rx_queue_setup = ena_rx_queue_setup, 310 .tx_queue_setup = ena_tx_queue_setup, 311 .dev_start = ena_start, 312 .dev_stop = ena_stop, 313 .link_update = ena_link_update, 314 .stats_get = ena_stats_get, 315 .xstats_get_names = ena_xstats_get_names, 316 .xstats_get_names_by_id = ena_xstats_get_names_by_id, 317 .xstats_get = ena_xstats_get, 318 .xstats_get_by_id = ena_xstats_get_by_id, 319 .mtu_set = ena_mtu_set, 320 .rx_queue_release = ena_rx_queue_release, 321 .tx_queue_release = ena_tx_queue_release, 322 .dev_close = ena_close, 323 .dev_reset = ena_dev_reset, 324 .reta_update = ena_rss_reta_update, 325 .reta_query = ena_rss_reta_query, 326 .rx_queue_intr_enable = ena_rx_queue_intr_enable, 327 .rx_queue_intr_disable = ena_rx_queue_intr_disable, 328 .rss_hash_update = ena_rss_hash_update, 329 .rss_hash_conf_get = ena_rss_hash_conf_get, 330 .tx_done_cleanup = ena_tx_cleanup, 331 }; 332 333 /********************************************************************* 334 * Multi-Process communication bits 335 *********************************************************************/ 336 /* rte_mp IPC message name */ 337 #define ENA_MP_NAME "net_ena_mp" 338 /* Request timeout in seconds */ 339 #define ENA_MP_REQ_TMO 5 340 341 /** Proxy request type */ 342 enum ena_mp_req { 343 ENA_MP_DEV_STATS_GET, 344 ENA_MP_ENI_STATS_GET, 345 ENA_MP_MTU_SET, 346 ENA_MP_IND_TBL_GET, 347 ENA_MP_IND_TBL_SET, 348 
ENA_MP_CUSTOMER_METRICS_GET, 349 ENA_MP_SRD_STATS_GET, 350 }; 351 352 /** Proxy message body. Shared between requests and responses. */ 353 struct ena_mp_body { 354 /* Message type */ 355 enum ena_mp_req type; 356 int port_id; 357 /* Processing result. Set in replies. 0 if message succeeded, negative 358 * error code otherwise. 359 */ 360 int result; 361 union { 362 int mtu; /* For ENA_MP_MTU_SET */ 363 } args; 364 }; 365 366 /** 367 * Initialize IPC message. 368 * 369 * @param[out] msg 370 * Pointer to the message to initialize. 371 * @param[in] type 372 * Message type. 373 * @param[in] port_id 374 * Port ID of target device. 375 * 376 */ 377 static void 378 mp_msg_init(struct rte_mp_msg *msg, enum ena_mp_req type, int port_id) 379 { 380 struct ena_mp_body *body = (struct ena_mp_body *)&msg->param; 381 382 memset(msg, 0, sizeof(*msg)); 383 strlcpy(msg->name, ENA_MP_NAME, sizeof(msg->name)); 384 msg->len_param = sizeof(*body); 385 body->type = type; 386 body->port_id = port_id; 387 } 388 389 /********************************************************************* 390 * Multi-Process communication PMD API 391 *********************************************************************/ 392 /** 393 * Define proxy request descriptor 394 * 395 * Used to define all structures and functions required for proxying a given 396 * function to the primary process including the code to perform to prepare the 397 * request and process the response. 398 * 399 * @param[in] f 400 * Name of the function to proxy 401 * @param[in] t 402 * Message type to use 403 * @param[in] prep 404 * Body of a function to prepare the request in form of a statement 405 * expression. It is passed all the original function arguments along with two 406 * extra ones: 407 * - struct ena_adapter *adapter - PMD data of the device calling the proxy. 408 * - struct ena_mp_body *req - body of a request to prepare. 409 * @param[in] proc 410 * Body of a function to process the response in form of a statement 411 * expression. It is passed all the original function arguments along with two 412 * extra ones: 413 * - struct ena_adapter *adapter - PMD data of the device calling the proxy. 414 * - struct ena_mp_body *rsp - body of a response to process. 415 * @param ... 416 * Proxied function's arguments 417 * 418 * @note Inside prep and proc any parameters which aren't used should be marked 419 * as such (with ENA_TOUCH or __rte_unused). 420 */ 421 #define ENA_PROXY_DESC(f, t, prep, proc, ...) \ 422 static const enum ena_mp_req mp_type_ ## f = t; \ 423 static const char *mp_name_ ## f = #t; \ 424 static void mp_prep_ ## f(struct ena_adapter *adapter, \ 425 struct ena_mp_body *req, \ 426 __VA_ARGS__) \ 427 { \ 428 prep; \ 429 } \ 430 static void mp_proc_ ## f(struct ena_adapter *adapter, \ 431 struct ena_mp_body *rsp, \ 432 __VA_ARGS__) \ 433 { \ 434 proc; \ 435 } 436 437 /** 438 * Proxy wrapper for calling primary functions in a secondary process. 439 * 440 * Depending on whether called in primary or secondary process, calls the 441 * @p func directly or proxies the call to the primary process via rte_mp IPC. 442 * This macro requires a proxy request descriptor to be defined for @p func 443 * using ENA_PROXY_DESC() macro. 444 * 445 * @param[in/out] a 446 * Device PMD data. Used for sending the message and sharing message results 447 * between primary and secondary. 448 * @param[in] f 449 * Function to proxy. 450 * @param ... 451 * Arguments of @p func. 452 * 453 * @return 454 * - 0: Processing succeeded and response handler was called. 
455 * - -EPERM: IPC is unavailable on this platform. This means only primary 456 * process may call the proxied function. 457 * - -EIO: IPC returned error on request send. Inspect rte_errno detailed 458 * error code. 459 * - Negative error code from the proxied function. 460 * 461 * @note This mechanism is geared towards control-path tasks. Avoid calling it 462 * in fast-path unless unbound delays are allowed. This is due to the IPC 463 * mechanism itself (socket based). 464 * @note Due to IPC parameter size limitations the proxy logic shares call 465 * results through the struct ena_adapter shared memory. This makes the 466 * proxy mechanism strictly single-threaded. Therefore be sure to make all 467 * calls to the same proxied function under the same lock. 468 */ 469 #define ENA_PROXY(a, f, ...) \ 470 __extension__ ({ \ 471 struct ena_adapter *_a = (a); \ 472 struct timespec ts = { .tv_sec = ENA_MP_REQ_TMO }; \ 473 struct ena_mp_body *req, *rsp; \ 474 struct rte_mp_reply mp_rep; \ 475 struct rte_mp_msg mp_req; \ 476 int ret; \ 477 \ 478 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { \ 479 ret = f(__VA_ARGS__); \ 480 } else { \ 481 /* Prepare and send request */ \ 482 req = (struct ena_mp_body *)&mp_req.param; \ 483 mp_msg_init(&mp_req, mp_type_ ## f, _a->edev_data->port_id); \ 484 mp_prep_ ## f(_a, req, ## __VA_ARGS__); \ 485 \ 486 ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts); \ 487 if (likely(!ret)) { \ 488 RTE_ASSERT(mp_rep.nb_received == 1); \ 489 rsp = (struct ena_mp_body *)&mp_rep.msgs[0].param; \ 490 ret = rsp->result; \ 491 if (ret == 0) { \ 492 mp_proc_##f(_a, rsp, ## __VA_ARGS__); \ 493 } else { \ 494 PMD_DRV_LOG(ERR, \ 495 "%s returned error: %d\n", \ 496 mp_name_ ## f, rsp->result);\ 497 } \ 498 free(mp_rep.msgs); \ 499 } else if (rte_errno == ENOTSUP) { \ 500 PMD_DRV_LOG(ERR, \ 501 "No IPC, can't proxy to primary\n");\ 502 ret = -rte_errno; \ 503 } else { \ 504 PMD_DRV_LOG(ERR, "Request %s failed: %s\n", \ 505 mp_name_ ## f, \ 506 rte_strerror(rte_errno)); \ 507 ret = -EIO; \ 508 } \ 509 } \ 510 ret; \ 511 }) 512 513 /********************************************************************* 514 * Multi-Process communication request descriptors 515 *********************************************************************/ 516 517 ENA_PROXY_DESC(ena_com_get_dev_basic_stats, ENA_MP_DEV_STATS_GET, 518 __extension__ ({ 519 ENA_TOUCH(adapter); 520 ENA_TOUCH(req); 521 ENA_TOUCH(ena_dev); 522 ENA_TOUCH(stats); 523 }), 524 __extension__ ({ 525 ENA_TOUCH(rsp); 526 ENA_TOUCH(ena_dev); 527 if (stats != &adapter->basic_stats) 528 rte_memcpy(stats, &adapter->basic_stats, sizeof(*stats)); 529 }), 530 struct ena_com_dev *ena_dev, struct ena_admin_basic_stats *stats); 531 532 ENA_PROXY_DESC(ena_com_get_eni_stats, ENA_MP_ENI_STATS_GET, 533 __extension__ ({ 534 ENA_TOUCH(adapter); 535 ENA_TOUCH(req); 536 ENA_TOUCH(ena_dev); 537 ENA_TOUCH(stats); 538 }), 539 __extension__ ({ 540 ENA_TOUCH(rsp); 541 ENA_TOUCH(ena_dev); 542 if (stats != (struct ena_admin_eni_stats *)adapter->metrics_stats) 543 rte_memcpy(stats, adapter->metrics_stats, sizeof(*stats)); 544 }), 545 struct ena_com_dev *ena_dev, struct ena_admin_eni_stats *stats); 546 547 ENA_PROXY_DESC(ena_com_set_dev_mtu, ENA_MP_MTU_SET, 548 __extension__ ({ 549 ENA_TOUCH(adapter); 550 ENA_TOUCH(ena_dev); 551 req->args.mtu = mtu; 552 }), 553 __extension__ ({ 554 ENA_TOUCH(adapter); 555 ENA_TOUCH(rsp); 556 ENA_TOUCH(ena_dev); 557 ENA_TOUCH(mtu); 558 }), 559 struct ena_com_dev *ena_dev, int mtu); 560 561 ENA_PROXY_DESC(ena_com_indirect_table_set, 
ENA_MP_IND_TBL_SET, 562 __extension__ ({ 563 ENA_TOUCH(adapter); 564 ENA_TOUCH(req); 565 ENA_TOUCH(ena_dev); 566 }), 567 __extension__ ({ 568 ENA_TOUCH(adapter); 569 ENA_TOUCH(rsp); 570 ENA_TOUCH(ena_dev); 571 }), 572 struct ena_com_dev *ena_dev); 573 574 ENA_PROXY_DESC(ena_com_indirect_table_get, ENA_MP_IND_TBL_GET, 575 __extension__ ({ 576 ENA_TOUCH(adapter); 577 ENA_TOUCH(req); 578 ENA_TOUCH(ena_dev); 579 ENA_TOUCH(ind_tbl); 580 }), 581 __extension__ ({ 582 ENA_TOUCH(rsp); 583 ENA_TOUCH(ena_dev); 584 if (ind_tbl != adapter->indirect_table) 585 rte_memcpy(ind_tbl, adapter->indirect_table, 586 sizeof(adapter->indirect_table)); 587 }), 588 struct ena_com_dev *ena_dev, u32 *ind_tbl); 589 590 ENA_PROXY_DESC(ena_com_get_customer_metrics, ENA_MP_CUSTOMER_METRICS_GET, 591 __extension__ ({ 592 ENA_TOUCH(adapter); 593 ENA_TOUCH(req); 594 ENA_TOUCH(ena_dev); 595 ENA_TOUCH(buf); 596 ENA_TOUCH(buf_size); 597 }), 598 __extension__ ({ 599 ENA_TOUCH(rsp); 600 ENA_TOUCH(ena_dev); 601 if (buf != (char *)adapter->metrics_stats) 602 rte_memcpy(buf, adapter->metrics_stats, buf_size); 603 }), 604 struct ena_com_dev *ena_dev, char *buf, size_t buf_size); 605 606 ENA_PROXY_DESC(ena_com_get_ena_srd_info, ENA_MP_SRD_STATS_GET, 607 __extension__ ({ 608 ENA_TOUCH(adapter); 609 ENA_TOUCH(req); 610 ENA_TOUCH(ena_dev); 611 ENA_TOUCH(info); 612 }), 613 __extension__ ({ 614 ENA_TOUCH(rsp); 615 ENA_TOUCH(ena_dev); 616 if ((struct ena_stats_srd *)info != &adapter->srd_stats) 617 rte_memcpy((struct ena_stats_srd *)info, 618 &adapter->srd_stats, 619 sizeof(struct ena_stats_srd)); 620 }), 621 struct ena_com_dev *ena_dev, struct ena_admin_ena_srd_info *info); 622 623 static inline void ena_trigger_reset(struct ena_adapter *adapter, 624 enum ena_regs_reset_reason_types reason) 625 { 626 if (likely(!adapter->trigger_reset)) { 627 adapter->reset_reason = reason; 628 adapter->trigger_reset = true; 629 } 630 } 631 632 static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring, 633 struct rte_mbuf *mbuf, 634 struct ena_com_rx_ctx *ena_rx_ctx, 635 bool fill_hash) 636 { 637 struct ena_stats_rx *rx_stats = &rx_ring->rx_stats; 638 uint64_t ol_flags = 0; 639 uint32_t packet_type = 0; 640 641 if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) 642 packet_type |= RTE_PTYPE_L4_TCP; 643 else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP) 644 packet_type |= RTE_PTYPE_L4_UDP; 645 646 if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) { 647 packet_type |= RTE_PTYPE_L3_IPV4; 648 if (unlikely(ena_rx_ctx->l3_csum_err)) { 649 ++rx_stats->l3_csum_bad; 650 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; 651 } else { 652 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; 653 } 654 } else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) { 655 packet_type |= RTE_PTYPE_L3_IPV6; 656 } 657 658 if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) { 659 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; 660 } else { 661 if (unlikely(ena_rx_ctx->l4_csum_err)) { 662 ++rx_stats->l4_csum_bad; 663 /* 664 * For the L4 Rx checksum offload the HW may indicate 665 * bad checksum although it's valid. Because of that, 666 * we're setting the UNKNOWN flag to let the app 667 * re-verify the checksum. 
668 */ 669 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; 670 } else { 671 ++rx_stats->l4_csum_good; 672 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; 673 } 674 } 675 676 if (fill_hash && 677 likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) { 678 ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 679 mbuf->hash.rss = ena_rx_ctx->hash; 680 } 681 682 mbuf->ol_flags = ol_flags; 683 mbuf->packet_type = packet_type; 684 } 685 686 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, 687 struct ena_com_tx_ctx *ena_tx_ctx, 688 uint64_t queue_offloads, 689 bool disable_meta_caching) 690 { 691 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; 692 693 if ((mbuf->ol_flags & MBUF_OFFLOADS) && 694 (queue_offloads & QUEUE_OFFLOADS)) { 695 /* check if TSO is required */ 696 if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) && 697 (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) { 698 ena_tx_ctx->tso_enable = true; 699 700 ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf); 701 } 702 703 /* check if L3 checksum is needed */ 704 if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) && 705 (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) 706 ena_tx_ctx->l3_csum_enable = true; 707 708 if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) { 709 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 710 /* For the IPv6 packets, DF always needs to be true. */ 711 ena_tx_ctx->df = 1; 712 } else { 713 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 714 715 /* set don't fragment (DF) flag */ 716 if (mbuf->packet_type & 717 (RTE_PTYPE_L4_NONFRAG 718 | RTE_PTYPE_INNER_L4_NONFRAG)) 719 ena_tx_ctx->df = 1; 720 } 721 722 /* check if L4 checksum is needed */ 723 if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) && 724 (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) { 725 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 726 ena_tx_ctx->l4_csum_enable = true; 727 } else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == 728 RTE_MBUF_F_TX_UDP_CKSUM) && 729 (queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) { 730 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 731 ena_tx_ctx->l4_csum_enable = true; 732 } else { 733 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; 734 ena_tx_ctx->l4_csum_enable = false; 735 } 736 737 ena_meta->mss = mbuf->tso_segsz; 738 ena_meta->l3_hdr_len = mbuf->l3_len; 739 ena_meta->l3_hdr_offset = mbuf->l2_len; 740 741 ena_tx_ctx->meta_valid = true; 742 } else if (disable_meta_caching) { 743 memset(ena_meta, 0, sizeof(*ena_meta)); 744 ena_tx_ctx->meta_valid = true; 745 } else { 746 ena_tx_ctx->meta_valid = false; 747 } 748 } 749 750 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) 751 { 752 struct ena_tx_buffer *tx_info = NULL; 753 754 if (likely(req_id < tx_ring->ring_size)) { 755 tx_info = &tx_ring->tx_buffer_info[req_id]; 756 if (likely(tx_info->mbuf)) 757 return 0; 758 } 759 760 if (tx_info) 761 PMD_TX_LOG(ERR, "tx_info doesn't have valid mbuf. 
queue %d:%d req_id %u\n", 762 tx_ring->port_id, tx_ring->id, req_id); 763 else 764 PMD_TX_LOG(ERR, "Invalid req_id: %hu in queue %d:%d\n", 765 req_id, tx_ring->port_id, tx_ring->id); 766 767 /* Trigger device reset */ 768 ++tx_ring->tx_stats.bad_req_id; 769 ena_trigger_reset(tx_ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID); 770 return -EFAULT; 771 } 772 773 static void ena_config_host_info(struct ena_com_dev *ena_dev) 774 { 775 struct ena_admin_host_info *host_info; 776 int rc; 777 778 /* Allocate only the host info */ 779 rc = ena_com_allocate_host_info(ena_dev); 780 if (rc) { 781 PMD_DRV_LOG(ERR, "Cannot allocate host info\n"); 782 return; 783 } 784 785 host_info = ena_dev->host_attr.host_info; 786 787 host_info->os_type = ENA_ADMIN_OS_DPDK; 788 host_info->kernel_ver = RTE_VERSION; 789 strlcpy((char *)host_info->kernel_ver_str, rte_version(), 790 sizeof(host_info->kernel_ver_str)); 791 host_info->os_dist = RTE_VERSION; 792 strlcpy((char *)host_info->os_dist_str, rte_version(), 793 sizeof(host_info->os_dist_str)); 794 host_info->driver_version = 795 (DRV_MODULE_VER_MAJOR) | 796 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 797 (DRV_MODULE_VER_SUBMINOR << 798 ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 799 host_info->num_cpus = rte_lcore_count(); 800 801 host_info->driver_supported_features = 802 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | 803 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; 804 805 rc = ena_com_set_host_attributes(ena_dev); 806 if (rc) { 807 if (rc == -ENA_COM_UNSUPPORTED) 808 PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); 809 else 810 PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); 811 812 goto err; 813 } 814 815 return; 816 817 err: 818 ena_com_delete_host_info(ena_dev); 819 } 820 821 /* This function calculates the number of xstats based on the current config */ 822 static unsigned int ena_xstats_calc_num(struct rte_eth_dev_data *data) 823 { 824 struct ena_adapter *adapter = data->dev_private; 825 826 return ENA_STATS_ARRAY_GLOBAL + 827 adapter->metrics_num + 828 ENA_STATS_ARRAY_ENA_SRD + 829 (data->nb_tx_queues * ENA_STATS_ARRAY_TX) + 830 (data->nb_rx_queues * ENA_STATS_ARRAY_RX); 831 } 832 833 static void ena_config_debug_area(struct ena_adapter *adapter) 834 { 835 u32 debug_area_size; 836 int rc, ss_count; 837 838 ss_count = ena_xstats_calc_num(adapter->edev_data); 839 840 /* allocate 32 bytes for each string and 64bit for the value */ 841 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 842 843 rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); 844 if (rc) { 845 PMD_DRV_LOG(ERR, "Cannot allocate debug area\n"); 846 return; 847 } 848 849 rc = ena_com_set_host_attributes(&adapter->ena_dev); 850 if (rc) { 851 if (rc == -ENA_COM_UNSUPPORTED) 852 PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); 853 else 854 PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); 855 856 goto err; 857 } 858 859 return; 860 err: 861 ena_com_delete_debug_area(&adapter->ena_dev); 862 } 863 864 static int ena_close(struct rte_eth_dev *dev) 865 { 866 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 867 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 868 struct ena_adapter *adapter = dev->data->dev_private; 869 int ret = 0; 870 871 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 872 return 0; 873 874 if (adapter->state == ENA_ADAPTER_STATE_RUNNING) 875 ret = ena_stop(dev); 876 adapter->state = ENA_ADAPTER_STATE_CLOSED; 877 878 ena_rx_queue_release_all(dev); 879 ena_tx_queue_release_all(dev); 880 881 
rte_free(adapter->drv_stats); 882 adapter->drv_stats = NULL; 883 884 rte_intr_disable(intr_handle); 885 rte_intr_callback_unregister(intr_handle, 886 ena_interrupt_handler_rte, 887 dev); 888 889 /* 890 * MAC is not allocated dynamically. Setting NULL should prevent from 891 * release of the resource in the rte_eth_dev_release_port(). 892 */ 893 dev->data->mac_addrs = NULL; 894 895 return ret; 896 } 897 898 static int 899 ena_dev_reset(struct rte_eth_dev *dev) 900 { 901 int rc = 0; 902 903 /* Cannot release memory in secondary process */ 904 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 905 PMD_DRV_LOG(WARNING, "dev_reset not supported in secondary.\n"); 906 return -EPERM; 907 } 908 909 ena_destroy_device(dev); 910 rc = eth_ena_dev_init(dev); 911 if (rc) 912 PMD_INIT_LOG(CRIT, "Cannot initialize device\n"); 913 914 return rc; 915 } 916 917 static void ena_rx_queue_release_all(struct rte_eth_dev *dev) 918 { 919 int nb_queues = dev->data->nb_rx_queues; 920 int i; 921 922 for (i = 0; i < nb_queues; i++) 923 ena_rx_queue_release(dev, i); 924 } 925 926 static void ena_tx_queue_release_all(struct rte_eth_dev *dev) 927 { 928 int nb_queues = dev->data->nb_tx_queues; 929 int i; 930 931 for (i = 0; i < nb_queues; i++) 932 ena_tx_queue_release(dev, i); 933 } 934 935 static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 936 { 937 struct ena_ring *ring = dev->data->rx_queues[qid]; 938 939 /* Free ring resources */ 940 rte_free(ring->rx_buffer_info); 941 ring->rx_buffer_info = NULL; 942 943 rte_free(ring->rx_refill_buffer); 944 ring->rx_refill_buffer = NULL; 945 946 rte_free(ring->empty_rx_reqs); 947 ring->empty_rx_reqs = NULL; 948 949 ring->configured = 0; 950 951 PMD_DRV_LOG(NOTICE, "Rx queue %d:%d released\n", 952 ring->port_id, ring->id); 953 } 954 955 static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 956 { 957 struct ena_ring *ring = dev->data->tx_queues[qid]; 958 959 /* Free ring resources */ 960 rte_free(ring->push_buf_intermediate_buf); 961 962 rte_free(ring->tx_buffer_info); 963 964 rte_free(ring->empty_tx_reqs); 965 966 ring->empty_tx_reqs = NULL; 967 ring->tx_buffer_info = NULL; 968 ring->push_buf_intermediate_buf = NULL; 969 970 ring->configured = 0; 971 972 PMD_DRV_LOG(NOTICE, "Tx queue %d:%d released\n", 973 ring->port_id, ring->id); 974 } 975 976 static void ena_rx_queue_release_bufs(struct ena_ring *ring) 977 { 978 unsigned int i; 979 980 for (i = 0; i < ring->ring_size; ++i) { 981 struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i]; 982 if (rx_info->mbuf) { 983 rte_mbuf_raw_free(rx_info->mbuf); 984 rx_info->mbuf = NULL; 985 } 986 } 987 } 988 989 static void ena_tx_queue_release_bufs(struct ena_ring *ring) 990 { 991 unsigned int i; 992 993 for (i = 0; i < ring->ring_size; ++i) { 994 struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i]; 995 996 if (tx_buf->mbuf) { 997 rte_pktmbuf_free(tx_buf->mbuf); 998 tx_buf->mbuf = NULL; 999 } 1000 } 1001 } 1002 1003 static int ena_link_update(struct rte_eth_dev *dev, 1004 __rte_unused int wait_to_complete) 1005 { 1006 struct rte_eth_link *link = &dev->data->dev_link; 1007 struct ena_adapter *adapter = dev->data->dev_private; 1008 1009 link->link_status = adapter->link_status ? 
RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN; 1010 link->link_speed = RTE_ETH_SPEED_NUM_NONE; 1011 link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1012 1013 return 0; 1014 } 1015 1016 static int ena_queue_start_all(struct rte_eth_dev *dev, 1017 enum ena_ring_type ring_type) 1018 { 1019 struct ena_adapter *adapter = dev->data->dev_private; 1020 struct ena_ring *queues = NULL; 1021 int nb_queues; 1022 int i = 0; 1023 int rc = 0; 1024 1025 if (ring_type == ENA_RING_TYPE_RX) { 1026 queues = adapter->rx_ring; 1027 nb_queues = dev->data->nb_rx_queues; 1028 } else { 1029 queues = adapter->tx_ring; 1030 nb_queues = dev->data->nb_tx_queues; 1031 } 1032 for (i = 0; i < nb_queues; i++) { 1033 if (queues[i].configured) { 1034 if (ring_type == ENA_RING_TYPE_RX) { 1035 ena_assert_msg( 1036 dev->data->rx_queues[i] == &queues[i], 1037 "Inconsistent state of Rx queues\n"); 1038 } else { 1039 ena_assert_msg( 1040 dev->data->tx_queues[i] == &queues[i], 1041 "Inconsistent state of Tx queues\n"); 1042 } 1043 1044 rc = ena_queue_start(dev, &queues[i]); 1045 1046 if (rc) { 1047 PMD_INIT_LOG(ERR, 1048 "Failed to start queue[%d] of type(%d)\n", 1049 i, ring_type); 1050 goto err; 1051 } 1052 } 1053 } 1054 1055 return 0; 1056 1057 err: 1058 while (i--) 1059 if (queues[i].configured) 1060 ena_queue_stop(&queues[i]); 1061 1062 return rc; 1063 } 1064 1065 static int 1066 ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx, 1067 bool use_large_llq_hdr) 1068 { 1069 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 1070 struct ena_com_dev *ena_dev = ctx->ena_dev; 1071 uint32_t max_tx_queue_size; 1072 uint32_t max_rx_queue_size; 1073 1074 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 1075 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 1076 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 1077 max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth, 1078 max_queue_ext->max_rx_sq_depth); 1079 max_tx_queue_size = max_queue_ext->max_tx_cq_depth; 1080 1081 if (ena_dev->tx_mem_queue_type == 1082 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1083 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 1084 llq->max_llq_depth); 1085 } else { 1086 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 1087 max_queue_ext->max_tx_sq_depth); 1088 } 1089 1090 ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 1091 max_queue_ext->max_per_packet_rx_descs); 1092 ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 1093 max_queue_ext->max_per_packet_tx_descs); 1094 } else { 1095 struct ena_admin_queue_feature_desc *max_queues = 1096 &ctx->get_feat_ctx->max_queues; 1097 max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth, 1098 max_queues->max_sq_depth); 1099 max_tx_queue_size = max_queues->max_cq_depth; 1100 1101 if (ena_dev->tx_mem_queue_type == 1102 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1103 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 1104 llq->max_llq_depth); 1105 } else { 1106 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 1107 max_queues->max_sq_depth); 1108 } 1109 1110 ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 1111 max_queues->max_packet_rx_descs); 1112 ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 1113 max_queues->max_packet_tx_descs); 1114 } 1115 1116 /* Round down to the nearest power of 2 */ 1117 max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size); 1118 max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size); 1119 1120 if (use_large_llq_hdr) { 1121 if ((llq->entry_size_ctrl_supported & 1122 ENA_ADMIN_LIST_ENTRY_SIZE_256B) && 1123 (ena_dev->tx_mem_queue_type == 1124 ENA_ADMIN_PLACEMENT_POLICY_DEV)) 
{ 1125 max_tx_queue_size /= 2; 1126 PMD_INIT_LOG(INFO, 1127 "Forcing large headers and decreasing maximum Tx queue size to %d\n", 1128 max_tx_queue_size); 1129 } else { 1130 PMD_INIT_LOG(ERR, 1131 "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); 1132 } 1133 } 1134 1135 if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) { 1136 PMD_INIT_LOG(ERR, "Invalid queue size\n"); 1137 return -EFAULT; 1138 } 1139 1140 ctx->max_tx_queue_size = max_tx_queue_size; 1141 ctx->max_rx_queue_size = max_rx_queue_size; 1142 1143 PMD_DRV_LOG(INFO, "tx queue size %u\n", max_tx_queue_size); 1144 return 0; 1145 } 1146 1147 static void ena_stats_restart(struct rte_eth_dev *dev) 1148 { 1149 struct ena_adapter *adapter = dev->data->dev_private; 1150 1151 rte_atomic64_init(&adapter->drv_stats->ierrors); 1152 rte_atomic64_init(&adapter->drv_stats->oerrors); 1153 rte_atomic64_init(&adapter->drv_stats->rx_nombuf); 1154 adapter->drv_stats->rx_drops = 0; 1155 } 1156 1157 static int ena_stats_get(struct rte_eth_dev *dev, 1158 struct rte_eth_stats *stats) 1159 { 1160 struct ena_admin_basic_stats ena_stats; 1161 struct ena_adapter *adapter = dev->data->dev_private; 1162 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1163 int rc; 1164 int i; 1165 int max_rings_stats; 1166 1167 memset(&ena_stats, 0, sizeof(ena_stats)); 1168 1169 rte_spinlock_lock(&adapter->admin_lock); 1170 rc = ENA_PROXY(adapter, ena_com_get_dev_basic_stats, ena_dev, 1171 &ena_stats); 1172 rte_spinlock_unlock(&adapter->admin_lock); 1173 if (unlikely(rc)) { 1174 PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n"); 1175 return rc; 1176 } 1177 1178 /* Set of basic statistics from ENA */ 1179 stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, 1180 ena_stats.rx_pkts_low); 1181 stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, 1182 ena_stats.tx_pkts_low); 1183 stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, 1184 ena_stats.rx_bytes_low); 1185 stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, 1186 ena_stats.tx_bytes_low); 1187 1188 /* Driver related stats */ 1189 stats->imissed = adapter->drv_stats->rx_drops; 1190 stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); 1191 stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); 1192 stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); 1193 1194 max_rings_stats = RTE_MIN(dev->data->nb_rx_queues, 1195 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1196 for (i = 0; i < max_rings_stats; ++i) { 1197 struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats; 1198 1199 stats->q_ibytes[i] = rx_stats->bytes; 1200 stats->q_ipackets[i] = rx_stats->cnt; 1201 stats->q_errors[i] = rx_stats->bad_desc_num + 1202 rx_stats->bad_req_id; 1203 } 1204 1205 max_rings_stats = RTE_MIN(dev->data->nb_tx_queues, 1206 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1207 for (i = 0; i < max_rings_stats; ++i) { 1208 struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats; 1209 1210 stats->q_obytes[i] = tx_stats->bytes; 1211 stats->q_opackets[i] = tx_stats->cnt; 1212 } 1213 1214 return 0; 1215 } 1216 1217 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1218 { 1219 struct ena_adapter *adapter; 1220 struct ena_com_dev *ena_dev; 1221 int rc = 0; 1222 1223 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 1224 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 1225 adapter = dev->data->dev_private; 1226 1227 ena_dev = &adapter->ena_dev; 1228 ena_assert_msg(ena_dev != NULL, "Uninitialized 
device\n"); 1229 1230 rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, ena_dev, mtu); 1231 if (rc) 1232 PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu); 1233 else 1234 PMD_DRV_LOG(NOTICE, "MTU set to: %d\n", mtu); 1235 1236 return rc; 1237 } 1238 1239 static int ena_start(struct rte_eth_dev *dev) 1240 { 1241 struct ena_adapter *adapter = dev->data->dev_private; 1242 uint64_t ticks; 1243 int rc = 0; 1244 uint16_t i; 1245 1246 /* Cannot allocate memory in secondary process */ 1247 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1248 PMD_DRV_LOG(WARNING, "dev_start not supported in secondary.\n"); 1249 return -EPERM; 1250 } 1251 1252 rc = ena_setup_rx_intr(dev); 1253 if (rc) 1254 return rc; 1255 1256 rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX); 1257 if (rc) 1258 return rc; 1259 1260 rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX); 1261 if (rc) 1262 goto err_start_tx; 1263 1264 if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 1265 rc = ena_rss_configure(adapter); 1266 if (rc) 1267 goto err_rss_init; 1268 } 1269 1270 ena_stats_restart(dev); 1271 1272 adapter->timestamp_wd = rte_get_timer_cycles(); 1273 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 1274 1275 ticks = rte_get_timer_hz(); 1276 rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), 1277 ena_timer_wd_callback, dev); 1278 1279 ++adapter->dev_stats.dev_start; 1280 adapter->state = ENA_ADAPTER_STATE_RUNNING; 1281 1282 for (i = 0; i < dev->data->nb_rx_queues; i++) 1283 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 1284 for (i = 0; i < dev->data->nb_tx_queues; i++) 1285 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 1286 1287 return 0; 1288 1289 err_rss_init: 1290 ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 1291 err_start_tx: 1292 ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 1293 return rc; 1294 } 1295 1296 static int ena_stop(struct rte_eth_dev *dev) 1297 { 1298 struct ena_adapter *adapter = dev->data->dev_private; 1299 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1300 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1301 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1302 uint16_t i; 1303 int rc; 1304 1305 /* Cannot free memory in secondary process */ 1306 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1307 PMD_DRV_LOG(WARNING, "dev_stop not supported in secondary.\n"); 1308 return -EPERM; 1309 } 1310 1311 rte_timer_stop_sync(&adapter->timer_wd); 1312 ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 1313 ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 1314 1315 if (adapter->trigger_reset) { 1316 rc = ena_com_dev_reset(ena_dev, adapter->reset_reason); 1317 if (rc) 1318 PMD_DRV_LOG(ERR, "Device reset failed, rc: %d\n", rc); 1319 } 1320 1321 rte_intr_disable(intr_handle); 1322 1323 rte_intr_efd_disable(intr_handle); 1324 1325 /* Cleanup vector list */ 1326 rte_intr_vec_list_free(intr_handle); 1327 1328 rte_intr_enable(intr_handle); 1329 1330 ++adapter->dev_stats.dev_stop; 1331 adapter->state = ENA_ADAPTER_STATE_STOPPED; 1332 dev->data->dev_started = 0; 1333 1334 for (i = 0; i < dev->data->nb_rx_queues; i++) 1335 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 1336 for (i = 0; i < dev->data->nb_tx_queues; i++) 1337 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; 1338 1339 return 0; 1340 } 1341 1342 static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring) 1343 { 1344 struct ena_adapter *adapter = ring->adapter; 1345 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1346 struct rte_pci_device *pci_dev 
= RTE_ETH_DEV_TO_PCI(dev); 1347 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1348 struct ena_com_create_io_ctx ctx = 1349 /* policy set to _HOST just to satisfy icc compiler */ 1350 { ENA_ADMIN_PLACEMENT_POLICY_HOST, 1351 0, 0, 0, 0, 0 }; 1352 uint16_t ena_qid; 1353 unsigned int i; 1354 int rc; 1355 1356 ctx.msix_vector = -1; 1357 if (ring->type == ENA_RING_TYPE_TX) { 1358 ena_qid = ENA_IO_TXQ_IDX(ring->id); 1359 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1360 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1361 for (i = 0; i < ring->ring_size; i++) 1362 ring->empty_tx_reqs[i] = i; 1363 } else { 1364 ena_qid = ENA_IO_RXQ_IDX(ring->id); 1365 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 1366 if (rte_intr_dp_is_en(intr_handle)) 1367 ctx.msix_vector = 1368 rte_intr_vec_list_index_get(intr_handle, 1369 ring->id); 1370 1371 for (i = 0; i < ring->ring_size; i++) 1372 ring->empty_rx_reqs[i] = i; 1373 } 1374 ctx.queue_size = ring->ring_size; 1375 ctx.qid = ena_qid; 1376 ctx.numa_node = ring->numa_socket_id; 1377 1378 rc = ena_com_create_io_queue(ena_dev, &ctx); 1379 if (rc) { 1380 PMD_DRV_LOG(ERR, 1381 "Failed to create IO queue[%d] (qid:%d), rc: %d\n", 1382 ring->id, ena_qid, rc); 1383 return rc; 1384 } 1385 1386 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1387 &ring->ena_com_io_sq, 1388 &ring->ena_com_io_cq); 1389 if (rc) { 1390 PMD_DRV_LOG(ERR, 1391 "Failed to get IO queue[%d] handlers, rc: %d\n", 1392 ring->id, rc); 1393 ena_com_destroy_io_queue(ena_dev, ena_qid); 1394 return rc; 1395 } 1396 1397 if (ring->type == ENA_RING_TYPE_TX) 1398 ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); 1399 1400 /* Start with Rx interrupts being masked. */ 1401 if (ring->type == ENA_RING_TYPE_RX && rte_intr_dp_is_en(intr_handle)) 1402 ena_rx_queue_intr_disable(dev, ring->id); 1403 1404 return 0; 1405 } 1406 1407 static void ena_queue_stop(struct ena_ring *ring) 1408 { 1409 struct ena_com_dev *ena_dev = &ring->adapter->ena_dev; 1410 1411 if (ring->type == ENA_RING_TYPE_RX) { 1412 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id)); 1413 ena_rx_queue_release_bufs(ring); 1414 } else { 1415 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id)); 1416 ena_tx_queue_release_bufs(ring); 1417 } 1418 } 1419 1420 static void ena_queue_stop_all(struct rte_eth_dev *dev, 1421 enum ena_ring_type ring_type) 1422 { 1423 struct ena_adapter *adapter = dev->data->dev_private; 1424 struct ena_ring *queues = NULL; 1425 uint16_t nb_queues, i; 1426 1427 if (ring_type == ENA_RING_TYPE_RX) { 1428 queues = adapter->rx_ring; 1429 nb_queues = dev->data->nb_rx_queues; 1430 } else { 1431 queues = adapter->tx_ring; 1432 nb_queues = dev->data->nb_tx_queues; 1433 } 1434 1435 for (i = 0; i < nb_queues; ++i) 1436 if (queues[i].configured) 1437 ena_queue_stop(&queues[i]); 1438 } 1439 1440 static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring) 1441 { 1442 int rc, bufs_num; 1443 1444 ena_assert_msg(ring->configured == 1, 1445 "Trying to start unconfigured queue\n"); 1446 1447 rc = ena_create_io_queue(dev, ring); 1448 if (rc) { 1449 PMD_INIT_LOG(ERR, "Failed to create IO queue\n"); 1450 return rc; 1451 } 1452 1453 ring->next_to_clean = 0; 1454 ring->next_to_use = 0; 1455 1456 if (ring->type == ENA_RING_TYPE_TX) { 1457 ring->tx_stats.available_desc = 1458 ena_com_free_q_entries(ring->ena_com_io_sq); 1459 return 0; 1460 } 1461 1462 bufs_num = ring->ring_size - 1; 1463 rc = ena_populate_rx_queue(ring, bufs_num); 1464 if (rc != bufs_num) { 1465 
ena_com_destroy_io_queue(&ring->adapter->ena_dev, 1466 ENA_IO_RXQ_IDX(ring->id)); 1467 PMD_INIT_LOG(ERR, "Failed to populate Rx ring\n"); 1468 return ENA_COM_FAULT; 1469 } 1470 /* Flush per-core RX buffers pools cache as they can be used on other 1471 * cores as well. 1472 */ 1473 rte_mempool_cache_flush(NULL, ring->mb_pool); 1474 1475 return 0; 1476 } 1477 1478 static int ena_tx_queue_setup(struct rte_eth_dev *dev, 1479 uint16_t queue_idx, 1480 uint16_t nb_desc, 1481 unsigned int socket_id, 1482 const struct rte_eth_txconf *tx_conf) 1483 { 1484 struct ena_ring *txq = NULL; 1485 struct ena_adapter *adapter = dev->data->dev_private; 1486 unsigned int i; 1487 uint16_t dyn_thresh; 1488 1489 txq = &adapter->tx_ring[queue_idx]; 1490 1491 if (txq->configured) { 1492 PMD_DRV_LOG(CRIT, 1493 "API violation. Queue[%d] is already configured\n", 1494 queue_idx); 1495 return ENA_COM_FAULT; 1496 } 1497 1498 if (!rte_is_power_of_2(nb_desc)) { 1499 PMD_DRV_LOG(ERR, 1500 "Unsupported size of Tx queue: %d is not a power of 2.\n", 1501 nb_desc); 1502 return -EINVAL; 1503 } 1504 1505 if (nb_desc > adapter->max_tx_ring_size) { 1506 PMD_DRV_LOG(ERR, 1507 "Unsupported size of Tx queue (max size: %d)\n", 1508 adapter->max_tx_ring_size); 1509 return -EINVAL; 1510 } 1511 1512 txq->port_id = dev->data->port_id; 1513 txq->next_to_clean = 0; 1514 txq->next_to_use = 0; 1515 txq->ring_size = nb_desc; 1516 txq->size_mask = nb_desc - 1; 1517 txq->numa_socket_id = socket_id; 1518 txq->pkts_without_db = false; 1519 txq->last_cleanup_ticks = 0; 1520 1521 txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info", 1522 sizeof(struct ena_tx_buffer) * txq->ring_size, 1523 RTE_CACHE_LINE_SIZE, 1524 socket_id); 1525 if (!txq->tx_buffer_info) { 1526 PMD_DRV_LOG(ERR, 1527 "Failed to allocate memory for Tx buffer info\n"); 1528 return -ENOMEM; 1529 } 1530 1531 txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs", 1532 sizeof(uint16_t) * txq->ring_size, 1533 RTE_CACHE_LINE_SIZE, 1534 socket_id); 1535 if (!txq->empty_tx_reqs) { 1536 PMD_DRV_LOG(ERR, 1537 "Failed to allocate memory for empty Tx requests\n"); 1538 rte_free(txq->tx_buffer_info); 1539 return -ENOMEM; 1540 } 1541 1542 txq->push_buf_intermediate_buf = 1543 rte_zmalloc_socket("txq->push_buf_intermediate_buf", 1544 txq->tx_max_header_size, 1545 RTE_CACHE_LINE_SIZE, 1546 socket_id); 1547 if (!txq->push_buf_intermediate_buf) { 1548 PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n"); 1549 rte_free(txq->tx_buffer_info); 1550 rte_free(txq->empty_tx_reqs); 1551 return -ENOMEM; 1552 } 1553 1554 for (i = 0; i < txq->ring_size; i++) 1555 txq->empty_tx_reqs[i] = i; 1556 1557 txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 1558 1559 /* Check if caller provided the Tx cleanup threshold value. 
*/ 1560 if (tx_conf->tx_free_thresh != 0) { 1561 txq->tx_free_thresh = tx_conf->tx_free_thresh; 1562 } else { 1563 dyn_thresh = txq->ring_size - 1564 txq->ring_size / ENA_REFILL_THRESH_DIVIDER; 1565 txq->tx_free_thresh = RTE_MAX(dyn_thresh, 1566 txq->ring_size - ENA_REFILL_THRESH_PACKET); 1567 } 1568 1569 txq->missing_tx_completion_threshold = 1570 RTE_MIN(txq->ring_size / 2, ENA_DEFAULT_MISSING_COMP); 1571 1572 /* Store pointer to this queue in upper layer */ 1573 txq->configured = 1; 1574 dev->data->tx_queues[queue_idx] = txq; 1575 1576 return 0; 1577 } 1578 1579 static int ena_rx_queue_setup(struct rte_eth_dev *dev, 1580 uint16_t queue_idx, 1581 uint16_t nb_desc, 1582 unsigned int socket_id, 1583 const struct rte_eth_rxconf *rx_conf, 1584 struct rte_mempool *mp) 1585 { 1586 struct ena_adapter *adapter = dev->data->dev_private; 1587 struct ena_ring *rxq = NULL; 1588 size_t buffer_size; 1589 int i; 1590 uint16_t dyn_thresh; 1591 1592 rxq = &adapter->rx_ring[queue_idx]; 1593 if (rxq->configured) { 1594 PMD_DRV_LOG(CRIT, 1595 "API violation. Queue[%d] is already configured\n", 1596 queue_idx); 1597 return ENA_COM_FAULT; 1598 } 1599 1600 if (!rte_is_power_of_2(nb_desc)) { 1601 PMD_DRV_LOG(ERR, 1602 "Unsupported size of Rx queue: %d is not a power of 2.\n", 1603 nb_desc); 1604 return -EINVAL; 1605 } 1606 1607 if (nb_desc > adapter->max_rx_ring_size) { 1608 PMD_DRV_LOG(ERR, 1609 "Unsupported size of Rx queue (max size: %d)\n", 1610 adapter->max_rx_ring_size); 1611 return -EINVAL; 1612 } 1613 1614 /* ENA isn't supporting buffers smaller than 1400 bytes */ 1615 buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; 1616 if (buffer_size < ENA_RX_BUF_MIN_SIZE) { 1617 PMD_DRV_LOG(ERR, 1618 "Unsupported size of Rx buffer: %zu (min size: %d)\n", 1619 buffer_size, ENA_RX_BUF_MIN_SIZE); 1620 return -EINVAL; 1621 } 1622 1623 rxq->port_id = dev->data->port_id; 1624 rxq->next_to_clean = 0; 1625 rxq->next_to_use = 0; 1626 rxq->ring_size = nb_desc; 1627 rxq->size_mask = nb_desc - 1; 1628 rxq->numa_socket_id = socket_id; 1629 rxq->mb_pool = mp; 1630 1631 rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info", 1632 sizeof(struct ena_rx_buffer) * nb_desc, 1633 RTE_CACHE_LINE_SIZE, 1634 socket_id); 1635 if (!rxq->rx_buffer_info) { 1636 PMD_DRV_LOG(ERR, 1637 "Failed to allocate memory for Rx buffer info\n"); 1638 return -ENOMEM; 1639 } 1640 1641 rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer", 1642 sizeof(struct rte_mbuf *) * nb_desc, 1643 RTE_CACHE_LINE_SIZE, 1644 socket_id); 1645 if (!rxq->rx_refill_buffer) { 1646 PMD_DRV_LOG(ERR, 1647 "Failed to allocate memory for Rx refill buffer\n"); 1648 rte_free(rxq->rx_buffer_info); 1649 rxq->rx_buffer_info = NULL; 1650 return -ENOMEM; 1651 } 1652 1653 rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs", 1654 sizeof(uint16_t) * nb_desc, 1655 RTE_CACHE_LINE_SIZE, 1656 socket_id); 1657 if (!rxq->empty_rx_reqs) { 1658 PMD_DRV_LOG(ERR, 1659 "Failed to allocate memory for empty Rx requests\n"); 1660 rte_free(rxq->rx_buffer_info); 1661 rxq->rx_buffer_info = NULL; 1662 rte_free(rxq->rx_refill_buffer); 1663 rxq->rx_refill_buffer = NULL; 1664 return -ENOMEM; 1665 } 1666 1667 for (i = 0; i < nb_desc; i++) 1668 rxq->empty_rx_reqs[i] = i; 1669 1670 rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; 1671 1672 if (rx_conf->rx_free_thresh != 0) { 1673 rxq->rx_free_thresh = rx_conf->rx_free_thresh; 1674 } else { 1675 dyn_thresh = rxq->ring_size / ENA_REFILL_THRESH_DIVIDER; 1676 rxq->rx_free_thresh = 
RTE_MIN(dyn_thresh, 1677 (uint16_t)(ENA_REFILL_THRESH_PACKET)); 1678 } 1679 1680 /* Store pointer to this queue in upper layer */ 1681 rxq->configured = 1; 1682 dev->data->rx_queues[queue_idx] = rxq; 1683 1684 return 0; 1685 } 1686 1687 static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq, 1688 struct rte_mbuf *mbuf, uint16_t id) 1689 { 1690 struct ena_com_buf ebuf; 1691 int rc; 1692 1693 /* prepare physical address for DMA transaction */ 1694 ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; 1695 ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; 1696 1697 /* pass resource to device */ 1698 rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id); 1699 if (unlikely(rc != 0)) 1700 PMD_RX_LOG(WARNING, "Failed adding Rx desc\n"); 1701 1702 return rc; 1703 } 1704 1705 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) 1706 { 1707 unsigned int i; 1708 int rc; 1709 uint16_t next_to_use = rxq->next_to_use; 1710 uint16_t req_id; 1711 #ifdef RTE_ETHDEV_DEBUG_RX 1712 uint16_t in_use; 1713 #endif 1714 struct rte_mbuf **mbufs = rxq->rx_refill_buffer; 1715 1716 if (unlikely(!count)) 1717 return 0; 1718 1719 #ifdef RTE_ETHDEV_DEBUG_RX 1720 in_use = rxq->ring_size - 1 - 1721 ena_com_free_q_entries(rxq->ena_com_io_sq); 1722 if (unlikely((in_use + count) >= rxq->ring_size)) 1723 PMD_RX_LOG(ERR, "Bad Rx ring state\n"); 1724 #endif 1725 1726 /* get resources for incoming packets */ 1727 rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, count); 1728 if (unlikely(rc < 0)) { 1729 rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); 1730 ++rxq->rx_stats.mbuf_alloc_fail; 1731 PMD_RX_LOG(DEBUG, "There are not enough free buffers\n"); 1732 return 0; 1733 } 1734 1735 for (i = 0; i < count; i++) { 1736 struct rte_mbuf *mbuf = mbufs[i]; 1737 struct ena_rx_buffer *rx_info; 1738 1739 if (likely((i + 4) < count)) 1740 rte_prefetch0(mbufs[i + 4]); 1741 1742 req_id = rxq->empty_rx_reqs[next_to_use]; 1743 rx_info = &rxq->rx_buffer_info[req_id]; 1744 1745 rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id); 1746 if (unlikely(rc != 0)) 1747 break; 1748 1749 rx_info->mbuf = mbuf; 1750 next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask); 1751 } 1752 1753 if (unlikely(i < count)) { 1754 PMD_RX_LOG(WARNING, 1755 "Refilled Rx queue[%d] with only %d/%d buffers\n", 1756 rxq->id, i, count); 1757 rte_pktmbuf_free_bulk(&mbufs[i], count - i); 1758 ++rxq->rx_stats.refill_partial; 1759 } 1760 1761 /* When we submitted free resources to device... */ 1762 if (likely(i > 0)) { 1763 /* ...let HW know that it can fill buffers with data. 
*/ 1764 ena_com_write_sq_doorbell(rxq->ena_com_io_sq); 1765 1766 rxq->next_to_use = next_to_use; 1767 } 1768 1769 return i; 1770 } 1771 1772 static size_t ena_get_metrics_entries(struct ena_adapter *adapter) 1773 { 1774 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1775 size_t metrics_num = 0; 1776 1777 if (ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) 1778 metrics_num = ENA_STATS_ARRAY_METRICS; 1779 else if (ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) 1780 metrics_num = ENA_STATS_ARRAY_METRICS_LEGACY; 1781 PMD_DRV_LOG(NOTICE, "0x%x customer metrics are supported\n", (unsigned int)metrics_num); 1782 if (metrics_num > ENA_MAX_CUSTOMER_METRICS) { 1783 PMD_DRV_LOG(NOTICE, "Not enough space for the requested customer metrics\n"); 1784 metrics_num = ENA_MAX_CUSTOMER_METRICS; 1785 } 1786 return metrics_num; 1787 } 1788 1789 static int ena_device_init(struct ena_adapter *adapter, 1790 struct rte_pci_device *pdev, 1791 struct ena_com_dev_get_features_ctx *get_feat_ctx) 1792 { 1793 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1794 uint32_t aenq_groups; 1795 int rc; 1796 bool readless_supported; 1797 1798 /* Initialize mmio registers */ 1799 rc = ena_com_mmio_reg_read_request_init(ena_dev); 1800 if (rc) { 1801 PMD_DRV_LOG(ERR, "Failed to init MMIO read less\n"); 1802 return rc; 1803 } 1804 1805 /* The PCIe configuration space revision id indicate if mmio reg 1806 * read is disabled. 1807 */ 1808 readless_supported = !(pdev->id.class_id & ENA_MMIO_DISABLE_REG_READ); 1809 ena_com_set_mmio_read_mode(ena_dev, readless_supported); 1810 1811 /* reset device */ 1812 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 1813 if (rc) { 1814 PMD_DRV_LOG(ERR, "Cannot reset device\n"); 1815 goto err_mmio_read_less; 1816 } 1817 1818 /* check FW version */ 1819 rc = ena_com_validate_version(ena_dev); 1820 if (rc) { 1821 PMD_DRV_LOG(ERR, "Device version is too low\n"); 1822 goto err_mmio_read_less; 1823 } 1824 1825 ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); 1826 1827 /* ENA device administration layer init */ 1828 rc = ena_com_admin_init(ena_dev, &aenq_handlers); 1829 if (rc) { 1830 PMD_DRV_LOG(ERR, 1831 "Cannot initialize ENA admin queue\n"); 1832 goto err_mmio_read_less; 1833 } 1834 1835 /* To enable the msix interrupts the driver needs to know the number 1836 * of queues. So the driver uses polling mode to retrieve this 1837 * information. 
1838 */ 1839 ena_com_set_admin_polling_mode(ena_dev, true); 1840 1841 ena_config_host_info(ena_dev); 1842 1843 /* Get Device Attributes and features */ 1844 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 1845 if (rc) { 1846 PMD_DRV_LOG(ERR, 1847 "Cannot get attribute for ENA device, rc: %d\n", rc); 1848 goto err_admin_init; 1849 } 1850 1851 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1852 BIT(ENA_ADMIN_NOTIFICATION) | 1853 BIT(ENA_ADMIN_KEEP_ALIVE) | 1854 BIT(ENA_ADMIN_FATAL_ERROR) | 1855 BIT(ENA_ADMIN_WARNING) | 1856 BIT(ENA_ADMIN_CONF_NOTIFICATIONS); 1857 1858 aenq_groups &= get_feat_ctx->aenq.supported_groups; 1859 1860 adapter->all_aenq_groups = aenq_groups; 1861 /* The actual supported number of metrics is negotiated with the device at runtime */ 1862 adapter->metrics_num = ena_get_metrics_entries(adapter); 1863 1864 return 0; 1865 1866 err_admin_init: 1867 ena_com_admin_destroy(ena_dev); 1868 1869 err_mmio_read_less: 1870 ena_com_mmio_reg_read_request_destroy(ena_dev); 1871 1872 return rc; 1873 } 1874 1875 static void ena_interrupt_handler_rte(void *cb_arg) 1876 { 1877 struct rte_eth_dev *dev = cb_arg; 1878 struct ena_adapter *adapter = dev->data->dev_private; 1879 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1880 1881 ena_com_admin_q_comp_intr_handler(ena_dev); 1882 if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) 1883 ena_com_aenq_intr_handler(ena_dev, dev); 1884 } 1885 1886 static void check_for_missing_keep_alive(struct ena_adapter *adapter) 1887 { 1888 if (!(adapter->active_aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE))) 1889 return; 1890 1891 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 1892 return; 1893 1894 if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 1895 adapter->keep_alive_timeout)) { 1896 PMD_DRV_LOG(ERR, "Keep alive timeout\n"); 1897 ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO); 1898 ++adapter->dev_stats.wd_expired; 1899 } 1900 } 1901 1902 /* Check if admin queue is enabled */ 1903 static void check_for_admin_com_state(struct ena_adapter *adapter) 1904 { 1905 if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 1906 PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n"); 1907 ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO); 1908 } 1909 } 1910 1911 static int check_for_tx_completion_in_queue(struct ena_adapter *adapter, 1912 struct ena_ring *tx_ring) 1913 { 1914 struct ena_tx_buffer *tx_buf; 1915 uint64_t timestamp; 1916 uint64_t completion_delay; 1917 uint32_t missed_tx = 0; 1918 unsigned int i; 1919 int rc = 0; 1920 1921 for (i = 0; i < tx_ring->ring_size; ++i) { 1922 tx_buf = &tx_ring->tx_buffer_info[i]; 1923 timestamp = tx_buf->timestamp; 1924 1925 if (timestamp == 0) 1926 continue; 1927 1928 completion_delay = rte_get_timer_cycles() - timestamp; 1929 if (completion_delay > adapter->missing_tx_completion_to) { 1930 if (unlikely(!tx_buf->print_once)) { 1931 PMD_TX_LOG(WARNING, 1932 "Found a Tx that wasn't completed on time, qid %d, index %d. " 1933 "Missing Tx outstanding for %" PRIu64 " msecs.\n", 1934 tx_ring->id, i, completion_delay / 1935 rte_get_timer_hz() * 1000); 1936 tx_buf->print_once = true; 1937 } 1938 ++missed_tx; 1939 } 1940 } 1941 1942 if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) { 1943 PMD_DRV_LOG(ERR, 1944 "The number of lost Tx completions is above the threshold (%d > %d). 
" 1945 "Trigger the device reset.\n", 1946 missed_tx, 1947 tx_ring->missing_tx_completion_threshold); 1948 adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; 1949 adapter->trigger_reset = true; 1950 rc = -EIO; 1951 } 1952 1953 tx_ring->tx_stats.missed_tx += missed_tx; 1954 1955 return rc; 1956 } 1957 1958 static void check_for_tx_completions(struct ena_adapter *adapter) 1959 { 1960 struct ena_ring *tx_ring; 1961 uint64_t tx_cleanup_delay; 1962 size_t qid; 1963 int budget; 1964 uint16_t nb_tx_queues = adapter->edev_data->nb_tx_queues; 1965 1966 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) 1967 return; 1968 1969 nb_tx_queues = adapter->edev_data->nb_tx_queues; 1970 budget = adapter->missing_tx_completion_budget; 1971 1972 qid = adapter->last_tx_comp_qid; 1973 while (budget-- > 0) { 1974 tx_ring = &adapter->tx_ring[qid]; 1975 1976 /* Tx cleanup is called only by the burst function and can be 1977 * called dynamically by the application. Also cleanup is 1978 * limited by the threshold. To avoid false detection of the 1979 * missing HW Tx completion, get the delay since last cleanup 1980 * function was called. 1981 */ 1982 tx_cleanup_delay = rte_get_timer_cycles() - 1983 tx_ring->last_cleanup_ticks; 1984 if (tx_cleanup_delay < adapter->tx_cleanup_stall_delay) 1985 check_for_tx_completion_in_queue(adapter, tx_ring); 1986 qid = (qid + 1) % nb_tx_queues; 1987 } 1988 1989 adapter->last_tx_comp_qid = qid; 1990 } 1991 1992 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1993 void *arg) 1994 { 1995 struct rte_eth_dev *dev = arg; 1996 struct ena_adapter *adapter = dev->data->dev_private; 1997 1998 if (unlikely(adapter->trigger_reset)) 1999 return; 2000 2001 check_for_missing_keep_alive(adapter); 2002 check_for_admin_com_state(adapter); 2003 check_for_tx_completions(adapter); 2004 2005 if (unlikely(adapter->trigger_reset)) { 2006 PMD_DRV_LOG(ERR, "Trigger reset is on\n"); 2007 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 2008 NULL); 2009 } 2010 } 2011 2012 static inline void 2013 set_default_llq_configurations(struct ena_llq_configurations *llq_config, 2014 struct ena_admin_feature_llq_desc *llq, 2015 bool use_large_llq_hdr) 2016 { 2017 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 2018 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 2019 llq_config->llq_num_decs_before_header = 2020 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 2021 2022 if (use_large_llq_hdr && 2023 (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) { 2024 llq_config->llq_ring_entry_size = 2025 ENA_ADMIN_LIST_ENTRY_SIZE_256B; 2026 llq_config->llq_ring_entry_size_value = 256; 2027 } else { 2028 llq_config->llq_ring_entry_size = 2029 ENA_ADMIN_LIST_ENTRY_SIZE_128B; 2030 llq_config->llq_ring_entry_size_value = 128; 2031 } 2032 } 2033 2034 static int 2035 ena_set_queues_placement_policy(struct ena_adapter *adapter, 2036 struct ena_com_dev *ena_dev, 2037 struct ena_admin_feature_llq_desc *llq, 2038 struct ena_llq_configurations *llq_default_configurations) 2039 { 2040 int rc; 2041 u32 llq_feature_mask; 2042 2043 if (adapter->llq_header_policy == ENA_LLQ_POLICY_DISABLED) { 2044 PMD_DRV_LOG(WARNING, 2045 "NOTE: LLQ has been disabled as per user's request. 
" 2046 "This may lead to a huge performance degradation!\n"); 2047 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2048 return 0; 2049 } 2050 2051 llq_feature_mask = 1 << ENA_ADMIN_LLQ; 2052 if (!(ena_dev->supported_features & llq_feature_mask)) { 2053 PMD_DRV_LOG(INFO, 2054 "LLQ is not supported. Fallback to host mode policy.\n"); 2055 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2056 return 0; 2057 } 2058 2059 if (adapter->dev_mem_base == NULL) { 2060 PMD_DRV_LOG(ERR, 2061 "LLQ is advertised as supported, but device doesn't expose mem bar\n"); 2062 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2063 return 0; 2064 } 2065 2066 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 2067 if (unlikely(rc)) { 2068 PMD_INIT_LOG(WARNING, 2069 "Failed to config dev mode. Fallback to host mode policy.\n"); 2070 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2071 return 0; 2072 } 2073 2074 /* Nothing to config, exit */ 2075 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 2076 return 0; 2077 2078 ena_dev->mem_bar = adapter->dev_mem_base; 2079 2080 return 0; 2081 } 2082 2083 static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev, 2084 struct ena_com_dev_get_features_ctx *get_feat_ctx) 2085 { 2086 uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; 2087 2088 /* Regular queues capabilities */ 2089 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 2090 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 2091 &get_feat_ctx->max_queue_ext.max_queue_ext; 2092 io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num, 2093 max_queue_ext->max_rx_cq_num); 2094 io_tx_sq_num = max_queue_ext->max_tx_sq_num; 2095 io_tx_cq_num = max_queue_ext->max_tx_cq_num; 2096 } else { 2097 struct ena_admin_queue_feature_desc *max_queues = 2098 &get_feat_ctx->max_queues; 2099 io_tx_sq_num = max_queues->max_sq_num; 2100 io_tx_cq_num = max_queues->max_cq_num; 2101 io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num); 2102 } 2103 2104 /* In case of LLQ use the llq number in the get feature cmd */ 2105 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 2106 io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 2107 2108 max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num); 2109 max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num); 2110 max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num); 2111 2112 if (unlikely(max_num_io_queues == 0)) { 2113 PMD_DRV_LOG(ERR, "Number of IO queues cannot not be 0\n"); 2114 return -EFAULT; 2115 } 2116 2117 return max_num_io_queues; 2118 } 2119 2120 static void 2121 ena_set_offloads(struct ena_offloads *offloads, 2122 struct ena_admin_feature_offload_desc *offload_desc) 2123 { 2124 if (offload_desc->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) 2125 offloads->tx_offloads |= ENA_IPV4_TSO; 2126 2127 /* Tx IPv4 checksum offloads */ 2128 if (offload_desc->tx & 2129 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) 2130 offloads->tx_offloads |= ENA_L3_IPV4_CSUM; 2131 if (offload_desc->tx & 2132 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) 2133 offloads->tx_offloads |= ENA_L4_IPV4_CSUM; 2134 if (offload_desc->tx & 2135 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) 2136 offloads->tx_offloads |= ENA_L4_IPV4_CSUM_PARTIAL; 2137 2138 /* Tx IPv6 checksum offloads */ 2139 if (offload_desc->tx & 2140 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) 2141 offloads->tx_offloads |= ENA_L4_IPV6_CSUM; 2142 if 
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
		offloads->tx_offloads |= ENA_L4_IPV6_CSUM_PARTIAL;

	/* Rx IPv4 checksum offloads */
	if (offload_desc->rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)
		offloads->rx_offloads |= ENA_L3_IPV4_CSUM;
	if (offload_desc->rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
		offloads->rx_offloads |= ENA_L4_IPV4_CSUM;

	/* Rx IPv6 checksum offloads */
	if (offload_desc->rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
		offloads->rx_offloads |= ENA_L4_IPV6_CSUM;

	if (offload_desc->rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
		offloads->rx_offloads |= ENA_RX_RSS_HASH;
}

static int ena_init_once(void)
{
	static bool init_done;

	if (init_done)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* Init timer subsystem for the ENA timer service. */
		rte_timer_subsystem_init();
		/* Register handler for requests from secondary processes. */
		rte_mp_action_register(ENA_MP_NAME, ena_mp_primary_handle);
	}

	init_done = true;
	return 0;
}

static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
{
	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_llq_configurations llq_config;
	const char *queue_type_str;
	uint32_t max_num_io_queues;
	int rc;
	static int adapters_found;
	bool disable_meta_caching;

	eth_dev->dev_ops = &ena_dev_ops;
	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;

	rc = ena_init_once();
	if (rc != 0)
		return rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	memset(adapter, 0, sizeof(struct ena_adapter));
	ena_dev = &adapter->ena_dev;

	adapter->edev_data = eth_dev->data;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_LOG(INFO, "Initializing " PCI_PRI_FMT "\n",
		     pci_dev->addr.domain,
		     pci_dev->addr.bus,
		     pci_dev->addr.devid,
		     pci_dev->addr.function);

	intr_handle = pci_dev->intr_handle;

	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;

	if (!adapter->regs) {
		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
			     ENA_REGS_BAR);
		return -ENXIO;
	}

	ena_dev->reg_bar = adapter->regs;
	/* Pass device data as a pointer which can be passed to the IO functions
	 * by the ena_com (for example - the memory allocation).
2238 */ 2239 ena_dev->dmadev = eth_dev->data; 2240 2241 adapter->id_number = adapters_found; 2242 2243 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", 2244 adapter->id_number); 2245 2246 /* Assign default devargs values */ 2247 adapter->missing_tx_completion_to = ENA_TX_TIMEOUT; 2248 adapter->enable_llq = true; 2249 adapter->use_large_llq_hdr = false; 2250 adapter->use_normal_llq_hdr = false; 2251 2252 /* Get user bypass */ 2253 rc = ena_parse_devargs(adapter, pci_dev->device.devargs); 2254 if (rc != 0) { 2255 PMD_INIT_LOG(CRIT, "Failed to parse devargs\n"); 2256 goto err; 2257 } 2258 adapter->llq_header_policy = ena_define_llq_hdr_policy(adapter); 2259 2260 rc = ena_com_allocate_customer_metrics_buffer(ena_dev); 2261 if (rc != 0) { 2262 PMD_INIT_LOG(CRIT, "Failed to allocate customer metrics buffer\n"); 2263 goto err; 2264 } 2265 2266 /* device specific initialization routine */ 2267 rc = ena_device_init(adapter, pci_dev, &get_feat_ctx); 2268 if (rc) { 2269 PMD_INIT_LOG(CRIT, "Failed to init ENA device\n"); 2270 goto err_metrics_delete; 2271 } 2272 2273 /* Check if device supports LSC */ 2274 if (!(adapter->all_aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) 2275 adapter->edev_data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; 2276 2277 bool use_large_llq_hdr = ena_use_large_llq_hdr(adapter, 2278 get_feat_ctx.llq.entry_size_recommended); 2279 set_default_llq_configurations(&llq_config, &get_feat_ctx.llq, use_large_llq_hdr); 2280 rc = ena_set_queues_placement_policy(adapter, ena_dev, 2281 &get_feat_ctx.llq, &llq_config); 2282 if (unlikely(rc)) { 2283 PMD_INIT_LOG(CRIT, "Failed to set placement policy\n"); 2284 return rc; 2285 } 2286 2287 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { 2288 queue_type_str = "Regular"; 2289 } else { 2290 queue_type_str = "Low latency"; 2291 PMD_DRV_LOG(INFO, "LLQ entry size %uB\n", llq_config.llq_ring_entry_size_value); 2292 } 2293 PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str); 2294 2295 calc_queue_ctx.ena_dev = ena_dev; 2296 calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 2297 2298 max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx); 2299 rc = ena_calc_io_queue_size(&calc_queue_ctx, use_large_llq_hdr); 2300 if (unlikely((rc != 0) || (max_num_io_queues == 0))) { 2301 rc = -EFAULT; 2302 goto err_device_destroy; 2303 } 2304 2305 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size; 2306 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size; 2307 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 2308 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 2309 adapter->max_num_io_queues = max_num_io_queues; 2310 2311 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2312 disable_meta_caching = 2313 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags & 2314 BIT(ENA_ADMIN_DISABLE_META_CACHING)); 2315 } else { 2316 disable_meta_caching = false; 2317 } 2318 2319 /* prepare ring structures */ 2320 ena_init_rings(adapter, disable_meta_caching); 2321 2322 ena_config_debug_area(adapter); 2323 2324 /* Set max MTU for this device */ 2325 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 2326 2327 ena_set_offloads(&adapter->offloads, &get_feat_ctx.offload); 2328 2329 /* Copy MAC address and point DPDK to it */ 2330 eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr; 2331 rte_ether_addr_copy((struct rte_ether_addr *) 2332 get_feat_ctx.dev_attr.mac_addr, 2333 (struct rte_ether_addr *)adapter->mac_addr); 2334 2335 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 
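	/* Any failure from here on unwinds the resources allocated above
	 * (debug area, host info, admin queue, customer metrics buffer)
	 * via the error labels at the end of this function.
	 */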
2336 if (unlikely(rc != 0)) { 2337 PMD_DRV_LOG(ERR, "Failed to initialize RSS in ENA device\n"); 2338 goto err_delete_debug_area; 2339 } 2340 2341 adapter->drv_stats = rte_zmalloc("adapter stats", 2342 sizeof(*adapter->drv_stats), 2343 RTE_CACHE_LINE_SIZE); 2344 if (!adapter->drv_stats) { 2345 PMD_DRV_LOG(ERR, 2346 "Failed to allocate memory for adapter statistics\n"); 2347 rc = -ENOMEM; 2348 goto err_rss_destroy; 2349 } 2350 2351 rte_spinlock_init(&adapter->admin_lock); 2352 2353 rte_intr_callback_register(intr_handle, 2354 ena_interrupt_handler_rte, 2355 eth_dev); 2356 rte_intr_enable(intr_handle); 2357 ena_com_set_admin_polling_mode(ena_dev, false); 2358 ena_com_admin_aenq_enable(ena_dev); 2359 2360 rte_timer_init(&adapter->timer_wd); 2361 2362 adapters_found++; 2363 adapter->state = ENA_ADAPTER_STATE_INIT; 2364 2365 return 0; 2366 2367 err_rss_destroy: 2368 ena_com_rss_destroy(ena_dev); 2369 err_delete_debug_area: 2370 ena_com_delete_debug_area(ena_dev); 2371 2372 err_device_destroy: 2373 ena_com_delete_host_info(ena_dev); 2374 ena_com_admin_destroy(ena_dev); 2375 err_metrics_delete: 2376 ena_com_delete_customer_metrics_buffer(ena_dev); 2377 err: 2378 return rc; 2379 } 2380 2381 static void ena_destroy_device(struct rte_eth_dev *eth_dev) 2382 { 2383 struct ena_adapter *adapter = eth_dev->data->dev_private; 2384 struct ena_com_dev *ena_dev = &adapter->ena_dev; 2385 2386 if (adapter->state == ENA_ADAPTER_STATE_FREE) 2387 return; 2388 2389 ena_com_set_admin_running_state(ena_dev, false); 2390 2391 if (adapter->state != ENA_ADAPTER_STATE_CLOSED) 2392 ena_close(eth_dev); 2393 2394 ena_com_rss_destroy(ena_dev); 2395 2396 ena_com_delete_debug_area(ena_dev); 2397 ena_com_delete_host_info(ena_dev); 2398 2399 ena_com_abort_admin_commands(ena_dev); 2400 ena_com_wait_for_abort_completion(ena_dev); 2401 ena_com_admin_destroy(ena_dev); 2402 ena_com_mmio_reg_read_request_destroy(ena_dev); 2403 ena_com_delete_customer_metrics_buffer(ena_dev); 2404 2405 adapter->state = ENA_ADAPTER_STATE_FREE; 2406 } 2407 2408 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) 2409 { 2410 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2411 return 0; 2412 2413 ena_destroy_device(eth_dev); 2414 2415 return 0; 2416 } 2417 2418 static int ena_dev_configure(struct rte_eth_dev *dev) 2419 { 2420 struct ena_adapter *adapter = dev->data->dev_private; 2421 int rc; 2422 2423 adapter->state = ENA_ADAPTER_STATE_CONFIG; 2424 2425 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 2426 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2427 dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; 2428 2429 /* Scattered Rx cannot be turned off in the HW, so this capability must 2430 * be forced. 2431 */ 2432 dev->data->scattered_rx = 1; 2433 2434 adapter->last_tx_comp_qid = 0; 2435 2436 adapter->missing_tx_completion_budget = 2437 RTE_MIN(ENA_MONITORED_TX_QUEUES, dev->data->nb_tx_queues); 2438 2439 /* To avoid detection of the spurious Tx completion timeout due to 2440 * application not calling the Tx cleanup function, set timeout for the 2441 * Tx queue which should be half of the missing completion timeout for a 2442 * safety. If there will be a lot of missing Tx completions in the 2443 * queue, they will be detected sooner or later. 
2444 */ 2445 adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2; 2446 2447 rc = ena_configure_aenq(adapter); 2448 2449 return rc; 2450 } 2451 2452 static void ena_init_rings(struct ena_adapter *adapter, 2453 bool disable_meta_caching) 2454 { 2455 size_t i; 2456 2457 for (i = 0; i < adapter->max_num_io_queues; i++) { 2458 struct ena_ring *ring = &adapter->tx_ring[i]; 2459 2460 ring->configured = 0; 2461 ring->type = ENA_RING_TYPE_TX; 2462 ring->adapter = adapter; 2463 ring->id = i; 2464 ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 2465 ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 2466 ring->sgl_size = adapter->max_tx_sgl_size; 2467 ring->disable_meta_caching = disable_meta_caching; 2468 } 2469 2470 for (i = 0; i < adapter->max_num_io_queues; i++) { 2471 struct ena_ring *ring = &adapter->rx_ring[i]; 2472 2473 ring->configured = 0; 2474 ring->type = ENA_RING_TYPE_RX; 2475 ring->adapter = adapter; 2476 ring->id = i; 2477 ring->sgl_size = adapter->max_rx_sgl_size; 2478 } 2479 } 2480 2481 static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter) 2482 { 2483 uint64_t port_offloads = 0; 2484 2485 if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM) 2486 port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM; 2487 2488 if (adapter->offloads.rx_offloads & 2489 (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM)) 2490 port_offloads |= 2491 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM; 2492 2493 if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH) 2494 port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2495 2496 port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER; 2497 2498 return port_offloads; 2499 } 2500 2501 static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter) 2502 { 2503 uint64_t port_offloads = 0; 2504 2505 if (adapter->offloads.tx_offloads & ENA_IPV4_TSO) 2506 port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO; 2507 2508 if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM) 2509 port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; 2510 if (adapter->offloads.tx_offloads & 2511 (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM | 2512 ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL)) 2513 port_offloads |= 2514 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM; 2515 2516 port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; 2517 2518 port_offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 2519 2520 return port_offloads; 2521 } 2522 2523 static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter) 2524 { 2525 RTE_SET_USED(adapter); 2526 2527 return 0; 2528 } 2529 2530 static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter) 2531 { 2532 uint64_t queue_offloads = 0; 2533 RTE_SET_USED(adapter); 2534 2535 queue_offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 2536 2537 return queue_offloads; 2538 } 2539 2540 static int ena_infos_get(struct rte_eth_dev *dev, 2541 struct rte_eth_dev_info *dev_info) 2542 { 2543 struct ena_adapter *adapter; 2544 struct ena_com_dev *ena_dev; 2545 2546 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 2547 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 2548 adapter = dev->data->dev_private; 2549 2550 ena_dev = &adapter->ena_dev; 2551 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 2552 2553 dev_info->speed_capa = 2554 RTE_ETH_LINK_SPEED_1G | 2555 RTE_ETH_LINK_SPEED_2_5G | 2556 RTE_ETH_LINK_SPEED_5G | 2557 RTE_ETH_LINK_SPEED_10G | 2558 RTE_ETH_LINK_SPEED_25G | 2559 RTE_ETH_LINK_SPEED_40G | 2560 RTE_ETH_LINK_SPEED_50G | 2561 RTE_ETH_LINK_SPEED_100G | 2562 
RTE_ETH_LINK_SPEED_200G | 2563 RTE_ETH_LINK_SPEED_400G; 2564 2565 /* Inform framework about available features */ 2566 dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter); 2567 dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter); 2568 dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter); 2569 dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter); 2570 2571 dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF; 2572 dev_info->hash_key_size = ENA_HASH_KEY_SIZE; 2573 2574 dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 2575 dev_info->max_rx_pktlen = adapter->max_mtu + RTE_ETHER_HDR_LEN + 2576 RTE_ETHER_CRC_LEN; 2577 dev_info->min_mtu = ENA_MIN_MTU; 2578 dev_info->max_mtu = adapter->max_mtu; 2579 dev_info->max_mac_addrs = 1; 2580 2581 dev_info->max_rx_queues = adapter->max_num_io_queues; 2582 dev_info->max_tx_queues = adapter->max_num_io_queues; 2583 dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 2584 2585 dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size; 2586 dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2587 dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2588 adapter->max_rx_sgl_size); 2589 dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2590 adapter->max_rx_sgl_size); 2591 2592 dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size; 2593 dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2594 dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2595 adapter->max_tx_sgl_size); 2596 dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2597 adapter->max_tx_sgl_size); 2598 2599 dev_info->default_rxportconf.ring_size = ENA_DEFAULT_RING_SIZE; 2600 dev_info->default_txportconf.ring_size = ENA_DEFAULT_RING_SIZE; 2601 2602 dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PASSIVE; 2603 2604 return 0; 2605 } 2606 2607 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len) 2608 { 2609 mbuf->data_len = len; 2610 mbuf->data_off = RTE_PKTMBUF_HEADROOM; 2611 mbuf->refcnt = 1; 2612 mbuf->next = NULL; 2613 } 2614 2615 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, 2616 struct ena_com_rx_buf_info *ena_bufs, 2617 uint32_t descs, 2618 uint16_t *next_to_clean, 2619 uint8_t offset) 2620 { 2621 struct rte_mbuf *mbuf; 2622 struct rte_mbuf *mbuf_head; 2623 struct ena_rx_buffer *rx_info; 2624 int rc; 2625 uint16_t ntc, len, req_id, buf = 0; 2626 2627 if (unlikely(descs == 0)) 2628 return NULL; 2629 2630 ntc = *next_to_clean; 2631 2632 len = ena_bufs[buf].len; 2633 req_id = ena_bufs[buf].req_id; 2634 2635 rx_info = &rx_ring->rx_buffer_info[req_id]; 2636 2637 mbuf = rx_info->mbuf; 2638 RTE_ASSERT(mbuf != NULL); 2639 2640 ena_init_rx_mbuf(mbuf, len); 2641 2642 /* Fill the mbuf head with the data specific for 1st segment. */ 2643 mbuf_head = mbuf; 2644 mbuf_head->nb_segs = descs; 2645 mbuf_head->port = rx_ring->port_id; 2646 mbuf_head->pkt_len = len; 2647 mbuf_head->data_off += offset; 2648 2649 rx_info->mbuf = NULL; 2650 rx_ring->empty_rx_reqs[ntc] = req_id; 2651 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 2652 2653 while (--descs) { 2654 ++buf; 2655 len = ena_bufs[buf].len; 2656 req_id = ena_bufs[buf].req_id; 2657 2658 rx_info = &rx_ring->rx_buffer_info[req_id]; 2659 RTE_ASSERT(rx_info->mbuf != NULL); 2660 2661 if (unlikely(len == 0)) { 2662 /* 2663 * Some devices can pass descriptor with the length 0. 2664 * To avoid confusion, the PMD is simply putting the 2665 * descriptor back, as it was never used. We'll avoid 2666 * mbuf allocation that way. 
2667 */ 2668 rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq, 2669 rx_info->mbuf, req_id); 2670 if (unlikely(rc != 0)) { 2671 /* Free the mbuf in case of an error. */ 2672 rte_mbuf_raw_free(rx_info->mbuf); 2673 } else { 2674 /* 2675 * If there was no error, just exit the loop as 2676 * 0 length descriptor is always the last one. 2677 */ 2678 break; 2679 } 2680 } else { 2681 /* Create an mbuf chain. */ 2682 mbuf->next = rx_info->mbuf; 2683 mbuf = mbuf->next; 2684 2685 ena_init_rx_mbuf(mbuf, len); 2686 mbuf_head->pkt_len += len; 2687 } 2688 2689 /* 2690 * Mark the descriptor as depleted and perform necessary 2691 * cleanup. 2692 * This code will execute in two cases: 2693 * 1. Descriptor len was greater than 0 - normal situation. 2694 * 2. Descriptor len was 0 and we failed to add the descriptor 2695 * to the device. In that situation, we should try to add 2696 * the mbuf again in the populate routine and mark the 2697 * descriptor as used up by the device. 2698 */ 2699 rx_info->mbuf = NULL; 2700 rx_ring->empty_rx_reqs[ntc] = req_id; 2701 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 2702 } 2703 2704 *next_to_clean = ntc; 2705 2706 return mbuf_head; 2707 } 2708 2709 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 2710 uint16_t nb_pkts) 2711 { 2712 struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 2713 unsigned int free_queue_entries; 2714 uint16_t next_to_clean = rx_ring->next_to_clean; 2715 uint16_t descs_in_use; 2716 struct rte_mbuf *mbuf; 2717 uint16_t completed; 2718 struct ena_com_rx_ctx ena_rx_ctx; 2719 int i, rc = 0; 2720 bool fill_hash; 2721 2722 #ifdef RTE_ETHDEV_DEBUG_RX 2723 /* Check adapter state */ 2724 if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 2725 PMD_RX_LOG(ALERT, 2726 "Trying to receive pkts while device is NOT running\n"); 2727 return 0; 2728 } 2729 #endif 2730 2731 fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH; 2732 2733 descs_in_use = rx_ring->ring_size - 2734 ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1; 2735 nb_pkts = RTE_MIN(descs_in_use, nb_pkts); 2736 2737 for (completed = 0; completed < nb_pkts; completed++) { 2738 ena_rx_ctx.max_bufs = rx_ring->sgl_size; 2739 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 2740 ena_rx_ctx.descs = 0; 2741 ena_rx_ctx.pkt_offset = 0; 2742 /* receive packet context */ 2743 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 2744 rx_ring->ena_com_io_sq, 2745 &ena_rx_ctx); 2746 if (unlikely(rc)) { 2747 PMD_RX_LOG(ERR, 2748 "Failed to get the packet from the device, rc: %d\n", 2749 rc); 2750 if (rc == ENA_COM_NO_SPACE) { 2751 ++rx_ring->rx_stats.bad_desc_num; 2752 ena_trigger_reset(rx_ring->adapter, 2753 ENA_REGS_RESET_TOO_MANY_RX_DESCS); 2754 } else { 2755 ++rx_ring->rx_stats.bad_req_id; 2756 ena_trigger_reset(rx_ring->adapter, 2757 ENA_REGS_RESET_INV_RX_REQ_ID); 2758 } 2759 return 0; 2760 } 2761 2762 mbuf = ena_rx_mbuf(rx_ring, 2763 ena_rx_ctx.ena_bufs, 2764 ena_rx_ctx.descs, 2765 &next_to_clean, 2766 ena_rx_ctx.pkt_offset); 2767 if (unlikely(mbuf == NULL)) { 2768 for (i = 0; i < ena_rx_ctx.descs; ++i) { 2769 rx_ring->empty_rx_reqs[next_to_clean] = 2770 rx_ring->ena_bufs[i].req_id; 2771 next_to_clean = ENA_IDX_NEXT_MASKED( 2772 next_to_clean, rx_ring->size_mask); 2773 } 2774 break; 2775 } 2776 2777 /* fill mbuf attributes if any */ 2778 ena_rx_mbuf_prepare(rx_ring, mbuf, &ena_rx_ctx, fill_hash); 2779 2780 if (unlikely(mbuf->ol_flags & 2781 (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD))) 2782 rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors); 
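		/* The mbuf is still handed to the application even when its
		 * checksum was reported as bad; the ol_flags carry the result
		 * so the application can decide whether to drop the packet.
		 */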
2783 2784 rx_pkts[completed] = mbuf; 2785 rx_ring->rx_stats.bytes += mbuf->pkt_len; 2786 } 2787 2788 rx_ring->rx_stats.cnt += completed; 2789 rx_ring->next_to_clean = next_to_clean; 2790 2791 free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq); 2792 2793 /* Burst refill to save doorbells, memory barriers, const interval */ 2794 if (free_queue_entries >= rx_ring->rx_free_thresh) { 2795 ena_populate_rx_queue(rx_ring, free_queue_entries); 2796 } 2797 2798 return completed; 2799 } 2800 2801 static uint16_t 2802 eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2803 uint16_t nb_pkts) 2804 { 2805 int32_t ret; 2806 uint32_t i; 2807 struct rte_mbuf *m; 2808 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 2809 struct ena_adapter *adapter = tx_ring->adapter; 2810 struct rte_ipv4_hdr *ip_hdr; 2811 uint64_t ol_flags; 2812 uint64_t l4_csum_flag; 2813 uint64_t dev_offload_capa; 2814 uint16_t frag_field; 2815 bool need_pseudo_csum; 2816 2817 dev_offload_capa = adapter->offloads.tx_offloads; 2818 for (i = 0; i != nb_pkts; i++) { 2819 m = tx_pkts[i]; 2820 ol_flags = m->ol_flags; 2821 2822 /* Check if any offload flag was set */ 2823 if (ol_flags == 0) 2824 continue; 2825 2826 l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK; 2827 /* SCTP checksum offload is not supported by the ENA. */ 2828 if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) || 2829 l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) { 2830 PMD_TX_LOG(DEBUG, 2831 "mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64 "\n", 2832 i, ol_flags); 2833 rte_errno = ENOTSUP; 2834 return i; 2835 } 2836 2837 if (unlikely(m->nb_segs >= tx_ring->sgl_size && 2838 !(tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && 2839 m->nb_segs == tx_ring->sgl_size && 2840 m->data_len < tx_ring->tx_max_header_size))) { 2841 PMD_TX_LOG(DEBUG, 2842 "mbuf[%" PRIu32 "] has too many segments: %" PRIu16 "\n", 2843 i, m->nb_segs); 2844 rte_errno = EINVAL; 2845 return i; 2846 } 2847 2848 #ifdef RTE_LIBRTE_ETHDEV_DEBUG 2849 /* Check if requested offload is also enabled for the queue */ 2850 if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM && 2851 !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) || 2852 (l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM && 2853 !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) || 2854 (l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM && 2855 !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) { 2856 PMD_TX_LOG(DEBUG, 2857 "mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n", 2858 i, m->nb_segs, tx_ring->id); 2859 rte_errno = EINVAL; 2860 return i; 2861 } 2862 2863 /* The caller is obligated to set l2 and l3 len if any cksum 2864 * offload is enabled. 2865 */ 2866 if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) && 2867 (m->l2_len == 0 || m->l3_len == 0))) { 2868 PMD_TX_LOG(DEBUG, 2869 "mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n", 2870 i); 2871 rte_errno = EINVAL; 2872 return i; 2873 } 2874 ret = rte_validate_tx_offload(m); 2875 if (ret != 0) { 2876 rte_errno = -ret; 2877 return i; 2878 } 2879 #endif 2880 2881 /* Verify HW support for requested offloads and determine if 2882 * pseudo header checksum is needed. 
2883 */ 2884 need_pseudo_csum = false; 2885 if (ol_flags & RTE_MBUF_F_TX_IPV4) { 2886 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM && 2887 !(dev_offload_capa & ENA_L3_IPV4_CSUM)) { 2888 rte_errno = ENOTSUP; 2889 return i; 2890 } 2891 2892 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG && 2893 !(dev_offload_capa & ENA_IPV4_TSO)) { 2894 rte_errno = ENOTSUP; 2895 return i; 2896 } 2897 2898 /* Check HW capabilities and if pseudo csum is needed 2899 * for L4 offloads. 2900 */ 2901 if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM && 2902 !(dev_offload_capa & ENA_L4_IPV4_CSUM)) { 2903 if (dev_offload_capa & 2904 ENA_L4_IPV4_CSUM_PARTIAL) { 2905 need_pseudo_csum = true; 2906 } else { 2907 rte_errno = ENOTSUP; 2908 return i; 2909 } 2910 } 2911 2912 /* Parse the DF flag */ 2913 ip_hdr = rte_pktmbuf_mtod_offset(m, 2914 struct rte_ipv4_hdr *, m->l2_len); 2915 frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset); 2916 if (frag_field & RTE_IPV4_HDR_DF_FLAG) { 2917 m->packet_type |= RTE_PTYPE_L4_NONFRAG; 2918 } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 2919 /* In case we are supposed to TSO and have DF 2920 * not set (DF=0) hardware must be provided with 2921 * partial checksum. 2922 */ 2923 need_pseudo_csum = true; 2924 } 2925 } else if (ol_flags & RTE_MBUF_F_TX_IPV6) { 2926 /* There is no support for IPv6 TSO as for now. */ 2927 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 2928 rte_errno = ENOTSUP; 2929 return i; 2930 } 2931 2932 /* Check HW capabilities and if pseudo csum is needed */ 2933 if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM && 2934 !(dev_offload_capa & ENA_L4_IPV6_CSUM)) { 2935 if (dev_offload_capa & 2936 ENA_L4_IPV6_CSUM_PARTIAL) { 2937 need_pseudo_csum = true; 2938 } else { 2939 rte_errno = ENOTSUP; 2940 return i; 2941 } 2942 } 2943 } 2944 2945 if (need_pseudo_csum) { 2946 ret = rte_net_intel_cksum_flags_prepare(m, ol_flags); 2947 if (ret != 0) { 2948 rte_errno = -ret; 2949 return i; 2950 } 2951 } 2952 } 2953 2954 return i; 2955 } 2956 2957 static void ena_update_hints(struct ena_adapter *adapter, 2958 struct ena_admin_ena_hw_hints *hints) 2959 { 2960 if (hints->admin_completion_tx_timeout) 2961 adapter->ena_dev.admin_queue.completion_timeout = 2962 hints->admin_completion_tx_timeout * 1000; 2963 2964 if (hints->mmio_read_timeout) 2965 /* convert to usec */ 2966 adapter->ena_dev.mmio_read.reg_read_to = 2967 hints->mmio_read_timeout * 1000; 2968 2969 if (hints->driver_watchdog_timeout) { 2970 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2971 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2972 else 2973 // Convert msecs to ticks 2974 adapter->keep_alive_timeout = 2975 (hints->driver_watchdog_timeout * 2976 rte_get_timer_hz()) / 1000; 2977 } 2978 } 2979 2980 static void ena_tx_map_mbuf(struct ena_ring *tx_ring, 2981 struct ena_tx_buffer *tx_info, 2982 struct rte_mbuf *mbuf, 2983 void **push_header, 2984 uint16_t *header_len) 2985 { 2986 struct ena_com_buf *ena_buf; 2987 uint16_t delta, seg_len, push_len; 2988 2989 delta = 0; 2990 seg_len = mbuf->data_len; 2991 2992 tx_info->mbuf = mbuf; 2993 ena_buf = tx_info->bufs; 2994 2995 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2996 /* 2997 * Tx header might be (and will be in most cases) smaller than 2998 * tx_max_header_size. But it's not an issue to send more data 2999 * to the device, than actually needed if the mbuf size is 3000 * greater than tx_max_header_size. 
3001 */ 3002 push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size); 3003 *header_len = push_len; 3004 3005 if (likely(push_len <= seg_len)) { 3006 /* If the push header is in the single segment, then 3007 * just point it to the 1st mbuf data. 3008 */ 3009 *push_header = rte_pktmbuf_mtod(mbuf, uint8_t *); 3010 } else { 3011 /* If the push header lays in the several segments, copy 3012 * it to the intermediate buffer. 3013 */ 3014 rte_pktmbuf_read(mbuf, 0, push_len, 3015 tx_ring->push_buf_intermediate_buf); 3016 *push_header = tx_ring->push_buf_intermediate_buf; 3017 delta = push_len - seg_len; 3018 } 3019 } else { 3020 *push_header = NULL; 3021 *header_len = 0; 3022 push_len = 0; 3023 } 3024 3025 /* Process first segment taking into consideration pushed header */ 3026 if (seg_len > push_len) { 3027 ena_buf->paddr = mbuf->buf_iova + 3028 mbuf->data_off + 3029 push_len; 3030 ena_buf->len = seg_len - push_len; 3031 ena_buf++; 3032 tx_info->num_of_bufs++; 3033 } 3034 3035 while ((mbuf = mbuf->next) != NULL) { 3036 seg_len = mbuf->data_len; 3037 3038 /* Skip mbufs if whole data is pushed as a header */ 3039 if (unlikely(delta > seg_len)) { 3040 delta -= seg_len; 3041 continue; 3042 } 3043 3044 ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta; 3045 ena_buf->len = seg_len - delta; 3046 ena_buf++; 3047 tx_info->num_of_bufs++; 3048 3049 delta = 0; 3050 } 3051 } 3052 3053 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) 3054 { 3055 struct ena_tx_buffer *tx_info; 3056 struct ena_com_tx_ctx ena_tx_ctx = { { 0 } }; 3057 uint16_t next_to_use; 3058 uint16_t header_len; 3059 uint16_t req_id; 3060 void *push_header; 3061 int nb_hw_desc; 3062 int rc; 3063 3064 /* Checking for space for 2 additional metadata descriptors due to 3065 * possible header split and metadata descriptor 3066 */ 3067 if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3068 mbuf->nb_segs + 2)) { 3069 PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n"); 3070 return ENA_COM_NO_MEM; 3071 } 3072 3073 next_to_use = tx_ring->next_to_use; 3074 3075 req_id = tx_ring->empty_tx_reqs[next_to_use]; 3076 tx_info = &tx_ring->tx_buffer_info[req_id]; 3077 tx_info->num_of_bufs = 0; 3078 RTE_ASSERT(tx_info->mbuf == NULL); 3079 3080 ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len); 3081 3082 ena_tx_ctx.ena_bufs = tx_info->bufs; 3083 ena_tx_ctx.push_header = push_header; 3084 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 3085 ena_tx_ctx.req_id = req_id; 3086 ena_tx_ctx.header_len = header_len; 3087 3088 /* Set Tx offloads flags, if applicable */ 3089 ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads, 3090 tx_ring->disable_meta_caching); 3091 3092 if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, 3093 &ena_tx_ctx))) { 3094 PMD_TX_LOG(DEBUG, 3095 "LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n", 3096 tx_ring->id); 3097 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 3098 tx_ring->tx_stats.doorbells++; 3099 tx_ring->pkts_without_db = false; 3100 } 3101 3102 /* prepare the packet's descriptors to dma engine */ 3103 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, 3104 &nb_hw_desc); 3105 if (unlikely(rc)) { 3106 PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc); 3107 ++tx_ring->tx_stats.prepare_ctx_err; 3108 ena_trigger_reset(tx_ring->adapter, 3109 ENA_REGS_RESET_DRIVER_INVALID_STATE); 3110 return rc; 3111 } 3112 3113 tx_info->tx_descs = nb_hw_desc; 3114 tx_info->timestamp = rte_get_timer_cycles(); 3115 3116 
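	/* The timestamp recorded above is what check_for_tx_completion_in_queue()
	 * compares against missing_tx_completion_to to detect Tx requests that
	 * were never completed by the device.
	 */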
tx_ring->tx_stats.cnt++; 3117 tx_ring->tx_stats.bytes += mbuf->pkt_len; 3118 3119 tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, 3120 tx_ring->size_mask); 3121 3122 return 0; 3123 } 3124 3125 static __rte_always_inline size_t 3126 ena_tx_cleanup_mbuf_fast(struct rte_mbuf **mbufs_to_clean, 3127 struct rte_mbuf *mbuf, 3128 size_t mbuf_cnt, 3129 size_t buf_size) 3130 { 3131 struct rte_mbuf *m_next; 3132 3133 while (mbuf != NULL) { 3134 m_next = mbuf->next; 3135 mbufs_to_clean[mbuf_cnt++] = mbuf; 3136 if (mbuf_cnt == buf_size) { 3137 rte_pktmbuf_free_bulk(mbufs_to_clean, mbuf_cnt); 3138 mbuf_cnt = 0; 3139 } 3140 mbuf = m_next; 3141 } 3142 3143 return mbuf_cnt; 3144 } 3145 3146 static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt) 3147 { 3148 struct rte_mbuf *mbufs_to_clean[ENA_CLEANUP_BUF_SIZE]; 3149 struct ena_ring *tx_ring = (struct ena_ring *)txp; 3150 size_t mbuf_cnt = 0; 3151 unsigned int total_tx_descs = 0; 3152 unsigned int total_tx_pkts = 0; 3153 uint16_t cleanup_budget; 3154 uint16_t next_to_clean = tx_ring->next_to_clean; 3155 bool fast_free = tx_ring->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 3156 3157 /* 3158 * If free_pkt_cnt is equal to 0, it means that the user requested 3159 * full cleanup, so attempt to release all Tx descriptors 3160 * (ring_size - 1 -> size_mask) 3161 */ 3162 cleanup_budget = (free_pkt_cnt == 0) ? tx_ring->size_mask : free_pkt_cnt; 3163 3164 while (likely(total_tx_pkts < cleanup_budget)) { 3165 struct rte_mbuf *mbuf; 3166 struct ena_tx_buffer *tx_info; 3167 uint16_t req_id; 3168 3169 if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0) 3170 break; 3171 3172 if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0)) 3173 break; 3174 3175 /* Get Tx info & store how many descs were processed */ 3176 tx_info = &tx_ring->tx_buffer_info[req_id]; 3177 tx_info->timestamp = 0; 3178 3179 mbuf = tx_info->mbuf; 3180 if (fast_free) { 3181 mbuf_cnt = ena_tx_cleanup_mbuf_fast(mbufs_to_clean, mbuf, mbuf_cnt, 3182 ENA_CLEANUP_BUF_SIZE); 3183 } else { 3184 rte_pktmbuf_free(mbuf); 3185 } 3186 3187 tx_info->mbuf = NULL; 3188 tx_ring->empty_tx_reqs[next_to_clean] = req_id; 3189 3190 total_tx_descs += tx_info->tx_descs; 3191 total_tx_pkts++; 3192 3193 /* Put back descriptor to the ring for reuse */ 3194 next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean, 3195 tx_ring->size_mask); 3196 } 3197 3198 if (likely(total_tx_descs > 0)) { 3199 /* acknowledge completion of sent packets */ 3200 tx_ring->next_to_clean = next_to_clean; 3201 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); 3202 } 3203 3204 if (mbuf_cnt != 0) 3205 rte_pktmbuf_free_bulk(mbufs_to_clean, mbuf_cnt); 3206 3207 /* Notify completion handler that full cleanup was performed */ 3208 if (free_pkt_cnt == 0 || total_tx_pkts < cleanup_budget) 3209 tx_ring->last_cleanup_ticks = rte_get_timer_cycles(); 3210 3211 return total_tx_pkts; 3212 } 3213 3214 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 3215 uint16_t nb_pkts) 3216 { 3217 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 3218 int available_desc; 3219 uint16_t sent_idx = 0; 3220 3221 #ifdef RTE_ETHDEV_DEBUG_TX 3222 /* Check adapter state */ 3223 if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 3224 PMD_TX_LOG(ALERT, 3225 "Trying to xmit pkts while device is NOT running\n"); 3226 return 0; 3227 } 3228 #endif 3229 3230 available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq); 3231 if (available_desc < tx_ring->tx_free_thresh) 3232 ena_tx_cleanup((void *)tx_ring, 0); 
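	/* A budget of 0 asks ena_tx_cleanup() for a full cleanup (up to
	 * ring_size - 1 descriptors), which also refreshes last_cleanup_ticks
	 * used by the missing-Tx-completion watchdog.
	 */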
3233 3234 for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { 3235 if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx])) 3236 break; 3237 tx_ring->pkts_without_db = true; 3238 rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4, 3239 tx_ring->size_mask)]); 3240 } 3241 3242 /* If there are ready packets to be xmitted... */ 3243 if (likely(tx_ring->pkts_without_db)) { 3244 /* ...let HW do its best :-) */ 3245 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 3246 tx_ring->tx_stats.doorbells++; 3247 tx_ring->pkts_without_db = false; 3248 } 3249 3250 tx_ring->tx_stats.available_desc = 3251 ena_com_free_q_entries(tx_ring->ena_com_io_sq); 3252 tx_ring->tx_stats.tx_poll++; 3253 3254 return sent_idx; 3255 } 3256 3257 static void ena_copy_customer_metrics(struct ena_adapter *adapter, uint64_t *buf, 3258 size_t num_metrics) 3259 { 3260 struct ena_com_dev *ena_dev = &adapter->ena_dev; 3261 int rc; 3262 3263 if (ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) { 3264 if (num_metrics != ENA_STATS_ARRAY_METRICS) { 3265 PMD_DRV_LOG(ERR, "Detected discrepancy in the number of customer metrics"); 3266 return; 3267 } 3268 rte_spinlock_lock(&adapter->admin_lock); 3269 rc = ENA_PROXY(adapter, 3270 ena_com_get_customer_metrics, 3271 &adapter->ena_dev, 3272 (char *)buf, 3273 num_metrics * sizeof(uint64_t)); 3274 rte_spinlock_unlock(&adapter->admin_lock); 3275 if (rc != 0) { 3276 PMD_DRV_LOG(WARNING, "Failed to get customer metrics, rc: %d\n", rc); 3277 return; 3278 } 3279 3280 } else if (ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) { 3281 if (num_metrics != ENA_STATS_ARRAY_METRICS_LEGACY) { 3282 PMD_DRV_LOG(ERR, "Detected discrepancy in the number of legacy metrics"); 3283 return; 3284 } 3285 3286 rte_spinlock_lock(&adapter->admin_lock); 3287 rc = ENA_PROXY(adapter, 3288 ena_com_get_eni_stats, 3289 &adapter->ena_dev, 3290 (struct ena_admin_eni_stats *)buf); 3291 rte_spinlock_unlock(&adapter->admin_lock); 3292 if (rc != 0) { 3293 PMD_DRV_LOG(WARNING, 3294 "Failed to get ENI metrics, rc: %d\n", rc); 3295 return; 3296 } 3297 } 3298 } 3299 3300 static void ena_copy_ena_srd_info(struct ena_adapter *adapter, 3301 struct ena_stats_srd *srd_info) 3302 { 3303 int rc; 3304 3305 if (!ena_com_get_cap(&adapter->ena_dev, ENA_ADMIN_ENA_SRD_INFO)) 3306 return; 3307 3308 rte_spinlock_lock(&adapter->admin_lock); 3309 rc = ENA_PROXY(adapter, 3310 ena_com_get_ena_srd_info, 3311 &adapter->ena_dev, 3312 (struct ena_admin_ena_srd_info *)srd_info); 3313 rte_spinlock_unlock(&adapter->admin_lock); 3314 if (rc != ENA_COM_OK && rc != ENA_COM_UNSUPPORTED) { 3315 PMD_DRV_LOG(WARNING, 3316 "Failed to get ENA express srd info, rc: %d\n", rc); 3317 return; 3318 } 3319 } 3320 3321 /** 3322 * DPDK callback to retrieve names of extended device statistics 3323 * 3324 * @param dev 3325 * Pointer to Ethernet device structure. 3326 * @param[out] xstats_names 3327 * Buffer to insert names into. 3328 * @param n 3329 * Number of names. 3330 * 3331 * @return 3332 * Number of xstats names. 
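 *
 * If @p n is smaller than the number of xstats, or @p xstats_names is NULL,
 * only the required array size is returned and no names are written.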
3333 */ 3334 static int ena_xstats_get_names(struct rte_eth_dev *dev, 3335 struct rte_eth_xstat_name *xstats_names, 3336 unsigned int n) 3337 { 3338 struct ena_adapter *adapter = dev->data->dev_private; 3339 unsigned int xstats_count = ena_xstats_calc_num(dev->data); 3340 unsigned int stat, i, count = 0; 3341 3342 if (n < xstats_count || !xstats_names) 3343 return xstats_count; 3344 3345 for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) 3346 strcpy(xstats_names[count].name, 3347 ena_stats_global_strings[stat].name); 3348 3349 for (stat = 0; stat < adapter->metrics_num; stat++, count++) 3350 rte_strscpy(xstats_names[count].name, 3351 ena_stats_metrics_strings[stat].name, 3352 RTE_ETH_XSTATS_NAME_SIZE); 3353 for (stat = 0; stat < ENA_STATS_ARRAY_ENA_SRD; stat++, count++) 3354 rte_strscpy(xstats_names[count].name, 3355 ena_stats_srd_strings[stat].name, 3356 RTE_ETH_XSTATS_NAME_SIZE); 3357 3358 for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) 3359 for (i = 0; i < dev->data->nb_rx_queues; i++, count++) 3360 snprintf(xstats_names[count].name, 3361 sizeof(xstats_names[count].name), 3362 "rx_q%d_%s", i, 3363 ena_stats_rx_strings[stat].name); 3364 3365 for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) 3366 for (i = 0; i < dev->data->nb_tx_queues; i++, count++) 3367 snprintf(xstats_names[count].name, 3368 sizeof(xstats_names[count].name), 3369 "tx_q%d_%s", i, 3370 ena_stats_tx_strings[stat].name); 3371 3372 return xstats_count; 3373 } 3374 3375 /** 3376 * DPDK callback to retrieve names of extended device statistics for the given 3377 * ids. 3378 * 3379 * @param dev 3380 * Pointer to Ethernet device structure. 3381 * @param[out] xstats_names 3382 * Buffer to insert names into. 3383 * @param ids 3384 * IDs array for which the names should be retrieved. 3385 * @param size 3386 * Number of ids. 3387 * 3388 * @return 3389 * Positive value: number of xstats names. Negative value: error code. 
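 * -EINVAL is returned when any of the requested IDs is out of range.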
3390 */ 3391 static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev, 3392 const uint64_t *ids, 3393 struct rte_eth_xstat_name *xstats_names, 3394 unsigned int size) 3395 { 3396 struct ena_adapter *adapter = dev->data->dev_private; 3397 uint64_t xstats_count = ena_xstats_calc_num(dev->data); 3398 uint64_t id, qid; 3399 unsigned int i; 3400 3401 if (xstats_names == NULL) 3402 return xstats_count; 3403 3404 for (i = 0; i < size; ++i) { 3405 id = ids[i]; 3406 if (id > xstats_count) { 3407 PMD_DRV_LOG(ERR, 3408 "ID value out of range: id=%" PRIu64 ", xstats_num=%" PRIu64 "\n", 3409 id, xstats_count); 3410 return -EINVAL; 3411 } 3412 3413 if (id < ENA_STATS_ARRAY_GLOBAL) { 3414 strcpy(xstats_names[i].name, 3415 ena_stats_global_strings[id].name); 3416 continue; 3417 } 3418 3419 id -= ENA_STATS_ARRAY_GLOBAL; 3420 if (id < adapter->metrics_num) { 3421 rte_strscpy(xstats_names[i].name, 3422 ena_stats_metrics_strings[id].name, 3423 RTE_ETH_XSTATS_NAME_SIZE); 3424 continue; 3425 } 3426 3427 id -= adapter->metrics_num; 3428 3429 if (id < ENA_STATS_ARRAY_ENA_SRD) { 3430 rte_strscpy(xstats_names[i].name, 3431 ena_stats_srd_strings[id].name, 3432 RTE_ETH_XSTATS_NAME_SIZE); 3433 continue; 3434 } 3435 id -= ENA_STATS_ARRAY_ENA_SRD; 3436 3437 if (id < ENA_STATS_ARRAY_RX) { 3438 qid = id / dev->data->nb_rx_queues; 3439 id %= dev->data->nb_rx_queues; 3440 snprintf(xstats_names[i].name, 3441 sizeof(xstats_names[i].name), 3442 "rx_q%" PRIu64 "d_%s", 3443 qid, ena_stats_rx_strings[id].name); 3444 continue; 3445 } 3446 3447 id -= ENA_STATS_ARRAY_RX; 3448 /* Although this condition is not needed, it was added for 3449 * compatibility if new xstat structure would be ever added. 3450 */ 3451 if (id < ENA_STATS_ARRAY_TX) { 3452 qid = id / dev->data->nb_tx_queues; 3453 id %= dev->data->nb_tx_queues; 3454 snprintf(xstats_names[i].name, 3455 sizeof(xstats_names[i].name), 3456 "tx_q%" PRIu64 "_%s", 3457 qid, ena_stats_tx_strings[id].name); 3458 continue; 3459 } 3460 } 3461 3462 return i; 3463 } 3464 3465 /** 3466 * DPDK callback to get extended device statistics. 3467 * 3468 * @param dev 3469 * Pointer to Ethernet device structure. 3470 * @param[out] stats 3471 * Stats table output buffer. 3472 * @param n 3473 * The size of the stats table. 3474 * 3475 * @return 3476 * Number of xstats on success, negative on failure. 
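 *
 * The xstats are laid out as: global stats, customer metrics, ENA SRD info,
 * and finally the per-queue Rx and Tx statistics.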
 */
static int ena_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *xstats,
			  unsigned int n)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	unsigned int xstats_count = ena_xstats_calc_num(dev->data);
	unsigned int stat, i, count = 0;
	int stat_offset;
	void *stats_begin;
	uint64_t metrics_stats[ENA_MAX_CUSTOMER_METRICS];
	struct ena_stats_srd srd_info = {0};

	if (n < xstats_count)
		return xstats_count;

	if (!xstats)
		return 0;

	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
		stat_offset = ena_stats_global_strings[stat].stat_offset;
		stats_begin = &adapter->dev_stats;

		xstats[count].id = count;
		xstats[count].value = *((uint64_t *)
			((char *)stats_begin + stat_offset));
	}

	ena_copy_customer_metrics(adapter, metrics_stats, adapter->metrics_num);
	stats_begin = metrics_stats;
	for (stat = 0; stat < adapter->metrics_num; stat++, count++) {
		stat_offset = ena_stats_metrics_strings[stat].stat_offset;

		xstats[count].id = count;
		xstats[count].value = *((uint64_t *)
			((char *)stats_begin + stat_offset));
	}

	ena_copy_ena_srd_info(adapter, &srd_info);
	stats_begin = &srd_info;
	for (stat = 0; stat < ENA_STATS_ARRAY_ENA_SRD; stat++, count++) {
		stat_offset = ena_stats_srd_strings[stat].stat_offset;
		xstats[count].id = count;
		xstats[count].value = *((uint64_t *)
			((char *)stats_begin + stat_offset));
	}

	for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
		for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
			stat_offset = ena_stats_rx_strings[stat].stat_offset;
			stats_begin = &adapter->rx_ring[i].rx_stats;

			xstats[count].id = count;
			xstats[count].value = *((uint64_t *)
				((char *)stats_begin + stat_offset));
		}
	}

	for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
		for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
			stat_offset = ena_stats_tx_strings[stat].stat_offset;
			stats_begin = &adapter->tx_ring[i].tx_stats;

			xstats[count].id = count;
			xstats[count].value = *((uint64_t *)
				((char *)stats_begin + stat_offset));
		}
	}

	return count;
}

static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	uint64_t id;
	uint64_t rx_entries, tx_entries;
	unsigned int i;
	int qid;
	int valid = 0;
	bool were_metrics_copied = false;
	bool was_srd_info_copied = false;
	uint64_t metrics_stats[ENA_MAX_CUSTOMER_METRICS];
	struct ena_stats_srd srd_info = {0};

	for (i = 0; i < n; ++i) {
		id = ids[i];
		/* Check if id belongs to global statistics */
		if (id < ENA_STATS_ARRAY_GLOBAL) {
			values[i] = *((uint64_t *)&adapter->dev_stats + id);
			++valid;
			continue;
		}

		/* Check if id belongs to the customer (ENI) metrics */
		id -= ENA_STATS_ARRAY_GLOBAL;
		if (id < adapter->metrics_num) {
			/* Avoid reading metrics multiple times in a single
			 * function call, as it requires communication with the
			 * admin queue.
			 */
			if (!were_metrics_copied) {
				were_metrics_copied = true;
				ena_copy_customer_metrics(adapter,
					metrics_stats,
					adapter->metrics_num);
			}

			values[i] = *((uint64_t *)&metrics_stats + id);
			++valid;
			continue;
		}

		/* Check if id belongs to SRD info statistics */
		id -= adapter->metrics_num;

		if (id < ENA_STATS_ARRAY_ENA_SRD) {
			/*
			 * Avoid reading srd info multiple times in a single
			 * function call, as it requires communication with the
			 * admin queue.
			 */
			if (!was_srd_info_copied) {
				was_srd_info_copied = true;
				ena_copy_ena_srd_info(adapter, &srd_info);
			}
			values[i] = *((uint64_t *)&srd_info + id);
			++valid;
			continue;
		}

		/* Check if id belongs to rx queue statistics */
		id -= ENA_STATS_ARRAY_ENA_SRD;

		rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
		if (id < rx_entries) {
			qid = id % dev->data->nb_rx_queues;
			id /= dev->data->nb_rx_queues;
			values[i] = *((uint64_t *)
				&adapter->rx_ring[qid].rx_stats + id);
			++valid;
			continue;
		}
		/* Check if id belongs to tx queue statistics */
		id -= rx_entries;
		tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
		if (id < tx_entries) {
			qid = id % dev->data->nb_tx_queues;
			id /= dev->data->nb_tx_queues;
			values[i] = *((uint64_t *)
				&adapter->tx_ring[qid].tx_stats + id);
			++valid;
			continue;
		}
	}

	return valid;
}

static int ena_process_uint_devarg(const char *key,
				   const char *value,
				   void *opaque)
{
	struct ena_adapter *adapter = opaque;
	char *str_end;
	uint64_t uint_value;

	uint_value = strtoull(value, &str_end, DECIMAL_BASE);
	if (value == str_end) {
		PMD_INIT_LOG(ERR,
			"Invalid value for key '%s'. Only uint values are accepted.\n",
			key);
		return -EINVAL;
	}

	if (strcmp(key, ENA_DEVARG_MISS_TXC_TO) == 0) {
		if (uint_value > ENA_MAX_TX_TIMEOUT_SECONDS) {
			PMD_INIT_LOG(ERR,
				"Tx timeout too high: %" PRIu64 " sec. Maximum allowed: %d sec.\n",
				uint_value, ENA_MAX_TX_TIMEOUT_SECONDS);
			return -EINVAL;
		} else if (uint_value == 0) {
			PMD_INIT_LOG(INFO,
				"Check for missing Tx completions has been disabled.\n");
			adapter->missing_tx_completion_to =
				ENA_HW_HINTS_NO_TIMEOUT;
		} else {
			PMD_INIT_LOG(INFO,
				"Tx packet completion timeout set to %" PRIu64 " seconds.\n",
				uint_value);
			adapter->missing_tx_completion_to =
				uint_value * rte_get_timer_hz();
		}
	}

	return 0;
}

static int ena_process_bool_devarg(const char *key,
				   const char *value,
				   void *opaque)
{
	struct ena_adapter *adapter = opaque;
	bool bool_value;

	/* Parse the value. */
	if (strcmp(value, "1") == 0) {
		bool_value = true;
	} else if (strcmp(value, "0") == 0) {
		bool_value = false;
	} else {
		PMD_INIT_LOG(ERR,
			"Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
			value, key);
		return -EINVAL;
	}

	/* Now, assign it to the proper adapter field.
static int ena_process_uint_devarg(const char *key,
				   const char *value,
				   void *opaque)
{
	struct ena_adapter *adapter = opaque;
	char *str_end;
	uint64_t uint_value;

	uint_value = strtoull(value, &str_end, DECIMAL_BASE);
	if (value == str_end) {
		PMD_INIT_LOG(ERR,
			"Invalid value for key '%s'. Only uint values are accepted.\n",
			key);
		return -EINVAL;
	}

	if (strcmp(key, ENA_DEVARG_MISS_TXC_TO) == 0) {
		if (uint_value > ENA_MAX_TX_TIMEOUT_SECONDS) {
			PMD_INIT_LOG(ERR,
				"Tx timeout too high: %" PRIu64 " sec. Maximum allowed: %d sec.\n",
				uint_value, ENA_MAX_TX_TIMEOUT_SECONDS);
			return -EINVAL;
		} else if (uint_value == 0) {
			PMD_INIT_LOG(INFO,
				"Check for missing Tx completions has been disabled.\n");
			adapter->missing_tx_completion_to =
				ENA_HW_HINTS_NO_TIMEOUT;
		} else {
			PMD_INIT_LOG(INFO,
				"Tx packet completion timeout set to %" PRIu64 " seconds.\n",
				uint_value);
			adapter->missing_tx_completion_to =
				uint_value * rte_get_timer_hz();
		}
	}

	return 0;
}

static int ena_process_bool_devarg(const char *key,
				   const char *value,
				   void *opaque)
{
	struct ena_adapter *adapter = opaque;
	bool bool_value;

	/* Parse the value. */
	if (strcmp(value, "1") == 0) {
		bool_value = true;
	} else if (strcmp(value, "0") == 0) {
		bool_value = false;
	} else {
		PMD_INIT_LOG(ERR,
			"Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
			value, key);
		return -EINVAL;
	}

	/* Now, assign it to the proper adapter field. */
	if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0)
		adapter->use_large_llq_hdr = bool_value;
	else if (strcmp(key, ENA_DEVARG_NORMAL_LLQ_HDR) == 0)
		adapter->use_normal_llq_hdr = bool_value;
	else if (strcmp(key, ENA_DEVARG_ENABLE_LLQ) == 0)
		adapter->enable_llq = bool_value;

	return 0;
}

static int ena_parse_devargs(struct ena_adapter *adapter,
			     struct rte_devargs *devargs)
{
	static const char * const allowed_args[] = {
		ENA_DEVARG_LARGE_LLQ_HDR,
		ENA_DEVARG_NORMAL_LLQ_HDR,
		ENA_DEVARG_MISS_TXC_TO,
		ENA_DEVARG_ENABLE_LLQ,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int rc;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, allowed_args);
	if (kvlist == NULL) {
		PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n",
			devargs->args);
		return -EINVAL;
	}

	rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR,
		ena_process_bool_devarg, adapter);
	if (rc != 0)
		goto exit;
	rc = rte_kvargs_process(kvlist, ENA_DEVARG_NORMAL_LLQ_HDR,
		ena_process_bool_devarg, adapter);
	if (rc != 0)
		goto exit;
	rc = rte_kvargs_process(kvlist, ENA_DEVARG_MISS_TXC_TO,
		ena_process_uint_devarg, adapter);
	if (rc != 0)
		goto exit;
	rc = rte_kvargs_process(kvlist, ENA_DEVARG_ENABLE_LLQ,
		ena_process_bool_devarg, adapter);

exit:
	rte_kvargs_free(kvlist);

	return rc;
}
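/*
 * Usage sketch (added, not part of the original source): the keys accepted
 * above map directly to EAL device arguments. A hypothetical command line
 * enabling LLQ with large headers and a 3 second missing-Tx-completion
 * timeout for a device at PCI address 00:06.0 could look like:
 *
 *	dpdk-testpmd -a 00:06.0,enable_llq=1,large_llq_hdr=1,miss_txc_to=3 -- -i
 *
 * Boolean keys accept only "0" or "1" (see ena_process_bool_devarg()), and
 * miss_txc_to=0 disables the missing Tx completion check entirely.
 */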
static int ena_setup_rx_intr(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int rc;
	uint16_t vectors_nb, i;
	bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq;

	if (!rx_intr_requested)
		return 0;

	if (!rte_intr_cap_multiple(intr_handle)) {
		PMD_DRV_LOG(ERR,
			"Rx interrupt requested, but it isn't supported by the PCI driver\n");
		return -ENOTSUP;
	}

	/* Disable interrupt mapping before the configuration starts. */
	rte_intr_disable(intr_handle);

	/* Verify if there are enough vectors available. */
	vectors_nb = dev->data->nb_rx_queues;
	if (vectors_nb > RTE_MAX_RXTX_INTR_VEC_ID) {
		PMD_DRV_LOG(ERR,
			"Too many Rx interrupts requested, maximum number: %d\n",
			RTE_MAX_RXTX_INTR_VEC_ID);
		rc = -ENOTSUP;
		goto enable_intr;
	}

	/* Allocate the vector list */
	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
				    dev->data->nb_rx_queues)) {
		PMD_DRV_LOG(ERR,
			"Failed to allocate interrupt vector for %d queues\n",
			dev->data->nb_rx_queues);
		rc = -ENOMEM;
		goto enable_intr;
	}

	rc = rte_intr_efd_enable(intr_handle, vectors_nb);
	if (rc != 0)
		goto free_intr_vec;

	if (!rte_intr_allow_others(intr_handle)) {
		PMD_DRV_LOG(ERR,
			"Not enough interrupts available to use both ENA Admin and Rx interrupts\n");
		goto disable_intr_efd;
	}

	for (i = 0; i < vectors_nb; ++i)
		if (rte_intr_vec_list_index_set(intr_handle, i,
						RTE_INTR_VEC_RXTX_OFFSET + i))
			goto disable_intr_efd;

	rte_intr_enable(intr_handle);
	return 0;

disable_intr_efd:
	rte_intr_efd_disable(intr_handle);
free_intr_vec:
	rte_intr_vec_list_free(intr_handle);
enable_intr:
	rte_intr_enable(intr_handle);
	return rc;
}
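/*
 * Illustrative note (added, not from the original driver): the vectors set up
 * above are used only when the application requests Rx interrupts through
 * dev_conf.intr_conf.rxq. A minimal sketch of the application side, assuming
 * "port_id" and "queue_id" refer to a configured and started ENA port, could
 * be:
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	... wait for the interrupt event (e.g. via epoll) ...
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *	... drain the queue with rte_eth_rx_burst() ...
 *
 * The enable/disable calls end up in ena_rx_queue_intr_enable() and
 * ena_rx_queue_intr_disable() below.
 */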
static void ena_rx_queue_intr_set(struct rte_eth_dev *dev,
				  uint16_t queue_id,
				  bool unmask)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *rxq = &adapter->rx_ring[queue_id];
	struct ena_eth_io_intr_reg intr_reg;

	ena_com_update_intr_reg(&intr_reg, 0, 0, unmask, 1);
	ena_com_unmask_intr(rxq->ena_com_io_cq, &intr_reg);
}

static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id)
{
	ena_rx_queue_intr_set(dev, queue_id, true);

	return 0;
}

static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id)
{
	ena_rx_queue_intr_set(dev, queue_id, false);

	return 0;
}

static int ena_configure_aenq(struct ena_adapter *adapter)
{
	uint32_t aenq_groups = adapter->all_aenq_groups;
	int rc;

	/* All_aenq_groups holds all AENQ functions supported by both the device
	 * and the driver, so at first we need to be sure the LSC request is
	 * valid.
	 */
	if (adapter->edev_data->dev_conf.intr_conf.lsc != 0) {
		if (!(aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) {
			PMD_DRV_LOG(ERR,
				"LSC requested, but it's not supported by the AENQ\n");
			return -EINVAL;
		}
	} else {
		/* If LSC wasn't enabled by the app, let's enable all supported
		 * AENQ procedures except the LSC.
		 */
		aenq_groups &= ~BIT(ENA_ADMIN_LINK_CHANGE);
	}

	rc = ena_com_set_aenq_config(&adapter->ena_dev, aenq_groups);
	if (rc != 0) {
		PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc=%d\n", rc);
		return rc;
	}

	adapter->active_aenq_groups = aenq_groups;

	return 0;
}

int ena_mp_indirect_table_set(struct ena_adapter *adapter)
{
	return ENA_PROXY(adapter, ena_com_indirect_table_set, &adapter->ena_dev);
}

int ena_mp_indirect_table_get(struct ena_adapter *adapter,
			      uint32_t *indirect_table)
{
	return ENA_PROXY(adapter, ena_com_indirect_table_get, &adapter->ena_dev,
		indirect_table);
}

/*********************************************************************
 *  ena_plat_dpdk.h functions implementations
 *********************************************************************/

const struct rte_memzone *
ena_mem_alloc_coherent(struct rte_eth_dev_data *data, size_t size,
		       int socket_id, unsigned int alignment, void **virt_addr,
		       dma_addr_t *phys_addr)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct ena_adapter *adapter = data->dev_private;
	const struct rte_memzone *memzone;
	int rc;

	rc = snprintf(z_name, RTE_MEMZONE_NAMESIZE, "ena_p%d_mz%" PRIu64 "",
		data->port_id, adapter->memzone_cnt);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		PMD_DRV_LOG(ERR,
			"Name for the ena_com memzone is too long. Port: %d, mz_num: %" PRIu64 "\n",
			data->port_id, adapter->memzone_cnt);
		goto error;
	}
	adapter->memzone_cnt++;

	memzone = rte_memzone_reserve_aligned(z_name, size, socket_id,
		RTE_MEMZONE_IOVA_CONTIG, alignment);
	if (memzone == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate ena_com memzone: %s\n",
			z_name);
		goto error;
	}

	memset(memzone->addr, 0, size);
	*virt_addr = memzone->addr;
	*phys_addr = memzone->iova;

	return memzone;

error:
	*virt_addr = NULL;
	*phys_addr = 0;

	return NULL;
}
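/*
 * Note (added for clarity): ena_com expects DMA-able memory, which is why the
 * helper above reserves the memzone with RTE_MEMZONE_IOVA_CONTIG and returns
 * both the virtual address and the IOVA. The per-adapter memzone_cnt counter
 * keeps the names unique, so consecutive allocations on port 0 would be named
 * "ena_p0_mz0", "ena_p0_mz1", and so on (illustrative values).
 */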
/*********************************************************************
 *  PMD configuration
 *********************************************************************/
static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct ena_adapter), eth_ena_dev_init);
}

static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
}

static struct rte_pci_driver rte_ena_pmd = {
	.id_table = pci_id_ena_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_WC_ACTIVATE,
	.probe = eth_ena_pci_probe,
	.remove = eth_ena_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ena,
	ENA_DEVARG_LARGE_LLQ_HDR "=<0|1> "
	ENA_DEVARG_NORMAL_LLQ_HDR "=<0|1> "
	ENA_DEVARG_ENABLE_LLQ "=<0|1> "
	ENA_DEVARG_MISS_TXC_TO "=<uint>");
RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ena_logtype_driver, driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, DEBUG);
#endif
RTE_LOG_REGISTER_SUFFIX(ena_logtype_com, com, WARNING);

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev = adapter_data;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
	uint32_t status;

	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;

	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
	adapter->link_status = status;

	ena_link_update(eth_dev, 0);
	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev = adapter_data;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_admin_ena_hw_hints *hints;

	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
		PMD_DRV_LOG(WARNING, "Invalid AENQ group: %x. Expected: %x\n",
			aenq_e->aenq_common_desc.group,
			ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrome) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid AENQ notification syndrome: %d\n",
			aenq_e->aenq_common_desc.syndrome);
	}
}
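/*
 * Added note: the keep-alive AENQ event handled below refreshes
 * adapter->timestamp_wd, which the driver's watchdog presumably compares
 * against the keep-alive timeout elsewhere in this file; when the device
 * stops sending keep-alive events, that check is what would bump the
 * wd_expired statistic exposed among the global xstats.
 */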
static void ena_keep_alive(void *adapter_data,
			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev = adapter_data;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_admin_aenq_keep_alive_desc *desc;
	uint64_t rx_drops;
	uint64_t tx_drops;
	uint64_t rx_overruns;

	adapter->timestamp_wd = rte_get_timer_cycles();

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
	rx_overruns = ((uint64_t)desc->rx_overruns_high << 32) | desc->rx_overruns_low;

	/*
	 * Depending on its acceleration support, the device updates a different
	 * statistic when an Rx packet is dropped because there are no available
	 * buffers to accommodate it, so both counters are summed into a single
	 * drop counter.
	 */
	adapter->drv_stats->rx_drops = rx_drops + rx_overruns;
	adapter->dev_stats.tx_drops = tx_drops;
}

static void ena_suboptimal_configuration(__rte_unused void *adapter_data,
					 struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_admin_aenq_conf_notifications_desc *desc;
	int bit, num_bits;

	desc = (struct ena_admin_aenq_conf_notifications_desc *)aenq_e;
	num_bits = BITS_PER_TYPE(desc->notifications_bitmap);
	for (bit = 0; bit < num_bits; bit++) {
		if (desc->notifications_bitmap & RTE_BIT64(bit)) {
			PMD_DRV_LOG(WARNING,
				"Sub-optimal configuration notification code: %d\n", bit + 1);
		}
	}
}

/**
 * This handler will be called for an unknown event group or an event with an
 * unimplemented handler.
 **/
static void unimplemented_aenq_handler(__rte_unused void *data,
				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	PMD_DRV_LOG(ERR,
		"Unknown event was received or event with unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive,
		[ENA_ADMIN_CONF_NOTIFICATIONS] = ena_suboptimal_configuration
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

/*********************************************************************
 *  Multi-Process communication request handling (in primary)
 *********************************************************************/
static int
ena_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
{
	const struct ena_mp_body *req =
		(const struct ena_mp_body *)mp_msg->param;
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	struct ena_mp_body *rsp;
	struct rte_mp_msg mp_rsp;
	struct rte_eth_dev *dev;
	int res = 0;

	rsp = (struct ena_mp_body *)&mp_rsp.param;
	mp_msg_init(&mp_rsp, req->type, req->port_id);

	if (!rte_eth_dev_is_valid_port(req->port_id)) {
		rte_errno = ENODEV;
		res = -rte_errno;
		PMD_DRV_LOG(ERR, "Unknown port %d in request %d\n",
			req->port_id, req->type);
		goto end;
	}
	dev = &rte_eth_devices[req->port_id];
	adapter = dev->data->dev_private;
	ena_dev = &adapter->ena_dev;

	switch (req->type) {
	case ENA_MP_DEV_STATS_GET:
		res = ena_com_get_dev_basic_stats(ena_dev,
			&adapter->basic_stats);
		break;
	case ENA_MP_ENI_STATS_GET:
		res = ena_com_get_eni_stats(ena_dev,
			(struct ena_admin_eni_stats *)&adapter->metrics_stats);
		break;
	case ENA_MP_MTU_SET:
		res = ena_com_set_dev_mtu(ena_dev, req->args.mtu);
		break;
	case ENA_MP_IND_TBL_GET:
		res = ena_com_indirect_table_get(ena_dev,
			adapter->indirect_table);
		break;
	case ENA_MP_IND_TBL_SET:
		res = ena_com_indirect_table_set(ena_dev);
		break;
	case ENA_MP_CUSTOMER_METRICS_GET:
		res = ena_com_get_customer_metrics(ena_dev,
			(char *)adapter->metrics_stats,
			adapter->metrics_num * sizeof(uint64_t));
		break;
	case ENA_MP_SRD_STATS_GET:
		res = ena_com_get_ena_srd_info(ena_dev,
			(struct ena_admin_ena_srd_info *)&adapter->srd_stats);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unknown request type %d\n", req->type);
		res = -EINVAL;
		break;
	}

end:
	/* Save processing result in the reply */
	rsp->result = res;
	/* Return just IPC processing status */
	return rte_mp_reply(&mp_rsp, peer);
}
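/*
 * Added note: this handler is the primary-process side of the ENA_PROXY()
 * mechanism used, for example, by ena_mp_indirect_table_set() above. The
 * expected flow, sketched under the assumption that the macro in ena_ethdev.h
 * marshals its arguments into struct ena_mp_body, is roughly:
 *
 *	secondary: ENA_PROXY(adapter, ena_com_indirect_table_set, ...)
 *	           -> rte_mp_request_sync() with type ENA_MP_IND_TBL_SET
 *	primary:   ena_mp_primary_handle() -> ena_com_indirect_table_set()
 *	           -> rte_mp_reply() carrying rsp->result back to the caller
 *
 * In the primary process the proxied ena_com call is expected to be invoked
 * directly instead.
 */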
static ena_llq_policy ena_define_llq_hdr_policy(struct ena_adapter *adapter)
{
	if (!adapter->enable_llq)
		return ENA_LLQ_POLICY_DISABLED;
	if (adapter->use_large_llq_hdr)
		return ENA_LLQ_POLICY_LARGE;
	if (adapter->use_normal_llq_hdr)
		return ENA_LLQ_POLICY_NORMAL;
	return ENA_LLQ_POLICY_RECOMMENDED;
}

static bool ena_use_large_llq_hdr(struct ena_adapter *adapter, uint8_t recommended_entry_size)
{
	if (adapter->llq_header_policy == ENA_LLQ_POLICY_LARGE) {
		return true;
	} else if (adapter->llq_header_policy == ENA_LLQ_POLICY_RECOMMENDED) {
		PMD_DRV_LOG(INFO, "Recommended device entry size policy %u\n",
			recommended_entry_size);
		if (recommended_entry_size == ENA_ADMIN_LIST_ENTRY_SIZE_256B)
			return true;
	}
	return false;
}
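/*
 * Added summary of the LLQ header policy resolution implemented above,
 * listed from highest to lowest precedence as evaluated by
 * ena_define_llq_hdr_policy():
 *
 *	enable_llq=0     -> ENA_LLQ_POLICY_DISABLED (LLQ not used at all)
 *	large_llq_hdr=1  -> ENA_LLQ_POLICY_LARGE (force large LLQ entries)
 *	normal_llq_hdr=1 -> ENA_LLQ_POLICY_NORMAL (force default LLQ entries)
 *	otherwise        -> ENA_LLQ_POLICY_RECOMMENDED (follow the device's
 *	                    recommendation; large headers are used only when
 *	                    it reports ENA_ADMIN_LIST_ENTRY_SIZE_256B)
 *
 * The exact entry sizes behind the LARGE/NORMAL policies are defined elsewhere
 * in the driver; only the 256B constant is visible in ena_use_large_llq_hdr()
 * above.
 */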