/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_net.h>
#include <rte_kvargs.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	8
#define DRV_MODULE_VER_SUBMINOR	0

#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)

#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)

#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) RTE_DIM(x)

#define ENA_MIN_RING_DESC	128

/*
 * We should try to keep ENA_CLEANUP_BUF_SIZE lower than
 * RTE_MEMPOOL_CACHE_MAX_SIZE, so it can fit in the mempool's local cache.
 */
#define ENA_CLEANUP_BUF_SIZE	256
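
/*
 * Illustrative only (not part of the driver): the sizing rule described above
 * could be checked at build time with DPDK's RTE_BUILD_BUG_ON(), e.g.:
 *
 *	RTE_BUILD_BUG_ON(ENA_CLEANUP_BUF_SIZE > RTE_MEMPOOL_CACHE_MAX_SIZE);
 *
 * Keeping the cleanup batch within the mempool cache size lets freed mbufs be
 * returned through the per-lcore cache instead of the common pool ring.
 */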

#define ENA_PTYPE_HAS_HASH	(RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_METRICS_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, metrics)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

#define ENA_STAT_ENA_SRD_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, srd)
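
/*
 * Illustrative only: ENA_STAT_RX_ENTRY(cnt), for example, expands to
 *
 *	{ .name = "cnt", .stat_offset = offsetof(struct ena_stats_rx, cnt) }
 *
 * so each table below pairs an xstat name with the byte offset of the
 * matching counter inside its per-queue or per-device stats structure.
 */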

/* Device arguments */
#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"
/* Timeout in seconds after which a single uncompleted Tx packet should be
 * considered as missing.
 */
#define ENA_DEVARG_MISS_TXC_TO "miss_txc_to"
/*
 * Controls whether LLQ should be used (if available). Enabled by default.
 * NOTE: Disabling the LLQ is strongly discouraged, as it may lead to a huge
 * performance degradation on 6th generation AWS instances.
 */
#define ENA_DEVARG_ENABLE_LLQ "enable_llq"
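
/*
 * Illustrative only: the keys above are parsed by ena_parse_devargs() and are
 * supplied as comma-separated key=value pairs in the EAL allow-list entry for
 * the device, e.g. (the BDF below is just a placeholder):
 *
 *	dpdk-testpmd -a 0000:00:05.0,large_llq_hdr=1,miss_txc_to=5,enable_llq=1 ...
 */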

/*
 * Each rte_memzone should have a unique name.
 * To satisfy this, count the number of allocations and add it to the name.
 */
rte_atomic64_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(dev_start),
	ENA_STAT_GLOBAL_ENTRY(dev_stop),
	ENA_STAT_GLOBAL_ENTRY(tx_drops),
};

/*
 * The legacy metrics (also known as eni stats) consisted of 5 stats, while the
 * reworked metrics (also known as customer metrics) support an additional stat.
 */
static struct ena_stats ena_stats_metrics_strings[] = {
	ENA_STAT_METRICS_ENTRY(bw_in_allowance_exceeded),
	ENA_STAT_METRICS_ENTRY(bw_out_allowance_exceeded),
	ENA_STAT_METRICS_ENTRY(pps_allowance_exceeded),
	ENA_STAT_METRICS_ENTRY(conntrack_allowance_exceeded),
	ENA_STAT_METRICS_ENTRY(linklocal_allowance_exceeded),
	ENA_STAT_METRICS_ENTRY(conntrack_allowance_available),
};

static const struct ena_stats ena_stats_srd_strings[] = {
	ENA_STAT_ENA_SRD_ENTRY(ena_srd_mode),
	ENA_STAT_ENA_SRD_ENTRY(ena_srd_tx_pkts),
	ENA_STAT_ENA_SRD_ENTRY(ena_srd_eligible_tx_pkts),
	ENA_STAT_ENA_SRD_ENTRY(ena_srd_rx_pkts),
	ENA_STAT_ENA_SRD_ENTRY(ena_srd_resource_utilization),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(available_desc),
	ENA_STAT_TX_ENTRY(missed_tx),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refill_partial),
	ENA_STAT_RX_ENTRY(l3_csum_bad),
	ENA_STAT_RX_ENTRY(l4_csum_bad),
	ENA_STAT_RX_ENTRY(l4_csum_good),
	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_METRICS	ARRAY_SIZE(ena_stats_metrics_strings)
#define ENA_STATS_ARRAY_METRICS_LEGACY	(ENA_STATS_ARRAY_METRICS - 1)
#define ENA_STATS_ARRAY_ENA_SRD	ARRAY_SIZE(ena_stats_srd_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)

#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
			RTE_ETH_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\
		       RTE_MBUF_F_TX_IP_CKSUM |\
		       RTE_MBUF_F_TX_TCP_SEG)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF		0xEC20
#define PCI_DEVICE_ID_ENA_VF_RSERV0	0xEC21

#define	ENA_TX_OFFLOAD_MASK	(RTE_MBUF_F_TX_L4_MASK |\
	RTE_MBUF_F_TX_IPV6 |\
	RTE_MBUF_F_TX_IPV4 |\
	RTE_MBUF_F_TX_IP_CKSUM |\
	RTE_MBUF_F_TX_TCP_SEG)

#define	ENA_TX_OFFLOAD_NOTSUP_MASK	\
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)

/** HW specific offloads capabilities. */
/* IPv4 checksum offload. */
#define ENA_L3_IPV4_CSUM		0x0001
/* TCP/UDP checksum offload for IPv4 packets. */
#define ENA_L4_IPV4_CSUM		0x0002
/* TCP/UDP checksum offload for IPv4 packets with pseudo header checksum. */
#define ENA_L4_IPV4_CSUM_PARTIAL	0x0004
/* TCP/UDP checksum offload for IPv6 packets. */
#define ENA_L4_IPV6_CSUM		0x0008
/* TCP/UDP checksum offload for IPv6 packets with pseudo header checksum. */
#define ENA_L4_IPV6_CSUM_PARTIAL	0x0010
/* TSO support for IPv4 packets. */
#define ENA_IPV4_TSO			0x0020

/* Device supports setting RSS hash. */
#define ENA_RX_RSS_HASH			0x0040

static const struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) },
	{ .device_id = 0 },
};

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_adapter *adapter,
			   struct rte_pci_device *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx);
static int ena_dev_configure(struct rte_eth_dev *dev);
static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
	struct ena_tx_buffer *tx_info,
	struct rte_mbuf *mbuf,
	void **push_header,
	uint16_t *header_len);
static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
				    struct ena_com_rx_buf_info *ena_bufs,
				    uint32_t descs,
				    uint16_t *next_to_clean,
				    uint8_t offset);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
				  struct rte_mbuf *mbuf, uint16_t id);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter,
			   bool disable_meta_caching);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static int ena_stop(struct rte_eth_dev *dev);
static int ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring);
static void ena_queue_stop(struct ena_ring *ring);
static void ena_queue_stop_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring);
static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter);
static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter);
static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter);
static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter);
static int ena_infos_get(struct rte_eth_dev *dev,
			 struct rte_eth_dev_info *dev_info);
static void ena_interrupt_handler_rte(void *cb_arg);
static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
static void ena_destroy_device(struct rte_eth_dev *eth_dev);
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
static int ena_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int n);
static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
				      const uint64_t *ids,
				      struct rte_eth_xstat_name *xstats_names,
				      unsigned int size);
static int ena_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *stats,
			  unsigned int n);
static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n);
static int ena_process_bool_devarg(const char *key,
				   const char *value,
				   void *opaque);
static int ena_parse_devargs(struct ena_adapter *adapter,
			     struct rte_devargs *devargs);
static void ena_copy_customer_metrics(struct ena_adapter *adapter,
				      uint64_t *buf,
				      size_t buf_size);
static void ena_copy_ena_srd_info(struct ena_adapter *adapter,
				  struct ena_stats_srd *srd_info);
static int ena_setup_rx_intr(struct rte_eth_dev *dev);
static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ena_configure_aenq(struct ena_adapter *adapter);
static int ena_mp_primary_handle(const struct rte_mp_msg *mp_msg,
				 const void *peer);

static const struct eth_dev_ops ena_dev_ops = {
	.dev_configure          = ena_dev_configure,
	.dev_infos_get          = ena_infos_get,
	.rx_queue_setup         = ena_rx_queue_setup,
	.tx_queue_setup         = ena_tx_queue_setup,
	.dev_start              = ena_start,
	.dev_stop               = ena_stop,
	.link_update            = ena_link_update,
	.stats_get              = ena_stats_get,
	.xstats_get_names       = ena_xstats_get_names,
	.xstats_get_names_by_id = ena_xstats_get_names_by_id,
	.xstats_get             = ena_xstats_get,
	.xstats_get_by_id       = ena_xstats_get_by_id,
	.mtu_set                = ena_mtu_set,
	.rx_queue_release       = ena_rx_queue_release,
	.tx_queue_release       = ena_tx_queue_release,
	.dev_close              = ena_close,
	.dev_reset              = ena_dev_reset,
	.reta_update            = ena_rss_reta_update,
	.reta_query             = ena_rss_reta_query,
	.rx_queue_intr_enable   = ena_rx_queue_intr_enable,
	.rx_queue_intr_disable  = ena_rx_queue_intr_disable,
	.rss_hash_update        = ena_rss_hash_update,
	.rss_hash_conf_get      = ena_rss_hash_conf_get,
	.tx_done_cleanup        = ena_tx_cleanup,
};

/*********************************************************************
 *  Multi-Process communication bits
 *********************************************************************/
/* rte_mp IPC message name */
#define ENA_MP_NAME	"net_ena_mp"
/* Request timeout in seconds */
#define ENA_MP_REQ_TMO	5

/** Proxy request type */
enum ena_mp_req {
	ENA_MP_DEV_STATS_GET,
	ENA_MP_ENI_STATS_GET,
	ENA_MP_MTU_SET,
	ENA_MP_IND_TBL_GET,
	ENA_MP_IND_TBL_SET,
	ENA_MP_CUSTOMER_METRICS_GET,
	ENA_MP_SRD_STATS_GET,
};

/** Proxy message body. Shared between requests and responses. */
struct ena_mp_body {
	/* Message type */
	enum ena_mp_req type;
	int port_id;
	/* Processing result. Set in replies. 0 if message succeeded, negative
	 * error code otherwise.
	 */
	int result;
	union {
		int mtu; /* For ENA_MP_MTU_SET */
	} args;
};

/**
 * Initialize IPC message.
 *
 * @param[out] msg
 *   Pointer to the message to initialize.
 * @param[in] type
 *   Message type.
 * @param[in] port_id
 *   Port ID of target device.
 *
 */
static void
mp_msg_init(struct rte_mp_msg *msg, enum ena_mp_req type, int port_id)
{
	struct ena_mp_body *body = (struct ena_mp_body *)&msg->param;

	memset(msg, 0, sizeof(*msg));
	strlcpy(msg->name, ENA_MP_NAME, sizeof(msg->name));
	msg->len_param = sizeof(*body);
	body->type = type;
	body->port_id = port_id;
}

/*********************************************************************
 *  Multi-Process communication PMD API
 *********************************************************************/
/**
 * Define proxy request descriptor
 *
 * Used to define all structures and functions required for proxying a given
 * function to the primary process including the code to perform to prepare the
 * request and process the response.
 *
 * @param[in] f
 *   Name of the function to proxy
 * @param[in] t
 *   Message type to use
 * @param[in] prep
 *   Body of a function to prepare the request in form of a statement
 *   expression. It is passed all the original function arguments along with two
 *   extra ones:
 *   - struct ena_adapter *adapter - PMD data of the device calling the proxy.
 *   - struct ena_mp_body *req - body of a request to prepare.
 * @param[in] proc
 *   Body of a function to process the response in form of a statement
 *   expression. It is passed all the original function arguments along with two
 *   extra ones:
 *   - struct ena_adapter *adapter - PMD data of the device calling the proxy.
 *   - struct ena_mp_body *rsp - body of a response to process.
 * @param ...
 *   Proxied function's arguments
 *
 * @note Inside prep and proc any parameters which aren't used should be marked
 *       as such (with ENA_TOUCH or __rte_unused).
 */
#define ENA_PROXY_DESC(f, t, prep, proc, ...)			\
	static const enum ena_mp_req mp_type_ ## f = t;		\
	static const char *mp_name_ ## f = #t;			\
	static void mp_prep_ ## f(struct ena_adapter *adapter,	\
				  struct ena_mp_body *req,	\
				  __VA_ARGS__)			\
	{							\
		prep;						\
	}							\
	static void mp_proc_ ## f(struct ena_adapter *adapter,	\
				  struct ena_mp_body *rsp,	\
				  __VA_ARGS__)			\
	{							\
		proc;						\
	}

/**
 * Proxy wrapper for calling primary functions in a secondary process.
 *
 * Depending on whether called in primary or secondary process, calls the
 * @p func directly or proxies the call to the primary process via rte_mp IPC.
 * This macro requires a proxy request descriptor to be defined for @p func
 * using ENA_PROXY_DESC() macro.
 *
 * @param[in/out] a
 *   Device PMD data. Used for sending the message and sharing message results
 *   between primary and secondary.
 * @param[in] f
 *   Function to proxy.
 * @param ...
 *   Arguments of @p func.
 *
 * @return
 *   - 0: Processing succeeded and response handler was called.
 *   - -EPERM: IPC is unavailable on this platform. This means only primary
 *             process may call the proxied function.
 *   - -EIO:   IPC returned error on request send. Inspect rte_errno for the
 *             detailed error code.
 *   - Negative error code from the proxied function.
 *
 * @note This mechanism is geared towards control-path tasks. Avoid calling it
 *       in fast-path unless unbounded delays are allowed. This is due to the
 *       IPC mechanism itself (socket based).
 * @note Due to IPC parameter size limitations the proxy logic shares call
 *       results through the struct ena_adapter shared memory. This makes the
 *       proxy mechanism strictly single-threaded. Therefore be sure to make all
 *       calls to the same proxied function under the same lock.
 */
#define ENA_PROXY(a, f, ...)						\
__extension__ ({							\
	struct ena_adapter *_a = (a);					\
	struct timespec ts = { .tv_sec = ENA_MP_REQ_TMO };		\
	struct ena_mp_body *req, *rsp;					\
	struct rte_mp_reply mp_rep;					\
	struct rte_mp_msg mp_req;					\
	int ret;							\
									\
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {		\
		ret = f(__VA_ARGS__);					\
	} else {							\
		/* Prepare and send request */				\
		req = (struct ena_mp_body *)&mp_req.param;		\
		mp_msg_init(&mp_req, mp_type_ ## f, _a->edev_data->port_id); \
		mp_prep_ ## f(_a, req, ## __VA_ARGS__);			\
									\
		ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);	\
		if (likely(!ret)) {					\
			RTE_ASSERT(mp_rep.nb_received == 1);		\
			rsp = (struct ena_mp_body *)&mp_rep.msgs[0].param; \
			ret = rsp->result;				\
			if (ret == 0) {					\
				mp_proc_##f(_a, rsp, ## __VA_ARGS__);	\
			} else {					\
				PMD_DRV_LOG(ERR,			\
					    "%s returned error: %d\n",	\
					    mp_name_ ## f, rsp->result);\
			}						\
			free(mp_rep.msgs);				\
		} else if (rte_errno == ENOTSUP) {			\
			PMD_DRV_LOG(ERR,				\
				    "No IPC, can't proxy to primary\n");\
			ret = -rte_errno;				\
		} else {						\
			PMD_DRV_LOG(ERR, "Request %s failed: %s\n",	\
				    mp_name_ ## f,			\
				    rte_strerror(rte_errno));		\
			ret = -EIO;					\
		}							\
	}								\
	ret;								\
})
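
/*
 * Illustrative only: for a proxied call such as ena_com_set_dev_mtu(),
 * ENA_PROXY_DESC() below generates mp_type_ena_com_set_dev_mtu,
 * mp_name_ena_com_set_dev_mtu, mp_prep_ena_com_set_dev_mtu() and
 * mp_proc_ena_com_set_dev_mtu(), so the caller can simply do:
 *
 *	rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, ena_dev, mtu);
 *
 * In the primary process this collapses to a direct call; in a secondary
 * process it becomes an rte_mp request that the primary handles.
 */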

/*********************************************************************
 *  Multi-Process communication request descriptors
 *********************************************************************/

ENA_PROXY_DESC(ena_com_get_dev_basic_stats, ENA_MP_DEV_STATS_GET,
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(stats);
}),
__extension__ ({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if (stats != &adapter->basic_stats)
		rte_memcpy(stats, &adapter->basic_stats, sizeof(*stats));
}),
	struct ena_com_dev *ena_dev, struct ena_admin_basic_stats *stats);

ENA_PROXY_DESC(ena_com_get_eni_stats, ENA_MP_ENI_STATS_GET,
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(stats);
}),
__extension__ ({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if (stats != (struct ena_admin_eni_stats *)adapter->metrics_stats)
		rte_memcpy(stats, adapter->metrics_stats, sizeof(*stats));
}),
	struct ena_com_dev *ena_dev, struct ena_admin_eni_stats *stats);

ENA_PROXY_DESC(ena_com_set_dev_mtu, ENA_MP_MTU_SET,
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(ena_dev);
	req->args.mtu = mtu;
}),
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(mtu);
}),
	struct ena_com_dev *ena_dev, int mtu);

ENA_PROXY_DESC(ena_com_indirect_table_set, ENA_MP_IND_TBL_SET,
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
}),
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
}),
	struct ena_com_dev *ena_dev);

ENA_PROXY_DESC(ena_com_indirect_table_get, ENA_MP_IND_TBL_GET,
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(ind_tbl);
}),
__extension__ ({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if (ind_tbl != adapter->indirect_table)
		rte_memcpy(ind_tbl, adapter->indirect_table,
			   sizeof(adapter->indirect_table));
}),
	struct ena_com_dev *ena_dev, u32 *ind_tbl);

ENA_PROXY_DESC(ena_com_get_customer_metrics, ENA_MP_CUSTOMER_METRICS_GET,
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(buf);
	ENA_TOUCH(buf_size);
}),
__extension__ ({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if (buf != (char *)adapter->metrics_stats)
		rte_memcpy(buf, adapter->metrics_stats, buf_size);
}),
	struct ena_com_dev *ena_dev, char *buf, size_t buf_size);

ENA_PROXY_DESC(ena_com_get_ena_srd_info, ENA_MP_SRD_STATS_GET,
__extension__ ({
	ENA_TOUCH(adapter);
	ENA_TOUCH(req);
	ENA_TOUCH(ena_dev);
	ENA_TOUCH(info);
}),
__extension__ ({
	ENA_TOUCH(rsp);
	ENA_TOUCH(ena_dev);
	if ((struct ena_stats_srd *)info != &adapter->srd_stats)
		rte_memcpy((struct ena_stats_srd *)info,
			   &adapter->srd_stats,
			   sizeof(struct ena_stats_srd));
}),
	struct ena_com_dev *ena_dev, struct ena_admin_ena_srd_info *info);

static inline void ena_trigger_reset(struct ena_adapter *adapter,
				     enum ena_regs_reset_reason_types reason)
{
	if (likely(!adapter->trigger_reset)) {
		adapter->reset_reason = reason;
		adapter->trigger_reset = true;
	}
}

static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring,
				       struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx,
				       bool fill_hash)
{
	struct ena_stats_rx *rx_stats = &rx_ring->rx_stats;
	uint64_t ol_flags = 0;
	uint32_t packet_type = 0;

	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		packet_type |= RTE_PTYPE_L4_TCP;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		packet_type |= RTE_PTYPE_L4_UDP;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
		packet_type |= RTE_PTYPE_L3_IPV4;
		if (unlikely(ena_rx_ctx->l3_csum_err)) {
			++rx_stats->l3_csum_bad;
			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		} else {
			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
		}
	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
		packet_type |= RTE_PTYPE_L3_IPV6;
	}

	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) {
		ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
	} else {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			++rx_stats->l4_csum_bad;
			/*
			 * For the L4 Rx checksum offload the HW may indicate
			 * bad checksum although it's valid. Because of that,
			 * we're setting the UNKNOWN flag to let the app
			 * re-verify the checksum.
			 */
			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
		} else {
			++rx_stats->l4_csum_good;
			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
		}
	}

	if (fill_hash &&
	    likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mbuf->hash.rss = ena_rx_ctx->hash;
	}

	mbuf->ol_flags = ol_flags;
	mbuf->packet_type = packet_type;
}
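
/*
 * Illustrative only: an application consuming the flags set above could act
 * on them roughly as follows (a minimal sketch, not driver code;
 * app_verify_l4_csum() is a hypothetical helper):
 *
 *	uint64_t l4 = m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK;
 *
 *	if (l4 == RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN)
 *		ok = app_verify_l4_csum(m);
 *	else
 *		ok = (l4 == RTE_MBUF_F_RX_L4_CKSUM_GOOD);
 */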

static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_tx_ctx *ena_tx_ctx,
				       uint64_t queue_offloads,
				       bool disable_meta_caching)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
	    (queue_offloads & QUEUE_OFFLOADS)) {
		/* check if TSO is required */
		if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
			ena_tx_ctx->tso_enable = true;

			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
		}

		/* check if L3 checksum is needed */
		if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
		    (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
			ena_tx_ctx->l3_csum_enable = true;

		if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
			/* For the IPv6 packets, DF always needs to be true. */
			ena_tx_ctx->df = 1;
		} else {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

			/* set don't fragment (DF) flag */
			if (mbuf->packet_type &
				(RTE_PTYPE_L4_NONFRAG
					| RTE_PTYPE_INNER_L4_NONFRAG))
				ena_tx_ctx->df = 1;
		}

		/* check if L4 checksum is needed */
		if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) &&
		    (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
			ena_tx_ctx->l4_csum_enable = true;
		} else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
				RTE_MBUF_F_TX_UDP_CKSUM) &&
				(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
			ena_tx_ctx->l4_csum_enable = true;
		} else {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
			ena_tx_ctx->l4_csum_enable = false;
		}

		ena_meta->mss = mbuf->tso_segsz;
		ena_meta->l3_hdr_len = mbuf->l3_len;
		ena_meta->l3_hdr_offset = mbuf->l2_len;

		ena_tx_ctx->meta_valid = true;
	} else if (disable_meta_caching) {
		memset(ena_meta, 0, sizeof(*ena_meta));
		ena_tx_ctx->meta_valid = true;
	} else {
		ena_tx_ctx->meta_valid = false;
	}
}
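
/*
 * Illustrative only: for the offload branch above to take effect, the
 * application prepares the mbuf along these lines (a minimal sketch for an
 * IPv4/TCP frame, assuming the queue was configured with
 * RTE_ETH_TX_OFFLOAD_IPV4_CKSUM and RTE_ETH_TX_OFFLOAD_TCP_CKSUM):
 *
 *	m->l2_len = sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
 *		       RTE_MBUF_F_TX_TCP_CKSUM;
 *
 * eth_ena_prep_pkts() can then be used on the burst to validate and finish
 * the offload preparation before it is passed to eth_ena_xmit_pkts().
 */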

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->mbuf))
			return 0;
	}

	if (tx_info)
		PMD_TX_LOG(ERR, "tx_info doesn't have valid mbuf. queue %d:%d req_id %u\n",
			tx_ring->port_id, tx_ring->id, req_id);
	else
		PMD_TX_LOG(ERR, "Invalid req_id: %hu in queue %d:%d\n",
			req_id, tx_ring->port_id, tx_ring->id);

	/* Trigger device reset */
	++tx_ring->tx_stats.bad_req_id;
	ena_trigger_reset(tx_ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
	return -EFAULT;
}

static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_DPDK;
	host_info->kernel_ver = RTE_VERSION;
	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
		sizeof(host_info->kernel_ver_str));
	host_info->os_dist = RTE_VERSION;
	strlcpy((char *)host_info->os_dist_str, rte_version(),
		sizeof(host_info->os_dist_str));
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR <<
			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
	host_info->num_cpus = rte_lcore_count();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

/* This function calculates the number of xstats based on the current config */
static unsigned int ena_xstats_calc_num(struct rte_eth_dev_data *data)
{
	struct ena_adapter *adapter = data->dev_private;

	return ENA_STATS_ARRAY_GLOBAL +
		adapter->metrics_num +
		ENA_STATS_ARRAY_ENA_SRD +
		(data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
		(data->nb_rx_queues * ENA_STATS_ARRAY_RX);
}
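
/*
 * Illustrative only: with the tables defined earlier this is 4 global stats,
 * plus metrics_num (6, or 5 for the legacy eni stats), plus 5 ENA SRD stats,
 * plus 8 per Tx queue and 9 per Rx queue. A port with 4 Tx and 4 Rx queues
 * and full customer metrics support therefore exposes
 * 4 + 6 + 5 + 4 * 8 + 4 * 9 = 83 xstats.
 */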

static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_xstats_calc_num(adapter->edev_data);

	/* allocate 32 bytes for each string and 64 bits for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(&adapter->ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(&adapter->ena_dev);
}

static int ena_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ena_adapter *adapter = dev->data->dev_private;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
		ret = ena_stop(dev);
	adapter->state = ENA_ADAPTER_STATE_CLOSED;

	ena_rx_queue_release_all(dev);
	ena_tx_queue_release_all(dev);

	rte_free(adapter->drv_stats);
	adapter->drv_stats = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ena_interrupt_handler_rte,
				     dev);

	/*
	 * MAC is not allocated dynamically. Setting it to NULL should prevent
	 * the release of the resource in rte_eth_dev_release_port().
	 */
	dev->data->mac_addrs = NULL;

	return ret;
}

static int
ena_dev_reset(struct rte_eth_dev *dev)
{
	int rc = 0;

	/* Cannot release memory in secondary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_DRV_LOG(WARNING, "dev_reset not supported in secondary.\n");
		return -EPERM;
	}

	ena_destroy_device(dev);
	rc = eth_ena_dev_init(dev);
	if (rc)
		PMD_INIT_LOG(CRIT, "Cannot initialize device\n");

	return rc;
}

static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
	int nb_queues = dev->data->nb_rx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_rx_queue_release(dev, i);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
	int nb_queues = dev->data->nb_tx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_tx_queue_release(dev, i);
}

static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ena_ring *ring = dev->data->rx_queues[qid];

	/* Free ring resources */
	rte_free(ring->rx_buffer_info);
	ring->rx_buffer_info = NULL;

	rte_free(ring->rx_refill_buffer);
	ring->rx_refill_buffer = NULL;

	rte_free(ring->empty_rx_reqs);
	ring->empty_rx_reqs = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "Rx queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ena_ring *ring = dev->data->tx_queues[qid];

	/* Free ring resources */
	rte_free(ring->push_buf_intermediate_buf);

	rte_free(ring->tx_buffer_info);

	rte_free(ring->empty_tx_reqs);

	ring->empty_tx_reqs = NULL;
	ring->tx_buffer_info = NULL;
	ring->push_buf_intermediate_buf = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "Tx queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i];
		if (rx_info->mbuf) {
			rte_mbuf_raw_free(rx_info->mbuf);
			rx_info->mbuf = NULL;
		}
	}
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

		if (tx_buf->mbuf) {
			rte_pktmbuf_free(tx_buf->mbuf);
			tx_buf->mbuf = NULL;
		}
	}
}

static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete)
{
	struct rte_eth_link *link = &dev->data->dev_link;
	struct ena_adapter *adapter = dev->data->dev_private;

	link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
	link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	return 0;
}

static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *queues = NULL;
	int nb_queues;
	int i = 0;
	int rc = 0;

	if (ring_type == ENA_RING_TYPE_RX) {
		queues = adapter->rx_ring;
		nb_queues = dev->data->nb_rx_queues;
	} else {
		queues = adapter->tx_ring;
		nb_queues = dev->data->nb_tx_queues;
	}
	for (i = 0; i < nb_queues; i++) {
		if (queues[i].configured) {
			if (ring_type == ENA_RING_TYPE_RX) {
				ena_assert_msg(
					dev->data->rx_queues[i] == &queues[i],
					"Inconsistent state of Rx queues\n");
			} else {
				ena_assert_msg(
					dev->data->tx_queues[i] == &queues[i],
					"Inconsistent state of Tx queues\n");
			}

			rc = ena_queue_start(dev, &queues[i]);

			if (rc) {
				PMD_INIT_LOG(ERR,
					"Failed to start queue[%d] of type(%d)\n",
					i, ring_type);
				goto err;
			}
		}
	}

	return 0;

err:
	while (i--)
		if (queues[i].configured)
			ena_queue_stop(&queues[i]);

	return rc;
}

static int
ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
		       bool use_large_llq_hdr)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	uint32_t max_tx_queue_size;
	uint32_t max_rx_queue_size;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
			max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queue_ext->max_tx_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_tx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
			max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queues->max_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_tx_descs);
	}

	/* Round down to the nearest power of 2 */
	max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
	max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);

	if (use_large_llq_hdr) {
		if ((llq->entry_size_ctrl_supported &
		     ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
		    (ena_dev->tx_mem_queue_type ==
		     ENA_ADMIN_PLACEMENT_POLICY_DEV)) {
			max_tx_queue_size /= 2;
			PMD_INIT_LOG(INFO,
				"Forcing large headers and decreasing maximum Tx queue size to %d\n",
				max_tx_queue_size);
		} else {
			PMD_INIT_LOG(ERR,
				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
		}
	}

	if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
		PMD_INIT_LOG(ERR, "Invalid queue size\n");
		return -EFAULT;
	}

	ctx->max_tx_queue_size = max_tx_queue_size;
	ctx->max_rx_queue_size = max_rx_queue_size;

	return 0;
}
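
/*
 * Illustrative only (made-up device limits): if the device reports
 * max_rx_cq_depth = 1024 and max_rx_sq_depth = 8192, the Rx limit becomes
 * min(1024, 8192) = 1024. With LLQ in use, a Tx CQ depth of 1024 and
 * max_llq_depth = 512 give min(1024, 512) = 512, already a power of two.
 * Requesting large LLQ headers on top of that halves the Tx limit to 256.
 */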

static void ena_stats_restart(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter = dev->data->dev_private;

	rte_atomic64_init(&adapter->drv_stats->ierrors);
	rte_atomic64_init(&adapter->drv_stats->oerrors);
	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
	adapter->drv_stats->rx_drops = 0;
}

static int ena_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats)
{
	struct ena_admin_basic_stats ena_stats;
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	int max_rings_stats;

	memset(&ena_stats, 0, sizeof(ena_stats));

	rte_spinlock_lock(&adapter->admin_lock);
	rc = ENA_PROXY(adapter, ena_com_get_dev_basic_stats, ena_dev,
		       &ena_stats);
	rte_spinlock_unlock(&adapter->admin_lock);
	if (unlikely(rc)) {
		PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n");
		return rc;
	}

	/* Set of basic statistics from ENA */
	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
					  ena_stats.rx_pkts_low);
	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
					  ena_stats.tx_pkts_low);
	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
					ena_stats.rx_bytes_low);
	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
					ena_stats.tx_bytes_low);

	/* Driver related stats */
	stats->imissed = adapter->drv_stats->rx_drops;
	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);

	max_rings_stats = RTE_MIN(dev->data->nb_rx_queues,
		RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < max_rings_stats; ++i) {
		struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats;

		stats->q_ibytes[i] = rx_stats->bytes;
		stats->q_ipackets[i] = rx_stats->cnt;
		stats->q_errors[i] = rx_stats->bad_desc_num +
			rx_stats->bad_req_id;
	}

	max_rings_stats = RTE_MIN(dev->data->nb_tx_queues,
		RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < max_rings_stats; ++i) {
		struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats;

		stats->q_obytes[i] = tx_stats->bytes;
		stats->q_opackets[i] = tx_stats->cnt;
	}

	return 0;
}

static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	int rc = 0;

	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
	adapter = dev->data->dev_private;

	ena_dev = &adapter->ena_dev;
	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");

	rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, ena_dev, mtu);
	if (rc)
		PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu);
	else
		PMD_DRV_LOG(NOTICE, "MTU set to: %d\n", mtu);

	return rc;
}

static int ena_start(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	uint64_t ticks;
	int rc = 0;
	uint16_t i;

	/* Cannot allocate memory in secondary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_DRV_LOG(WARNING, "dev_start not supported in secondary.\n");
		return -EPERM;
	}

	rc = ena_setup_rx_intr(dev);
	if (rc)
		return rc;

	rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX);
	if (rc)
		return rc;

	rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX);
	if (rc)
		goto err_start_tx;

	if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
		rc = ena_rss_configure(adapter);
		if (rc)
			goto err_rss_init;
	}

	ena_stats_restart(dev);

	adapter->timestamp_wd = rte_get_timer_cycles();
	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;

	ticks = rte_get_timer_hz();
	rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
			ena_timer_wd_callback, dev);

	++adapter->dev_stats.dev_start;
	adapter->state = ENA_ADAPTER_STATE_RUNNING;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

err_rss_init:
	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
err_start_tx:
	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
	return rc;
}

static int ena_stop(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint16_t i;
	int rc;

	/* Cannot free memory in secondary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_DRV_LOG(WARNING, "dev_stop not supported in secondary.\n");
		return -EPERM;
	}

	rte_timer_stop_sync(&adapter->timer_wd);
	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);

	if (adapter->trigger_reset) {
		rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
		if (rc)
			PMD_DRV_LOG(ERR, "Device reset failed, rc: %d\n", rc);
	}

	rte_intr_disable(intr_handle);

	rte_intr_efd_disable(intr_handle);

	/* Cleanup vector list */
	rte_intr_vec_list_free(intr_handle);

	rte_intr_enable(intr_handle);

	++adapter->dev_stats.dev_stop;
	adapter->state = ENA_ADAPTER_STATE_STOPPED;
	dev->data->dev_started = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
{
	struct ena_adapter *adapter = ring->adapter;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct ena_com_create_io_ctx ctx =
		/* policy set to _HOST just to satisfy icc compiler */
		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
		  0, 0, 0, 0, 0 };
	uint16_t ena_qid;
	unsigned int i;
	int rc;

	ctx.msix_vector = -1;
	if (ring->type == ENA_RING_TYPE_TX) {
		ena_qid = ENA_IO_TXQ_IDX(ring->id);
		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
		for (i = 0; i < ring->ring_size; i++)
			ring->empty_tx_reqs[i] = i;
	} else {
		ena_qid = ENA_IO_RXQ_IDX(ring->id);
		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
		if (rte_intr_dp_is_en(intr_handle))
			ctx.msix_vector =
				rte_intr_vec_list_index_get(intr_handle,
							    ring->id);

		for (i = 0; i < ring->ring_size; i++)
			ring->empty_rx_reqs[i] = i;
	}
	ctx.queue_size = ring->ring_size;
	ctx.qid = ena_qid;
	ctx.numa_node = ring->numa_socket_id;

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Failed to create IO queue[%d] (qid:%d), rc: %d\n",
			ring->id, ena_qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &ring->ena_com_io_sq,
				     &ring->ena_com_io_cq);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Failed to get IO queue[%d] handlers, rc: %d\n",
			ring->id, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	if (ring->type == ENA_RING_TYPE_TX)
		ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);

	/* Start with Rx interrupts being masked. */
	if (ring->type == ENA_RING_TYPE_RX && rte_intr_dp_is_en(intr_handle))
		ena_rx_queue_intr_disable(dev, ring->id);

	return 0;
}
"Failed to populate Rx ring\n"); 1458 return ENA_COM_FAULT; 1459 } 1460 /* Flush per-core RX buffers pools cache as they can be used on other 1461 * cores as well. 1462 */ 1463 rte_mempool_cache_flush(NULL, ring->mb_pool); 1464 1465 return 0; 1466 } 1467 1468 static int ena_tx_queue_setup(struct rte_eth_dev *dev, 1469 uint16_t queue_idx, 1470 uint16_t nb_desc, 1471 unsigned int socket_id, 1472 const struct rte_eth_txconf *tx_conf) 1473 { 1474 struct ena_ring *txq = NULL; 1475 struct ena_adapter *adapter = dev->data->dev_private; 1476 unsigned int i; 1477 uint16_t dyn_thresh; 1478 1479 txq = &adapter->tx_ring[queue_idx]; 1480 1481 if (txq->configured) { 1482 PMD_DRV_LOG(CRIT, 1483 "API violation. Queue[%d] is already configured\n", 1484 queue_idx); 1485 return ENA_COM_FAULT; 1486 } 1487 1488 if (!rte_is_power_of_2(nb_desc)) { 1489 PMD_DRV_LOG(ERR, 1490 "Unsupported size of Tx queue: %d is not a power of 2.\n", 1491 nb_desc); 1492 return -EINVAL; 1493 } 1494 1495 if (nb_desc > adapter->max_tx_ring_size) { 1496 PMD_DRV_LOG(ERR, 1497 "Unsupported size of Tx queue (max size: %d)\n", 1498 adapter->max_tx_ring_size); 1499 return -EINVAL; 1500 } 1501 1502 txq->port_id = dev->data->port_id; 1503 txq->next_to_clean = 0; 1504 txq->next_to_use = 0; 1505 txq->ring_size = nb_desc; 1506 txq->size_mask = nb_desc - 1; 1507 txq->numa_socket_id = socket_id; 1508 txq->pkts_without_db = false; 1509 txq->last_cleanup_ticks = 0; 1510 1511 txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info", 1512 sizeof(struct ena_tx_buffer) * txq->ring_size, 1513 RTE_CACHE_LINE_SIZE, 1514 socket_id); 1515 if (!txq->tx_buffer_info) { 1516 PMD_DRV_LOG(ERR, 1517 "Failed to allocate memory for Tx buffer info\n"); 1518 return -ENOMEM; 1519 } 1520 1521 txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs", 1522 sizeof(uint16_t) * txq->ring_size, 1523 RTE_CACHE_LINE_SIZE, 1524 socket_id); 1525 if (!txq->empty_tx_reqs) { 1526 PMD_DRV_LOG(ERR, 1527 "Failed to allocate memory for empty Tx requests\n"); 1528 rte_free(txq->tx_buffer_info); 1529 return -ENOMEM; 1530 } 1531 1532 txq->push_buf_intermediate_buf = 1533 rte_zmalloc_socket("txq->push_buf_intermediate_buf", 1534 txq->tx_max_header_size, 1535 RTE_CACHE_LINE_SIZE, 1536 socket_id); 1537 if (!txq->push_buf_intermediate_buf) { 1538 PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n"); 1539 rte_free(txq->tx_buffer_info); 1540 rte_free(txq->empty_tx_reqs); 1541 return -ENOMEM; 1542 } 1543 1544 for (i = 0; i < txq->ring_size; i++) 1545 txq->empty_tx_reqs[i] = i; 1546 1547 txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 1548 1549 /* Check if caller provided the Tx cleanup threshold value. 

static int ena_tx_queue_setup(struct rte_eth_dev *dev,
			      uint16_t queue_idx,
			      uint16_t nb_desc,
			      unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf)
{
	struct ena_ring *txq = NULL;
	struct ena_adapter *adapter = dev->data->dev_private;
	unsigned int i;
	uint16_t dyn_thresh;

	txq = &adapter->tx_ring[queue_idx];

	if (txq->configured) {
		PMD_DRV_LOG(CRIT,
			"API violation. Queue[%d] is already configured\n",
			queue_idx);
		return ENA_COM_FAULT;
	}

	if (!rte_is_power_of_2(nb_desc)) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of Tx queue: %d is not a power of 2.\n",
			nb_desc);
		return -EINVAL;
	}

	if (nb_desc > adapter->max_tx_ring_size) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of Tx queue (max size: %d)\n",
			adapter->max_tx_ring_size);
		return -EINVAL;
	}

	txq->port_id = dev->data->port_id;
	txq->next_to_clean = 0;
	txq->next_to_use = 0;
	txq->ring_size = nb_desc;
	txq->size_mask = nb_desc - 1;
	txq->numa_socket_id = socket_id;
	txq->pkts_without_db = false;
	txq->last_cleanup_ticks = 0;

	txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info",
		sizeof(struct ena_tx_buffer) * txq->ring_size,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (!txq->tx_buffer_info) {
		PMD_DRV_LOG(ERR,
			"Failed to allocate memory for Tx buffer info\n");
		return -ENOMEM;
	}

	txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs",
		sizeof(uint16_t) * txq->ring_size,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (!txq->empty_tx_reqs) {
		PMD_DRV_LOG(ERR,
			"Failed to allocate memory for empty Tx requests\n");
		rte_free(txq->tx_buffer_info);
		return -ENOMEM;
	}

	txq->push_buf_intermediate_buf =
		rte_zmalloc_socket("txq->push_buf_intermediate_buf",
			txq->tx_max_header_size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (!txq->push_buf_intermediate_buf) {
		PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n");
		rte_free(txq->tx_buffer_info);
		rte_free(txq->empty_tx_reqs);
		return -ENOMEM;
	}

	for (i = 0; i < txq->ring_size; i++)
		txq->empty_tx_reqs[i] = i;

	txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;

	/* Check if caller provided the Tx cleanup threshold value. */
	if (tx_conf->tx_free_thresh != 0) {
		txq->tx_free_thresh = tx_conf->tx_free_thresh;
	} else {
		dyn_thresh = txq->ring_size -
			txq->ring_size / ENA_REFILL_THRESH_DIVIDER;
		txq->tx_free_thresh = RTE_MAX(dyn_thresh,
			txq->ring_size - ENA_REFILL_THRESH_PACKET);
	}

	txq->missing_tx_completion_threshold =
		RTE_MIN(txq->ring_size / 2, ENA_DEFAULT_MISSING_COMP);

	/* Store pointer to this queue in upper layer */
	txq->configured = 1;
	dev->data->tx_queues[queue_idx] = txq;

	return 0;
}
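
/*
 * Illustrative only: assuming, for example, ENA_REFILL_THRESH_DIVIDER == 8
 * and ENA_REFILL_THRESH_PACKET == 256 (the real values live in ena_ethdev.h),
 * a 1024-descriptor Tx ring without an explicit tx_free_thresh gets
 * max(1024 - 1024 / 8, 1024 - 256) = 896 as its cleanup threshold, i.e. the
 * dynamic threshold scales with the ring size.
 */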

static int ena_rx_queue_setup(struct rte_eth_dev *dev,
			      uint16_t queue_idx,
			      uint16_t nb_desc,
			      unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *rxq = NULL;
	size_t buffer_size;
	int i;
	uint16_t dyn_thresh;

	rxq = &adapter->rx_ring[queue_idx];
	if (rxq->configured) {
		PMD_DRV_LOG(CRIT,
			"API violation. Queue[%d] is already configured\n",
			queue_idx);
		return ENA_COM_FAULT;
	}

	if (!rte_is_power_of_2(nb_desc)) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of Rx queue: %d is not a power of 2.\n",
			nb_desc);
		return -EINVAL;
	}

	if (nb_desc > adapter->max_rx_ring_size) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of Rx queue (max size: %d)\n",
			adapter->max_rx_ring_size);
		return -EINVAL;
	}

	/* ENA doesn't support buffers smaller than 1400 bytes */
	buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
	if (buffer_size < ENA_RX_BUF_MIN_SIZE) {
		PMD_DRV_LOG(ERR,
			"Unsupported size of Rx buffer: %zu (min size: %d)\n",
			buffer_size, ENA_RX_BUF_MIN_SIZE);
		return -EINVAL;
	}

	rxq->port_id = dev->data->port_id;
	rxq->next_to_clean = 0;
	rxq->next_to_use = 0;
	rxq->ring_size = nb_desc;
	rxq->size_mask = nb_desc - 1;
	rxq->numa_socket_id = socket_id;
	rxq->mb_pool = mp;

	rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info",
		sizeof(struct ena_rx_buffer) * nb_desc,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (!rxq->rx_buffer_info) {
		PMD_DRV_LOG(ERR,
			"Failed to allocate memory for Rx buffer info\n");
		return -ENOMEM;
	}

	rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer",
		sizeof(struct rte_mbuf *) * nb_desc,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (!rxq->rx_refill_buffer) {
		PMD_DRV_LOG(ERR,
			"Failed to allocate memory for Rx refill buffer\n");
		rte_free(rxq->rx_buffer_info);
		rxq->rx_buffer_info = NULL;
		return -ENOMEM;
	}

	rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs",
		sizeof(uint16_t) * nb_desc,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (!rxq->empty_rx_reqs) {
		PMD_DRV_LOG(ERR,
			"Failed to allocate memory for empty Rx requests\n");
		rte_free(rxq->rx_buffer_info);
		rxq->rx_buffer_info = NULL;
		rte_free(rxq->rx_refill_buffer);
		rxq->rx_refill_buffer = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nb_desc; i++)
		rxq->empty_rx_reqs[i] = i;

	rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;

	if (rx_conf->rx_free_thresh != 0) {
		rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	} else {
		dyn_thresh = rxq->ring_size / ENA_REFILL_THRESH_DIVIDER;
		rxq->rx_free_thresh = RTE_MIN(dyn_thresh,
			(uint16_t)(ENA_REFILL_THRESH_PACKET));
	}

	/* Store pointer to this queue in upper layer */
	rxq->configured = 1;
	dev->data->rx_queues[queue_idx] = rxq;

	return 0;
}

static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
				  struct rte_mbuf *mbuf, uint16_t id)
{
	struct ena_com_buf ebuf;
	int rc;

	/* prepare physical address for DMA transaction */
	ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
	ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;

	/* pass resource to device */
	rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id);
	if (unlikely(rc != 0))
		PMD_RX_LOG(WARNING, "Failed adding Rx desc\n");

	return rc;
}

static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
{
	unsigned int i;
	int rc;
	uint16_t next_to_use = rxq->next_to_use;
	uint16_t req_id;
#ifdef RTE_ETHDEV_DEBUG_RX
	uint16_t in_use;
#endif
	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;

	if (unlikely(!count))
		return 0;

#ifdef RTE_ETHDEV_DEBUG_RX
	in_use = rxq->ring_size - 1 -
		ena_com_free_q_entries(rxq->ena_com_io_sq);
	if (unlikely((in_use + count) >= rxq->ring_size))
		PMD_RX_LOG(ERR, "Bad Rx ring state\n");
#endif

	/* get resources for incoming packets */
	rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, count);
	if (unlikely(rc < 0)) {
		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
		++rxq->rx_stats.mbuf_alloc_fail;
		PMD_RX_LOG(DEBUG, "There are not enough free buffers\n");
		return 0;
	}

	for (i = 0; i < count; i++) {
		struct rte_mbuf *mbuf = mbufs[i];
		struct ena_rx_buffer *rx_info;

		if (likely((i + 4) < count))
			rte_prefetch0(mbufs[i + 4]);

		req_id = rxq->empty_rx_reqs[next_to_use];
		rx_info = &rxq->rx_buffer_info[req_id];

		rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id);
		if (unlikely(rc != 0))
			break;

		rx_info->mbuf = mbuf;
		next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask);
	}

	if (unlikely(i < count)) {
		PMD_RX_LOG(WARNING,
			"Refilled Rx queue[%d] with only %d/%d buffers\n",
			rxq->id, i, count);
		rte_pktmbuf_free_bulk(&mbufs[i], count - i);
		++rxq->rx_stats.refill_partial;
	}

	/* When we submitted free resources to device... */
	if (likely(i > 0)) {
		/* ...let HW know that it can fill buffers with data. */
		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);

		rxq->next_to_use = next_to_use;
	}

	return i;
}

static size_t ena_get_metrics_entries(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	size_t metrics_num = 0;

	if (ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS))
		metrics_num = ENA_STATS_ARRAY_METRICS;
	else if (ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS))
		metrics_num = ENA_STATS_ARRAY_METRICS_LEGACY;
	PMD_DRV_LOG(NOTICE, "0x%x customer metrics are supported\n", (unsigned int)metrics_num);
	if (metrics_num > ENA_MAX_CUSTOMER_METRICS) {
		PMD_DRV_LOG(NOTICE, "Not enough space for the requested customer metrics\n");
		metrics_num = ENA_MAX_CUSTOMER_METRICS;
	}
	return metrics_num;
}

static int ena_device_init(struct ena_adapter *adapter,
			   struct rte_pci_device *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	uint32_t aenq_groups;
	int rc;
	bool readless_supported;

	/* Initialize mmio registers */
	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Failed to init MMIO read less\n");
		return rc;
	}

	/* The PCIe configuration space revision id indicates whether mmio
	 * register read is disabled.
	 */
	readless_supported = !(pdev->id.class_id & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	/* reset device */
	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot reset device\n");
		goto err_mmio_read_less;
	}

	/* check FW version */
	rc = ena_com_validate_version(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Device version is too low\n");
		goto err_mmio_read_less;
	}

	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);

	/* ENA device administration layer init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Cannot initialize ENA admin queue\n");
		goto err_mmio_read_less;
	}

	/* To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information.
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	ena_config_host_info(ena_dev);

	/* Get Device Attributes and features */
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Cannot get attribute for ENA device, rc: %d\n", rc);
		goto err_admin_init;
	}

	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
		      BIT(ENA_ADMIN_NOTIFICATION) |
		      BIT(ENA_ADMIN_KEEP_ALIVE) |
		      BIT(ENA_ADMIN_FATAL_ERROR) |
		      BIT(ENA_ADMIN_WARNING);

	aenq_groups &= get_feat_ctx->aenq.supported_groups;

	adapter->all_aenq_groups = aenq_groups;
	/* The actual supported number of metrics is negotiated with the device at runtime */
	adapter->metrics_num = ena_get_metrics_entries(adapter);

	return 0;

err_admin_init:
	ena_com_admin_destroy(ena_dev);

err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return rc;
}
1828 */ 1829 ena_com_set_admin_polling_mode(ena_dev, true); 1830 1831 ena_config_host_info(ena_dev); 1832 1833 /* Get Device Attributes and features */ 1834 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 1835 if (rc) { 1836 PMD_DRV_LOG(ERR, 1837 "Cannot get attribute for ENA device, rc: %d\n", rc); 1838 goto err_admin_init; 1839 } 1840 1841 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1842 BIT(ENA_ADMIN_NOTIFICATION) | 1843 BIT(ENA_ADMIN_KEEP_ALIVE) | 1844 BIT(ENA_ADMIN_FATAL_ERROR) | 1845 BIT(ENA_ADMIN_WARNING); 1846 1847 aenq_groups &= get_feat_ctx->aenq.supported_groups; 1848 1849 adapter->all_aenq_groups = aenq_groups; 1850 /* The actual supported number of metrics is negotiated with the device at runtime */ 1851 adapter->metrics_num = ena_get_metrics_entries(adapter); 1852 1853 return 0; 1854 1855 err_admin_init: 1856 ena_com_admin_destroy(ena_dev); 1857 1858 err_mmio_read_less: 1859 ena_com_mmio_reg_read_request_destroy(ena_dev); 1860 1861 return rc; 1862 } 1863 1864 static void ena_interrupt_handler_rte(void *cb_arg) 1865 { 1866 struct rte_eth_dev *dev = cb_arg; 1867 struct ena_adapter *adapter = dev->data->dev_private; 1868 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1869 1870 ena_com_admin_q_comp_intr_handler(ena_dev); 1871 if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) 1872 ena_com_aenq_intr_handler(ena_dev, dev); 1873 } 1874 1875 static void check_for_missing_keep_alive(struct ena_adapter *adapter) 1876 { 1877 if (!(adapter->active_aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE))) 1878 return; 1879 1880 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 1881 return; 1882 1883 if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 1884 adapter->keep_alive_timeout)) { 1885 PMD_DRV_LOG(ERR, "Keep alive timeout\n"); 1886 ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO); 1887 ++adapter->dev_stats.wd_expired; 1888 } 1889 } 1890 1891 /* Check if admin queue is enabled */ 1892 static void check_for_admin_com_state(struct ena_adapter *adapter) 1893 { 1894 if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 1895 PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n"); 1896 ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO); 1897 } 1898 } 1899 1900 static int check_for_tx_completion_in_queue(struct ena_adapter *adapter, 1901 struct ena_ring *tx_ring) 1902 { 1903 struct ena_tx_buffer *tx_buf; 1904 uint64_t timestamp; 1905 uint64_t completion_delay; 1906 uint32_t missed_tx = 0; 1907 unsigned int i; 1908 int rc = 0; 1909 1910 for (i = 0; i < tx_ring->ring_size; ++i) { 1911 tx_buf = &tx_ring->tx_buffer_info[i]; 1912 timestamp = tx_buf->timestamp; 1913 1914 if (timestamp == 0) 1915 continue; 1916 1917 completion_delay = rte_get_timer_cycles() - timestamp; 1918 if (completion_delay > adapter->missing_tx_completion_to) { 1919 if (unlikely(!tx_buf->print_once)) { 1920 PMD_TX_LOG(WARNING, 1921 "Found a Tx that wasn't completed on time, qid %d, index %d. " 1922 "Missing Tx outstanding for %" PRIu64 " msecs.\n", 1923 tx_ring->id, i, completion_delay / 1924 rte_get_timer_hz() * 1000); 1925 tx_buf->print_once = true; 1926 } 1927 ++missed_tx; 1928 } 1929 } 1930 1931 if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) { 1932 PMD_DRV_LOG(ERR, 1933 "The number of lost Tx completions is above the threshold (%d > %d). 
" 1934 "Trigger the device reset.\n", 1935 missed_tx, 1936 tx_ring->missing_tx_completion_threshold); 1937 adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; 1938 adapter->trigger_reset = true; 1939 rc = -EIO; 1940 } 1941 1942 tx_ring->tx_stats.missed_tx += missed_tx; 1943 1944 return rc; 1945 } 1946 1947 static void check_for_tx_completions(struct ena_adapter *adapter) 1948 { 1949 struct ena_ring *tx_ring; 1950 uint64_t tx_cleanup_delay; 1951 size_t qid; 1952 int budget; 1953 uint16_t nb_tx_queues = adapter->edev_data->nb_tx_queues; 1954 1955 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) 1956 return; 1957 1958 nb_tx_queues = adapter->edev_data->nb_tx_queues; 1959 budget = adapter->missing_tx_completion_budget; 1960 1961 qid = adapter->last_tx_comp_qid; 1962 while (budget-- > 0) { 1963 tx_ring = &adapter->tx_ring[qid]; 1964 1965 /* Tx cleanup is called only by the burst function and can be 1966 * called dynamically by the application. Also cleanup is 1967 * limited by the threshold. To avoid false detection of the 1968 * missing HW Tx completion, get the delay since last cleanup 1969 * function was called. 1970 */ 1971 tx_cleanup_delay = rte_get_timer_cycles() - 1972 tx_ring->last_cleanup_ticks; 1973 if (tx_cleanup_delay < adapter->tx_cleanup_stall_delay) 1974 check_for_tx_completion_in_queue(adapter, tx_ring); 1975 qid = (qid + 1) % nb_tx_queues; 1976 } 1977 1978 adapter->last_tx_comp_qid = qid; 1979 } 1980 1981 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1982 void *arg) 1983 { 1984 struct rte_eth_dev *dev = arg; 1985 struct ena_adapter *adapter = dev->data->dev_private; 1986 1987 if (unlikely(adapter->trigger_reset)) 1988 return; 1989 1990 check_for_missing_keep_alive(adapter); 1991 check_for_admin_com_state(adapter); 1992 check_for_tx_completions(adapter); 1993 1994 if (unlikely(adapter->trigger_reset)) { 1995 PMD_DRV_LOG(ERR, "Trigger reset is on\n"); 1996 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 1997 NULL); 1998 } 1999 } 2000 2001 static inline void 2002 set_default_llq_configurations(struct ena_llq_configurations *llq_config, 2003 struct ena_admin_feature_llq_desc *llq, 2004 bool use_large_llq_hdr) 2005 { 2006 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 2007 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 2008 llq_config->llq_num_decs_before_header = 2009 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 2010 2011 if (use_large_llq_hdr && 2012 (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) { 2013 llq_config->llq_ring_entry_size = 2014 ENA_ADMIN_LIST_ENTRY_SIZE_256B; 2015 llq_config->llq_ring_entry_size_value = 256; 2016 } else { 2017 llq_config->llq_ring_entry_size = 2018 ENA_ADMIN_LIST_ENTRY_SIZE_128B; 2019 llq_config->llq_ring_entry_size_value = 128; 2020 } 2021 } 2022 2023 static int 2024 ena_set_queues_placement_policy(struct ena_adapter *adapter, 2025 struct ena_com_dev *ena_dev, 2026 struct ena_admin_feature_llq_desc *llq, 2027 struct ena_llq_configurations *llq_default_configurations) 2028 { 2029 int rc; 2030 u32 llq_feature_mask; 2031 2032 if (!adapter->enable_llq) { 2033 PMD_DRV_LOG(WARNING, 2034 "NOTE: LLQ has been disabled as per user's request. " 2035 "This may lead to a huge performance degradation!\n"); 2036 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2037 return 0; 2038 } 2039 2040 llq_feature_mask = 1 << ENA_ADMIN_LLQ; 2041 if (!(ena_dev->supported_features & llq_feature_mask)) { 2042 PMD_DRV_LOG(INFO, 2043 "LLQ is not supported. 
Fallback to host mode policy.\n"); 2044 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2045 return 0; 2046 } 2047 2048 if (adapter->dev_mem_base == NULL) { 2049 PMD_DRV_LOG(ERR, 2050 "LLQ is advertised as supported, but device doesn't expose mem bar\n"); 2051 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2052 return 0; 2053 } 2054 2055 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); 2056 if (unlikely(rc)) { 2057 PMD_INIT_LOG(WARNING, 2058 "Failed to config dev mode. Fallback to host mode policy.\n"); 2059 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2060 return 0; 2061 } 2062 2063 /* Nothing to config, exit */ 2064 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 2065 return 0; 2066 2067 ena_dev->mem_bar = adapter->dev_mem_base; 2068 2069 return 0; 2070 } 2071 2072 static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev, 2073 struct ena_com_dev_get_features_ctx *get_feat_ctx) 2074 { 2075 uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; 2076 2077 /* Regular queues capabilities */ 2078 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 2079 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 2080 &get_feat_ctx->max_queue_ext.max_queue_ext; 2081 io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num, 2082 max_queue_ext->max_rx_cq_num); 2083 io_tx_sq_num = max_queue_ext->max_tx_sq_num; 2084 io_tx_cq_num = max_queue_ext->max_tx_cq_num; 2085 } else { 2086 struct ena_admin_queue_feature_desc *max_queues = 2087 &get_feat_ctx->max_queues; 2088 io_tx_sq_num = max_queues->max_sq_num; 2089 io_tx_cq_num = max_queues->max_cq_num; 2090 io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num); 2091 } 2092 2093 /* In case of LLQ use the llq number in the get feature cmd */ 2094 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 2095 io_tx_sq_num = get_feat_ctx->llq.max_llq_num; 2096 2097 max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num); 2098 max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num); 2099 max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num); 2100 2101 if (unlikely(max_num_io_queues == 0)) { 2102 PMD_DRV_LOG(ERR, "Number of IO queues cannot not be 0\n"); 2103 return -EFAULT; 2104 } 2105 2106 return max_num_io_queues; 2107 } 2108 2109 static void 2110 ena_set_offloads(struct ena_offloads *offloads, 2111 struct ena_admin_feature_offload_desc *offload_desc) 2112 { 2113 if (offload_desc->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) 2114 offloads->tx_offloads |= ENA_IPV4_TSO; 2115 2116 /* Tx IPv4 checksum offloads */ 2117 if (offload_desc->tx & 2118 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) 2119 offloads->tx_offloads |= ENA_L3_IPV4_CSUM; 2120 if (offload_desc->tx & 2121 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) 2122 offloads->tx_offloads |= ENA_L4_IPV4_CSUM; 2123 if (offload_desc->tx & 2124 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) 2125 offloads->tx_offloads |= ENA_L4_IPV4_CSUM_PARTIAL; 2126 2127 /* Tx IPv6 checksum offloads */ 2128 if (offload_desc->tx & 2129 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) 2130 offloads->tx_offloads |= ENA_L4_IPV6_CSUM; 2131 if (offload_desc->tx & 2132 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) 2133 offloads->tx_offloads |= ENA_L4_IPV6_CSUM_PARTIAL; 2134 2135 /* Rx IPv4 checksum offloads */ 2136 if (offload_desc->rx_supported & 2137 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK) 2138 offloads->rx_offloads |= 
ENA_L3_IPV4_CSUM;
2139 if (offload_desc->rx_supported &
2140 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
2141 offloads->rx_offloads |= ENA_L4_IPV4_CSUM;
2142
2143 /* Rx IPv6 checksum offloads */
2144 if (offload_desc->rx_supported &
2145 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
2146 offloads->rx_offloads |= ENA_L4_IPV6_CSUM;
2147
2148 if (offload_desc->rx_supported &
2149 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
2150 offloads->rx_offloads |= ENA_RX_RSS_HASH;
2151 }
2152
2153 static int ena_init_once(void)
2154 {
2155 static bool init_done;
2156
2157 if (init_done)
2158 return 0;
2159
2160 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2161 /* Init timer subsystem for the ENA timer service. */
2162 rte_timer_subsystem_init();
2163 /* Register handler for requests from secondary processes. */
2164 rte_mp_action_register(ENA_MP_NAME, ena_mp_primary_handle);
2165 }
2166
2167 init_done = true;
2168 return 0;
2169 }
2170
2171 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
2172 {
2173 struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
2174 struct rte_pci_device *pci_dev;
2175 struct rte_intr_handle *intr_handle;
2176 struct ena_adapter *adapter = eth_dev->data->dev_private;
2177 struct ena_com_dev *ena_dev = &adapter->ena_dev;
2178 struct ena_com_dev_get_features_ctx get_feat_ctx;
2179 struct ena_llq_configurations llq_config;
2180 const char *queue_type_str;
2181 uint32_t max_num_io_queues;
2182 int rc;
2183 static int adapters_found;
2184 bool disable_meta_caching;
2185
2186 eth_dev->dev_ops = &ena_dev_ops;
2187 eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
2188 eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
2189 eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
2190
2191 rc = ena_init_once();
2192 if (rc != 0)
2193 return rc;
2194
2195 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2196 return 0;
2197
2198 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2199
2200 memset(adapter, 0, sizeof(struct ena_adapter));
2201 ena_dev = &adapter->ena_dev;
2202
2203 adapter->edev_data = eth_dev->data;
2204
2205 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2206
2207 PMD_INIT_LOG(INFO, "Initializing " PCI_PRI_FMT "\n",
2208 pci_dev->addr.domain,
2209 pci_dev->addr.bus,
2210 pci_dev->addr.devid,
2211 pci_dev->addr.function);
2212
2213 intr_handle = pci_dev->intr_handle;
2214
2215 adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
2216 adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
2217
2218 if (!adapter->regs) {
2219 PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
2220 ENA_REGS_BAR);
2221 return -ENXIO;
2222 }
2223
2224 ena_dev->reg_bar = adapter->regs;
2225 /* Pass device data as a pointer which can be passed to the IO functions
2226 * by the ena_com (for example - the memory allocation.
2227 */ 2228 ena_dev->dmadev = eth_dev->data; 2229 2230 adapter->id_number = adapters_found; 2231 2232 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", 2233 adapter->id_number); 2234 2235 /* Assign default devargs values */ 2236 adapter->missing_tx_completion_to = ENA_TX_TIMEOUT; 2237 adapter->enable_llq = true; 2238 adapter->use_large_llq_hdr = false; 2239 2240 rc = ena_parse_devargs(adapter, pci_dev->device.devargs); 2241 if (rc != 0) { 2242 PMD_INIT_LOG(CRIT, "Failed to parse devargs\n"); 2243 goto err; 2244 } 2245 rc = ena_com_allocate_customer_metrics_buffer(ena_dev); 2246 if (rc != 0) { 2247 PMD_INIT_LOG(CRIT, "Failed to allocate customer metrics buffer\n"); 2248 goto err; 2249 } 2250 2251 /* device specific initialization routine */ 2252 rc = ena_device_init(adapter, pci_dev, &get_feat_ctx); 2253 if (rc) { 2254 PMD_INIT_LOG(CRIT, "Failed to init ENA device\n"); 2255 goto err_metrics_delete; 2256 } 2257 2258 /* Check if device supports LSC */ 2259 if (!(adapter->all_aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) 2260 adapter->edev_data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; 2261 2262 set_default_llq_configurations(&llq_config, &get_feat_ctx.llq, 2263 adapter->use_large_llq_hdr); 2264 rc = ena_set_queues_placement_policy(adapter, ena_dev, 2265 &get_feat_ctx.llq, &llq_config); 2266 if (unlikely(rc)) { 2267 PMD_INIT_LOG(CRIT, "Failed to set placement policy\n"); 2268 return rc; 2269 } 2270 2271 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 2272 queue_type_str = "Regular"; 2273 else 2274 queue_type_str = "Low latency"; 2275 PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str); 2276 2277 calc_queue_ctx.ena_dev = ena_dev; 2278 calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 2279 2280 max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx); 2281 rc = ena_calc_io_queue_size(&calc_queue_ctx, 2282 adapter->use_large_llq_hdr); 2283 if (unlikely((rc != 0) || (max_num_io_queues == 0))) { 2284 rc = -EFAULT; 2285 goto err_device_destroy; 2286 } 2287 2288 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size; 2289 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size; 2290 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 2291 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 2292 adapter->max_num_io_queues = max_num_io_queues; 2293 2294 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2295 disable_meta_caching = 2296 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags & 2297 BIT(ENA_ADMIN_DISABLE_META_CACHING)); 2298 } else { 2299 disable_meta_caching = false; 2300 } 2301 2302 /* prepare ring structures */ 2303 ena_init_rings(adapter, disable_meta_caching); 2304 2305 ena_config_debug_area(adapter); 2306 2307 /* Set max MTU for this device */ 2308 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 2309 2310 ena_set_offloads(&adapter->offloads, &get_feat_ctx.offload); 2311 2312 /* Copy MAC address and point DPDK to it */ 2313 eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr; 2314 rte_ether_addr_copy((struct rte_ether_addr *) 2315 get_feat_ctx.dev_attr.mac_addr, 2316 (struct rte_ether_addr *)adapter->mac_addr); 2317 2318 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 2319 if (unlikely(rc != 0)) { 2320 PMD_DRV_LOG(ERR, "Failed to initialize RSS in ENA device\n"); 2321 goto err_delete_debug_area; 2322 } 2323 2324 adapter->drv_stats = rte_zmalloc("adapter stats", 2325 sizeof(*adapter->drv_stats), 2326 RTE_CACHE_LINE_SIZE); 2327 if (!adapter->drv_stats) { 2328 PMD_DRV_LOG(ERR, 2329 
"Failed to allocate memory for adapter statistics\n"); 2330 rc = -ENOMEM; 2331 goto err_rss_destroy; 2332 } 2333 2334 rte_spinlock_init(&adapter->admin_lock); 2335 2336 rte_intr_callback_register(intr_handle, 2337 ena_interrupt_handler_rte, 2338 eth_dev); 2339 rte_intr_enable(intr_handle); 2340 ena_com_set_admin_polling_mode(ena_dev, false); 2341 ena_com_admin_aenq_enable(ena_dev); 2342 2343 rte_timer_init(&adapter->timer_wd); 2344 2345 adapters_found++; 2346 adapter->state = ENA_ADAPTER_STATE_INIT; 2347 2348 return 0; 2349 2350 err_rss_destroy: 2351 ena_com_rss_destroy(ena_dev); 2352 err_delete_debug_area: 2353 ena_com_delete_debug_area(ena_dev); 2354 2355 err_device_destroy: 2356 ena_com_delete_host_info(ena_dev); 2357 ena_com_admin_destroy(ena_dev); 2358 err_metrics_delete: 2359 ena_com_delete_customer_metrics_buffer(ena_dev); 2360 err: 2361 return rc; 2362 } 2363 2364 static void ena_destroy_device(struct rte_eth_dev *eth_dev) 2365 { 2366 struct ena_adapter *adapter = eth_dev->data->dev_private; 2367 struct ena_com_dev *ena_dev = &adapter->ena_dev; 2368 2369 if (adapter->state == ENA_ADAPTER_STATE_FREE) 2370 return; 2371 2372 ena_com_set_admin_running_state(ena_dev, false); 2373 2374 if (adapter->state != ENA_ADAPTER_STATE_CLOSED) 2375 ena_close(eth_dev); 2376 2377 ena_com_rss_destroy(ena_dev); 2378 2379 ena_com_delete_debug_area(ena_dev); 2380 ena_com_delete_host_info(ena_dev); 2381 2382 ena_com_abort_admin_commands(ena_dev); 2383 ena_com_wait_for_abort_completion(ena_dev); 2384 ena_com_admin_destroy(ena_dev); 2385 ena_com_mmio_reg_read_request_destroy(ena_dev); 2386 ena_com_delete_customer_metrics_buffer(ena_dev); 2387 2388 adapter->state = ENA_ADAPTER_STATE_FREE; 2389 } 2390 2391 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) 2392 { 2393 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2394 return 0; 2395 2396 ena_destroy_device(eth_dev); 2397 2398 return 0; 2399 } 2400 2401 static int ena_dev_configure(struct rte_eth_dev *dev) 2402 { 2403 struct ena_adapter *adapter = dev->data->dev_private; 2404 int rc; 2405 2406 adapter->state = ENA_ADAPTER_STATE_CONFIG; 2407 2408 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 2409 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2410 dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; 2411 2412 /* Scattered Rx cannot be turned off in the HW, so this capability must 2413 * be forced. 2414 */ 2415 dev->data->scattered_rx = 1; 2416 2417 adapter->last_tx_comp_qid = 0; 2418 2419 adapter->missing_tx_completion_budget = 2420 RTE_MIN(ENA_MONITORED_TX_QUEUES, dev->data->nb_tx_queues); 2421 2422 /* To avoid detection of the spurious Tx completion timeout due to 2423 * application not calling the Tx cleanup function, set timeout for the 2424 * Tx queue which should be half of the missing completion timeout for a 2425 * safety. If there will be a lot of missing Tx completions in the 2426 * queue, they will be detected sooner or later. 
2427 */ 2428 adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2; 2429 2430 rc = ena_configure_aenq(adapter); 2431 2432 return rc; 2433 } 2434 2435 static void ena_init_rings(struct ena_adapter *adapter, 2436 bool disable_meta_caching) 2437 { 2438 size_t i; 2439 2440 for (i = 0; i < adapter->max_num_io_queues; i++) { 2441 struct ena_ring *ring = &adapter->tx_ring[i]; 2442 2443 ring->configured = 0; 2444 ring->type = ENA_RING_TYPE_TX; 2445 ring->adapter = adapter; 2446 ring->id = i; 2447 ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 2448 ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 2449 ring->sgl_size = adapter->max_tx_sgl_size; 2450 ring->disable_meta_caching = disable_meta_caching; 2451 } 2452 2453 for (i = 0; i < adapter->max_num_io_queues; i++) { 2454 struct ena_ring *ring = &adapter->rx_ring[i]; 2455 2456 ring->configured = 0; 2457 ring->type = ENA_RING_TYPE_RX; 2458 ring->adapter = adapter; 2459 ring->id = i; 2460 ring->sgl_size = adapter->max_rx_sgl_size; 2461 } 2462 } 2463 2464 static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter) 2465 { 2466 uint64_t port_offloads = 0; 2467 2468 if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM) 2469 port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM; 2470 2471 if (adapter->offloads.rx_offloads & 2472 (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM)) 2473 port_offloads |= 2474 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM; 2475 2476 if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH) 2477 port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2478 2479 port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER; 2480 2481 return port_offloads; 2482 } 2483 2484 static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter) 2485 { 2486 uint64_t port_offloads = 0; 2487 2488 if (adapter->offloads.tx_offloads & ENA_IPV4_TSO) 2489 port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO; 2490 2491 if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM) 2492 port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; 2493 if (adapter->offloads.tx_offloads & 2494 (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM | 2495 ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL)) 2496 port_offloads |= 2497 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM; 2498 2499 port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; 2500 2501 port_offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 2502 2503 return port_offloads; 2504 } 2505 2506 static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter) 2507 { 2508 RTE_SET_USED(adapter); 2509 2510 return 0; 2511 } 2512 2513 static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter) 2514 { 2515 uint64_t queue_offloads = 0; 2516 RTE_SET_USED(adapter); 2517 2518 queue_offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 2519 2520 return queue_offloads; 2521 } 2522 2523 static int ena_infos_get(struct rte_eth_dev *dev, 2524 struct rte_eth_dev_info *dev_info) 2525 { 2526 struct ena_adapter *adapter; 2527 struct ena_com_dev *ena_dev; 2528 2529 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 2530 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 2531 adapter = dev->data->dev_private; 2532 2533 ena_dev = &adapter->ena_dev; 2534 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 2535 2536 dev_info->speed_capa = 2537 RTE_ETH_LINK_SPEED_1G | 2538 RTE_ETH_LINK_SPEED_2_5G | 2539 RTE_ETH_LINK_SPEED_5G | 2540 RTE_ETH_LINK_SPEED_10G | 2541 RTE_ETH_LINK_SPEED_25G | 2542 RTE_ETH_LINK_SPEED_40G | 2543 RTE_ETH_LINK_SPEED_50G | 2544 RTE_ETH_LINK_SPEED_100G | 2545 
RTE_ETH_LINK_SPEED_200G | 2546 RTE_ETH_LINK_SPEED_400G; 2547 2548 /* Inform framework about available features */ 2549 dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter); 2550 dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter); 2551 dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter); 2552 dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter); 2553 2554 dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF; 2555 dev_info->hash_key_size = ENA_HASH_KEY_SIZE; 2556 2557 dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 2558 dev_info->max_rx_pktlen = adapter->max_mtu + RTE_ETHER_HDR_LEN + 2559 RTE_ETHER_CRC_LEN; 2560 dev_info->min_mtu = ENA_MIN_MTU; 2561 dev_info->max_mtu = adapter->max_mtu; 2562 dev_info->max_mac_addrs = 1; 2563 2564 dev_info->max_rx_queues = adapter->max_num_io_queues; 2565 dev_info->max_tx_queues = adapter->max_num_io_queues; 2566 dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 2567 2568 dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size; 2569 dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2570 dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2571 adapter->max_rx_sgl_size); 2572 dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2573 adapter->max_rx_sgl_size); 2574 2575 dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size; 2576 dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2577 dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2578 adapter->max_tx_sgl_size); 2579 dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2580 adapter->max_tx_sgl_size); 2581 2582 dev_info->default_rxportconf.ring_size = ENA_DEFAULT_RING_SIZE; 2583 dev_info->default_txportconf.ring_size = ENA_DEFAULT_RING_SIZE; 2584 2585 dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PASSIVE; 2586 2587 return 0; 2588 } 2589 2590 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len) 2591 { 2592 mbuf->data_len = len; 2593 mbuf->data_off = RTE_PKTMBUF_HEADROOM; 2594 mbuf->refcnt = 1; 2595 mbuf->next = NULL; 2596 } 2597 2598 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, 2599 struct ena_com_rx_buf_info *ena_bufs, 2600 uint32_t descs, 2601 uint16_t *next_to_clean, 2602 uint8_t offset) 2603 { 2604 struct rte_mbuf *mbuf; 2605 struct rte_mbuf *mbuf_head; 2606 struct ena_rx_buffer *rx_info; 2607 int rc; 2608 uint16_t ntc, len, req_id, buf = 0; 2609 2610 if (unlikely(descs == 0)) 2611 return NULL; 2612 2613 ntc = *next_to_clean; 2614 2615 len = ena_bufs[buf].len; 2616 req_id = ena_bufs[buf].req_id; 2617 2618 rx_info = &rx_ring->rx_buffer_info[req_id]; 2619 2620 mbuf = rx_info->mbuf; 2621 RTE_ASSERT(mbuf != NULL); 2622 2623 ena_init_rx_mbuf(mbuf, len); 2624 2625 /* Fill the mbuf head with the data specific for 1st segment. */ 2626 mbuf_head = mbuf; 2627 mbuf_head->nb_segs = descs; 2628 mbuf_head->port = rx_ring->port_id; 2629 mbuf_head->pkt_len = len; 2630 mbuf_head->data_off += offset; 2631 2632 rx_info->mbuf = NULL; 2633 rx_ring->empty_rx_reqs[ntc] = req_id; 2634 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 2635 2636 while (--descs) { 2637 ++buf; 2638 len = ena_bufs[buf].len; 2639 req_id = ena_bufs[buf].req_id; 2640 2641 rx_info = &rx_ring->rx_buffer_info[req_id]; 2642 RTE_ASSERT(rx_info->mbuf != NULL); 2643 2644 if (unlikely(len == 0)) { 2645 /* 2646 * Some devices can pass descriptor with the length 0. 2647 * To avoid confusion, the PMD is simply putting the 2648 * descriptor back, as it was never used. We'll avoid 2649 * mbuf allocation that way. 
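* Re-posting the very same mbuf keeps the refill accounting consistent without going back to the mempool.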
2650 */ 2651 rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq, 2652 rx_info->mbuf, req_id); 2653 if (unlikely(rc != 0)) { 2654 /* Free the mbuf in case of an error. */ 2655 rte_mbuf_raw_free(rx_info->mbuf); 2656 } else { 2657 /* 2658 * If there was no error, just exit the loop as 2659 * 0 length descriptor is always the last one. 2660 */ 2661 break; 2662 } 2663 } else { 2664 /* Create an mbuf chain. */ 2665 mbuf->next = rx_info->mbuf; 2666 mbuf = mbuf->next; 2667 2668 ena_init_rx_mbuf(mbuf, len); 2669 mbuf_head->pkt_len += len; 2670 } 2671 2672 /* 2673 * Mark the descriptor as depleted and perform necessary 2674 * cleanup. 2675 * This code will execute in two cases: 2676 * 1. Descriptor len was greater than 0 - normal situation. 2677 * 2. Descriptor len was 0 and we failed to add the descriptor 2678 * to the device. In that situation, we should try to add 2679 * the mbuf again in the populate routine and mark the 2680 * descriptor as used up by the device. 2681 */ 2682 rx_info->mbuf = NULL; 2683 rx_ring->empty_rx_reqs[ntc] = req_id; 2684 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 2685 } 2686 2687 *next_to_clean = ntc; 2688 2689 return mbuf_head; 2690 } 2691 2692 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 2693 uint16_t nb_pkts) 2694 { 2695 struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 2696 unsigned int free_queue_entries; 2697 uint16_t next_to_clean = rx_ring->next_to_clean; 2698 uint16_t descs_in_use; 2699 struct rte_mbuf *mbuf; 2700 uint16_t completed; 2701 struct ena_com_rx_ctx ena_rx_ctx; 2702 int i, rc = 0; 2703 bool fill_hash; 2704 2705 #ifdef RTE_ETHDEV_DEBUG_RX 2706 /* Check adapter state */ 2707 if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 2708 PMD_RX_LOG(ALERT, 2709 "Trying to receive pkts while device is NOT running\n"); 2710 return 0; 2711 } 2712 #endif 2713 2714 fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH; 2715 2716 descs_in_use = rx_ring->ring_size - 2717 ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1; 2718 nb_pkts = RTE_MIN(descs_in_use, nb_pkts); 2719 2720 for (completed = 0; completed < nb_pkts; completed++) { 2721 ena_rx_ctx.max_bufs = rx_ring->sgl_size; 2722 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 2723 ena_rx_ctx.descs = 0; 2724 ena_rx_ctx.pkt_offset = 0; 2725 /* receive packet context */ 2726 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 2727 rx_ring->ena_com_io_sq, 2728 &ena_rx_ctx); 2729 if (unlikely(rc)) { 2730 PMD_RX_LOG(ERR, 2731 "Failed to get the packet from the device, rc: %d\n", 2732 rc); 2733 if (rc == ENA_COM_NO_SPACE) { 2734 ++rx_ring->rx_stats.bad_desc_num; 2735 ena_trigger_reset(rx_ring->adapter, 2736 ENA_REGS_RESET_TOO_MANY_RX_DESCS); 2737 } else { 2738 ++rx_ring->rx_stats.bad_req_id; 2739 ena_trigger_reset(rx_ring->adapter, 2740 ENA_REGS_RESET_INV_RX_REQ_ID); 2741 } 2742 return 0; 2743 } 2744 2745 mbuf = ena_rx_mbuf(rx_ring, 2746 ena_rx_ctx.ena_bufs, 2747 ena_rx_ctx.descs, 2748 &next_to_clean, 2749 ena_rx_ctx.pkt_offset); 2750 if (unlikely(mbuf == NULL)) { 2751 for (i = 0; i < ena_rx_ctx.descs; ++i) { 2752 rx_ring->empty_rx_reqs[next_to_clean] = 2753 rx_ring->ena_bufs[i].req_id; 2754 next_to_clean = ENA_IDX_NEXT_MASKED( 2755 next_to_clean, rx_ring->size_mask); 2756 } 2757 break; 2758 } 2759 2760 /* fill mbuf attributes if any */ 2761 ena_rx_mbuf_prepare(rx_ring, mbuf, &ena_rx_ctx, fill_hash); 2762 2763 if (unlikely(mbuf->ol_flags & 2764 (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD))) 2765 rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors); 
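/* Hand the mbuf chain to the application and account the received bytes. */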
2766 2767 rx_pkts[completed] = mbuf; 2768 rx_ring->rx_stats.bytes += mbuf->pkt_len; 2769 } 2770 2771 rx_ring->rx_stats.cnt += completed; 2772 rx_ring->next_to_clean = next_to_clean; 2773 2774 free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq); 2775 2776 /* Burst refill to save doorbells, memory barriers, const interval */ 2777 if (free_queue_entries >= rx_ring->rx_free_thresh) { 2778 ena_populate_rx_queue(rx_ring, free_queue_entries); 2779 } 2780 2781 return completed; 2782 } 2783 2784 static uint16_t 2785 eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2786 uint16_t nb_pkts) 2787 { 2788 int32_t ret; 2789 uint32_t i; 2790 struct rte_mbuf *m; 2791 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 2792 struct ena_adapter *adapter = tx_ring->adapter; 2793 struct rte_ipv4_hdr *ip_hdr; 2794 uint64_t ol_flags; 2795 uint64_t l4_csum_flag; 2796 uint64_t dev_offload_capa; 2797 uint16_t frag_field; 2798 bool need_pseudo_csum; 2799 2800 dev_offload_capa = adapter->offloads.tx_offloads; 2801 for (i = 0; i != nb_pkts; i++) { 2802 m = tx_pkts[i]; 2803 ol_flags = m->ol_flags; 2804 2805 /* Check if any offload flag was set */ 2806 if (ol_flags == 0) 2807 continue; 2808 2809 l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK; 2810 /* SCTP checksum offload is not supported by the ENA. */ 2811 if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) || 2812 l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) { 2813 PMD_TX_LOG(DEBUG, 2814 "mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64 "\n", 2815 i, ol_flags); 2816 rte_errno = ENOTSUP; 2817 return i; 2818 } 2819 2820 if (unlikely(m->nb_segs >= tx_ring->sgl_size && 2821 !(tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && 2822 m->nb_segs == tx_ring->sgl_size && 2823 m->data_len < tx_ring->tx_max_header_size))) { 2824 PMD_TX_LOG(DEBUG, 2825 "mbuf[%" PRIu32 "] has too many segments: %" PRIu16 "\n", 2826 i, m->nb_segs); 2827 rte_errno = EINVAL; 2828 return i; 2829 } 2830 2831 #ifdef RTE_LIBRTE_ETHDEV_DEBUG 2832 /* Check if requested offload is also enabled for the queue */ 2833 if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM && 2834 !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) || 2835 (l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM && 2836 !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) || 2837 (l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM && 2838 !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) { 2839 PMD_TX_LOG(DEBUG, 2840 "mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n", 2841 i, m->nb_segs, tx_ring->id); 2842 rte_errno = EINVAL; 2843 return i; 2844 } 2845 2846 /* The caller is obligated to set l2 and l3 len if any cksum 2847 * offload is enabled. 2848 */ 2849 if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) && 2850 (m->l2_len == 0 || m->l3_len == 0))) { 2851 PMD_TX_LOG(DEBUG, 2852 "mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n", 2853 i); 2854 rte_errno = EINVAL; 2855 return i; 2856 } 2857 ret = rte_validate_tx_offload(m); 2858 if (ret != 0) { 2859 rte_errno = -ret; 2860 return i; 2861 } 2862 #endif 2863 2864 /* Verify HW support for requested offloads and determine if 2865 * pseudo header checksum is needed. 
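* Devices that only advertise the *_CSUM_PARTIAL capabilities expect the L4 checksum field to be seeded with the pseudo-header checksum, which is done via rte_net_intel_cksum_flags_prepare() at the end of this loop.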
2866 */ 2867 need_pseudo_csum = false; 2868 if (ol_flags & RTE_MBUF_F_TX_IPV4) { 2869 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM && 2870 !(dev_offload_capa & ENA_L3_IPV4_CSUM)) { 2871 rte_errno = ENOTSUP; 2872 return i; 2873 } 2874 2875 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG && 2876 !(dev_offload_capa & ENA_IPV4_TSO)) { 2877 rte_errno = ENOTSUP; 2878 return i; 2879 } 2880 2881 /* Check HW capabilities and if pseudo csum is needed 2882 * for L4 offloads. 2883 */ 2884 if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM && 2885 !(dev_offload_capa & ENA_L4_IPV4_CSUM)) { 2886 if (dev_offload_capa & 2887 ENA_L4_IPV4_CSUM_PARTIAL) { 2888 need_pseudo_csum = true; 2889 } else { 2890 rte_errno = ENOTSUP; 2891 return i; 2892 } 2893 } 2894 2895 /* Parse the DF flag */ 2896 ip_hdr = rte_pktmbuf_mtod_offset(m, 2897 struct rte_ipv4_hdr *, m->l2_len); 2898 frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset); 2899 if (frag_field & RTE_IPV4_HDR_DF_FLAG) { 2900 m->packet_type |= RTE_PTYPE_L4_NONFRAG; 2901 } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 2902 /* In case we are supposed to TSO and have DF 2903 * not set (DF=0) hardware must be provided with 2904 * partial checksum. 2905 */ 2906 need_pseudo_csum = true; 2907 } 2908 } else if (ol_flags & RTE_MBUF_F_TX_IPV6) { 2909 /* There is no support for IPv6 TSO as for now. */ 2910 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 2911 rte_errno = ENOTSUP; 2912 return i; 2913 } 2914 2915 /* Check HW capabilities and if pseudo csum is needed */ 2916 if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM && 2917 !(dev_offload_capa & ENA_L4_IPV6_CSUM)) { 2918 if (dev_offload_capa & 2919 ENA_L4_IPV6_CSUM_PARTIAL) { 2920 need_pseudo_csum = true; 2921 } else { 2922 rte_errno = ENOTSUP; 2923 return i; 2924 } 2925 } 2926 } 2927 2928 if (need_pseudo_csum) { 2929 ret = rte_net_intel_cksum_flags_prepare(m, ol_flags); 2930 if (ret != 0) { 2931 rte_errno = -ret; 2932 return i; 2933 } 2934 } 2935 } 2936 2937 return i; 2938 } 2939 2940 static void ena_update_hints(struct ena_adapter *adapter, 2941 struct ena_admin_ena_hw_hints *hints) 2942 { 2943 if (hints->admin_completion_tx_timeout) 2944 adapter->ena_dev.admin_queue.completion_timeout = 2945 hints->admin_completion_tx_timeout * 1000; 2946 2947 if (hints->mmio_read_timeout) 2948 /* convert to usec */ 2949 adapter->ena_dev.mmio_read.reg_read_to = 2950 hints->mmio_read_timeout * 1000; 2951 2952 if (hints->driver_watchdog_timeout) { 2953 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2954 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2955 else 2956 // Convert msecs to ticks 2957 adapter->keep_alive_timeout = 2958 (hints->driver_watchdog_timeout * 2959 rte_get_timer_hz()) / 1000; 2960 } 2961 } 2962 2963 static void ena_tx_map_mbuf(struct ena_ring *tx_ring, 2964 struct ena_tx_buffer *tx_info, 2965 struct rte_mbuf *mbuf, 2966 void **push_header, 2967 uint16_t *header_len) 2968 { 2969 struct ena_com_buf *ena_buf; 2970 uint16_t delta, seg_len, push_len; 2971 2972 delta = 0; 2973 seg_len = mbuf->data_len; 2974 2975 tx_info->mbuf = mbuf; 2976 ena_buf = tx_info->bufs; 2977 2978 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2979 /* 2980 * Tx header might be (and will be in most cases) smaller than 2981 * tx_max_header_size. But it's not an issue to send more data 2982 * to the device, than actually needed if the mbuf size is 2983 * greater than tx_max_header_size. 
2984 */ 2985 push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size); 2986 *header_len = push_len; 2987 2988 if (likely(push_len <= seg_len)) { 2989 /* If the push header is in the single segment, then 2990 * just point it to the 1st mbuf data. 2991 */ 2992 *push_header = rte_pktmbuf_mtod(mbuf, uint8_t *); 2993 } else { 2994 /* If the push header lays in the several segments, copy 2995 * it to the intermediate buffer. 2996 */ 2997 rte_pktmbuf_read(mbuf, 0, push_len, 2998 tx_ring->push_buf_intermediate_buf); 2999 *push_header = tx_ring->push_buf_intermediate_buf; 3000 delta = push_len - seg_len; 3001 } 3002 } else { 3003 *push_header = NULL; 3004 *header_len = 0; 3005 push_len = 0; 3006 } 3007 3008 /* Process first segment taking into consideration pushed header */ 3009 if (seg_len > push_len) { 3010 ena_buf->paddr = mbuf->buf_iova + 3011 mbuf->data_off + 3012 push_len; 3013 ena_buf->len = seg_len - push_len; 3014 ena_buf++; 3015 tx_info->num_of_bufs++; 3016 } 3017 3018 while ((mbuf = mbuf->next) != NULL) { 3019 seg_len = mbuf->data_len; 3020 3021 /* Skip mbufs if whole data is pushed as a header */ 3022 if (unlikely(delta > seg_len)) { 3023 delta -= seg_len; 3024 continue; 3025 } 3026 3027 ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta; 3028 ena_buf->len = seg_len - delta; 3029 ena_buf++; 3030 tx_info->num_of_bufs++; 3031 3032 delta = 0; 3033 } 3034 } 3035 3036 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) 3037 { 3038 struct ena_tx_buffer *tx_info; 3039 struct ena_com_tx_ctx ena_tx_ctx = { { 0 } }; 3040 uint16_t next_to_use; 3041 uint16_t header_len; 3042 uint16_t req_id; 3043 void *push_header; 3044 int nb_hw_desc; 3045 int rc; 3046 3047 /* Checking for space for 2 additional metadata descriptors due to 3048 * possible header split and metadata descriptor 3049 */ 3050 if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3051 mbuf->nb_segs + 2)) { 3052 PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n"); 3053 return ENA_COM_NO_MEM; 3054 } 3055 3056 next_to_use = tx_ring->next_to_use; 3057 3058 req_id = tx_ring->empty_tx_reqs[next_to_use]; 3059 tx_info = &tx_ring->tx_buffer_info[req_id]; 3060 tx_info->num_of_bufs = 0; 3061 RTE_ASSERT(tx_info->mbuf == NULL); 3062 3063 ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len); 3064 3065 ena_tx_ctx.ena_bufs = tx_info->bufs; 3066 ena_tx_ctx.push_header = push_header; 3067 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 3068 ena_tx_ctx.req_id = req_id; 3069 ena_tx_ctx.header_len = header_len; 3070 3071 /* Set Tx offloads flags, if applicable */ 3072 ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads, 3073 tx_ring->disable_meta_caching); 3074 3075 if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, 3076 &ena_tx_ctx))) { 3077 PMD_TX_LOG(DEBUG, 3078 "LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n", 3079 tx_ring->id); 3080 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 3081 tx_ring->tx_stats.doorbells++; 3082 tx_ring->pkts_without_db = false; 3083 } 3084 3085 /* prepare the packet's descriptors to dma engine */ 3086 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, 3087 &nb_hw_desc); 3088 if (unlikely(rc)) { 3089 PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc); 3090 ++tx_ring->tx_stats.prepare_ctx_err; 3091 ena_trigger_reset(tx_ring->adapter, 3092 ENA_REGS_RESET_DRIVER_INVALID_STATE); 3093 return rc; 3094 } 3095 3096 tx_info->tx_descs = nb_hw_desc; 3097 tx_info->timestamp = rte_get_timer_cycles(); 3098 3099 
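/* The timestamp lets check_for_tx_completion_in_queue() detect Tx requests that the device did not complete within the configured timeout. */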
tx_ring->tx_stats.cnt++; 3100 tx_ring->tx_stats.bytes += mbuf->pkt_len; 3101 3102 tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, 3103 tx_ring->size_mask); 3104 3105 return 0; 3106 } 3107 3108 static __rte_always_inline size_t 3109 ena_tx_cleanup_mbuf_fast(struct rte_mbuf **mbufs_to_clean, 3110 struct rte_mbuf *mbuf, 3111 size_t mbuf_cnt, 3112 size_t buf_size) 3113 { 3114 struct rte_mbuf *m_next; 3115 3116 while (mbuf != NULL) { 3117 m_next = mbuf->next; 3118 mbufs_to_clean[mbuf_cnt++] = mbuf; 3119 if (mbuf_cnt == buf_size) { 3120 rte_mempool_put_bulk(mbufs_to_clean[0]->pool, (void **)mbufs_to_clean, 3121 (unsigned int)mbuf_cnt); 3122 mbuf_cnt = 0; 3123 } 3124 mbuf = m_next; 3125 } 3126 3127 return mbuf_cnt; 3128 } 3129 3130 static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt) 3131 { 3132 struct rte_mbuf *mbufs_to_clean[ENA_CLEANUP_BUF_SIZE]; 3133 struct ena_ring *tx_ring = (struct ena_ring *)txp; 3134 size_t mbuf_cnt = 0; 3135 unsigned int total_tx_descs = 0; 3136 unsigned int total_tx_pkts = 0; 3137 uint16_t cleanup_budget; 3138 uint16_t next_to_clean = tx_ring->next_to_clean; 3139 bool fast_free = tx_ring->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 3140 3141 /* 3142 * If free_pkt_cnt is equal to 0, it means that the user requested 3143 * full cleanup, so attempt to release all Tx descriptors 3144 * (ring_size - 1 -> size_mask) 3145 */ 3146 cleanup_budget = (free_pkt_cnt == 0) ? tx_ring->size_mask : free_pkt_cnt; 3147 3148 while (likely(total_tx_pkts < cleanup_budget)) { 3149 struct rte_mbuf *mbuf; 3150 struct ena_tx_buffer *tx_info; 3151 uint16_t req_id; 3152 3153 if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0) 3154 break; 3155 3156 if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0)) 3157 break; 3158 3159 /* Get Tx info & store how many descs were processed */ 3160 tx_info = &tx_ring->tx_buffer_info[req_id]; 3161 tx_info->timestamp = 0; 3162 3163 mbuf = tx_info->mbuf; 3164 if (fast_free) { 3165 mbuf_cnt = ena_tx_cleanup_mbuf_fast(mbufs_to_clean, mbuf, mbuf_cnt, 3166 ENA_CLEANUP_BUF_SIZE); 3167 } else { 3168 rte_pktmbuf_free(mbuf); 3169 } 3170 3171 tx_info->mbuf = NULL; 3172 tx_ring->empty_tx_reqs[next_to_clean] = req_id; 3173 3174 total_tx_descs += tx_info->tx_descs; 3175 total_tx_pkts++; 3176 3177 /* Put back descriptor to the ring for reuse */ 3178 next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean, 3179 tx_ring->size_mask); 3180 } 3181 3182 if (likely(total_tx_descs > 0)) { 3183 /* acknowledge completion of sent packets */ 3184 tx_ring->next_to_clean = next_to_clean; 3185 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); 3186 } 3187 3188 if (mbuf_cnt != 0) 3189 rte_mempool_put_bulk(mbufs_to_clean[0]->pool, 3190 (void **)mbufs_to_clean, mbuf_cnt); 3191 3192 /* Notify completion handler that full cleanup was performed */ 3193 if (free_pkt_cnt == 0 || total_tx_pkts < cleanup_budget) 3194 tx_ring->last_cleanup_ticks = rte_get_timer_cycles(); 3195 3196 return total_tx_pkts; 3197 } 3198 3199 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 3200 uint16_t nb_pkts) 3201 { 3202 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 3203 int available_desc; 3204 uint16_t sent_idx = 0; 3205 3206 #ifdef RTE_ETHDEV_DEBUG_TX 3207 /* Check adapter state */ 3208 if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 3209 PMD_TX_LOG(ALERT, 3210 "Trying to xmit pkts while device is NOT running\n"); 3211 return 0; 3212 } 3213 #endif 3214 3215 available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq); 
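/* Reclaim completed Tx descriptors before queuing new packets when the number of free entries drops below the Tx free threshold. */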
3216 if (available_desc < tx_ring->tx_free_thresh) 3217 ena_tx_cleanup((void *)tx_ring, 0); 3218 3219 for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { 3220 if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx])) 3221 break; 3222 tx_ring->pkts_without_db = true; 3223 rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4, 3224 tx_ring->size_mask)]); 3225 } 3226 3227 /* If there are ready packets to be xmitted... */ 3228 if (likely(tx_ring->pkts_without_db)) { 3229 /* ...let HW do its best :-) */ 3230 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 3231 tx_ring->tx_stats.doorbells++; 3232 tx_ring->pkts_without_db = false; 3233 } 3234 3235 tx_ring->tx_stats.available_desc = 3236 ena_com_free_q_entries(tx_ring->ena_com_io_sq); 3237 tx_ring->tx_stats.tx_poll++; 3238 3239 return sent_idx; 3240 } 3241 3242 static void ena_copy_customer_metrics(struct ena_adapter *adapter, uint64_t *buf, 3243 size_t num_metrics) 3244 { 3245 struct ena_com_dev *ena_dev = &adapter->ena_dev; 3246 int rc; 3247 3248 if (ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) { 3249 if (num_metrics != ENA_STATS_ARRAY_METRICS) { 3250 PMD_DRV_LOG(ERR, "Detected discrepancy in the number of customer metrics"); 3251 return; 3252 } 3253 rte_spinlock_lock(&adapter->admin_lock); 3254 rc = ENA_PROXY(adapter, 3255 ena_com_get_customer_metrics, 3256 &adapter->ena_dev, 3257 (char *)buf, 3258 num_metrics * sizeof(uint64_t)); 3259 rte_spinlock_unlock(&adapter->admin_lock); 3260 if (rc != 0) { 3261 PMD_DRV_LOG(WARNING, "Failed to get customer metrics, rc: %d\n", rc); 3262 return; 3263 } 3264 3265 } else if (ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) { 3266 if (num_metrics != ENA_STATS_ARRAY_METRICS_LEGACY) { 3267 PMD_DRV_LOG(ERR, "Detected discrepancy in the number of legacy metrics"); 3268 return; 3269 } 3270 3271 rte_spinlock_lock(&adapter->admin_lock); 3272 rc = ENA_PROXY(adapter, 3273 ena_com_get_eni_stats, 3274 &adapter->ena_dev, 3275 (struct ena_admin_eni_stats *)buf); 3276 rte_spinlock_unlock(&adapter->admin_lock); 3277 if (rc != 0) { 3278 PMD_DRV_LOG(WARNING, 3279 "Failed to get ENI metrics, rc: %d\n", rc); 3280 return; 3281 } 3282 } 3283 } 3284 3285 static void ena_copy_ena_srd_info(struct ena_adapter *adapter, 3286 struct ena_stats_srd *srd_info) 3287 { 3288 int rc; 3289 3290 if (!ena_com_get_cap(&adapter->ena_dev, ENA_ADMIN_ENA_SRD_INFO)) 3291 return; 3292 3293 rte_spinlock_lock(&adapter->admin_lock); 3294 rc = ENA_PROXY(adapter, 3295 ena_com_get_ena_srd_info, 3296 &adapter->ena_dev, 3297 (struct ena_admin_ena_srd_info *)srd_info); 3298 rte_spinlock_unlock(&adapter->admin_lock); 3299 if (rc != ENA_COM_OK && rc != ENA_COM_UNSUPPORTED) { 3300 PMD_DRV_LOG(WARNING, 3301 "Failed to get ENA express srd info, rc: %d\n", rc); 3302 return; 3303 } 3304 } 3305 3306 /** 3307 * DPDK callback to retrieve names of extended device statistics 3308 * 3309 * @param dev 3310 * Pointer to Ethernet device structure. 3311 * @param[out] xstats_names 3312 * Buffer to insert names into. 3313 * @param n 3314 * Number of names. 3315 * 3316 * @return 3317 * Number of xstats names. 
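* If @p n is too small or @p xstats_names is NULL, only the required array size is returned and nothing is written.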
3318 */ 3319 static int ena_xstats_get_names(struct rte_eth_dev *dev, 3320 struct rte_eth_xstat_name *xstats_names, 3321 unsigned int n) 3322 { 3323 struct ena_adapter *adapter = dev->data->dev_private; 3324 unsigned int xstats_count = ena_xstats_calc_num(dev->data); 3325 unsigned int stat, i, count = 0; 3326 3327 if (n < xstats_count || !xstats_names) 3328 return xstats_count; 3329 3330 for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) 3331 strcpy(xstats_names[count].name, 3332 ena_stats_global_strings[stat].name); 3333 3334 for (stat = 0; stat < adapter->metrics_num; stat++, count++) 3335 rte_strscpy(xstats_names[count].name, 3336 ena_stats_metrics_strings[stat].name, 3337 RTE_ETH_XSTATS_NAME_SIZE); 3338 for (stat = 0; stat < ENA_STATS_ARRAY_ENA_SRD; stat++, count++) 3339 rte_strscpy(xstats_names[count].name, 3340 ena_stats_srd_strings[stat].name, 3341 RTE_ETH_XSTATS_NAME_SIZE); 3342 3343 for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) 3344 for (i = 0; i < dev->data->nb_rx_queues; i++, count++) 3345 snprintf(xstats_names[count].name, 3346 sizeof(xstats_names[count].name), 3347 "rx_q%d_%s", i, 3348 ena_stats_rx_strings[stat].name); 3349 3350 for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) 3351 for (i = 0; i < dev->data->nb_tx_queues; i++, count++) 3352 snprintf(xstats_names[count].name, 3353 sizeof(xstats_names[count].name), 3354 "tx_q%d_%s", i, 3355 ena_stats_tx_strings[stat].name); 3356 3357 return xstats_count; 3358 } 3359 3360 /** 3361 * DPDK callback to retrieve names of extended device statistics for the given 3362 * ids. 3363 * 3364 * @param dev 3365 * Pointer to Ethernet device structure. 3366 * @param[out] xstats_names 3367 * Buffer to insert names into. 3368 * @param ids 3369 * IDs array for which the names should be retrieved. 3370 * @param size 3371 * Number of ids. 3372 * 3373 * @return 3374 * Positive value: number of xstats names. Negative value: error code. 
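* If @p xstats_names is NULL, the total number of supported statistics is returned instead.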
3375 */
3376 static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
3377 const uint64_t *ids,
3378 struct rte_eth_xstat_name *xstats_names,
3379 unsigned int size)
3380 {
3381 struct ena_adapter *adapter = dev->data->dev_private;
3382 uint64_t xstats_count = ena_xstats_calc_num(dev->data);
3383 uint64_t id, qid;
3384 unsigned int i;
3385
3386 if (xstats_names == NULL)
3387 return xstats_count;
3388
3389 for (i = 0; i < size; ++i) {
3390 id = ids[i];
3391 if (id >= xstats_count) {
3392 PMD_DRV_LOG(ERR,
3393 "ID value out of range: id=%" PRIu64 ", xstats_num=%" PRIu64 "\n",
3394 id, xstats_count);
3395 return -EINVAL;
3396 }
3397
3398 if (id < ENA_STATS_ARRAY_GLOBAL) {
3399 strcpy(xstats_names[i].name,
3400 ena_stats_global_strings[id].name);
3401 continue;
3402 }
3403
3404 id -= ENA_STATS_ARRAY_GLOBAL;
3405 if (id < adapter->metrics_num) {
3406 rte_strscpy(xstats_names[i].name,
3407 ena_stats_metrics_strings[id].name,
3408 RTE_ETH_XSTATS_NAME_SIZE);
3409 continue;
3410 }
3411
3412 id -= adapter->metrics_num;
3413
3414 if (id < ENA_STATS_ARRAY_ENA_SRD) {
3415 rte_strscpy(xstats_names[i].name,
3416 ena_stats_srd_strings[id].name,
3417 RTE_ETH_XSTATS_NAME_SIZE);
3418 continue;
3419 }
3420 id -= ENA_STATS_ARRAY_ENA_SRD;
3421
3422 if (id < ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues) {
3423 qid = id % dev->data->nb_rx_queues;
3424 id /= dev->data->nb_rx_queues;
3425 snprintf(xstats_names[i].name,
3426 sizeof(xstats_names[i].name),
3427 "rx_q%" PRIu64 "_%s",
3428 qid, ena_stats_rx_strings[id].name);
3429 continue;
3430 }
3431
3432 id -= ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
3433 /* Although this condition is not strictly needed, it is kept for
3434 * compatibility in case a new xstat group is ever added.
3435 */
3436 if (id < ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues) {
3437 qid = id % dev->data->nb_tx_queues;
3438 id /= dev->data->nb_tx_queues;
3439 snprintf(xstats_names[i].name,
3440 sizeof(xstats_names[i].name),
3441 "tx_q%" PRIu64 "_%s",
3442 qid, ena_stats_tx_strings[id].name);
3443 continue;
3444 }
3445 }
3446
3447 return i;
3448 }
3449
3450 /**
3451 * DPDK callback to get extended device statistics.
3452 *
3453 * @param dev
3454 * Pointer to Ethernet device structure.
3455 * @param[out] xstats
3456 * Stats table output buffer.
3457 * @param n
3458 * The size of the stats table.
3459 *
3460 * @return
3461 * Number of xstats on success, negative on failure.
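* When @p n is smaller than the number of statistics, the required count is returned and @p xstats is left untouched.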
3462 */
3463 static int ena_xstats_get(struct rte_eth_dev *dev,
3464 struct rte_eth_xstat *xstats,
3465 unsigned int n)
3466 {
3467 struct ena_adapter *adapter = dev->data->dev_private;
3468 unsigned int xstats_count = ena_xstats_calc_num(dev->data);
3469 unsigned int stat, i, count = 0;
3470 int stat_offset;
3471 void *stats_begin;
3472 uint64_t metrics_stats[ENA_MAX_CUSTOMER_METRICS];
3473 struct ena_stats_srd srd_info = {0};
3474
3475 if (n < xstats_count)
3476 return xstats_count;
3477
3478 if (!xstats)
3479 return 0;
3480
3481 for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
3482 stat_offset = ena_stats_global_strings[stat].stat_offset;
3483 stats_begin = &adapter->dev_stats;
3484
3485 xstats[count].id = count;
3486 xstats[count].value = *((uint64_t *)
3487 ((char *)stats_begin + stat_offset));
3488 }
3489
3490 ena_copy_customer_metrics(adapter, metrics_stats, adapter->metrics_num);
3491 stats_begin = metrics_stats;
3492 for (stat = 0; stat < adapter->metrics_num; stat++, count++) {
3493 stat_offset = ena_stats_metrics_strings[stat].stat_offset;
3494
3495 xstats[count].id = count;
3496 xstats[count].value = *((uint64_t *)
3497 ((char *)stats_begin + stat_offset));
3498 }
3499
3500 ena_copy_ena_srd_info(adapter, &srd_info);
3501 stats_begin = &srd_info;
3502 for (stat = 0; stat < ENA_STATS_ARRAY_ENA_SRD; stat++, count++) {
3503 stat_offset = ena_stats_srd_strings[stat].stat_offset;
3504 xstats[count].id = count;
3505 xstats[count].value = *((uint64_t *)
3506 ((char *)stats_begin + stat_offset));
3507 }
3508
3509 for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
3510 for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
3511 stat_offset = ena_stats_rx_strings[stat].stat_offset;
3512 stats_begin = &adapter->rx_ring[i].rx_stats;
3513
3514 xstats[count].id = count;
3515 xstats[count].value = *((uint64_t *)
3516 ((char *)stats_begin + stat_offset));
3517 }
3518 }
3519
3520 for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
3521 for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
3522 stat_offset = ena_stats_tx_strings[stat].stat_offset;
3523 stats_begin = &adapter->tx_ring[i].tx_stats;
3524
3525 xstats[count].id = count;
3526 xstats[count].value = *((uint64_t *)
3527 ((char *)stats_begin + stat_offset));
3528 }
3529 }
3530
3531 return count;
3532 }
3533
3534 static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
3535 const uint64_t *ids,
3536 uint64_t *values,
3537 unsigned int n)
3538 {
3539 struct ena_adapter *adapter = dev->data->dev_private;
3540 uint64_t id;
3541 uint64_t rx_entries, tx_entries;
3542 unsigned int i;
3543 int qid;
3544 int valid = 0;
3545 bool were_metrics_copied = false;
3546 bool was_srd_info_copied = false;
3547 uint64_t metrics_stats[ENA_MAX_CUSTOMER_METRICS];
3548 struct ena_stats_srd srd_info = {0};
3549
3550 for (i = 0; i < n; ++i) {
3551 id = ids[i];
3552 /* Check if id belongs to global statistics */
3553 if (id < ENA_STATS_ARRAY_GLOBAL) {
3554 values[i] = *((uint64_t *)&adapter->dev_stats + id);
3555 ++valid;
3556 continue;
3557 }
3558
3559 /* Check if id belongs to ENI statistics */
3560 id -= ENA_STATS_ARRAY_GLOBAL;
3561 if (id < adapter->metrics_num) {
3562 /* Avoid reading metrics multiple times in a single
3563 * function call, as it requires communication with the
3564 * admin queue.
3565 */
3566 if (!were_metrics_copied) {
3567 were_metrics_copied = true;
3568 ena_copy_customer_metrics(adapter,
3569 metrics_stats,
3570 adapter->metrics_num);
3571 }
3572
3573 values[i] = *((uint64_t *)&metrics_stats + id);
3574 ++valid;
3575 continue;
3576 }
3577
3578 /* Check if id belongs to SRD info statistics */
3579 id -= adapter->metrics_num;
3580
3581 if (id < ENA_STATS_ARRAY_ENA_SRD) {
3582 /*
3583 * Avoid reading srd info multiple times in a single
3584 * function call, as it requires communication with the
3585 * admin queue.
3586 */
3587 if (!was_srd_info_copied) {
3588 was_srd_info_copied = true;
3589 ena_copy_ena_srd_info(adapter, &srd_info);
3590 }
3591 values[i] = *((uint64_t *)&srd_info + id);
3592 ++valid;
3593 continue;
3594 }
3595
3596 /* Check if id belongs to rx queue statistics */
3597 id -= ENA_STATS_ARRAY_ENA_SRD;
3598
3599 rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
3600 if (id < rx_entries) {
3601 qid = id % dev->data->nb_rx_queues;
3602 id /= dev->data->nb_rx_queues;
3603 values[i] = *((uint64_t *)
3604 &adapter->rx_ring[qid].rx_stats + id);
3605 ++valid;
3606 continue;
3607 }
3608 /* Check if id belongs to tx queue statistics */
3609 id -= rx_entries;
3610 tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
3611 if (id < tx_entries) {
3612 qid = id % dev->data->nb_tx_queues;
3613 id /= dev->data->nb_tx_queues;
3614 values[i] = *((uint64_t *)
3615 &adapter->tx_ring[qid].tx_stats + id);
3616 ++valid;
3617 continue;
3618 }
3619 }
3620
3621 return valid;
3622 }
3623
3624 static int ena_process_uint_devarg(const char *key,
3625 const char *value,
3626 void *opaque)
3627 {
3628 struct ena_adapter *adapter = opaque;
3629 char *str_end;
3630 uint64_t uint_value;
3631
3632 uint_value = strtoull(value, &str_end, 10);
3633 if (value == str_end) {
3634 PMD_INIT_LOG(ERR,
3635 "Invalid value for key '%s'. Only uint values are accepted.\n",
3636 key);
3637 return -EINVAL;
3638 }
3639
3640 if (strcmp(key, ENA_DEVARG_MISS_TXC_TO) == 0) {
3641 if (uint_value > ENA_MAX_TX_TIMEOUT_SECONDS) {
3642 PMD_INIT_LOG(ERR,
3643 "Tx timeout too high: %" PRIu64 " sec. Maximum allowed: %d sec.\n",
3644 uint_value, ENA_MAX_TX_TIMEOUT_SECONDS);
3645 return -EINVAL;
3646 } else if (uint_value == 0) {
3647 PMD_INIT_LOG(INFO,
3648 "Check for missing Tx completions has been disabled.\n");
3649 adapter->missing_tx_completion_to =
3650 ENA_HW_HINTS_NO_TIMEOUT;
3651 } else {
3652 PMD_INIT_LOG(INFO,
3653 "Tx packet completion timeout set to %" PRIu64 " seconds.\n",
3654 uint_value);
3655 adapter->missing_tx_completion_to =
3656 uint_value * rte_get_timer_hz();
3657 }
3658 }
3659
3660 return 0;
3661 }
3662
3663 static int ena_process_bool_devarg(const char *key,
3664 const char *value,
3665 void *opaque)
3666 {
3667 struct ena_adapter *adapter = opaque;
3668 bool bool_value;
3669
3670 /* Parse the value. */
3671 if (strcmp(value, "1") == 0) {
3672 bool_value = true;
3673 } else if (strcmp(value, "0") == 0) {
3674 bool_value = false;
3675 } else {
3676 PMD_INIT_LOG(ERR,
3677 "Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
3678 value, key);
3679 return -EINVAL;
3680 }
3681
3682 /* Now, assign it to the proper adapter field.
	if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0)
		adapter->use_large_llq_hdr = bool_value;
	else if (strcmp(key, ENA_DEVARG_ENABLE_LLQ) == 0)
		adapter->enable_llq = bool_value;

	return 0;
}

static int ena_parse_devargs(struct ena_adapter *adapter,
			     struct rte_devargs *devargs)
{
	static const char * const allowed_args[] = {
		ENA_DEVARG_LARGE_LLQ_HDR,
		ENA_DEVARG_MISS_TXC_TO,
		ENA_DEVARG_ENABLE_LLQ,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int rc;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, allowed_args);
	if (kvlist == NULL) {
		PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n",
			devargs->args);
		return -EINVAL;
	}

	rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR,
		ena_process_bool_devarg, adapter);
	if (rc != 0)
		goto exit;
	rc = rte_kvargs_process(kvlist, ENA_DEVARG_MISS_TXC_TO,
		ena_process_uint_devarg, adapter);
	if (rc != 0)
		goto exit;
	rc = rte_kvargs_process(kvlist, ENA_DEVARG_ENABLE_LLQ,
		ena_process_bool_devarg, adapter);

exit:
	rte_kvargs_free(kvlist);

	return rc;
}

static int ena_setup_rx_intr(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int rc;
	uint16_t vectors_nb, i;
	bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq;

	if (!rx_intr_requested)
		return 0;

	if (!rte_intr_cap_multiple(intr_handle)) {
		PMD_DRV_LOG(ERR,
			"Rx interrupt requested, but it isn't supported by the PCI driver\n");
		return -ENOTSUP;
	}

	/* Disable interrupt mapping before the configuration starts. */
	rte_intr_disable(intr_handle);

	/* Verify if there are enough vectors available. */
	vectors_nb = dev->data->nb_rx_queues;
	if (vectors_nb > RTE_MAX_RXTX_INTR_VEC_ID) {
		PMD_DRV_LOG(ERR,
			"Too many Rx interrupts requested, maximum number: %d\n",
			RTE_MAX_RXTX_INTR_VEC_ID);
		rc = -ENOTSUP;
		goto enable_intr;
	}

	/* Allocate the vector list */
	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
			dev->data->nb_rx_queues)) {
		PMD_DRV_LOG(ERR,
			"Failed to allocate interrupt vector for %d queues\n",
			dev->data->nb_rx_queues);
		rc = -ENOMEM;
		goto enable_intr;
	}

	rc = rte_intr_efd_enable(intr_handle, vectors_nb);
	if (rc != 0)
		goto free_intr_vec;

	if (!rte_intr_allow_others(intr_handle)) {
		PMD_DRV_LOG(ERR,
			"Not enough interrupts available to use both ENA Admin and Rx interrupts\n");
		goto disable_intr_efd;
	}

	for (i = 0; i < vectors_nb; ++i)
		if (rte_intr_vec_list_index_set(intr_handle, i,
				RTE_INTR_VEC_RXTX_OFFSET + i))
			goto disable_intr_efd;

	rte_intr_enable(intr_handle);
	return 0;

disable_intr_efd:
	rte_intr_efd_disable(intr_handle);
free_intr_vec:
	rte_intr_vec_list_free(intr_handle);
enable_intr:
	rte_intr_enable(intr_handle);
	return rc;
}

static void ena_rx_queue_intr_set(struct rte_eth_dev *dev,
				  uint16_t queue_id,
				  bool unmask)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *rxq = &adapter->rx_ring[queue_id];
	struct ena_eth_io_intr_reg intr_reg;

	ena_com_update_intr_reg(&intr_reg, 0, 0, unmask, 1);
	ena_com_unmask_intr(rxq->ena_com_io_cq, &intr_reg);
}

static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id)
{
	ena_rx_queue_intr_set(dev, queue_id, true);

	return 0;
}

static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id)
{
	ena_rx_queue_intr_set(dev, queue_id, false);

	return 0;
}

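/*
 * Usage sketch (illustrative only, not compiled as part of the driver): the Rx
 * interrupt callbacks above are reached through the generic ethdev API. An
 * application enables them roughly as follows; port_id, queue 0 and the epoll
 * setup shown here are assumptions made for brevity.
 *
 *	struct rte_eth_conf conf = { .intr_conf = { .rxq = 1 } };
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	...
 *	rte_eth_dev_rx_intr_ctl_q(port_id, 0, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, 0);
 *	struct rte_epoll_event event;
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
 *	rte_eth_dev_rx_intr_disable(port_id, 0);
 */
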
static int ena_configure_aenq(struct ena_adapter *adapter)
{
	uint32_t aenq_groups = adapter->all_aenq_groups;
	int rc;

	/* all_aenq_groups holds all AENQ event groups supported by the device
	 * and the HW, so first make sure the LSC request is valid.
	 */
	if (adapter->edev_data->dev_conf.intr_conf.lsc != 0) {
		if (!(aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) {
			PMD_DRV_LOG(ERR,
				"LSC requested, but it's not supported by the AENQ\n");
			return -EINVAL;
		}
	} else {
		/* If LSC wasn't enabled by the app, let's enable all supported
		 * AENQ procedures except the LSC.
		 */
		aenq_groups &= ~BIT(ENA_ADMIN_LINK_CHANGE);
	}

	rc = ena_com_set_aenq_config(&adapter->ena_dev, aenq_groups);
	if (rc != 0) {
		PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc=%d\n", rc);
		return rc;
	}

	adapter->active_aenq_groups = aenq_groups;

	return 0;
}

int ena_mp_indirect_table_set(struct ena_adapter *adapter)
{
	return ENA_PROXY(adapter, ena_com_indirect_table_set, &adapter->ena_dev);
}

int ena_mp_indirect_table_get(struct ena_adapter *adapter,
			      uint32_t *indirect_table)
{
	return ENA_PROXY(adapter, ena_com_indirect_table_get, &adapter->ena_dev,
		indirect_table);
}

/*********************************************************************
 *  ena_plat_dpdk.h functions implementations
 *********************************************************************/

const struct rte_memzone *
ena_mem_alloc_coherent(struct rte_eth_dev_data *data, size_t size,
		       int socket_id, unsigned int alignment, void **virt_addr,
		       dma_addr_t *phys_addr)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct ena_adapter *adapter = data->dev_private;
	const struct rte_memzone *memzone;
	int rc;

	rc = snprintf(z_name, RTE_MEMZONE_NAMESIZE, "ena_p%d_mz%" PRIu64 "",
		data->port_id, adapter->memzone_cnt);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		PMD_DRV_LOG(ERR,
			"Name for the ena_com memzone is too long. Port: %d, mz_num: %" PRIu64 "\n",
			data->port_id, adapter->memzone_cnt);
		goto error;
	}
	adapter->memzone_cnt++;

	memzone = rte_memzone_reserve_aligned(z_name, size, socket_id,
		RTE_MEMZONE_IOVA_CONTIG, alignment);
	if (memzone == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate ena_com memzone: %s\n",
			z_name);
		goto error;
	}

	memset(memzone->addr, 0, size);
	*virt_addr = memzone->addr;
	*phys_addr = memzone->iova;

	return memzone;

error:
	*virt_addr = NULL;
	*phys_addr = 0;

	return NULL;
}

/*********************************************************************
 *  PMD configuration
 *********************************************************************/
static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct ena_adapter), eth_ena_dev_init);
}

static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
}

static struct rte_pci_driver rte_ena_pmd = {
	.id_table = pci_id_ena_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_WC_ACTIVATE,
	.probe = eth_ena_pci_probe,
	.remove = eth_ena_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ena,
	ENA_DEVARG_LARGE_LLQ_HDR "=<0|1> "
	ENA_DEVARG_ENABLE_LLQ "=<0|1> "
	ENA_DEVARG_MISS_TXC_TO "=<uint>");

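/*
 * Usage sketch (illustrative only): the device arguments registered above are
 * passed per device on the EAL command line. The PCI address and the values
 * below are assumptions; substitute the actual BDF of the ENA device, e.g.:
 *
 *	dpdk-testpmd -a 0000:00:05.0,large_llq_hdr=1,miss_txc_to=5,enable_llq=1 -- -i
 */
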
RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ena_logtype_driver, driver, NOTICE);
#ifdef RTE_ETHDEV_DEBUG_RX
RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, DEBUG);
#endif
RTE_LOG_REGISTER_SUFFIX(ena_logtype_com, com, WARNING);

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev = adapter_data;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
	uint32_t status;

	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;

	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
	adapter->link_status = status;

	ena_link_update(eth_dev, 0);
	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev = adapter_data;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_admin_ena_hw_hints *hints;

	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
		PMD_DRV_LOG(WARNING, "Invalid AENQ group: %x. Expected: %x\n",
			aenq_e->aenq_common_desc.group,
			ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrome) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid AENQ notification link state: %d\n",
			aenq_e->aenq_common_desc.syndrome);
	}
}

static void ena_keep_alive(void *adapter_data,
			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev = adapter_data;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_admin_aenq_keep_alive_desc *desc;
	uint64_t rx_drops;
	uint64_t tx_drops;
	uint64_t rx_overruns;

	adapter->timestamp_wd = rte_get_timer_cycles();

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
	rx_overruns = ((uint64_t)desc->rx_overruns_high << 32) | desc->rx_overruns_low;

	/*
	 * Depending on its acceleration support, the device updates a different
	 * statistic when an Rx packet is dropped because there are no available
	 * buffers to accommodate it.
	 */
	adapter->drv_stats->rx_drops = rx_drops + rx_overruns;
	adapter->dev_stats.tx_drops = tx_drops;
}

/**
 * This handler will be called for unknown event groups or events with
 * unimplemented handlers.
 **/
static void unimplemented_aenq_handler(__rte_unused void *data,
				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	PMD_DRV_LOG(ERR,
		"Unknown event was received or event with unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

/*********************************************************************
 *  Multi-Process communication request handling (in primary)
 *********************************************************************/
static int
ena_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
{
	const struct ena_mp_body *req =
		(const struct ena_mp_body *)mp_msg->param;
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	struct ena_mp_body *rsp;
	struct rte_mp_msg mp_rsp;
	struct rte_eth_dev *dev;
	int res = 0;

	rsp = (struct ena_mp_body *)&mp_rsp.param;
	mp_msg_init(&mp_rsp, req->type, req->port_id);

	if (!rte_eth_dev_is_valid_port(req->port_id)) {
		rte_errno = ENODEV;
		res = -rte_errno;
		PMD_DRV_LOG(ERR, "Unknown port %d in request %d\n",
			req->port_id, req->type);
		goto end;
	}
	dev = &rte_eth_devices[req->port_id];
	adapter = dev->data->dev_private;
	ena_dev = &adapter->ena_dev;

	switch (req->type) {
	case ENA_MP_DEV_STATS_GET:
		res = ena_com_get_dev_basic_stats(ena_dev,
			&adapter->basic_stats);
		break;
	case ENA_MP_ENI_STATS_GET:
		res = ena_com_get_eni_stats(ena_dev,
			(struct ena_admin_eni_stats *)&adapter->metrics_stats);
		break;
	case ENA_MP_MTU_SET:
		res = ena_com_set_dev_mtu(ena_dev, req->args.mtu);
		break;
	case ENA_MP_IND_TBL_GET:
		res = ena_com_indirect_table_get(ena_dev,
			adapter->indirect_table);
		break;
	case ENA_MP_IND_TBL_SET:
		res = ena_com_indirect_table_set(ena_dev);
		break;
	case ENA_MP_CUSTOMER_METRICS_GET:
		res = ena_com_get_customer_metrics(ena_dev,
			(char *)adapter->metrics_stats,
			adapter->metrics_num * sizeof(uint64_t));
		break;
	case ENA_MP_SRD_STATS_GET:
		res = ena_com_get_ena_srd_info(ena_dev,
			(struct ena_admin_ena_srd_info *)&adapter->srd_stats);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unknown request type %d\n", req->type);
		res = -EINVAL;
		break;
	}

end:
	/* Save processing result in the reply */
	rsp->result = res;
	/* Return just IPC processing status */
	return rte_mp_reply(&mp_rsp, peer);
}
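
/*
 * Flow sketch (illustrative only): ena_mp_primary_handle() runs in the primary
 * process after being registered with the DPDK multi-process channel; the
 * registration itself happens during device init, outside of this excerpt.
 * A secondary process reaches it through a synchronous request roughly along
 * these lines; the message name and the elided request filling shown below are
 * assumptions standing in for the driver's real helpers.
 *
 *	struct rte_mp_msg req = { 0 };
 *	struct rte_mp_reply reply;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	strlcpy(req.name, "ena_mp", sizeof(req.name));
 *	req.len_param = sizeof(struct ena_mp_body);
 *	... fill the ena_mp_body request in req.param ...
 *	if (rte_mp_request_sync(&req, &reply, &ts) == 0 && reply.nb_received == 1)
 *		... read the ena_mp_body response from reply.msgs[0].param ...
 *	free(reply.msgs);
 */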