/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_net.h>
#include <rte_kvargs.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	5
#define DRV_MODULE_VER_SUBMINOR	0

#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)

#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)

#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) RTE_DIM(x)

#define ENA_MIN_RING_DESC	128

#define ENA_PTYPE_HAS_HASH	(RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP)

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_ENI_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, eni)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

/* Device arguments */
#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"
/* Timeout in seconds after which a single uncompleted Tx packet should be
 * considered as missing.
 */
#define ENA_DEVARG_MISS_TXC_TO "miss_txc_to"

/*
 * Each rte_memzone should have a unique name.
 * To satisfy it, count the number of allocations and add it to the name.
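 * (As an illustration only, an allocation helper could build the zone name
 * with something like
 *   snprintf(name, RTE_MEMZONE_NAMESIZE, "ena_alloc_%" PRIi64,
 *            rte_atomic64_add_return(&ena_alloc_cnt, 1));
 * the actual helper macros and their name format are assumed to live in
 * ena_ethdev.h and may differ.)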
73 */ 74 rte_atomic64_t ena_alloc_cnt; 75 76 static const struct ena_stats ena_stats_global_strings[] = { 77 ENA_STAT_GLOBAL_ENTRY(wd_expired), 78 ENA_STAT_GLOBAL_ENTRY(dev_start), 79 ENA_STAT_GLOBAL_ENTRY(dev_stop), 80 ENA_STAT_GLOBAL_ENTRY(tx_drops), 81 }; 82 83 static const struct ena_stats ena_stats_eni_strings[] = { 84 ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded), 85 ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded), 86 ENA_STAT_ENI_ENTRY(pps_allowance_exceeded), 87 ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded), 88 ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded), 89 }; 90 91 static const struct ena_stats ena_stats_tx_strings[] = { 92 ENA_STAT_TX_ENTRY(cnt), 93 ENA_STAT_TX_ENTRY(bytes), 94 ENA_STAT_TX_ENTRY(prepare_ctx_err), 95 ENA_STAT_TX_ENTRY(tx_poll), 96 ENA_STAT_TX_ENTRY(doorbells), 97 ENA_STAT_TX_ENTRY(bad_req_id), 98 ENA_STAT_TX_ENTRY(available_desc), 99 ENA_STAT_TX_ENTRY(missed_tx), 100 }; 101 102 static const struct ena_stats ena_stats_rx_strings[] = { 103 ENA_STAT_RX_ENTRY(cnt), 104 ENA_STAT_RX_ENTRY(bytes), 105 ENA_STAT_RX_ENTRY(refill_partial), 106 ENA_STAT_RX_ENTRY(l3_csum_bad), 107 ENA_STAT_RX_ENTRY(l4_csum_bad), 108 ENA_STAT_RX_ENTRY(l4_csum_good), 109 ENA_STAT_RX_ENTRY(mbuf_alloc_fail), 110 ENA_STAT_RX_ENTRY(bad_desc_num), 111 ENA_STAT_RX_ENTRY(bad_req_id), 112 }; 113 114 #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) 115 #define ENA_STATS_ARRAY_ENI ARRAY_SIZE(ena_stats_eni_strings) 116 #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) 117 #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) 118 119 #define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\ 120 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\ 121 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\ 122 RTE_ETH_TX_OFFLOAD_TCP_TSO) 123 #define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\ 124 RTE_MBUF_F_TX_IP_CKSUM |\ 125 RTE_MBUF_F_TX_TCP_SEG) 126 127 /** Vendor ID used by Amazon devices */ 128 #define PCI_VENDOR_ID_AMAZON 0x1D0F 129 /** Amazon devices */ 130 #define PCI_DEVICE_ID_ENA_VF 0xEC20 131 #define PCI_DEVICE_ID_ENA_VF_RSERV0 0xEC21 132 133 #define ENA_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_L4_MASK | \ 134 RTE_MBUF_F_TX_IPV6 | \ 135 RTE_MBUF_F_TX_IPV4 | \ 136 RTE_MBUF_F_TX_IP_CKSUM | \ 137 RTE_MBUF_F_TX_TCP_SEG) 138 139 #define ENA_TX_OFFLOAD_NOTSUP_MASK \ 140 (RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK) 141 142 /** HW specific offloads capabilities. */ 143 /* IPv4 checksum offload. */ 144 #define ENA_L3_IPV4_CSUM 0x0001 145 /* TCP/UDP checksum offload for IPv4 packets. */ 146 #define ENA_L4_IPV4_CSUM 0x0002 147 /* TCP/UDP checksum offload for IPv4 packets with pseudo header checksum. */ 148 #define ENA_L4_IPV4_CSUM_PARTIAL 0x0004 149 /* TCP/UDP checksum offload for IPv6 packets. */ 150 #define ENA_L4_IPV6_CSUM 0x0008 151 /* TCP/UDP checksum offload for IPv6 packets with pseudo header checksum. */ 152 #define ENA_L4_IPV6_CSUM_PARTIAL 0x0010 153 /* TSO support for IPv4 packets. */ 154 #define ENA_IPV4_TSO 0x0020 155 156 /* Device supports setting RSS hash. 
*/ 157 #define ENA_RX_RSS_HASH 0x0040 158 159 static const struct rte_pci_id pci_id_ena_map[] = { 160 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) }, 161 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) }, 162 { .device_id = 0 }, 163 }; 164 165 static struct ena_aenq_handlers aenq_handlers; 166 167 static int ena_device_init(struct ena_adapter *adapter, 168 struct rte_pci_device *pdev, 169 struct ena_com_dev_get_features_ctx *get_feat_ctx); 170 static int ena_dev_configure(struct rte_eth_dev *dev); 171 static void ena_tx_map_mbuf(struct ena_ring *tx_ring, 172 struct ena_tx_buffer *tx_info, 173 struct rte_mbuf *mbuf, 174 void **push_header, 175 uint16_t *header_len); 176 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf); 177 static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt); 178 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 179 uint16_t nb_pkts); 180 static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 181 uint16_t nb_pkts); 182 static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 183 uint16_t nb_desc, unsigned int socket_id, 184 const struct rte_eth_txconf *tx_conf); 185 static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 186 uint16_t nb_desc, unsigned int socket_id, 187 const struct rte_eth_rxconf *rx_conf, 188 struct rte_mempool *mp); 189 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len); 190 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, 191 struct ena_com_rx_buf_info *ena_bufs, 192 uint32_t descs, 193 uint16_t *next_to_clean, 194 uint8_t offset); 195 static uint16_t eth_ena_recv_pkts(void *rx_queue, 196 struct rte_mbuf **rx_pkts, uint16_t nb_pkts); 197 static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq, 198 struct rte_mbuf *mbuf, uint16_t id); 199 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count); 200 static void ena_init_rings(struct ena_adapter *adapter, 201 bool disable_meta_caching); 202 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 203 static int ena_start(struct rte_eth_dev *dev); 204 static int ena_stop(struct rte_eth_dev *dev); 205 static int ena_close(struct rte_eth_dev *dev); 206 static int ena_dev_reset(struct rte_eth_dev *dev); 207 static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); 208 static void ena_rx_queue_release_all(struct rte_eth_dev *dev); 209 static void ena_tx_queue_release_all(struct rte_eth_dev *dev); 210 static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid); 211 static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid); 212 static void ena_rx_queue_release_bufs(struct ena_ring *ring); 213 static void ena_tx_queue_release_bufs(struct ena_ring *ring); 214 static int ena_link_update(struct rte_eth_dev *dev, 215 int wait_to_complete); 216 static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring); 217 static void ena_queue_stop(struct ena_ring *ring); 218 static void ena_queue_stop_all(struct rte_eth_dev *dev, 219 enum ena_ring_type ring_type); 220 static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring); 221 static int ena_queue_start_all(struct rte_eth_dev *dev, 222 enum ena_ring_type ring_type); 223 static void ena_stats_restart(struct rte_eth_dev *dev); 224 static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter); 225 static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter); 226 
static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter); 227 static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter); 228 static int ena_infos_get(struct rte_eth_dev *dev, 229 struct rte_eth_dev_info *dev_info); 230 static void ena_interrupt_handler_rte(void *cb_arg); 231 static void ena_timer_wd_callback(struct rte_timer *timer, void *arg); 232 static void ena_destroy_device(struct rte_eth_dev *eth_dev); 233 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev); 234 static int ena_xstats_get_names(struct rte_eth_dev *dev, 235 struct rte_eth_xstat_name *xstats_names, 236 unsigned int n); 237 static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev, 238 const uint64_t *ids, 239 struct rte_eth_xstat_name *xstats_names, 240 unsigned int size); 241 static int ena_xstats_get(struct rte_eth_dev *dev, 242 struct rte_eth_xstat *stats, 243 unsigned int n); 244 static int ena_xstats_get_by_id(struct rte_eth_dev *dev, 245 const uint64_t *ids, 246 uint64_t *values, 247 unsigned int n); 248 static int ena_process_bool_devarg(const char *key, 249 const char *value, 250 void *opaque); 251 static int ena_parse_devargs(struct ena_adapter *adapter, 252 struct rte_devargs *devargs); 253 static int ena_copy_eni_stats(struct ena_adapter *adapter, 254 struct ena_stats_eni *stats); 255 static int ena_setup_rx_intr(struct rte_eth_dev *dev); 256 static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev, 257 uint16_t queue_id); 258 static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev, 259 uint16_t queue_id); 260 static int ena_configure_aenq(struct ena_adapter *adapter); 261 static int ena_mp_primary_handle(const struct rte_mp_msg *mp_msg, 262 const void *peer); 263 264 static const struct eth_dev_ops ena_dev_ops = { 265 .dev_configure = ena_dev_configure, 266 .dev_infos_get = ena_infos_get, 267 .rx_queue_setup = ena_rx_queue_setup, 268 .tx_queue_setup = ena_tx_queue_setup, 269 .dev_start = ena_start, 270 .dev_stop = ena_stop, 271 .link_update = ena_link_update, 272 .stats_get = ena_stats_get, 273 .xstats_get_names = ena_xstats_get_names, 274 .xstats_get_names_by_id = ena_xstats_get_names_by_id, 275 .xstats_get = ena_xstats_get, 276 .xstats_get_by_id = ena_xstats_get_by_id, 277 .mtu_set = ena_mtu_set, 278 .rx_queue_release = ena_rx_queue_release, 279 .tx_queue_release = ena_tx_queue_release, 280 .dev_close = ena_close, 281 .dev_reset = ena_dev_reset, 282 .reta_update = ena_rss_reta_update, 283 .reta_query = ena_rss_reta_query, 284 .rx_queue_intr_enable = ena_rx_queue_intr_enable, 285 .rx_queue_intr_disable = ena_rx_queue_intr_disable, 286 .rss_hash_update = ena_rss_hash_update, 287 .rss_hash_conf_get = ena_rss_hash_conf_get, 288 .tx_done_cleanup = ena_tx_cleanup, 289 }; 290 291 /********************************************************************* 292 * Multi-Process communication bits 293 *********************************************************************/ 294 /* rte_mp IPC message name */ 295 #define ENA_MP_NAME "net_ena_mp" 296 /* Request timeout in seconds */ 297 #define ENA_MP_REQ_TMO 5 298 299 /** Proxy request type */ 300 enum ena_mp_req { 301 ENA_MP_DEV_STATS_GET, 302 ENA_MP_ENI_STATS_GET, 303 ENA_MP_MTU_SET, 304 ENA_MP_IND_TBL_GET, 305 ENA_MP_IND_TBL_SET 306 }; 307 308 /** Proxy message body. Shared between requests and responses. */ 309 struct ena_mp_body { 310 /* Message type */ 311 enum ena_mp_req type; 312 int port_id; 313 /* Processing result. Set in replies. 0 if message succeeded, negative 314 * error code otherwise. 
315 */ 316 int result; 317 union { 318 int mtu; /* For ENA_MP_MTU_SET */ 319 } args; 320 }; 321 322 /** 323 * Initialize IPC message. 324 * 325 * @param[out] msg 326 * Pointer to the message to initialize. 327 * @param[in] type 328 * Message type. 329 * @param[in] port_id 330 * Port ID of target device. 331 * 332 */ 333 static void 334 mp_msg_init(struct rte_mp_msg *msg, enum ena_mp_req type, int port_id) 335 { 336 struct ena_mp_body *body = (struct ena_mp_body *)&msg->param; 337 338 memset(msg, 0, sizeof(*msg)); 339 strlcpy(msg->name, ENA_MP_NAME, sizeof(msg->name)); 340 msg->len_param = sizeof(*body); 341 body->type = type; 342 body->port_id = port_id; 343 } 344 345 /********************************************************************* 346 * Multi-Process communication PMD API 347 *********************************************************************/ 348 /** 349 * Define proxy request descriptor 350 * 351 * Used to define all structures and functions required for proxying a given 352 * function to the primary process including the code to perform to prepare the 353 * request and process the response. 354 * 355 * @param[in] f 356 * Name of the function to proxy 357 * @param[in] t 358 * Message type to use 359 * @param[in] prep 360 * Body of a function to prepare the request in form of a statement 361 * expression. It is passed all the original function arguments along with two 362 * extra ones: 363 * - struct ena_adapter *adapter - PMD data of the device calling the proxy. 364 * - struct ena_mp_body *req - body of a request to prepare. 365 * @param[in] proc 366 * Body of a function to process the response in form of a statement 367 * expression. It is passed all the original function arguments along with two 368 * extra ones: 369 * - struct ena_adapter *adapter - PMD data of the device calling the proxy. 370 * - struct ena_mp_body *rsp - body of a response to process. 371 * @param ... 372 * Proxied function's arguments 373 * 374 * @note Inside prep and proc any parameters which aren't used should be marked 375 * as such (with ENA_TOUCH or __rte_unused). 376 */ 377 #define ENA_PROXY_DESC(f, t, prep, proc, ...) \ 378 static const enum ena_mp_req mp_type_ ## f = t; \ 379 static const char *mp_name_ ## f = #t; \ 380 static void mp_prep_ ## f(struct ena_adapter *adapter, \ 381 struct ena_mp_body *req, \ 382 __VA_ARGS__) \ 383 { \ 384 prep; \ 385 } \ 386 static void mp_proc_ ## f(struct ena_adapter *adapter, \ 387 struct ena_mp_body *rsp, \ 388 __VA_ARGS__) \ 389 { \ 390 proc; \ 391 } 392 393 /** 394 * Proxy wrapper for calling primary functions in a secondary process. 395 * 396 * Depending on whether called in primary or secondary process, calls the 397 * @p func directly or proxies the call to the primary process via rte_mp IPC. 398 * This macro requires a proxy request descriptor to be defined for @p func 399 * using ENA_PROXY_DESC() macro. 400 * 401 * @param[in/out] a 402 * Device PMD data. Used for sending the message and sharing message results 403 * between primary and secondary. 404 * @param[in] f 405 * Function to proxy. 406 * @param ... 407 * Arguments of @p func. 408 * 409 * @return 410 * - 0: Processing succeeded and response handler was called. 411 * - -EPERM: IPC is unavailable on this platform. This means only primary 412 * process may call the proxied function. 413 * - -EIO: IPC returned error on request send. Inspect rte_errno detailed 414 * error code. 415 * - Negative error code from the proxied function. 416 * 417 * @note This mechanism is geared towards control-path tasks. 
Avoid calling it 418 * in fast-path unless unbound delays are allowed. This is due to the IPC 419 * mechanism itself (socket based). 420 * @note Due to IPC parameter size limitations the proxy logic shares call 421 * results through the struct ena_adapter shared memory. This makes the 422 * proxy mechanism strictly single-threaded. Therefore be sure to make all 423 * calls to the same proxied function under the same lock. 424 */ 425 #define ENA_PROXY(a, f, ...) \ 426 ({ \ 427 struct ena_adapter *_a = (a); \ 428 struct timespec ts = { .tv_sec = ENA_MP_REQ_TMO }; \ 429 struct ena_mp_body *req, *rsp; \ 430 struct rte_mp_reply mp_rep; \ 431 struct rte_mp_msg mp_req; \ 432 int ret; \ 433 \ 434 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { \ 435 ret = f(__VA_ARGS__); \ 436 } else { \ 437 /* Prepare and send request */ \ 438 req = (struct ena_mp_body *)&mp_req.param; \ 439 mp_msg_init(&mp_req, mp_type_ ## f, _a->edev_data->port_id); \ 440 mp_prep_ ## f(_a, req, ## __VA_ARGS__); \ 441 \ 442 ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts); \ 443 if (likely(!ret)) { \ 444 RTE_ASSERT(mp_rep.nb_received == 1); \ 445 rsp = (struct ena_mp_body *)&mp_rep.msgs[0].param; \ 446 ret = rsp->result; \ 447 if (ret == 0) { \ 448 mp_proc_##f(_a, rsp, ## __VA_ARGS__); \ 449 } else { \ 450 PMD_DRV_LOG(ERR, \ 451 "%s returned error: %d\n", \ 452 mp_name_ ## f, rsp->result);\ 453 } \ 454 free(mp_rep.msgs); \ 455 } else if (rte_errno == ENOTSUP) { \ 456 PMD_DRV_LOG(ERR, \ 457 "No IPC, can't proxy to primary\n");\ 458 ret = -rte_errno; \ 459 } else { \ 460 PMD_DRV_LOG(ERR, "Request %s failed: %s\n", \ 461 mp_name_ ## f, \ 462 rte_strerror(rte_errno)); \ 463 ret = -EIO; \ 464 } \ 465 } \ 466 ret; \ 467 }) 468 469 /********************************************************************* 470 * Multi-Process communication request descriptors 471 *********************************************************************/ 472 473 ENA_PROXY_DESC(ena_com_get_dev_basic_stats, ENA_MP_DEV_STATS_GET, 474 ({ 475 ENA_TOUCH(adapter); 476 ENA_TOUCH(req); 477 ENA_TOUCH(ena_dev); 478 ENA_TOUCH(stats); 479 }), 480 ({ 481 ENA_TOUCH(rsp); 482 ENA_TOUCH(ena_dev); 483 if (stats != &adapter->basic_stats) 484 rte_memcpy(stats, &adapter->basic_stats, sizeof(*stats)); 485 }), 486 struct ena_com_dev *ena_dev, struct ena_admin_basic_stats *stats); 487 488 ENA_PROXY_DESC(ena_com_get_eni_stats, ENA_MP_ENI_STATS_GET, 489 ({ 490 ENA_TOUCH(adapter); 491 ENA_TOUCH(req); 492 ENA_TOUCH(ena_dev); 493 ENA_TOUCH(stats); 494 }), 495 ({ 496 ENA_TOUCH(rsp); 497 ENA_TOUCH(ena_dev); 498 if (stats != (struct ena_admin_eni_stats *)&adapter->eni_stats) 499 rte_memcpy(stats, &adapter->eni_stats, sizeof(*stats)); 500 }), 501 struct ena_com_dev *ena_dev, struct ena_admin_eni_stats *stats); 502 503 ENA_PROXY_DESC(ena_com_set_dev_mtu, ENA_MP_MTU_SET, 504 ({ 505 ENA_TOUCH(adapter); 506 ENA_TOUCH(ena_dev); 507 req->args.mtu = mtu; 508 }), 509 ({ 510 ENA_TOUCH(adapter); 511 ENA_TOUCH(rsp); 512 ENA_TOUCH(ena_dev); 513 ENA_TOUCH(mtu); 514 }), 515 struct ena_com_dev *ena_dev, int mtu); 516 517 ENA_PROXY_DESC(ena_com_indirect_table_set, ENA_MP_IND_TBL_SET, 518 ({ 519 ENA_TOUCH(adapter); 520 ENA_TOUCH(req); 521 ENA_TOUCH(ena_dev); 522 }), 523 ({ 524 ENA_TOUCH(adapter); 525 ENA_TOUCH(rsp); 526 ENA_TOUCH(ena_dev); 527 }), 528 struct ena_com_dev *ena_dev); 529 530 ENA_PROXY_DESC(ena_com_indirect_table_get, ENA_MP_IND_TBL_GET, 531 ({ 532 ENA_TOUCH(adapter); 533 ENA_TOUCH(req); 534 ENA_TOUCH(ena_dev); 535 ENA_TOUCH(ind_tbl); 536 }), 537 ({ 538 ENA_TOUCH(rsp); 539 ENA_TOUCH(ena_dev); 540 
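	/* The primary process fills adapter->indirect_table (the shared result
	 * area used by the ENA_PROXY mechanism); copy it out only when the
	 * caller passed a different buffer, mirroring the basic/ENI stats
	 * handlers above.
	 */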
if (ind_tbl != adapter->indirect_table) 541 rte_memcpy(ind_tbl, adapter->indirect_table, 542 sizeof(adapter->indirect_table)); 543 }), 544 struct ena_com_dev *ena_dev, u32 *ind_tbl); 545 546 static inline void ena_trigger_reset(struct ena_adapter *adapter, 547 enum ena_regs_reset_reason_types reason) 548 { 549 if (likely(!adapter->trigger_reset)) { 550 adapter->reset_reason = reason; 551 adapter->trigger_reset = true; 552 } 553 } 554 555 static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring, 556 struct rte_mbuf *mbuf, 557 struct ena_com_rx_ctx *ena_rx_ctx, 558 bool fill_hash) 559 { 560 struct ena_stats_rx *rx_stats = &rx_ring->rx_stats; 561 uint64_t ol_flags = 0; 562 uint32_t packet_type = 0; 563 564 if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) 565 packet_type |= RTE_PTYPE_L4_TCP; 566 else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP) 567 packet_type |= RTE_PTYPE_L4_UDP; 568 569 if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) { 570 packet_type |= RTE_PTYPE_L3_IPV4; 571 if (unlikely(ena_rx_ctx->l3_csum_err)) { 572 ++rx_stats->l3_csum_bad; 573 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; 574 } else { 575 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; 576 } 577 } else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) { 578 packet_type |= RTE_PTYPE_L3_IPV6; 579 } 580 581 if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) { 582 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; 583 } else { 584 if (unlikely(ena_rx_ctx->l4_csum_err)) { 585 ++rx_stats->l4_csum_bad; 586 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; 587 } else { 588 ++rx_stats->l4_csum_good; 589 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; 590 } 591 } 592 593 if (fill_hash && 594 likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) { 595 ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 596 mbuf->hash.rss = ena_rx_ctx->hash; 597 } 598 599 mbuf->ol_flags = ol_flags; 600 mbuf->packet_type = packet_type; 601 } 602 603 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, 604 struct ena_com_tx_ctx *ena_tx_ctx, 605 uint64_t queue_offloads, 606 bool disable_meta_caching) 607 { 608 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; 609 610 if ((mbuf->ol_flags & MBUF_OFFLOADS) && 611 (queue_offloads & QUEUE_OFFLOADS)) { 612 /* check if TSO is required */ 613 if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) && 614 (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) { 615 ena_tx_ctx->tso_enable = true; 616 617 ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf); 618 } 619 620 /* check if L3 checksum is needed */ 621 if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) && 622 (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) 623 ena_tx_ctx->l3_csum_enable = true; 624 625 if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) { 626 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; 627 /* For the IPv6 packets, DF always needs to be true. 
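			 * (IPv6 has no DF bit in its header and intermediate
			 * nodes never fragment IPv6 packets, so the Tx context
			 * simply reports DF as set.)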
*/ 628 ena_tx_ctx->df = 1; 629 } else { 630 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; 631 632 /* set don't fragment (DF) flag */ 633 if (mbuf->packet_type & 634 (RTE_PTYPE_L4_NONFRAG 635 | RTE_PTYPE_INNER_L4_NONFRAG)) 636 ena_tx_ctx->df = 1; 637 } 638 639 /* check if L4 checksum is needed */ 640 if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) && 641 (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) { 642 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; 643 ena_tx_ctx->l4_csum_enable = true; 644 } else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == 645 RTE_MBUF_F_TX_UDP_CKSUM) && 646 (queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) { 647 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; 648 ena_tx_ctx->l4_csum_enable = true; 649 } else { 650 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; 651 ena_tx_ctx->l4_csum_enable = false; 652 } 653 654 ena_meta->mss = mbuf->tso_segsz; 655 ena_meta->l3_hdr_len = mbuf->l3_len; 656 ena_meta->l3_hdr_offset = mbuf->l2_len; 657 658 ena_tx_ctx->meta_valid = true; 659 } else if (disable_meta_caching) { 660 memset(ena_meta, 0, sizeof(*ena_meta)); 661 ena_tx_ctx->meta_valid = true; 662 } else { 663 ena_tx_ctx->meta_valid = false; 664 } 665 } 666 667 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) 668 { 669 struct ena_tx_buffer *tx_info = NULL; 670 671 if (likely(req_id < tx_ring->ring_size)) { 672 tx_info = &tx_ring->tx_buffer_info[req_id]; 673 if (likely(tx_info->mbuf)) 674 return 0; 675 } 676 677 if (tx_info) 678 PMD_TX_LOG(ERR, "tx_info doesn't have valid mbuf\n"); 679 else 680 PMD_TX_LOG(ERR, "Invalid req_id: %hu\n", req_id); 681 682 /* Trigger device reset */ 683 ++tx_ring->tx_stats.bad_req_id; 684 ena_trigger_reset(tx_ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID); 685 return -EFAULT; 686 } 687 688 static void ena_config_host_info(struct ena_com_dev *ena_dev) 689 { 690 struct ena_admin_host_info *host_info; 691 int rc; 692 693 /* Allocate only the host info */ 694 rc = ena_com_allocate_host_info(ena_dev); 695 if (rc) { 696 PMD_DRV_LOG(ERR, "Cannot allocate host info\n"); 697 return; 698 } 699 700 host_info = ena_dev->host_attr.host_info; 701 702 host_info->os_type = ENA_ADMIN_OS_DPDK; 703 host_info->kernel_ver = RTE_VERSION; 704 strlcpy((char *)host_info->kernel_ver_str, rte_version(), 705 sizeof(host_info->kernel_ver_str)); 706 host_info->os_dist = RTE_VERSION; 707 strlcpy((char *)host_info->os_dist_str, rte_version(), 708 sizeof(host_info->os_dist_str)); 709 host_info->driver_version = 710 (DRV_MODULE_VER_MAJOR) | 711 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 712 (DRV_MODULE_VER_SUBMINOR << 713 ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 714 host_info->num_cpus = rte_lcore_count(); 715 716 host_info->driver_supported_features = 717 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | 718 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; 719 720 rc = ena_com_set_host_attributes(ena_dev); 721 if (rc) { 722 if (rc == -ENA_COM_UNSUPPORTED) 723 PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); 724 else 725 PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); 726 727 goto err; 728 } 729 730 return; 731 732 err: 733 ena_com_delete_host_info(ena_dev); 734 } 735 736 /* This function calculates the number of xstats based on the current config */ 737 static unsigned int ena_xstats_calc_num(struct rte_eth_dev_data *data) 738 { 739 return ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENI + 740 (data->nb_tx_queues * ENA_STATS_ARRAY_TX) + 741 (data->nb_rx_queues * ENA_STATS_ARRAY_RX); 742 } 743 744 static void 
ena_config_debug_area(struct ena_adapter *adapter) 745 { 746 u32 debug_area_size; 747 int rc, ss_count; 748 749 ss_count = ena_xstats_calc_num(adapter->edev_data); 750 751 /* allocate 32 bytes for each string and 64bit for the value */ 752 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 753 754 rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); 755 if (rc) { 756 PMD_DRV_LOG(ERR, "Cannot allocate debug area\n"); 757 return; 758 } 759 760 rc = ena_com_set_host_attributes(&adapter->ena_dev); 761 if (rc) { 762 if (rc == -ENA_COM_UNSUPPORTED) 763 PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); 764 else 765 PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); 766 767 goto err; 768 } 769 770 return; 771 err: 772 ena_com_delete_debug_area(&adapter->ena_dev); 773 } 774 775 static int ena_close(struct rte_eth_dev *dev) 776 { 777 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 778 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 779 struct ena_adapter *adapter = dev->data->dev_private; 780 int ret = 0; 781 782 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 783 return 0; 784 785 if (adapter->state == ENA_ADAPTER_STATE_RUNNING) 786 ret = ena_stop(dev); 787 adapter->state = ENA_ADAPTER_STATE_CLOSED; 788 789 ena_rx_queue_release_all(dev); 790 ena_tx_queue_release_all(dev); 791 792 rte_free(adapter->drv_stats); 793 adapter->drv_stats = NULL; 794 795 rte_intr_disable(intr_handle); 796 rte_intr_callback_unregister(intr_handle, 797 ena_interrupt_handler_rte, 798 dev); 799 800 /* 801 * MAC is not allocated dynamically. Setting NULL should prevent from 802 * release of the resource in the rte_eth_dev_release_port(). 803 */ 804 dev->data->mac_addrs = NULL; 805 806 return ret; 807 } 808 809 static int 810 ena_dev_reset(struct rte_eth_dev *dev) 811 { 812 int rc = 0; 813 814 /* Cannot release memory in secondary process */ 815 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 816 PMD_DRV_LOG(WARNING, "dev_reset not supported in secondary.\n"); 817 return -EPERM; 818 } 819 820 ena_destroy_device(dev); 821 rc = eth_ena_dev_init(dev); 822 if (rc) 823 PMD_INIT_LOG(CRIT, "Cannot initialize device\n"); 824 825 return rc; 826 } 827 828 static void ena_rx_queue_release_all(struct rte_eth_dev *dev) 829 { 830 int nb_queues = dev->data->nb_rx_queues; 831 int i; 832 833 for (i = 0; i < nb_queues; i++) 834 ena_rx_queue_release(dev, i); 835 } 836 837 static void ena_tx_queue_release_all(struct rte_eth_dev *dev) 838 { 839 int nb_queues = dev->data->nb_tx_queues; 840 int i; 841 842 for (i = 0; i < nb_queues; i++) 843 ena_tx_queue_release(dev, i); 844 } 845 846 static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 847 { 848 struct ena_ring *ring = dev->data->rx_queues[qid]; 849 850 /* Free ring resources */ 851 rte_free(ring->rx_buffer_info); 852 ring->rx_buffer_info = NULL; 853 854 rte_free(ring->rx_refill_buffer); 855 ring->rx_refill_buffer = NULL; 856 857 rte_free(ring->empty_rx_reqs); 858 ring->empty_rx_reqs = NULL; 859 860 ring->configured = 0; 861 862 PMD_DRV_LOG(NOTICE, "Rx queue %d:%d released\n", 863 ring->port_id, ring->id); 864 } 865 866 static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 867 { 868 struct ena_ring *ring = dev->data->tx_queues[qid]; 869 870 /* Free ring resources */ 871 rte_free(ring->push_buf_intermediate_buf); 872 873 rte_free(ring->tx_buffer_info); 874 875 rte_free(ring->empty_tx_reqs); 876 877 ring->empty_tx_reqs = NULL; 878 ring->tx_buffer_info = NULL; 879 ring->push_buf_intermediate_buf = NULL; 
880 881 ring->configured = 0; 882 883 PMD_DRV_LOG(NOTICE, "Tx queue %d:%d released\n", 884 ring->port_id, ring->id); 885 } 886 887 static void ena_rx_queue_release_bufs(struct ena_ring *ring) 888 { 889 unsigned int i; 890 891 for (i = 0; i < ring->ring_size; ++i) { 892 struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i]; 893 if (rx_info->mbuf) { 894 rte_mbuf_raw_free(rx_info->mbuf); 895 rx_info->mbuf = NULL; 896 } 897 } 898 } 899 900 static void ena_tx_queue_release_bufs(struct ena_ring *ring) 901 { 902 unsigned int i; 903 904 for (i = 0; i < ring->ring_size; ++i) { 905 struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i]; 906 907 if (tx_buf->mbuf) { 908 rte_pktmbuf_free(tx_buf->mbuf); 909 tx_buf->mbuf = NULL; 910 } 911 } 912 } 913 914 static int ena_link_update(struct rte_eth_dev *dev, 915 __rte_unused int wait_to_complete) 916 { 917 struct rte_eth_link *link = &dev->data->dev_link; 918 struct ena_adapter *adapter = dev->data->dev_private; 919 920 link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN; 921 link->link_speed = RTE_ETH_SPEED_NUM_NONE; 922 link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 923 924 return 0; 925 } 926 927 static int ena_queue_start_all(struct rte_eth_dev *dev, 928 enum ena_ring_type ring_type) 929 { 930 struct ena_adapter *adapter = dev->data->dev_private; 931 struct ena_ring *queues = NULL; 932 int nb_queues; 933 int i = 0; 934 int rc = 0; 935 936 if (ring_type == ENA_RING_TYPE_RX) { 937 queues = adapter->rx_ring; 938 nb_queues = dev->data->nb_rx_queues; 939 } else { 940 queues = adapter->tx_ring; 941 nb_queues = dev->data->nb_tx_queues; 942 } 943 for (i = 0; i < nb_queues; i++) { 944 if (queues[i].configured) { 945 if (ring_type == ENA_RING_TYPE_RX) { 946 ena_assert_msg( 947 dev->data->rx_queues[i] == &queues[i], 948 "Inconsistent state of Rx queues\n"); 949 } else { 950 ena_assert_msg( 951 dev->data->tx_queues[i] == &queues[i], 952 "Inconsistent state of Tx queues\n"); 953 } 954 955 rc = ena_queue_start(dev, &queues[i]); 956 957 if (rc) { 958 PMD_INIT_LOG(ERR, 959 "Failed to start queue[%d] of type(%d)\n", 960 i, ring_type); 961 goto err; 962 } 963 } 964 } 965 966 return 0; 967 968 err: 969 while (i--) 970 if (queues[i].configured) 971 ena_queue_stop(&queues[i]); 972 973 return rc; 974 } 975 976 static int ena_check_valid_conf(struct ena_adapter *adapter) 977 { 978 uint32_t mtu = adapter->edev_data->mtu; 979 980 if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) { 981 PMD_INIT_LOG(ERR, 982 "Unsupported MTU of %d. 
Max MTU: %d, min MTU: %d\n", 983 mtu, adapter->max_mtu, ENA_MIN_MTU); 984 return ENA_COM_UNSUPPORTED; 985 } 986 987 return 0; 988 } 989 990 static int 991 ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx, 992 bool use_large_llq_hdr) 993 { 994 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; 995 struct ena_com_dev *ena_dev = ctx->ena_dev; 996 uint32_t max_tx_queue_size; 997 uint32_t max_rx_queue_size; 998 999 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 1000 struct ena_admin_queue_ext_feature_fields *max_queue_ext = 1001 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; 1002 max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth, 1003 max_queue_ext->max_rx_sq_depth); 1004 max_tx_queue_size = max_queue_ext->max_tx_cq_depth; 1005 1006 if (ena_dev->tx_mem_queue_type == 1007 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1008 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 1009 llq->max_llq_depth); 1010 } else { 1011 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 1012 max_queue_ext->max_tx_sq_depth); 1013 } 1014 1015 ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 1016 max_queue_ext->max_per_packet_rx_descs); 1017 ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 1018 max_queue_ext->max_per_packet_tx_descs); 1019 } else { 1020 struct ena_admin_queue_feature_desc *max_queues = 1021 &ctx->get_feat_ctx->max_queues; 1022 max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth, 1023 max_queues->max_sq_depth); 1024 max_tx_queue_size = max_queues->max_cq_depth; 1025 1026 if (ena_dev->tx_mem_queue_type == 1027 ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1028 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 1029 llq->max_llq_depth); 1030 } else { 1031 max_tx_queue_size = RTE_MIN(max_tx_queue_size, 1032 max_queues->max_sq_depth); 1033 } 1034 1035 ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 1036 max_queues->max_packet_rx_descs); 1037 ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 1038 max_queues->max_packet_tx_descs); 1039 } 1040 1041 /* Round down to the nearest power of 2 */ 1042 max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size); 1043 max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size); 1044 1045 if (use_large_llq_hdr) { 1046 if ((llq->entry_size_ctrl_supported & 1047 ENA_ADMIN_LIST_ENTRY_SIZE_256B) && 1048 (ena_dev->tx_mem_queue_type == 1049 ENA_ADMIN_PLACEMENT_POLICY_DEV)) { 1050 max_tx_queue_size /= 2; 1051 PMD_INIT_LOG(INFO, 1052 "Forcing large headers and decreasing maximum Tx queue size to %d\n", 1053 max_tx_queue_size); 1054 } else { 1055 PMD_INIT_LOG(ERR, 1056 "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); 1057 } 1058 } 1059 1060 if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) { 1061 PMD_INIT_LOG(ERR, "Invalid queue size\n"); 1062 return -EFAULT; 1063 } 1064 1065 ctx->max_tx_queue_size = max_tx_queue_size; 1066 ctx->max_rx_queue_size = max_rx_queue_size; 1067 1068 return 0; 1069 } 1070 1071 static void ena_stats_restart(struct rte_eth_dev *dev) 1072 { 1073 struct ena_adapter *adapter = dev->data->dev_private; 1074 1075 rte_atomic64_init(&adapter->drv_stats->ierrors); 1076 rte_atomic64_init(&adapter->drv_stats->oerrors); 1077 rte_atomic64_init(&adapter->drv_stats->rx_nombuf); 1078 adapter->drv_stats->rx_drops = 0; 1079 } 1080 1081 static int ena_stats_get(struct rte_eth_dev *dev, 1082 struct rte_eth_stats *stats) 1083 { 1084 struct ena_admin_basic_stats ena_stats; 1085 struct ena_adapter *adapter = dev->data->dev_private; 1086 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1087 int rc; 
1088 int i; 1089 int max_rings_stats; 1090 1091 memset(&ena_stats, 0, sizeof(ena_stats)); 1092 1093 rte_spinlock_lock(&adapter->admin_lock); 1094 rc = ENA_PROXY(adapter, ena_com_get_dev_basic_stats, ena_dev, 1095 &ena_stats); 1096 rte_spinlock_unlock(&adapter->admin_lock); 1097 if (unlikely(rc)) { 1098 PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n"); 1099 return rc; 1100 } 1101 1102 /* Set of basic statistics from ENA */ 1103 stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, 1104 ena_stats.rx_pkts_low); 1105 stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, 1106 ena_stats.tx_pkts_low); 1107 stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, 1108 ena_stats.rx_bytes_low); 1109 stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, 1110 ena_stats.tx_bytes_low); 1111 1112 /* Driver related stats */ 1113 stats->imissed = adapter->drv_stats->rx_drops; 1114 stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); 1115 stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); 1116 stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); 1117 1118 max_rings_stats = RTE_MIN(dev->data->nb_rx_queues, 1119 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1120 for (i = 0; i < max_rings_stats; ++i) { 1121 struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats; 1122 1123 stats->q_ibytes[i] = rx_stats->bytes; 1124 stats->q_ipackets[i] = rx_stats->cnt; 1125 stats->q_errors[i] = rx_stats->bad_desc_num + 1126 rx_stats->bad_req_id; 1127 } 1128 1129 max_rings_stats = RTE_MIN(dev->data->nb_tx_queues, 1130 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1131 for (i = 0; i < max_rings_stats; ++i) { 1132 struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats; 1133 1134 stats->q_obytes[i] = tx_stats->bytes; 1135 stats->q_opackets[i] = tx_stats->cnt; 1136 } 1137 1138 return 0; 1139 } 1140 1141 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1142 { 1143 struct ena_adapter *adapter; 1144 struct ena_com_dev *ena_dev; 1145 int rc = 0; 1146 1147 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 1148 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 1149 adapter = dev->data->dev_private; 1150 1151 ena_dev = &adapter->ena_dev; 1152 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 1153 1154 if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) { 1155 PMD_DRV_LOG(ERR, 1156 "Invalid MTU setting. 
New MTU: %d, max MTU: %d, min MTU: %d\n", 1157 mtu, adapter->max_mtu, ENA_MIN_MTU); 1158 return -EINVAL; 1159 } 1160 1161 rc = ENA_PROXY(adapter, ena_com_set_dev_mtu, ena_dev, mtu); 1162 if (rc) 1163 PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu); 1164 else 1165 PMD_DRV_LOG(NOTICE, "MTU set to: %d\n", mtu); 1166 1167 return rc; 1168 } 1169 1170 static int ena_start(struct rte_eth_dev *dev) 1171 { 1172 struct ena_adapter *adapter = dev->data->dev_private; 1173 uint64_t ticks; 1174 int rc = 0; 1175 1176 /* Cannot allocate memory in secondary process */ 1177 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1178 PMD_DRV_LOG(WARNING, "dev_start not supported in secondary.\n"); 1179 return -EPERM; 1180 } 1181 1182 rc = ena_check_valid_conf(adapter); 1183 if (rc) 1184 return rc; 1185 1186 rc = ena_setup_rx_intr(dev); 1187 if (rc) 1188 return rc; 1189 1190 rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX); 1191 if (rc) 1192 return rc; 1193 1194 rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX); 1195 if (rc) 1196 goto err_start_tx; 1197 1198 if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 1199 rc = ena_rss_configure(adapter); 1200 if (rc) 1201 goto err_rss_init; 1202 } 1203 1204 ena_stats_restart(dev); 1205 1206 adapter->timestamp_wd = rte_get_timer_cycles(); 1207 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 1208 1209 ticks = rte_get_timer_hz(); 1210 rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), 1211 ena_timer_wd_callback, dev); 1212 1213 ++adapter->dev_stats.dev_start; 1214 adapter->state = ENA_ADAPTER_STATE_RUNNING; 1215 1216 return 0; 1217 1218 err_rss_init: 1219 ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 1220 err_start_tx: 1221 ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 1222 return rc; 1223 } 1224 1225 static int ena_stop(struct rte_eth_dev *dev) 1226 { 1227 struct ena_adapter *adapter = dev->data->dev_private; 1228 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1229 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1230 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1231 int rc; 1232 1233 /* Cannot free memory in secondary process */ 1234 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1235 PMD_DRV_LOG(WARNING, "dev_stop not supported in secondary.\n"); 1236 return -EPERM; 1237 } 1238 1239 rte_timer_stop_sync(&adapter->timer_wd); 1240 ena_queue_stop_all(dev, ENA_RING_TYPE_TX); 1241 ena_queue_stop_all(dev, ENA_RING_TYPE_RX); 1242 1243 if (adapter->trigger_reset) { 1244 rc = ena_com_dev_reset(ena_dev, adapter->reset_reason); 1245 if (rc) 1246 PMD_DRV_LOG(ERR, "Device reset failed, rc: %d\n", rc); 1247 } 1248 1249 rte_intr_disable(intr_handle); 1250 1251 rte_intr_efd_disable(intr_handle); 1252 1253 /* Cleanup vector list */ 1254 rte_intr_vec_list_free(intr_handle); 1255 1256 rte_intr_enable(intr_handle); 1257 1258 ++adapter->dev_stats.dev_stop; 1259 adapter->state = ENA_ADAPTER_STATE_STOPPED; 1260 dev->data->dev_started = 0; 1261 1262 return 0; 1263 } 1264 1265 static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring) 1266 { 1267 struct ena_adapter *adapter = ring->adapter; 1268 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1269 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1270 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1271 struct ena_com_create_io_ctx ctx = 1272 /* policy set to _HOST just to satisfy icc compiler */ 1273 { ENA_ADMIN_PLACEMENT_POLICY_HOST, 1274 0, 0, 0, 0, 0 }; 1275 uint16_t ena_qid; 1276 unsigned int i; 1277 int rc; 1278 1279 
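	/* Default to "no MSI-X vector"; Rx queues are assigned a vector below
	 * only when per-queue interrupts are enabled (rte_intr_dp_is_en()).
	 */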
ctx.msix_vector = -1; 1280 if (ring->type == ENA_RING_TYPE_TX) { 1281 ena_qid = ENA_IO_TXQ_IDX(ring->id); 1282 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1283 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1284 for (i = 0; i < ring->ring_size; i++) 1285 ring->empty_tx_reqs[i] = i; 1286 } else { 1287 ena_qid = ENA_IO_RXQ_IDX(ring->id); 1288 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 1289 if (rte_intr_dp_is_en(intr_handle)) 1290 ctx.msix_vector = 1291 rte_intr_vec_list_index_get(intr_handle, 1292 ring->id); 1293 1294 for (i = 0; i < ring->ring_size; i++) 1295 ring->empty_rx_reqs[i] = i; 1296 } 1297 ctx.queue_size = ring->ring_size; 1298 ctx.qid = ena_qid; 1299 ctx.numa_node = ring->numa_socket_id; 1300 1301 rc = ena_com_create_io_queue(ena_dev, &ctx); 1302 if (rc) { 1303 PMD_DRV_LOG(ERR, 1304 "Failed to create IO queue[%d] (qid:%d), rc: %d\n", 1305 ring->id, ena_qid, rc); 1306 return rc; 1307 } 1308 1309 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1310 &ring->ena_com_io_sq, 1311 &ring->ena_com_io_cq); 1312 if (rc) { 1313 PMD_DRV_LOG(ERR, 1314 "Failed to get IO queue[%d] handlers, rc: %d\n", 1315 ring->id, rc); 1316 ena_com_destroy_io_queue(ena_dev, ena_qid); 1317 return rc; 1318 } 1319 1320 if (ring->type == ENA_RING_TYPE_TX) 1321 ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); 1322 1323 /* Start with Rx interrupts being masked. */ 1324 if (ring->type == ENA_RING_TYPE_RX && rte_intr_dp_is_en(intr_handle)) 1325 ena_rx_queue_intr_disable(dev, ring->id); 1326 1327 return 0; 1328 } 1329 1330 static void ena_queue_stop(struct ena_ring *ring) 1331 { 1332 struct ena_com_dev *ena_dev = &ring->adapter->ena_dev; 1333 1334 if (ring->type == ENA_RING_TYPE_RX) { 1335 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id)); 1336 ena_rx_queue_release_bufs(ring); 1337 } else { 1338 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id)); 1339 ena_tx_queue_release_bufs(ring); 1340 } 1341 } 1342 1343 static void ena_queue_stop_all(struct rte_eth_dev *dev, 1344 enum ena_ring_type ring_type) 1345 { 1346 struct ena_adapter *adapter = dev->data->dev_private; 1347 struct ena_ring *queues = NULL; 1348 uint16_t nb_queues, i; 1349 1350 if (ring_type == ENA_RING_TYPE_RX) { 1351 queues = adapter->rx_ring; 1352 nb_queues = dev->data->nb_rx_queues; 1353 } else { 1354 queues = adapter->tx_ring; 1355 nb_queues = dev->data->nb_tx_queues; 1356 } 1357 1358 for (i = 0; i < nb_queues; ++i) 1359 if (queues[i].configured) 1360 ena_queue_stop(&queues[i]); 1361 } 1362 1363 static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring) 1364 { 1365 int rc, bufs_num; 1366 1367 ena_assert_msg(ring->configured == 1, 1368 "Trying to start unconfigured queue\n"); 1369 1370 rc = ena_create_io_queue(dev, ring); 1371 if (rc) { 1372 PMD_INIT_LOG(ERR, "Failed to create IO queue\n"); 1373 return rc; 1374 } 1375 1376 ring->next_to_clean = 0; 1377 ring->next_to_use = 0; 1378 1379 if (ring->type == ENA_RING_TYPE_TX) { 1380 ring->tx_stats.available_desc = 1381 ena_com_free_q_entries(ring->ena_com_io_sq); 1382 return 0; 1383 } 1384 1385 bufs_num = ring->ring_size - 1; 1386 rc = ena_populate_rx_queue(ring, bufs_num); 1387 if (rc != bufs_num) { 1388 ena_com_destroy_io_queue(&ring->adapter->ena_dev, 1389 ENA_IO_RXQ_IDX(ring->id)); 1390 PMD_INIT_LOG(ERR, "Failed to populate Rx ring\n"); 1391 return ENA_COM_FAULT; 1392 } 1393 /* Flush per-core RX buffers pools cache as they can be used on other 1394 * cores as well. 
1395 */ 1396 rte_mempool_cache_flush(NULL, ring->mb_pool); 1397 1398 return 0; 1399 } 1400 1401 static int ena_tx_queue_setup(struct rte_eth_dev *dev, 1402 uint16_t queue_idx, 1403 uint16_t nb_desc, 1404 unsigned int socket_id, 1405 const struct rte_eth_txconf *tx_conf) 1406 { 1407 struct ena_ring *txq = NULL; 1408 struct ena_adapter *adapter = dev->data->dev_private; 1409 unsigned int i; 1410 uint16_t dyn_thresh; 1411 1412 txq = &adapter->tx_ring[queue_idx]; 1413 1414 if (txq->configured) { 1415 PMD_DRV_LOG(CRIT, 1416 "API violation. Queue[%d] is already configured\n", 1417 queue_idx); 1418 return ENA_COM_FAULT; 1419 } 1420 1421 if (!rte_is_power_of_2(nb_desc)) { 1422 PMD_DRV_LOG(ERR, 1423 "Unsupported size of Tx queue: %d is not a power of 2.\n", 1424 nb_desc); 1425 return -EINVAL; 1426 } 1427 1428 if (nb_desc > adapter->max_tx_ring_size) { 1429 PMD_DRV_LOG(ERR, 1430 "Unsupported size of Tx queue (max size: %d)\n", 1431 adapter->max_tx_ring_size); 1432 return -EINVAL; 1433 } 1434 1435 txq->port_id = dev->data->port_id; 1436 txq->next_to_clean = 0; 1437 txq->next_to_use = 0; 1438 txq->ring_size = nb_desc; 1439 txq->size_mask = nb_desc - 1; 1440 txq->numa_socket_id = socket_id; 1441 txq->pkts_without_db = false; 1442 txq->last_cleanup_ticks = 0; 1443 1444 txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info", 1445 sizeof(struct ena_tx_buffer) * txq->ring_size, 1446 RTE_CACHE_LINE_SIZE, 1447 socket_id); 1448 if (!txq->tx_buffer_info) { 1449 PMD_DRV_LOG(ERR, 1450 "Failed to allocate memory for Tx buffer info\n"); 1451 return -ENOMEM; 1452 } 1453 1454 txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs", 1455 sizeof(uint16_t) * txq->ring_size, 1456 RTE_CACHE_LINE_SIZE, 1457 socket_id); 1458 if (!txq->empty_tx_reqs) { 1459 PMD_DRV_LOG(ERR, 1460 "Failed to allocate memory for empty Tx requests\n"); 1461 rte_free(txq->tx_buffer_info); 1462 return -ENOMEM; 1463 } 1464 1465 txq->push_buf_intermediate_buf = 1466 rte_zmalloc_socket("txq->push_buf_intermediate_buf", 1467 txq->tx_max_header_size, 1468 RTE_CACHE_LINE_SIZE, 1469 socket_id); 1470 if (!txq->push_buf_intermediate_buf) { 1471 PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n"); 1472 rte_free(txq->tx_buffer_info); 1473 rte_free(txq->empty_tx_reqs); 1474 return -ENOMEM; 1475 } 1476 1477 for (i = 0; i < txq->ring_size; i++) 1478 txq->empty_tx_reqs[i] = i; 1479 1480 txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 1481 1482 /* Check if caller provided the Tx cleanup threshold value. 
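	 * If it did not, fall back to a dynamic threshold: the larger of
	 * ring_size - ring_size / ENA_REFILL_THRESH_DIVIDER and
	 * ring_size - ENA_REFILL_THRESH_PACKET.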
*/ 1483 if (tx_conf->tx_free_thresh != 0) { 1484 txq->tx_free_thresh = tx_conf->tx_free_thresh; 1485 } else { 1486 dyn_thresh = txq->ring_size - 1487 txq->ring_size / ENA_REFILL_THRESH_DIVIDER; 1488 txq->tx_free_thresh = RTE_MAX(dyn_thresh, 1489 txq->ring_size - ENA_REFILL_THRESH_PACKET); 1490 } 1491 1492 txq->missing_tx_completion_threshold = 1493 RTE_MIN(txq->ring_size / 2, ENA_DEFAULT_MISSING_COMP); 1494 1495 /* Store pointer to this queue in upper layer */ 1496 txq->configured = 1; 1497 dev->data->tx_queues[queue_idx] = txq; 1498 1499 return 0; 1500 } 1501 1502 static int ena_rx_queue_setup(struct rte_eth_dev *dev, 1503 uint16_t queue_idx, 1504 uint16_t nb_desc, 1505 unsigned int socket_id, 1506 const struct rte_eth_rxconf *rx_conf, 1507 struct rte_mempool *mp) 1508 { 1509 struct ena_adapter *adapter = dev->data->dev_private; 1510 struct ena_ring *rxq = NULL; 1511 size_t buffer_size; 1512 int i; 1513 uint16_t dyn_thresh; 1514 1515 rxq = &adapter->rx_ring[queue_idx]; 1516 if (rxq->configured) { 1517 PMD_DRV_LOG(CRIT, 1518 "API violation. Queue[%d] is already configured\n", 1519 queue_idx); 1520 return ENA_COM_FAULT; 1521 } 1522 1523 if (!rte_is_power_of_2(nb_desc)) { 1524 PMD_DRV_LOG(ERR, 1525 "Unsupported size of Rx queue: %d is not a power of 2.\n", 1526 nb_desc); 1527 return -EINVAL; 1528 } 1529 1530 if (nb_desc > adapter->max_rx_ring_size) { 1531 PMD_DRV_LOG(ERR, 1532 "Unsupported size of Rx queue (max size: %d)\n", 1533 adapter->max_rx_ring_size); 1534 return -EINVAL; 1535 } 1536 1537 /* ENA isn't supporting buffers smaller than 1400 bytes */ 1538 buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; 1539 if (buffer_size < ENA_RX_BUF_MIN_SIZE) { 1540 PMD_DRV_LOG(ERR, 1541 "Unsupported size of Rx buffer: %zu (min size: %d)\n", 1542 buffer_size, ENA_RX_BUF_MIN_SIZE); 1543 return -EINVAL; 1544 } 1545 1546 rxq->port_id = dev->data->port_id; 1547 rxq->next_to_clean = 0; 1548 rxq->next_to_use = 0; 1549 rxq->ring_size = nb_desc; 1550 rxq->size_mask = nb_desc - 1; 1551 rxq->numa_socket_id = socket_id; 1552 rxq->mb_pool = mp; 1553 1554 rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info", 1555 sizeof(struct ena_rx_buffer) * nb_desc, 1556 RTE_CACHE_LINE_SIZE, 1557 socket_id); 1558 if (!rxq->rx_buffer_info) { 1559 PMD_DRV_LOG(ERR, 1560 "Failed to allocate memory for Rx buffer info\n"); 1561 return -ENOMEM; 1562 } 1563 1564 rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer", 1565 sizeof(struct rte_mbuf *) * nb_desc, 1566 RTE_CACHE_LINE_SIZE, 1567 socket_id); 1568 if (!rxq->rx_refill_buffer) { 1569 PMD_DRV_LOG(ERR, 1570 "Failed to allocate memory for Rx refill buffer\n"); 1571 rte_free(rxq->rx_buffer_info); 1572 rxq->rx_buffer_info = NULL; 1573 return -ENOMEM; 1574 } 1575 1576 rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs", 1577 sizeof(uint16_t) * nb_desc, 1578 RTE_CACHE_LINE_SIZE, 1579 socket_id); 1580 if (!rxq->empty_rx_reqs) { 1581 PMD_DRV_LOG(ERR, 1582 "Failed to allocate memory for empty Rx requests\n"); 1583 rte_free(rxq->rx_buffer_info); 1584 rxq->rx_buffer_info = NULL; 1585 rte_free(rxq->rx_refill_buffer); 1586 rxq->rx_refill_buffer = NULL; 1587 return -ENOMEM; 1588 } 1589 1590 for (i = 0; i < nb_desc; i++) 1591 rxq->empty_rx_reqs[i] = i; 1592 1593 rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; 1594 1595 if (rx_conf->rx_free_thresh != 0) { 1596 rxq->rx_free_thresh = rx_conf->rx_free_thresh; 1597 } else { 1598 dyn_thresh = rxq->ring_size / ENA_REFILL_THRESH_DIVIDER; 1599 rxq->rx_free_thresh = 
RTE_MIN(dyn_thresh, 1600 (uint16_t)(ENA_REFILL_THRESH_PACKET)); 1601 } 1602 1603 /* Store pointer to this queue in upper layer */ 1604 rxq->configured = 1; 1605 dev->data->rx_queues[queue_idx] = rxq; 1606 1607 return 0; 1608 } 1609 1610 static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq, 1611 struct rte_mbuf *mbuf, uint16_t id) 1612 { 1613 struct ena_com_buf ebuf; 1614 int rc; 1615 1616 /* prepare physical address for DMA transaction */ 1617 ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; 1618 ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; 1619 1620 /* pass resource to device */ 1621 rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id); 1622 if (unlikely(rc != 0)) 1623 PMD_RX_LOG(WARNING, "Failed adding Rx desc\n"); 1624 1625 return rc; 1626 } 1627 1628 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) 1629 { 1630 unsigned int i; 1631 int rc; 1632 uint16_t next_to_use = rxq->next_to_use; 1633 uint16_t req_id; 1634 #ifdef RTE_ETHDEV_DEBUG_RX 1635 uint16_t in_use; 1636 #endif 1637 struct rte_mbuf **mbufs = rxq->rx_refill_buffer; 1638 1639 if (unlikely(!count)) 1640 return 0; 1641 1642 #ifdef RTE_ETHDEV_DEBUG_RX 1643 in_use = rxq->ring_size - 1 - 1644 ena_com_free_q_entries(rxq->ena_com_io_sq); 1645 if (unlikely((in_use + count) >= rxq->ring_size)) 1646 PMD_RX_LOG(ERR, "Bad Rx ring state\n"); 1647 #endif 1648 1649 /* get resources for incoming packets */ 1650 rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, count); 1651 if (unlikely(rc < 0)) { 1652 rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); 1653 ++rxq->rx_stats.mbuf_alloc_fail; 1654 PMD_RX_LOG(DEBUG, "There are not enough free buffers\n"); 1655 return 0; 1656 } 1657 1658 for (i = 0; i < count; i++) { 1659 struct rte_mbuf *mbuf = mbufs[i]; 1660 struct ena_rx_buffer *rx_info; 1661 1662 if (likely((i + 4) < count)) 1663 rte_prefetch0(mbufs[i + 4]); 1664 1665 req_id = rxq->empty_rx_reqs[next_to_use]; 1666 rx_info = &rxq->rx_buffer_info[req_id]; 1667 1668 rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id); 1669 if (unlikely(rc != 0)) 1670 break; 1671 1672 rx_info->mbuf = mbuf; 1673 next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask); 1674 } 1675 1676 if (unlikely(i < count)) { 1677 PMD_RX_LOG(WARNING, 1678 "Refilled Rx queue[%d] with only %d/%d buffers\n", 1679 rxq->id, i, count); 1680 rte_pktmbuf_free_bulk(&mbufs[i], count - i); 1681 ++rxq->rx_stats.refill_partial; 1682 } 1683 1684 /* When we submitted free resources to device... */ 1685 if (likely(i > 0)) { 1686 /* ...let HW know that it can fill buffers with data. */ 1687 ena_com_write_sq_doorbell(rxq->ena_com_io_sq); 1688 1689 rxq->next_to_use = next_to_use; 1690 } 1691 1692 return i; 1693 } 1694 1695 static int ena_device_init(struct ena_adapter *adapter, 1696 struct rte_pci_device *pdev, 1697 struct ena_com_dev_get_features_ctx *get_feat_ctx) 1698 { 1699 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1700 uint32_t aenq_groups; 1701 int rc; 1702 bool readless_supported; 1703 1704 /* Initialize mmio registers */ 1705 rc = ena_com_mmio_reg_read_request_init(ena_dev); 1706 if (rc) { 1707 PMD_DRV_LOG(ERR, "Failed to init MMIO read less\n"); 1708 return rc; 1709 } 1710 1711 /* The PCIe configuration space revision id indicate if mmio reg 1712 * read is disabled. 
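	 * If the ENA_MMIO_DISABLE_REG_READ bit is set there, readless register
	 * access is not supported and it is turned off below via
	 * ena_com_set_mmio_read_mode().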
1713 */ 1714 readless_supported = !(pdev->id.class_id & ENA_MMIO_DISABLE_REG_READ); 1715 ena_com_set_mmio_read_mode(ena_dev, readless_supported); 1716 1717 /* reset device */ 1718 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); 1719 if (rc) { 1720 PMD_DRV_LOG(ERR, "Cannot reset device\n"); 1721 goto err_mmio_read_less; 1722 } 1723 1724 /* check FW version */ 1725 rc = ena_com_validate_version(ena_dev); 1726 if (rc) { 1727 PMD_DRV_LOG(ERR, "Device version is too low\n"); 1728 goto err_mmio_read_less; 1729 } 1730 1731 ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); 1732 1733 /* ENA device administration layer init */ 1734 rc = ena_com_admin_init(ena_dev, &aenq_handlers); 1735 if (rc) { 1736 PMD_DRV_LOG(ERR, 1737 "Cannot initialize ENA admin queue\n"); 1738 goto err_mmio_read_less; 1739 } 1740 1741 /* To enable the msix interrupts the driver needs to know the number 1742 * of queues. So the driver uses polling mode to retrieve this 1743 * information. 1744 */ 1745 ena_com_set_admin_polling_mode(ena_dev, true); 1746 1747 ena_config_host_info(ena_dev); 1748 1749 /* Get Device Attributes and features */ 1750 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 1751 if (rc) { 1752 PMD_DRV_LOG(ERR, 1753 "Cannot get attribute for ENA device, rc: %d\n", rc); 1754 goto err_admin_init; 1755 } 1756 1757 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1758 BIT(ENA_ADMIN_NOTIFICATION) | 1759 BIT(ENA_ADMIN_KEEP_ALIVE) | 1760 BIT(ENA_ADMIN_FATAL_ERROR) | 1761 BIT(ENA_ADMIN_WARNING); 1762 1763 aenq_groups &= get_feat_ctx->aenq.supported_groups; 1764 1765 adapter->all_aenq_groups = aenq_groups; 1766 1767 return 0; 1768 1769 err_admin_init: 1770 ena_com_admin_destroy(ena_dev); 1771 1772 err_mmio_read_less: 1773 ena_com_mmio_reg_read_request_destroy(ena_dev); 1774 1775 return rc; 1776 } 1777 1778 static void ena_interrupt_handler_rte(void *cb_arg) 1779 { 1780 struct rte_eth_dev *dev = cb_arg; 1781 struct ena_adapter *adapter = dev->data->dev_private; 1782 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1783 1784 ena_com_admin_q_comp_intr_handler(ena_dev); 1785 if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) 1786 ena_com_aenq_intr_handler(ena_dev, dev); 1787 } 1788 1789 static void check_for_missing_keep_alive(struct ena_adapter *adapter) 1790 { 1791 if (!(adapter->active_aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE))) 1792 return; 1793 1794 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 1795 return; 1796 1797 if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 1798 adapter->keep_alive_timeout)) { 1799 PMD_DRV_LOG(ERR, "Keep alive timeout\n"); 1800 ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO); 1801 ++adapter->dev_stats.wd_expired; 1802 } 1803 } 1804 1805 /* Check if admin queue is enabled */ 1806 static void check_for_admin_com_state(struct ena_adapter *adapter) 1807 { 1808 if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 1809 PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n"); 1810 ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO); 1811 } 1812 } 1813 1814 static int check_for_tx_completion_in_queue(struct ena_adapter *adapter, 1815 struct ena_ring *tx_ring) 1816 { 1817 struct ena_tx_buffer *tx_buf; 1818 uint64_t timestamp; 1819 uint64_t completion_delay; 1820 uint32_t missed_tx = 0; 1821 unsigned int i; 1822 int rc = 0; 1823 1824 for (i = 0; i < tx_ring->ring_size; ++i) { 1825 tx_buf = &tx_ring->tx_buffer_info[i]; 1826 timestamp = tx_buf->timestamp; 1827 1828 if (timestamp == 0) 1829 continue; 1830 1831 completion_delay = 
rte_get_timer_cycles() - timestamp; 1832 if (completion_delay > adapter->missing_tx_completion_to) { 1833 if (unlikely(!tx_buf->print_once)) { 1834 PMD_TX_LOG(WARNING, 1835 "Found a Tx that wasn't completed on time, qid %d, index %d. " 1836 "Missing Tx outstanding for %" PRIu64 " msecs.\n", 1837 tx_ring->id, i, completion_delay / 1838 rte_get_timer_hz() * 1000); 1839 tx_buf->print_once = true; 1840 } 1841 ++missed_tx; 1842 } 1843 } 1844 1845 if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) { 1846 PMD_DRV_LOG(ERR, 1847 "The number of lost Tx completions is above the threshold (%d > %d). " 1848 "Trigger the device reset.\n", 1849 missed_tx, 1850 tx_ring->missing_tx_completion_threshold); 1851 adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; 1852 adapter->trigger_reset = true; 1853 rc = -EIO; 1854 } 1855 1856 tx_ring->tx_stats.missed_tx += missed_tx; 1857 1858 return rc; 1859 } 1860 1861 static void check_for_tx_completions(struct ena_adapter *adapter) 1862 { 1863 struct ena_ring *tx_ring; 1864 uint64_t tx_cleanup_delay; 1865 size_t qid; 1866 int budget; 1867 uint16_t nb_tx_queues = adapter->edev_data->nb_tx_queues; 1868 1869 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) 1870 return; 1871 1872 nb_tx_queues = adapter->edev_data->nb_tx_queues; 1873 budget = adapter->missing_tx_completion_budget; 1874 1875 qid = adapter->last_tx_comp_qid; 1876 while (budget-- > 0) { 1877 tx_ring = &adapter->tx_ring[qid]; 1878 1879 /* Tx cleanup is called only by the burst function and can be 1880 * called dynamically by the application. Also cleanup is 1881 * limited by the threshold. To avoid false detection of the 1882 * missing HW Tx completion, get the delay since last cleanup 1883 * function was called. 1884 */ 1885 tx_cleanup_delay = rte_get_timer_cycles() - 1886 tx_ring->last_cleanup_ticks; 1887 if (tx_cleanup_delay < adapter->tx_cleanup_stall_delay) 1888 check_for_tx_completion_in_queue(adapter, tx_ring); 1889 qid = (qid + 1) % nb_tx_queues; 1890 } 1891 1892 adapter->last_tx_comp_qid = qid; 1893 } 1894 1895 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1896 void *arg) 1897 { 1898 struct rte_eth_dev *dev = arg; 1899 struct ena_adapter *adapter = dev->data->dev_private; 1900 1901 if (unlikely(adapter->trigger_reset)) 1902 return; 1903 1904 check_for_missing_keep_alive(adapter); 1905 check_for_admin_com_state(adapter); 1906 check_for_tx_completions(adapter); 1907 1908 if (unlikely(adapter->trigger_reset)) { 1909 PMD_DRV_LOG(ERR, "Trigger reset is on\n"); 1910 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 1911 NULL); 1912 } 1913 } 1914 1915 static inline void 1916 set_default_llq_configurations(struct ena_llq_configurations *llq_config, 1917 struct ena_admin_feature_llq_desc *llq, 1918 bool use_large_llq_hdr) 1919 { 1920 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; 1921 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 1922 llq_config->llq_num_decs_before_header = 1923 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 1924 1925 if (use_large_llq_hdr && 1926 (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) { 1927 llq_config->llq_ring_entry_size = 1928 ENA_ADMIN_LIST_ENTRY_SIZE_256B; 1929 llq_config->llq_ring_entry_size_value = 256; 1930 } else { 1931 llq_config->llq_ring_entry_size = 1932 ENA_ADMIN_LIST_ENTRY_SIZE_128B; 1933 llq_config->llq_ring_entry_size_value = 128; 1934 } 1935 } 1936 1937 static int 1938 ena_set_queues_placement_policy(struct ena_adapter *adapter, 1939 struct 
ena_com_dev *ena_dev,
				struct ena_admin_feature_llq_desc *llq,
				struct ena_llq_configurations *llq_default_configurations)
{
	int rc;
	u32 llq_feature_mask;

	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
	if (!(ena_dev->supported_features & llq_feature_mask)) {
		PMD_DRV_LOG(INFO,
			"LLQ is not supported. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
	if (unlikely(rc)) {
		PMD_INIT_LOG(WARNING,
			"Failed to config dev mode. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	/* Nothing to config, exit */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return 0;

	if (!adapter->dev_mem_base) {
		PMD_DRV_LOG(ERR,
			"Unable to access LLQ BAR resource. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	ena_dev->mem_bar = adapter->dev_mem_base;

	return 0;
}

static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
	struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;

	/* Regular queues capabilities */
	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&get_feat_ctx->max_queue_ext.max_queue_ext;
		io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,
				    max_queue_ext->max_rx_cq_num);
		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&get_feat_ctx->max_queues;
		io_tx_sq_num = max_queues->max_sq_num;
		io_tx_cq_num = max_queues->max_cq_num;
		io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);
	}

	/* In case of LLQ use the llq number in the get feature cmd */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;

	max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);

	if (unlikely(max_num_io_queues == 0)) {
		PMD_DRV_LOG(ERR, "Number of IO queues cannot be 0\n");
		return -EFAULT;
	}

	return max_num_io_queues;
}

static void
ena_set_offloads(struct ena_offloads *offloads,
		 struct ena_admin_feature_offload_desc *offload_desc)
{
	if (offload_desc->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
		offloads->tx_offloads |= ENA_IPV4_TSO;

	/* Tx IPv4 checksum offloads */
	if (offload_desc->tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)
		offloads->tx_offloads |= ENA_L3_IPV4_CSUM;
	if (offload_desc->tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK)
		offloads->tx_offloads |= ENA_L4_IPV4_CSUM;
	if (offload_desc->tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
		offloads->tx_offloads |= ENA_L4_IPV4_CSUM_PARTIAL;

	/* Tx IPv6 checksum offloads */
	if (offload_desc->tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK)
		offloads->tx_offloads |= ENA_L4_IPV6_CSUM;
	if (offload_desc->tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
		offloads->tx_offloads |= ENA_L4_IPV6_CSUM_PARTIAL;

	/* Rx IPv4 checksum offloads */
	if (offload_desc->rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)
		offloads->rx_offloads |= ENA_L3_IPV4_CSUM;
	if (offload_desc->rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
		offloads->rx_offloads |= ENA_L4_IPV4_CSUM;

	/* Rx IPv6 checksum offloads */
	if (offload_desc->rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
		offloads->rx_offloads |= ENA_L4_IPV6_CSUM;

	if (offload_desc->rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
		offloads->rx_offloads |= ENA_RX_RSS_HASH;
}

static int ena_init_once(void)
{
	static bool init_done;

	if (init_done)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* Init timer subsystem for the ENA timer service. */
		rte_timer_subsystem_init();
		/* Register handler for requests from secondary processes. */
		rte_mp_action_register(ENA_MP_NAME, ena_mp_primary_handle);
	}

	init_done = true;
	return 0;
}

static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
{
	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_llq_configurations llq_config;
	const char *queue_type_str;
	uint32_t max_num_io_queues;
	int rc;
	static int adapters_found;
	bool disable_meta_caching;

	eth_dev->dev_ops = &ena_dev_ops;
	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;

	rc = ena_init_once();
	if (rc != 0)
		return rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	memset(adapter, 0, sizeof(struct ena_adapter));
	ena_dev = &adapter->ena_dev;

	adapter->edev_data = eth_dev->data;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
		     pci_dev->addr.domain,
		     pci_dev->addr.bus,
		     pci_dev->addr.devid,
		     pci_dev->addr.function);

	intr_handle = pci_dev->intr_handle;

	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;

	if (!adapter->regs) {
		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
			     ENA_REGS_BAR);
		return -ENXIO;
	}

	ena_dev->reg_bar = adapter->regs;
	/* Pass device data as a pointer which can be passed to the IO functions
	 * by the ena_com (for example - the memory allocation).
2133 */ 2134 ena_dev->dmadev = eth_dev->data; 2135 2136 adapter->id_number = adapters_found; 2137 2138 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", 2139 adapter->id_number); 2140 2141 adapter->missing_tx_completion_to = ENA_TX_TIMEOUT; 2142 2143 rc = ena_parse_devargs(adapter, pci_dev->device.devargs); 2144 if (rc != 0) { 2145 PMD_INIT_LOG(CRIT, "Failed to parse devargs\n"); 2146 goto err; 2147 } 2148 2149 /* device specific initialization routine */ 2150 rc = ena_device_init(adapter, pci_dev, &get_feat_ctx); 2151 if (rc) { 2152 PMD_INIT_LOG(CRIT, "Failed to init ENA device\n"); 2153 goto err; 2154 } 2155 2156 /* Check if device supports LSC */ 2157 if (!(adapter->all_aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) 2158 adapter->edev_data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; 2159 2160 set_default_llq_configurations(&llq_config, &get_feat_ctx.llq, 2161 adapter->use_large_llq_hdr); 2162 rc = ena_set_queues_placement_policy(adapter, ena_dev, 2163 &get_feat_ctx.llq, &llq_config); 2164 if (unlikely(rc)) { 2165 PMD_INIT_LOG(CRIT, "Failed to set placement policy\n"); 2166 return rc; 2167 } 2168 2169 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 2170 queue_type_str = "Regular"; 2171 else 2172 queue_type_str = "Low latency"; 2173 PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str); 2174 2175 calc_queue_ctx.ena_dev = ena_dev; 2176 calc_queue_ctx.get_feat_ctx = &get_feat_ctx; 2177 2178 max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx); 2179 rc = ena_calc_io_queue_size(&calc_queue_ctx, 2180 adapter->use_large_llq_hdr); 2181 if (unlikely((rc != 0) || (max_num_io_queues == 0))) { 2182 rc = -EFAULT; 2183 goto err_device_destroy; 2184 } 2185 2186 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size; 2187 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size; 2188 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; 2189 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; 2190 adapter->max_num_io_queues = max_num_io_queues; 2191 2192 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2193 disable_meta_caching = 2194 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags & 2195 BIT(ENA_ADMIN_DISABLE_META_CACHING)); 2196 } else { 2197 disable_meta_caching = false; 2198 } 2199 2200 /* prepare ring structures */ 2201 ena_init_rings(adapter, disable_meta_caching); 2202 2203 ena_config_debug_area(adapter); 2204 2205 /* Set max MTU for this device */ 2206 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 2207 2208 ena_set_offloads(&adapter->offloads, &get_feat_ctx.offload); 2209 2210 /* Copy MAC address and point DPDK to it */ 2211 eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr; 2212 rte_ether_addr_copy((struct rte_ether_addr *) 2213 get_feat_ctx.dev_attr.mac_addr, 2214 (struct rte_ether_addr *)adapter->mac_addr); 2215 2216 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 2217 if (unlikely(rc != 0)) { 2218 PMD_DRV_LOG(ERR, "Failed to initialize RSS in ENA device\n"); 2219 goto err_delete_debug_area; 2220 } 2221 2222 adapter->drv_stats = rte_zmalloc("adapter stats", 2223 sizeof(*adapter->drv_stats), 2224 RTE_CACHE_LINE_SIZE); 2225 if (!adapter->drv_stats) { 2226 PMD_DRV_LOG(ERR, 2227 "Failed to allocate memory for adapter statistics\n"); 2228 rc = -ENOMEM; 2229 goto err_rss_destroy; 2230 } 2231 2232 rte_spinlock_init(&adapter->admin_lock); 2233 2234 rte_intr_callback_register(intr_handle, 2235 ena_interrupt_handler_rte, 2236 eth_dev); 2237 rte_intr_enable(intr_handle); 2238 
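	/* The admin completion interrupt handler is registered and interrupts
	 * are enabled at this point, so the admin queue can leave polling mode
	 * and the device may start delivering asynchronous (AENQ) events.
	 */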
ena_com_set_admin_polling_mode(ena_dev, false); 2239 ena_com_admin_aenq_enable(ena_dev); 2240 2241 rte_timer_init(&adapter->timer_wd); 2242 2243 adapters_found++; 2244 adapter->state = ENA_ADAPTER_STATE_INIT; 2245 2246 return 0; 2247 2248 err_rss_destroy: 2249 ena_com_rss_destroy(ena_dev); 2250 err_delete_debug_area: 2251 ena_com_delete_debug_area(ena_dev); 2252 2253 err_device_destroy: 2254 ena_com_delete_host_info(ena_dev); 2255 ena_com_admin_destroy(ena_dev); 2256 2257 err: 2258 return rc; 2259 } 2260 2261 static void ena_destroy_device(struct rte_eth_dev *eth_dev) 2262 { 2263 struct ena_adapter *adapter = eth_dev->data->dev_private; 2264 struct ena_com_dev *ena_dev = &adapter->ena_dev; 2265 2266 if (adapter->state == ENA_ADAPTER_STATE_FREE) 2267 return; 2268 2269 ena_com_set_admin_running_state(ena_dev, false); 2270 2271 if (adapter->state != ENA_ADAPTER_STATE_CLOSED) 2272 ena_close(eth_dev); 2273 2274 ena_com_rss_destroy(ena_dev); 2275 2276 ena_com_delete_debug_area(ena_dev); 2277 ena_com_delete_host_info(ena_dev); 2278 2279 ena_com_abort_admin_commands(ena_dev); 2280 ena_com_wait_for_abort_completion(ena_dev); 2281 ena_com_admin_destroy(ena_dev); 2282 ena_com_mmio_reg_read_request_destroy(ena_dev); 2283 2284 adapter->state = ENA_ADAPTER_STATE_FREE; 2285 } 2286 2287 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) 2288 { 2289 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2290 return 0; 2291 2292 ena_destroy_device(eth_dev); 2293 2294 return 0; 2295 } 2296 2297 static int ena_dev_configure(struct rte_eth_dev *dev) 2298 { 2299 struct ena_adapter *adapter = dev->data->dev_private; 2300 int rc; 2301 2302 adapter->state = ENA_ADAPTER_STATE_CONFIG; 2303 2304 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 2305 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2306 dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; 2307 2308 /* Scattered Rx cannot be turned off in the HW, so this capability must 2309 * be forced. 2310 */ 2311 dev->data->scattered_rx = 1; 2312 2313 adapter->last_tx_comp_qid = 0; 2314 2315 adapter->missing_tx_completion_budget = 2316 RTE_MIN(ENA_MONITORED_TX_QUEUES, dev->data->nb_tx_queues); 2317 2318 /* To avoid detection of the spurious Tx completion timeout due to 2319 * application not calling the Tx cleanup function, set timeout for the 2320 * Tx queue which should be half of the missing completion timeout for a 2321 * safety. If there will be a lot of missing Tx completions in the 2322 * queue, they will be detected sooner or later. 
2323 */ 2324 adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2; 2325 2326 rc = ena_configure_aenq(adapter); 2327 2328 return rc; 2329 } 2330 2331 static void ena_init_rings(struct ena_adapter *adapter, 2332 bool disable_meta_caching) 2333 { 2334 size_t i; 2335 2336 for (i = 0; i < adapter->max_num_io_queues; i++) { 2337 struct ena_ring *ring = &adapter->tx_ring[i]; 2338 2339 ring->configured = 0; 2340 ring->type = ENA_RING_TYPE_TX; 2341 ring->adapter = adapter; 2342 ring->id = i; 2343 ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 2344 ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 2345 ring->sgl_size = adapter->max_tx_sgl_size; 2346 ring->disable_meta_caching = disable_meta_caching; 2347 } 2348 2349 for (i = 0; i < adapter->max_num_io_queues; i++) { 2350 struct ena_ring *ring = &adapter->rx_ring[i]; 2351 2352 ring->configured = 0; 2353 ring->type = ENA_RING_TYPE_RX; 2354 ring->adapter = adapter; 2355 ring->id = i; 2356 ring->sgl_size = adapter->max_rx_sgl_size; 2357 } 2358 } 2359 2360 static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter) 2361 { 2362 uint64_t port_offloads = 0; 2363 2364 if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM) 2365 port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM; 2366 2367 if (adapter->offloads.rx_offloads & 2368 (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM)) 2369 port_offloads |= 2370 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM; 2371 2372 if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH) 2373 port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 2374 2375 port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER; 2376 2377 return port_offloads; 2378 } 2379 2380 static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter) 2381 { 2382 uint64_t port_offloads = 0; 2383 2384 if (adapter->offloads.tx_offloads & ENA_IPV4_TSO) 2385 port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO; 2386 2387 if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM) 2388 port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; 2389 if (adapter->offloads.tx_offloads & 2390 (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM | 2391 ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL)) 2392 port_offloads |= 2393 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM; 2394 2395 port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS; 2396 2397 return port_offloads; 2398 } 2399 2400 static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter) 2401 { 2402 RTE_SET_USED(adapter); 2403 2404 return 0; 2405 } 2406 2407 static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter) 2408 { 2409 RTE_SET_USED(adapter); 2410 2411 return 0; 2412 } 2413 2414 static int ena_infos_get(struct rte_eth_dev *dev, 2415 struct rte_eth_dev_info *dev_info) 2416 { 2417 struct ena_adapter *adapter; 2418 struct ena_com_dev *ena_dev; 2419 2420 ena_assert_msg(dev->data != NULL, "Uninitialized device\n"); 2421 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n"); 2422 adapter = dev->data->dev_private; 2423 2424 ena_dev = &adapter->ena_dev; 2425 ena_assert_msg(ena_dev != NULL, "Uninitialized device\n"); 2426 2427 dev_info->speed_capa = 2428 RTE_ETH_LINK_SPEED_1G | 2429 RTE_ETH_LINK_SPEED_2_5G | 2430 RTE_ETH_LINK_SPEED_5G | 2431 RTE_ETH_LINK_SPEED_10G | 2432 RTE_ETH_LINK_SPEED_25G | 2433 RTE_ETH_LINK_SPEED_40G | 2434 RTE_ETH_LINK_SPEED_50G | 2435 RTE_ETH_LINK_SPEED_100G; 2436 2437 /* Inform framework about available features */ 2438 dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter); 2439 dev_info->tx_offload_capa = 
ena_get_tx_port_offloads(adapter); 2440 dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter); 2441 dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter); 2442 2443 dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF; 2444 dev_info->hash_key_size = ENA_HASH_KEY_SIZE; 2445 2446 dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 2447 dev_info->max_rx_pktlen = adapter->max_mtu + RTE_ETHER_HDR_LEN + 2448 RTE_ETHER_CRC_LEN; 2449 dev_info->min_mtu = ENA_MIN_MTU; 2450 dev_info->max_mtu = adapter->max_mtu; 2451 dev_info->max_mac_addrs = 1; 2452 2453 dev_info->max_rx_queues = adapter->max_num_io_queues; 2454 dev_info->max_tx_queues = adapter->max_num_io_queues; 2455 dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 2456 2457 dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size; 2458 dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2459 dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2460 adapter->max_rx_sgl_size); 2461 dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2462 adapter->max_rx_sgl_size); 2463 2464 dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size; 2465 dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 2466 dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2467 adapter->max_tx_sgl_size); 2468 dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 2469 adapter->max_tx_sgl_size); 2470 2471 dev_info->default_rxportconf.ring_size = ENA_DEFAULT_RING_SIZE; 2472 dev_info->default_txportconf.ring_size = ENA_DEFAULT_RING_SIZE; 2473 2474 return 0; 2475 } 2476 2477 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len) 2478 { 2479 mbuf->data_len = len; 2480 mbuf->data_off = RTE_PKTMBUF_HEADROOM; 2481 mbuf->refcnt = 1; 2482 mbuf->next = NULL; 2483 } 2484 2485 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, 2486 struct ena_com_rx_buf_info *ena_bufs, 2487 uint32_t descs, 2488 uint16_t *next_to_clean, 2489 uint8_t offset) 2490 { 2491 struct rte_mbuf *mbuf; 2492 struct rte_mbuf *mbuf_head; 2493 struct ena_rx_buffer *rx_info; 2494 int rc; 2495 uint16_t ntc, len, req_id, buf = 0; 2496 2497 if (unlikely(descs == 0)) 2498 return NULL; 2499 2500 ntc = *next_to_clean; 2501 2502 len = ena_bufs[buf].len; 2503 req_id = ena_bufs[buf].req_id; 2504 2505 rx_info = &rx_ring->rx_buffer_info[req_id]; 2506 2507 mbuf = rx_info->mbuf; 2508 RTE_ASSERT(mbuf != NULL); 2509 2510 ena_init_rx_mbuf(mbuf, len); 2511 2512 /* Fill the mbuf head with the data specific for 1st segment. */ 2513 mbuf_head = mbuf; 2514 mbuf_head->nb_segs = descs; 2515 mbuf_head->port = rx_ring->port_id; 2516 mbuf_head->pkt_len = len; 2517 mbuf_head->data_off += offset; 2518 2519 rx_info->mbuf = NULL; 2520 rx_ring->empty_rx_reqs[ntc] = req_id; 2521 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 2522 2523 while (--descs) { 2524 ++buf; 2525 len = ena_bufs[buf].len; 2526 req_id = ena_bufs[buf].req_id; 2527 2528 rx_info = &rx_ring->rx_buffer_info[req_id]; 2529 RTE_ASSERT(rx_info->mbuf != NULL); 2530 2531 if (unlikely(len == 0)) { 2532 /* 2533 * Some devices can pass descriptor with the length 0. 2534 * To avoid confusion, the PMD is simply putting the 2535 * descriptor back, as it was never used. We'll avoid 2536 * mbuf allocation that way. 2537 */ 2538 rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq, 2539 rx_info->mbuf, req_id); 2540 if (unlikely(rc != 0)) { 2541 /* Free the mbuf in case of an error. 
*/ 2542 rte_mbuf_raw_free(rx_info->mbuf); 2543 } else { 2544 /* 2545 * If there was no error, just exit the loop as 2546 * 0 length descriptor is always the last one. 2547 */ 2548 break; 2549 } 2550 } else { 2551 /* Create an mbuf chain. */ 2552 mbuf->next = rx_info->mbuf; 2553 mbuf = mbuf->next; 2554 2555 ena_init_rx_mbuf(mbuf, len); 2556 mbuf_head->pkt_len += len; 2557 } 2558 2559 /* 2560 * Mark the descriptor as depleted and perform necessary 2561 * cleanup. 2562 * This code will execute in two cases: 2563 * 1. Descriptor len was greater than 0 - normal situation. 2564 * 2. Descriptor len was 0 and we failed to add the descriptor 2565 * to the device. In that situation, we should try to add 2566 * the mbuf again in the populate routine and mark the 2567 * descriptor as used up by the device. 2568 */ 2569 rx_info->mbuf = NULL; 2570 rx_ring->empty_rx_reqs[ntc] = req_id; 2571 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); 2572 } 2573 2574 *next_to_clean = ntc; 2575 2576 return mbuf_head; 2577 } 2578 2579 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 2580 uint16_t nb_pkts) 2581 { 2582 struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 2583 unsigned int free_queue_entries; 2584 uint16_t next_to_clean = rx_ring->next_to_clean; 2585 uint16_t descs_in_use; 2586 struct rte_mbuf *mbuf; 2587 uint16_t completed; 2588 struct ena_com_rx_ctx ena_rx_ctx; 2589 int i, rc = 0; 2590 bool fill_hash; 2591 2592 #ifdef RTE_ETHDEV_DEBUG_RX 2593 /* Check adapter state */ 2594 if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 2595 PMD_RX_LOG(ALERT, 2596 "Trying to receive pkts while device is NOT running\n"); 2597 return 0; 2598 } 2599 #endif 2600 2601 fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH; 2602 2603 descs_in_use = rx_ring->ring_size - 2604 ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1; 2605 nb_pkts = RTE_MIN(descs_in_use, nb_pkts); 2606 2607 for (completed = 0; completed < nb_pkts; completed++) { 2608 ena_rx_ctx.max_bufs = rx_ring->sgl_size; 2609 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 2610 ena_rx_ctx.descs = 0; 2611 ena_rx_ctx.pkt_offset = 0; 2612 /* receive packet context */ 2613 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 2614 rx_ring->ena_com_io_sq, 2615 &ena_rx_ctx); 2616 if (unlikely(rc)) { 2617 PMD_RX_LOG(ERR, 2618 "Failed to get the packet from the device, rc: %d\n", 2619 rc); 2620 if (rc == ENA_COM_NO_SPACE) { 2621 ++rx_ring->rx_stats.bad_desc_num; 2622 ena_trigger_reset(rx_ring->adapter, 2623 ENA_REGS_RESET_TOO_MANY_RX_DESCS); 2624 } else { 2625 ++rx_ring->rx_stats.bad_req_id; 2626 ena_trigger_reset(rx_ring->adapter, 2627 ENA_REGS_RESET_INV_RX_REQ_ID); 2628 } 2629 return 0; 2630 } 2631 2632 mbuf = ena_rx_mbuf(rx_ring, 2633 ena_rx_ctx.ena_bufs, 2634 ena_rx_ctx.descs, 2635 &next_to_clean, 2636 ena_rx_ctx.pkt_offset); 2637 if (unlikely(mbuf == NULL)) { 2638 for (i = 0; i < ena_rx_ctx.descs; ++i) { 2639 rx_ring->empty_rx_reqs[next_to_clean] = 2640 rx_ring->ena_bufs[i].req_id; 2641 next_to_clean = ENA_IDX_NEXT_MASKED( 2642 next_to_clean, rx_ring->size_mask); 2643 } 2644 break; 2645 } 2646 2647 /* fill mbuf attributes if any */ 2648 ena_rx_mbuf_prepare(rx_ring, mbuf, &ena_rx_ctx, fill_hash); 2649 2650 if (unlikely(mbuf->ol_flags & 2651 (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD))) 2652 rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors); 2653 2654 rx_pkts[completed] = mbuf; 2655 rx_ring->rx_stats.bytes += mbuf->pkt_len; 2656 } 2657 2658 rx_ring->rx_stats.cnt += completed; 2659 rx_ring->next_to_clean = 
next_to_clean; 2660 2661 free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq); 2662 2663 /* Burst refill to save doorbells, memory barriers, const interval */ 2664 if (free_queue_entries >= rx_ring->rx_free_thresh) { 2665 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); 2666 ena_populate_rx_queue(rx_ring, free_queue_entries); 2667 } 2668 2669 return completed; 2670 } 2671 2672 static uint16_t 2673 eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 2674 uint16_t nb_pkts) 2675 { 2676 int32_t ret; 2677 uint32_t i; 2678 struct rte_mbuf *m; 2679 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 2680 struct ena_adapter *adapter = tx_ring->adapter; 2681 struct rte_ipv4_hdr *ip_hdr; 2682 uint64_t ol_flags; 2683 uint64_t l4_csum_flag; 2684 uint64_t dev_offload_capa; 2685 uint16_t frag_field; 2686 bool need_pseudo_csum; 2687 2688 dev_offload_capa = adapter->offloads.tx_offloads; 2689 for (i = 0; i != nb_pkts; i++) { 2690 m = tx_pkts[i]; 2691 ol_flags = m->ol_flags; 2692 2693 /* Check if any offload flag was set */ 2694 if (ol_flags == 0) 2695 continue; 2696 2697 l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK; 2698 /* SCTP checksum offload is not supported by the ENA. */ 2699 if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) || 2700 l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) { 2701 PMD_TX_LOG(DEBUG, 2702 "mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64 "\n", 2703 i, ol_flags); 2704 rte_errno = ENOTSUP; 2705 return i; 2706 } 2707 2708 if (unlikely(m->nb_segs >= tx_ring->sgl_size && 2709 !(tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && 2710 m->nb_segs == tx_ring->sgl_size && 2711 m->data_len < tx_ring->tx_max_header_size))) { 2712 PMD_TX_LOG(DEBUG, 2713 "mbuf[%" PRIu32 "] has too many segments: %" PRIu16 "\n", 2714 i, m->nb_segs); 2715 rte_errno = EINVAL; 2716 return i; 2717 } 2718 2719 #ifdef RTE_LIBRTE_ETHDEV_DEBUG 2720 /* Check if requested offload is also enabled for the queue */ 2721 if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM && 2722 !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) || 2723 (l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM && 2724 !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) || 2725 (l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM && 2726 !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) { 2727 PMD_TX_LOG(DEBUG, 2728 "mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n", 2729 i, m->nb_segs, tx_ring->id); 2730 rte_errno = EINVAL; 2731 return i; 2732 } 2733 2734 /* The caller is obligated to set l2 and l3 len if any cksum 2735 * offload is enabled. 2736 */ 2737 if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) && 2738 (m->l2_len == 0 || m->l3_len == 0))) { 2739 PMD_TX_LOG(DEBUG, 2740 "mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n", 2741 i); 2742 rte_errno = EINVAL; 2743 return i; 2744 } 2745 ret = rte_validate_tx_offload(m); 2746 if (ret != 0) { 2747 rte_errno = -ret; 2748 return i; 2749 } 2750 #endif 2751 2752 /* Verify HW support for requested offloads and determine if 2753 * pseudo header checksum is needed. 
2754 */ 2755 need_pseudo_csum = false; 2756 if (ol_flags & RTE_MBUF_F_TX_IPV4) { 2757 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM && 2758 !(dev_offload_capa & ENA_L3_IPV4_CSUM)) { 2759 rte_errno = ENOTSUP; 2760 return i; 2761 } 2762 2763 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG && 2764 !(dev_offload_capa & ENA_IPV4_TSO)) { 2765 rte_errno = ENOTSUP; 2766 return i; 2767 } 2768 2769 /* Check HW capabilities and if pseudo csum is needed 2770 * for L4 offloads. 2771 */ 2772 if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM && 2773 !(dev_offload_capa & ENA_L4_IPV4_CSUM)) { 2774 if (dev_offload_capa & 2775 ENA_L4_IPV4_CSUM_PARTIAL) { 2776 need_pseudo_csum = true; 2777 } else { 2778 rte_errno = ENOTSUP; 2779 return i; 2780 } 2781 } 2782 2783 /* Parse the DF flag */ 2784 ip_hdr = rte_pktmbuf_mtod_offset(m, 2785 struct rte_ipv4_hdr *, m->l2_len); 2786 frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset); 2787 if (frag_field & RTE_IPV4_HDR_DF_FLAG) { 2788 m->packet_type |= RTE_PTYPE_L4_NONFRAG; 2789 } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 2790 /* In case we are supposed to TSO and have DF 2791 * not set (DF=0) hardware must be provided with 2792 * partial checksum. 2793 */ 2794 need_pseudo_csum = true; 2795 } 2796 } else if (ol_flags & RTE_MBUF_F_TX_IPV6) { 2797 /* There is no support for IPv6 TSO as for now. */ 2798 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 2799 rte_errno = ENOTSUP; 2800 return i; 2801 } 2802 2803 /* Check HW capabilities and if pseudo csum is needed */ 2804 if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM && 2805 !(dev_offload_capa & ENA_L4_IPV6_CSUM)) { 2806 if (dev_offload_capa & 2807 ENA_L4_IPV6_CSUM_PARTIAL) { 2808 need_pseudo_csum = true; 2809 } else { 2810 rte_errno = ENOTSUP; 2811 return i; 2812 } 2813 } 2814 } 2815 2816 if (need_pseudo_csum) { 2817 ret = rte_net_intel_cksum_flags_prepare(m, ol_flags); 2818 if (ret != 0) { 2819 rte_errno = -ret; 2820 return i; 2821 } 2822 } 2823 } 2824 2825 return i; 2826 } 2827 2828 static void ena_update_hints(struct ena_adapter *adapter, 2829 struct ena_admin_ena_hw_hints *hints) 2830 { 2831 if (hints->admin_completion_tx_timeout) 2832 adapter->ena_dev.admin_queue.completion_timeout = 2833 hints->admin_completion_tx_timeout * 1000; 2834 2835 if (hints->mmio_read_timeout) 2836 /* convert to usec */ 2837 adapter->ena_dev.mmio_read.reg_read_to = 2838 hints->mmio_read_timeout * 1000; 2839 2840 if (hints->driver_watchdog_timeout) { 2841 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) 2842 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; 2843 else 2844 // Convert msecs to ticks 2845 adapter->keep_alive_timeout = 2846 (hints->driver_watchdog_timeout * 2847 rte_get_timer_hz()) / 1000; 2848 } 2849 } 2850 2851 static void ena_tx_map_mbuf(struct ena_ring *tx_ring, 2852 struct ena_tx_buffer *tx_info, 2853 struct rte_mbuf *mbuf, 2854 void **push_header, 2855 uint16_t *header_len) 2856 { 2857 struct ena_com_buf *ena_buf; 2858 uint16_t delta, seg_len, push_len; 2859 2860 delta = 0; 2861 seg_len = mbuf->data_len; 2862 2863 tx_info->mbuf = mbuf; 2864 ena_buf = tx_info->bufs; 2865 2866 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 2867 /* 2868 * Tx header might be (and will be in most cases) smaller than 2869 * tx_max_header_size. But it's not an issue to send more data 2870 * to the device, than actually needed if the mbuf size is 2871 * greater than tx_max_header_size. 
2872 */ 2873 push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size); 2874 *header_len = push_len; 2875 2876 if (likely(push_len <= seg_len)) { 2877 /* If the push header is in the single segment, then 2878 * just point it to the 1st mbuf data. 2879 */ 2880 *push_header = rte_pktmbuf_mtod(mbuf, uint8_t *); 2881 } else { 2882 /* If the push header lays in the several segments, copy 2883 * it to the intermediate buffer. 2884 */ 2885 rte_pktmbuf_read(mbuf, 0, push_len, 2886 tx_ring->push_buf_intermediate_buf); 2887 *push_header = tx_ring->push_buf_intermediate_buf; 2888 delta = push_len - seg_len; 2889 } 2890 } else { 2891 *push_header = NULL; 2892 *header_len = 0; 2893 push_len = 0; 2894 } 2895 2896 /* Process first segment taking into consideration pushed header */ 2897 if (seg_len > push_len) { 2898 ena_buf->paddr = mbuf->buf_iova + 2899 mbuf->data_off + 2900 push_len; 2901 ena_buf->len = seg_len - push_len; 2902 ena_buf++; 2903 tx_info->num_of_bufs++; 2904 } 2905 2906 while ((mbuf = mbuf->next) != NULL) { 2907 seg_len = mbuf->data_len; 2908 2909 /* Skip mbufs if whole data is pushed as a header */ 2910 if (unlikely(delta > seg_len)) { 2911 delta -= seg_len; 2912 continue; 2913 } 2914 2915 ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta; 2916 ena_buf->len = seg_len - delta; 2917 ena_buf++; 2918 tx_info->num_of_bufs++; 2919 2920 delta = 0; 2921 } 2922 } 2923 2924 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) 2925 { 2926 struct ena_tx_buffer *tx_info; 2927 struct ena_com_tx_ctx ena_tx_ctx = { { 0 } }; 2928 uint16_t next_to_use; 2929 uint16_t header_len; 2930 uint16_t req_id; 2931 void *push_header; 2932 int nb_hw_desc; 2933 int rc; 2934 2935 /* Checking for space for 2 additional metadata descriptors due to 2936 * possible header split and metadata descriptor 2937 */ 2938 if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 2939 mbuf->nb_segs + 2)) { 2940 PMD_DRV_LOG(DEBUG, "Not enough space in the tx queue\n"); 2941 return ENA_COM_NO_MEM; 2942 } 2943 2944 next_to_use = tx_ring->next_to_use; 2945 2946 req_id = tx_ring->empty_tx_reqs[next_to_use]; 2947 tx_info = &tx_ring->tx_buffer_info[req_id]; 2948 tx_info->num_of_bufs = 0; 2949 RTE_ASSERT(tx_info->mbuf == NULL); 2950 2951 ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len); 2952 2953 ena_tx_ctx.ena_bufs = tx_info->bufs; 2954 ena_tx_ctx.push_header = push_header; 2955 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; 2956 ena_tx_ctx.req_id = req_id; 2957 ena_tx_ctx.header_len = header_len; 2958 2959 /* Set Tx offloads flags, if applicable */ 2960 ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads, 2961 tx_ring->disable_meta_caching); 2962 2963 if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, 2964 &ena_tx_ctx))) { 2965 PMD_TX_LOG(DEBUG, 2966 "LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n", 2967 tx_ring->id); 2968 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 2969 tx_ring->tx_stats.doorbells++; 2970 tx_ring->pkts_without_db = false; 2971 } 2972 2973 /* prepare the packet's descriptors to dma engine */ 2974 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, 2975 &nb_hw_desc); 2976 if (unlikely(rc)) { 2977 PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc); 2978 ++tx_ring->tx_stats.prepare_ctx_err; 2979 ena_trigger_reset(tx_ring->adapter, 2980 ENA_REGS_RESET_DRIVER_INVALID_STATE); 2981 return rc; 2982 } 2983 2984 tx_info->tx_descs = nb_hw_desc; 2985 tx_info->timestamp = rte_get_timer_cycles(); 2986 2987 
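	/* The descriptors are now owned by the device. Update the per-queue
	 * statistics and advance next_to_use; the req_id is recycled through
	 * empty_tx_reqs[] by ena_tx_cleanup() once the completion arrives.
	 */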
tx_ring->tx_stats.cnt++; 2988 tx_ring->tx_stats.bytes += mbuf->pkt_len; 2989 2990 tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, 2991 tx_ring->size_mask); 2992 2993 return 0; 2994 } 2995 2996 static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt) 2997 { 2998 struct ena_ring *tx_ring = (struct ena_ring *)txp; 2999 unsigned int total_tx_descs = 0; 3000 unsigned int total_tx_pkts = 0; 3001 uint16_t cleanup_budget; 3002 uint16_t next_to_clean = tx_ring->next_to_clean; 3003 3004 /* 3005 * If free_pkt_cnt is equal to 0, it means that the user requested 3006 * full cleanup, so attempt to release all Tx descriptors 3007 * (ring_size - 1 -> size_mask) 3008 */ 3009 cleanup_budget = (free_pkt_cnt == 0) ? tx_ring->size_mask : free_pkt_cnt; 3010 3011 while (likely(total_tx_pkts < cleanup_budget)) { 3012 struct rte_mbuf *mbuf; 3013 struct ena_tx_buffer *tx_info; 3014 uint16_t req_id; 3015 3016 if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0) 3017 break; 3018 3019 if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0)) 3020 break; 3021 3022 /* Get Tx info & store how many descs were processed */ 3023 tx_info = &tx_ring->tx_buffer_info[req_id]; 3024 tx_info->timestamp = 0; 3025 3026 mbuf = tx_info->mbuf; 3027 rte_pktmbuf_free(mbuf); 3028 3029 tx_info->mbuf = NULL; 3030 tx_ring->empty_tx_reqs[next_to_clean] = req_id; 3031 3032 total_tx_descs += tx_info->tx_descs; 3033 total_tx_pkts++; 3034 3035 /* Put back descriptor to the ring for reuse */ 3036 next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean, 3037 tx_ring->size_mask); 3038 } 3039 3040 if (likely(total_tx_descs > 0)) { 3041 /* acknowledge completion of sent packets */ 3042 tx_ring->next_to_clean = next_to_clean; 3043 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); 3044 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); 3045 } 3046 3047 /* Notify completion handler that full cleanup was performed */ 3048 if (free_pkt_cnt == 0 || total_tx_pkts < cleanup_budget) 3049 tx_ring->last_cleanup_ticks = rte_get_timer_cycles(); 3050 3051 return total_tx_pkts; 3052 } 3053 3054 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 3055 uint16_t nb_pkts) 3056 { 3057 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); 3058 int available_desc; 3059 uint16_t sent_idx = 0; 3060 3061 #ifdef RTE_ETHDEV_DEBUG_TX 3062 /* Check adapter state */ 3063 if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 3064 PMD_TX_LOG(ALERT, 3065 "Trying to xmit pkts while device is NOT running\n"); 3066 return 0; 3067 } 3068 #endif 3069 3070 available_desc = ena_com_free_q_entries(tx_ring->ena_com_io_sq); 3071 if (available_desc < tx_ring->tx_free_thresh) 3072 ena_tx_cleanup((void *)tx_ring, 0); 3073 3074 for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { 3075 if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx])) 3076 break; 3077 tx_ring->pkts_without_db = true; 3078 rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4, 3079 tx_ring->size_mask)]); 3080 } 3081 3082 /* If there are ready packets to be xmitted... 
*/ 3083 if (likely(tx_ring->pkts_without_db)) { 3084 /* ...let HW do its best :-) */ 3085 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); 3086 tx_ring->tx_stats.doorbells++; 3087 tx_ring->pkts_without_db = false; 3088 } 3089 3090 tx_ring->tx_stats.available_desc = 3091 ena_com_free_q_entries(tx_ring->ena_com_io_sq); 3092 tx_ring->tx_stats.tx_poll++; 3093 3094 return sent_idx; 3095 } 3096 3097 int ena_copy_eni_stats(struct ena_adapter *adapter, struct ena_stats_eni *stats) 3098 { 3099 int rc; 3100 3101 rte_spinlock_lock(&adapter->admin_lock); 3102 /* Retrieve and store the latest statistics from the AQ. This ensures 3103 * that previous value is returned in case of a com error. 3104 */ 3105 rc = ENA_PROXY(adapter, ena_com_get_eni_stats, &adapter->ena_dev, 3106 (struct ena_admin_eni_stats *)stats); 3107 rte_spinlock_unlock(&adapter->admin_lock); 3108 if (rc != 0) { 3109 if (rc == ENA_COM_UNSUPPORTED) { 3110 PMD_DRV_LOG(DEBUG, 3111 "Retrieving ENI metrics is not supported\n"); 3112 } else { 3113 PMD_DRV_LOG(WARNING, 3114 "Failed to get ENI metrics, rc: %d\n", rc); 3115 } 3116 return rc; 3117 } 3118 3119 return 0; 3120 } 3121 3122 /** 3123 * DPDK callback to retrieve names of extended device statistics 3124 * 3125 * @param dev 3126 * Pointer to Ethernet device structure. 3127 * @param[out] xstats_names 3128 * Buffer to insert names into. 3129 * @param n 3130 * Number of names. 3131 * 3132 * @return 3133 * Number of xstats names. 3134 */ 3135 static int ena_xstats_get_names(struct rte_eth_dev *dev, 3136 struct rte_eth_xstat_name *xstats_names, 3137 unsigned int n) 3138 { 3139 unsigned int xstats_count = ena_xstats_calc_num(dev->data); 3140 unsigned int stat, i, count = 0; 3141 3142 if (n < xstats_count || !xstats_names) 3143 return xstats_count; 3144 3145 for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) 3146 strcpy(xstats_names[count].name, 3147 ena_stats_global_strings[stat].name); 3148 3149 for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) 3150 strcpy(xstats_names[count].name, 3151 ena_stats_eni_strings[stat].name); 3152 3153 for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) 3154 for (i = 0; i < dev->data->nb_rx_queues; i++, count++) 3155 snprintf(xstats_names[count].name, 3156 sizeof(xstats_names[count].name), 3157 "rx_q%d_%s", i, 3158 ena_stats_rx_strings[stat].name); 3159 3160 for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) 3161 for (i = 0; i < dev->data->nb_tx_queues; i++, count++) 3162 snprintf(xstats_names[count].name, 3163 sizeof(xstats_names[count].name), 3164 "tx_q%d_%s", i, 3165 ena_stats_tx_strings[stat].name); 3166 3167 return xstats_count; 3168 } 3169 3170 /** 3171 * DPDK callback to retrieve names of extended device statistics for the given 3172 * ids. 3173 * 3174 * @param dev 3175 * Pointer to Ethernet device structure. 3176 * @param[out] xstats_names 3177 * Buffer to insert names into. 3178 * @param ids 3179 * IDs array for which the names should be retrieved. 3180 * @param size 3181 * Number of ids. 3182 * 3183 * @return 3184 * Positive value: number of xstats names. Negative value: error code. 
 */
static int ena_xstats_get_names_by_id(struct rte_eth_dev *dev,
				      const uint64_t *ids,
				      struct rte_eth_xstat_name *xstats_names,
				      unsigned int size)
{
	uint64_t xstats_count = ena_xstats_calc_num(dev->data);
	uint64_t id, qid;
	unsigned int i;

	if (xstats_names == NULL)
		return xstats_count;

	for (i = 0; i < size; ++i) {
		id = ids[i];
		if (id >= xstats_count) {
			PMD_DRV_LOG(ERR,
				"ID value out of range: id=%" PRIu64 ", xstats_num=%" PRIu64 "\n",
				id, xstats_count);
			return -EINVAL;
		}

		if (id < ENA_STATS_ARRAY_GLOBAL) {
			strcpy(xstats_names[i].name,
			       ena_stats_global_strings[id].name);
			continue;
		}

		id -= ENA_STATS_ARRAY_GLOBAL;
		if (id < ENA_STATS_ARRAY_ENI) {
			strcpy(xstats_names[i].name,
			       ena_stats_eni_strings[id].name);
			continue;
		}

		/* Queue xstats are laid out per-stat with the queue index
		 * changing fastest, so decode the id the same way as
		 * ena_xstats_get_names() and ena_xstats_get_by_id() do.
		 */
		id -= ENA_STATS_ARRAY_ENI;
		if (id < ENA_STATS_ARRAY_RX) {
			qid = id % dev->data->nb_rx_queues;
			id /= dev->data->nb_rx_queues;
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "rx_q%" PRIu64 "_%s",
				 qid, ena_stats_rx_strings[id].name);
			continue;
		}

		id -= ENA_STATS_ARRAY_RX;
		/* Although this condition is not strictly needed, it was added
		 * for compatibility in case a new xstat structure is ever
		 * added.
		 */
		if (id < ENA_STATS_ARRAY_TX) {
			qid = id % dev->data->nb_tx_queues;
			id /= dev->data->nb_tx_queues;
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "tx_q%" PRIu64 "_%s",
				 qid, ena_stats_tx_strings[id].name);
			continue;
		}
	}

	return i;
}

/**
 * DPDK callback to get extended device statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] xstats
 *   Stats table output buffer.
 * @param n
 *   The size of the stats table.
 *
 * @return
 *   Number of xstats on success, negative on failure.
 */
static int ena_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *xstats,
			  unsigned int n)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	unsigned int xstats_count = ena_xstats_calc_num(dev->data);
	struct ena_stats_eni eni_stats;
	unsigned int stat, i, count = 0;
	int stat_offset;
	void *stats_begin;

	if (n < xstats_count)
		return xstats_count;

	if (!xstats)
		return 0;

	for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
		stat_offset = ena_stats_global_strings[stat].stat_offset;
		stats_begin = &adapter->dev_stats;

		xstats[count].id = count;
		xstats[count].value = *((uint64_t *)
			((char *)stats_begin + stat_offset));
	}

	/* Even if the function below fails, we should copy previous (or initial
	 * values) to keep structure of rte_eth_xstat consistent.
3290 */ 3291 ena_copy_eni_stats(adapter, &eni_stats); 3292 for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) { 3293 stat_offset = ena_stats_eni_strings[stat].stat_offset; 3294 stats_begin = &eni_stats; 3295 3296 xstats[count].id = count; 3297 xstats[count].value = *((uint64_t *) 3298 ((char *)stats_begin + stat_offset)); 3299 } 3300 3301 for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) { 3302 for (i = 0; i < dev->data->nb_rx_queues; i++, count++) { 3303 stat_offset = ena_stats_rx_strings[stat].stat_offset; 3304 stats_begin = &adapter->rx_ring[i].rx_stats; 3305 3306 xstats[count].id = count; 3307 xstats[count].value = *((uint64_t *) 3308 ((char *)stats_begin + stat_offset)); 3309 } 3310 } 3311 3312 for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) { 3313 for (i = 0; i < dev->data->nb_tx_queues; i++, count++) { 3314 stat_offset = ena_stats_tx_strings[stat].stat_offset; 3315 stats_begin = &adapter->tx_ring[i].rx_stats; 3316 3317 xstats[count].id = count; 3318 xstats[count].value = *((uint64_t *) 3319 ((char *)stats_begin + stat_offset)); 3320 } 3321 } 3322 3323 return count; 3324 } 3325 3326 static int ena_xstats_get_by_id(struct rte_eth_dev *dev, 3327 const uint64_t *ids, 3328 uint64_t *values, 3329 unsigned int n) 3330 { 3331 struct ena_adapter *adapter = dev->data->dev_private; 3332 struct ena_stats_eni eni_stats; 3333 uint64_t id; 3334 uint64_t rx_entries, tx_entries; 3335 unsigned int i; 3336 int qid; 3337 int valid = 0; 3338 bool was_eni_copied = false; 3339 3340 for (i = 0; i < n; ++i) { 3341 id = ids[i]; 3342 /* Check if id belongs to global statistics */ 3343 if (id < ENA_STATS_ARRAY_GLOBAL) { 3344 values[i] = *((uint64_t *)&adapter->dev_stats + id); 3345 ++valid; 3346 continue; 3347 } 3348 3349 /* Check if id belongs to ENI statistics */ 3350 id -= ENA_STATS_ARRAY_GLOBAL; 3351 if (id < ENA_STATS_ARRAY_ENI) { 3352 /* Avoid reading ENI stats multiple times in a single 3353 * function call, as it requires communication with the 3354 * admin queue. 3355 */ 3356 if (!was_eni_copied) { 3357 was_eni_copied = true; 3358 ena_copy_eni_stats(adapter, &eni_stats); 3359 } 3360 values[i] = *((uint64_t *)&eni_stats + id); 3361 ++valid; 3362 continue; 3363 } 3364 3365 /* Check if id belongs to rx queue statistics */ 3366 id -= ENA_STATS_ARRAY_ENI; 3367 rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues; 3368 if (id < rx_entries) { 3369 qid = id % dev->data->nb_rx_queues; 3370 id /= dev->data->nb_rx_queues; 3371 values[i] = *((uint64_t *) 3372 &adapter->rx_ring[qid].rx_stats + id); 3373 ++valid; 3374 continue; 3375 } 3376 /* Check if id belongs to rx queue statistics */ 3377 id -= rx_entries; 3378 tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues; 3379 if (id < tx_entries) { 3380 qid = id % dev->data->nb_tx_queues; 3381 id /= dev->data->nb_tx_queues; 3382 values[i] = *((uint64_t *) 3383 &adapter->tx_ring[qid].tx_stats + id); 3384 ++valid; 3385 continue; 3386 } 3387 } 3388 3389 return valid; 3390 } 3391 3392 static int ena_process_uint_devarg(const char *key, 3393 const char *value, 3394 void *opaque) 3395 { 3396 struct ena_adapter *adapter = opaque; 3397 char *str_end; 3398 uint64_t uint_value; 3399 3400 uint_value = strtoull(value, &str_end, 10); 3401 if (value == str_end) { 3402 PMD_INIT_LOG(ERR, 3403 "Invalid value for key '%s'. 
Only uint values are accepted.\n", 3404 key); 3405 return -EINVAL; 3406 } 3407 3408 if (strcmp(key, ENA_DEVARG_MISS_TXC_TO) == 0) { 3409 if (uint_value > ENA_MAX_TX_TIMEOUT_SECONDS) { 3410 PMD_INIT_LOG(ERR, 3411 "Tx timeout too high: %" PRIu64 " sec. Maximum allowed: %d sec.\n", 3412 uint_value, ENA_MAX_TX_TIMEOUT_SECONDS); 3413 return -EINVAL; 3414 } else if (uint_value == 0) { 3415 PMD_INIT_LOG(INFO, 3416 "Check for missing Tx completions has been disabled.\n"); 3417 adapter->missing_tx_completion_to = 3418 ENA_HW_HINTS_NO_TIMEOUT; 3419 } else { 3420 PMD_INIT_LOG(INFO, 3421 "Tx packet completion timeout set to %" PRIu64 " seconds.\n", 3422 uint_value); 3423 adapter->missing_tx_completion_to = 3424 uint_value * rte_get_timer_hz(); 3425 } 3426 } 3427 3428 return 0; 3429 } 3430 3431 static int ena_process_bool_devarg(const char *key, 3432 const char *value, 3433 void *opaque) 3434 { 3435 struct ena_adapter *adapter = opaque; 3436 bool bool_value; 3437 3438 /* Parse the value. */ 3439 if (strcmp(value, "1") == 0) { 3440 bool_value = true; 3441 } else if (strcmp(value, "0") == 0) { 3442 bool_value = false; 3443 } else { 3444 PMD_INIT_LOG(ERR, 3445 "Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n", 3446 value, key); 3447 return -EINVAL; 3448 } 3449 3450 /* Now, assign it to the proper adapter field. */ 3451 if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0) 3452 adapter->use_large_llq_hdr = bool_value; 3453 3454 return 0; 3455 } 3456 3457 static int ena_parse_devargs(struct ena_adapter *adapter, 3458 struct rte_devargs *devargs) 3459 { 3460 static const char * const allowed_args[] = { 3461 ENA_DEVARG_LARGE_LLQ_HDR, 3462 ENA_DEVARG_MISS_TXC_TO, 3463 NULL, 3464 }; 3465 struct rte_kvargs *kvlist; 3466 int rc; 3467 3468 if (devargs == NULL) 3469 return 0; 3470 3471 kvlist = rte_kvargs_parse(devargs->args, allowed_args); 3472 if (kvlist == NULL) { 3473 PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n", 3474 devargs->args); 3475 return -EINVAL; 3476 } 3477 3478 rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR, 3479 ena_process_bool_devarg, adapter); 3480 if (rc != 0) 3481 goto exit; 3482 rc = rte_kvargs_process(kvlist, ENA_DEVARG_MISS_TXC_TO, 3483 ena_process_uint_devarg, adapter); 3484 3485 exit: 3486 rte_kvargs_free(kvlist); 3487 3488 return rc; 3489 } 3490 3491 static int ena_setup_rx_intr(struct rte_eth_dev *dev) 3492 { 3493 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3494 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 3495 int rc; 3496 uint16_t vectors_nb, i; 3497 bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq; 3498 3499 if (!rx_intr_requested) 3500 return 0; 3501 3502 if (!rte_intr_cap_multiple(intr_handle)) { 3503 PMD_DRV_LOG(ERR, 3504 "Rx interrupt requested, but it isn't supported by the PCI driver\n"); 3505 return -ENOTSUP; 3506 } 3507 3508 /* Disable interrupt mapping before the configuration starts. */ 3509 rte_intr_disable(intr_handle); 3510 3511 /* Verify if there are enough vectors available. 
*/ 3512 vectors_nb = dev->data->nb_rx_queues; 3513 if (vectors_nb > RTE_MAX_RXTX_INTR_VEC_ID) { 3514 PMD_DRV_LOG(ERR, 3515 "Too many Rx interrupts requested, maximum number: %d\n", 3516 RTE_MAX_RXTX_INTR_VEC_ID); 3517 rc = -ENOTSUP; 3518 goto enable_intr; 3519 } 3520 3521 /* Allocate the vector list */ 3522 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 3523 dev->data->nb_rx_queues)) { 3524 PMD_DRV_LOG(ERR, 3525 "Failed to allocate interrupt vector for %d queues\n", 3526 dev->data->nb_rx_queues); 3527 rc = -ENOMEM; 3528 goto enable_intr; 3529 } 3530 3531 rc = rte_intr_efd_enable(intr_handle, vectors_nb); 3532 if (rc != 0) 3533 goto free_intr_vec; 3534 3535 if (!rte_intr_allow_others(intr_handle)) { 3536 PMD_DRV_LOG(ERR, 3537 "Not enough interrupts available to use both ENA Admin and Rx interrupts\n"); 3538 goto disable_intr_efd; 3539 } 3540 3541 for (i = 0; i < vectors_nb; ++i) 3542 if (rte_intr_vec_list_index_set(intr_handle, i, 3543 RTE_INTR_VEC_RXTX_OFFSET + i)) 3544 goto disable_intr_efd; 3545 3546 rte_intr_enable(intr_handle); 3547 return 0; 3548 3549 disable_intr_efd: 3550 rte_intr_efd_disable(intr_handle); 3551 free_intr_vec: 3552 rte_intr_vec_list_free(intr_handle); 3553 enable_intr: 3554 rte_intr_enable(intr_handle); 3555 return rc; 3556 } 3557 3558 static void ena_rx_queue_intr_set(struct rte_eth_dev *dev, 3559 uint16_t queue_id, 3560 bool unmask) 3561 { 3562 struct ena_adapter *adapter = dev->data->dev_private; 3563 struct ena_ring *rxq = &adapter->rx_ring[queue_id]; 3564 struct ena_eth_io_intr_reg intr_reg; 3565 3566 ena_com_update_intr_reg(&intr_reg, 0, 0, unmask); 3567 ena_com_unmask_intr(rxq->ena_com_io_cq, &intr_reg); 3568 } 3569 3570 static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev, 3571 uint16_t queue_id) 3572 { 3573 ena_rx_queue_intr_set(dev, queue_id, true); 3574 3575 return 0; 3576 } 3577 3578 static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev, 3579 uint16_t queue_id) 3580 { 3581 ena_rx_queue_intr_set(dev, queue_id, false); 3582 3583 return 0; 3584 } 3585 3586 static int ena_configure_aenq(struct ena_adapter *adapter) 3587 { 3588 uint32_t aenq_groups = adapter->all_aenq_groups; 3589 int rc; 3590 3591 /* All_aenq_groups holds all AENQ functions supported by the device and 3592 * the HW, so at first we need to be sure the LSC request is valid. 3593 */ 3594 if (adapter->edev_data->dev_conf.intr_conf.lsc != 0) { 3595 if (!(aenq_groups & BIT(ENA_ADMIN_LINK_CHANGE))) { 3596 PMD_DRV_LOG(ERR, 3597 "LSC requested, but it's not supported by the AENQ\n"); 3598 return -EINVAL; 3599 } 3600 } else { 3601 /* If LSC wasn't enabled by the app, let's enable all supported 3602 * AENQ procedures except the LSC. 
3603 */ 3604 aenq_groups &= ~BIT(ENA_ADMIN_LINK_CHANGE); 3605 } 3606 3607 rc = ena_com_set_aenq_config(&adapter->ena_dev, aenq_groups); 3608 if (rc != 0) { 3609 PMD_DRV_LOG(ERR, "Cannot configure AENQ groups, rc=%d\n", rc); 3610 return rc; 3611 } 3612 3613 adapter->active_aenq_groups = aenq_groups; 3614 3615 return 0; 3616 } 3617 3618 int ena_mp_indirect_table_set(struct ena_adapter *adapter) 3619 { 3620 return ENA_PROXY(adapter, ena_com_indirect_table_set, &adapter->ena_dev); 3621 } 3622 3623 int ena_mp_indirect_table_get(struct ena_adapter *adapter, 3624 uint32_t *indirect_table) 3625 { 3626 return ENA_PROXY(adapter, ena_com_indirect_table_get, &adapter->ena_dev, 3627 indirect_table); 3628 } 3629 3630 /********************************************************************* 3631 * ena_plat_dpdk.h functions implementations 3632 *********************************************************************/ 3633 3634 const struct rte_memzone * 3635 ena_mem_alloc_coherent(struct rte_eth_dev_data *data, size_t size, 3636 int socket_id, unsigned int alignment, void **virt_addr, 3637 dma_addr_t *phys_addr) 3638 { 3639 char z_name[RTE_MEMZONE_NAMESIZE]; 3640 struct ena_adapter *adapter = data->dev_private; 3641 const struct rte_memzone *memzone; 3642 int rc; 3643 3644 rc = snprintf(z_name, RTE_MEMZONE_NAMESIZE, "ena_p%d_mz%" PRIu64 "", 3645 data->port_id, adapter->memzone_cnt); 3646 if (rc >= RTE_MEMZONE_NAMESIZE) { 3647 PMD_DRV_LOG(ERR, 3648 "Name for the ena_com memzone is too long. Port: %d, mz_num: %" PRIu64 "\n", 3649 data->port_id, adapter->memzone_cnt); 3650 goto error; 3651 } 3652 adapter->memzone_cnt++; 3653 3654 memzone = rte_memzone_reserve_aligned(z_name, size, socket_id, 3655 RTE_MEMZONE_IOVA_CONTIG, alignment); 3656 if (memzone == NULL) { 3657 PMD_DRV_LOG(ERR, "Failed to allocate ena_com memzone: %s\n", 3658 z_name); 3659 goto error; 3660 } 3661 3662 memset(memzone->addr, 0, size); 3663 *virt_addr = memzone->addr; 3664 *phys_addr = memzone->iova; 3665 3666 return memzone; 3667 3668 error: 3669 *virt_addr = NULL; 3670 *phys_addr = 0; 3671 3672 return NULL; 3673 } 3674 3675 3676 /********************************************************************* 3677 * PMD configuration 3678 *********************************************************************/ 3679 static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 3680 struct rte_pci_device *pci_dev) 3681 { 3682 return rte_eth_dev_pci_generic_probe(pci_dev, 3683 sizeof(struct ena_adapter), eth_ena_dev_init); 3684 } 3685 3686 static int eth_ena_pci_remove(struct rte_pci_device *pci_dev) 3687 { 3688 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit); 3689 } 3690 3691 static struct rte_pci_driver rte_ena_pmd = { 3692 .id_table = pci_id_ena_map, 3693 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 3694 RTE_PCI_DRV_WC_ACTIVATE, 3695 .probe = eth_ena_pci_probe, 3696 .remove = eth_ena_pci_remove, 3697 }; 3698 3699 RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd); 3700 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map); 3701 RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci"); 3702 RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>"); 3703 RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE); 3704 RTE_LOG_REGISTER_SUFFIX(ena_logtype_driver, driver, NOTICE); 3705 #ifdef RTE_ETHDEV_DEBUG_RX 3706 RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, DEBUG); 3707 #endif 3708 #ifdef RTE_ETHDEV_DEBUG_TX 3709 RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, DEBUG); 3710 #endif 3711 
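/*
 * Illustrative usage note (not part of the driver): the devargs handled by
 * ena_parse_devargs() and announced through RTE_PMD_REGISTER_PARAM_STRING()
 * above are passed via the EAL device allow-list, e.g.
 *   dpdk-testpmd -a <PCI_BDF>,large_llq_hdr=1,miss_txc_to=5 -- -i
 * where the PCI address and the values are placeholders.
 */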
/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev = adapter_data;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
	uint32_t status;

	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;

	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
	adapter->link_status = status;

	ena_link_update(eth_dev, 0);
	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev = adapter_data;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_admin_ena_hw_hints *hints;

	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
		PMD_DRV_LOG(WARNING, "Invalid AENQ group: %x. Expected: %x\n",
			aenq_e->aenq_common_desc.group,
			ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrome) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid AENQ notification syndrome: %d\n",
			aenq_e->aenq_common_desc.syndrome);
	}
}

static void ena_keep_alive(void *adapter_data,
			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev = adapter_data;
	struct ena_adapter *adapter = eth_dev->data->dev_private;
	struct ena_admin_aenq_keep_alive_desc *desc;
	uint64_t rx_drops;
	uint64_t tx_drops;

	adapter->timestamp_wd = rte_get_timer_cycles();

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;

	adapter->drv_stats->rx_drops = rx_drops;
	adapter->dev_stats.tx_drops = tx_drops;
}

/**
 * This handler will be called for an unknown event group or for an event with
 * an unimplemented handler.
 **/
static void unimplemented_aenq_handler(__rte_unused void *data,
				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	PMD_DRV_LOG(ERR,
		"Unknown event was received or event with unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
	},
	.unimplemented_handler = unimplemented_aenq_handler
};
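
/*
 * The dispatch table above is handed over to the ena_com layer when the admin
 * queue is initialized (ena_com_admin_init() elsewhere in this file is
 * assumed to take it), so AENQ events raised by the device are routed to the
 * matching handler by group ID; groups without an entry fall back to
 * unimplemented_aenq_handler(). Only the groups enabled in
 * ena_configure_aenq() are actually delivered by the device.
 */
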
/*********************************************************************
 *  Multi-Process communication request handling (in primary)
 *********************************************************************/
static int
ena_mp_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
{
	const struct ena_mp_body *req =
		(const struct ena_mp_body *)mp_msg->param;
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	struct ena_mp_body *rsp;
	struct rte_mp_msg mp_rsp;
	struct rte_eth_dev *dev;
	int res = 0;

	rsp = (struct ena_mp_body *)&mp_rsp.param;
	mp_msg_init(&mp_rsp, req->type, req->port_id);

	if (!rte_eth_dev_is_valid_port(req->port_id)) {
		rte_errno = ENODEV;
		res = -rte_errno;
		PMD_DRV_LOG(ERR, "Unknown port %d in request %d\n",
			req->port_id, req->type);
		goto end;
	}
	dev = &rte_eth_devices[req->port_id];
	adapter = dev->data->dev_private;
	ena_dev = &adapter->ena_dev;

	switch (req->type) {
	case ENA_MP_DEV_STATS_GET:
		res = ena_com_get_dev_basic_stats(ena_dev,
						  &adapter->basic_stats);
		break;
	case ENA_MP_ENI_STATS_GET:
		res = ena_com_get_eni_stats(ena_dev,
			(struct ena_admin_eni_stats *)&adapter->eni_stats);
		break;
	case ENA_MP_MTU_SET:
		res = ena_com_set_dev_mtu(ena_dev, req->args.mtu);
		break;
	case ENA_MP_IND_TBL_GET:
		res = ena_com_indirect_table_get(ena_dev,
						 adapter->indirect_table);
		break;
	case ENA_MP_IND_TBL_SET:
		res = ena_com_indirect_table_set(ena_dev);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unknown request type %d\n", req->type);
		res = -EINVAL;
		break;
	}

end:
	/* Save processing result in the reply */
	rsp->result = res;
	/* Return just IPC processing status */
	return rte_mp_reply(&mp_rsp, peer);
}
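
/*
 * Primary/secondary split: admin-queue operations can only be issued by the
 * primary process, so secondary processes wrap them with the ENA_PROXY()
 * macro (see ena_mp_indirect_table_set()/ena_mp_indirect_table_get() above),
 * which sends an ena_mp_body request over the rte_mp IPC channel. The handler
 * above runs in the primary, performs the requested ena_com call and sends
 * the result back via rte_mp_reply(); it is assumed to be registered with
 * rte_mp_action_register() during primary-process initialization elsewhere in
 * this file.
 */
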