/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#ifndef _ENA_ETHDEV_H_
#define _ENA_ETHDEV_H_

#include <rte_cycles.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_timer.h>

#include "ena_com.h"

#define ENA_REGS_BAR 0
#define ENA_MEM_BAR 2

#define ENA_MAX_NUM_QUEUES 128
#define ENA_MIN_FRAME_LEN 64
#define ENA_NAME_MAX_LEN 20
#define ENA_PKT_MAX_BUFS 17
#define ENA_RX_BUF_MIN_SIZE 1400
#define ENA_DEFAULT_RING_SIZE 1024

#define ENA_MIN_MTU 128

#define ENA_MMIO_DISABLE_REG_READ BIT(0)

#define ENA_WD_TIMEOUT_SEC 3
#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())

/* While processing submitted and completed descriptors (Rx and Tx path
 * respectively) in a loop, it is desired to:
 *  - perform batch submissions while populating the submission queue
 *  - avoid blocking transmission of other packets during the cleanup phase
 * Hence the refill threshold is 1/8 of the ring size, capped at
 * ENA_REFILL_THRESH_PACKET for very big rings - like 8k Rx rings.
 */
#define ENA_REFILL_THRESH_DIVIDER 8
#define ENA_REFILL_THRESH_PACKET 256

/* Advance a ring index with wrap-around; assumes a power-of-two ring size,
 * so the mask is (ring_size - 1).
 */
#define ENA_IDX_NEXT_MASKED(idx, mask) (((idx) + 1) & (mask))
#define ENA_IDX_ADD_MASKED(idx, n, mask) (((idx) + (n)) & (mask))

struct ena_adapter;

enum ena_ring_type {
	ENA_RING_TYPE_RX = 1,
	ENA_RING_TYPE_TX = 2,
};

struct ena_tx_buffer {
	struct rte_mbuf *mbuf;
	unsigned int tx_descs;
	unsigned int num_of_bufs;
	struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
};

/* Rx buffer holds only a pointer to the mbuf - may be expanded in the future */
struct ena_rx_buffer {
	struct rte_mbuf *mbuf;
	struct ena_com_buf ena_buf;
};

struct ena_calc_queue_size_ctx {
	struct ena_com_dev_get_features_ctx *get_feat_ctx;
	struct ena_com_dev *ena_dev;
	u32 max_rx_queue_size;
	u32 max_tx_queue_size;
	u16 max_tx_sgl_size;
	u16 max_rx_sgl_size;
};

struct ena_stats_tx {
	u64 cnt;
	u64 bytes;
	u64 prepare_ctx_err;
	u64 linearize;
	u64 linearize_failed;
	u64 tx_poll;
	u64 doorbells;
	u64 bad_req_id;
	u64 available_desc;
};

struct ena_stats_rx {
	u64 cnt;
	u64 bytes;
	u64 refill_partial;
	u64 bad_csum;
	u64 mbuf_alloc_fail;
	u64 bad_desc_num;
	u64 bad_req_id;
};

struct ena_ring {
	u16 next_to_use;
	u16 next_to_clean;

	enum ena_ring_type type;
	enum ena_admin_placement_policy_type tx_mem_queue_type;
	/* Holds the empty requests for TX/RX out-of-order (OOO) completions */
	union {
		uint16_t *empty_tx_reqs;
		uint16_t *empty_rx_reqs;
	};

	union {
		struct ena_tx_buffer *tx_buffer_info; /* context of Tx packets */
		struct ena_rx_buffer *rx_buffer_info; /* context of Rx packets */
	};
	struct rte_mbuf **rx_refill_buffer;
	unsigned int ring_size; /* number of tx/rx_buffer_info entries */
	unsigned int size_mask;

	struct ena_com_io_cq *ena_com_io_cq;
	struct ena_com_io_sq *ena_com_io_sq;

	struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]
		__rte_cache_aligned;

	struct rte_mempool *mb_pool;
	unsigned int port_id;
	unsigned int id;
	/* Max header length the PMD can push to the device for LLQ */
	uint8_t tx_max_header_size;
	int configured;

	uint8_t *push_buf_intermediate_buf;

	struct ena_adapter *adapter;
	uint64_t offloads;
	u16 sgl_size;

	bool disable_meta_caching;

	union {
		struct ena_stats_rx rx_stats;
		struct ena_stats_tx tx_stats;
	};

	unsigned int numa_socket_id;
} __rte_cache_aligned;
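
/* Illustrative sketch, not part of the driver: how the refill threshold
 * described above is derived from the ring size. The helper name is
 * hypothetical; the actual computation lives on the Rx path in
 * ena_ethdev.c. Assumes RTE_MIN() is available through the rte_* headers
 * already included here.
 */
static inline unsigned int
ena_example_rx_refill_threshold(const struct ena_ring *rx_ring)
{
	/* Refill in batches of 1/8 of the ring size, capped at
	 * ENA_REFILL_THRESH_PACKET for very big (e.g. 8k) Rx rings.
	 */
	return RTE_MIN(rx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER,
		(unsigned int)ENA_REFILL_THRESH_PACKET);
}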
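
/* Illustrative sketch, not part of the driver: advancing next_to_use with
 * the masked-index macros. The helper name is hypothetical. Because the
 * ring size is a power of two, size_mask == ring_size - 1 and the bitwise
 * AND implements the wrap-around without a modulo.
 */
static inline u16
ena_example_advance_next_to_use(struct ena_ring *ring)
{
	ring->next_to_use = ENA_IDX_NEXT_MASKED(ring->next_to_use,
		ring->size_mask);
	return ring->next_to_use;
}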
enum ena_adapter_state {
	ENA_ADAPTER_STATE_FREE = 0,
	ENA_ADAPTER_STATE_INIT = 1,
	ENA_ADAPTER_STATE_RUNNING = 2,
	ENA_ADAPTER_STATE_STOPPED = 3,
	ENA_ADAPTER_STATE_CONFIG = 4,
	ENA_ADAPTER_STATE_CLOSED = 5,
};

struct ena_driver_stats {
	rte_atomic64_t ierrors;
	rte_atomic64_t oerrors;
	rte_atomic64_t rx_nombuf;
	u64 rx_drops;
};

struct ena_stats_dev {
	u64 wd_expired;
	u64 dev_start;
	u64 dev_stop;
	/*
	 * Tx drops cannot be reported as a regular driver statistic, because
	 * the DPDK rte_eth_stats structure does not provide a suitable field
	 * for it. As a workaround, it is published as an extended statistic.
	 */
	u64 tx_drops;
};

struct ena_stats_eni {
	/*
	 * The number of packets shaped due to the inbound aggregate BW
	 * allowance being exceeded
	 */
	uint64_t bw_in_allowance_exceeded;
	/*
	 * The number of packets shaped due to the outbound aggregate BW
	 * allowance being exceeded
	 */
	uint64_t bw_out_allowance_exceeded;
	/* The number of packets shaped due to the PPS allowance being exceeded */
	uint64_t pps_allowance_exceeded;
	/*
	 * The number of packets shaped due to the connection tracking
	 * allowance being exceeded, leading to failure to establish
	 * new connections
	 */
	uint64_t conntrack_allowance_exceeded;
	/*
	 * The number of packets shaped due to the link-local packet rate
	 * allowance being exceeded
	 */
	uint64_t linklocal_allowance_exceeded;
};

struct ena_offloads {
	bool tso4_supported;
	bool tx_csum_supported;
	bool rx_csum_supported;
};

/* Board-specific private data structure */
struct ena_adapter {
	/* OS-defined structs */
	struct rte_pci_device *pdev;
	struct rte_eth_dev_data *rte_eth_dev_data;
	struct rte_eth_dev *rte_dev;

	struct ena_com_dev ena_dev __rte_cache_aligned;

	/* TX */
	struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
	u32 max_tx_ring_size;
	u16 max_tx_sgl_size;

	/* RX */
	struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
	u32 max_rx_ring_size;
	u16 max_rx_sgl_size;

	u32 max_num_io_queues;
	u16 max_mtu;
	struct ena_offloads offloads;

	/* The admin queue is not protected against concurrent access and is
	 * used to retrieve statistics from the device. As there is no
	 * guarantee that the application won't request statistics from
	 * multiple threads, it is safer to serialize access with this lock
	 * to avoid admin queue failure.
	 */
	rte_spinlock_t admin_lock;

	int id_number;
	char name[ENA_NAME_MAX_LEN];
	u8 mac_addr[RTE_ETHER_ADDR_LEN];

	void *regs;
	void *dev_mem_base;

	struct ena_driver_stats *drv_stats;
	enum ena_adapter_state state;

	uint64_t tx_supported_offloads;
	uint64_t tx_selected_offloads;
	uint64_t rx_supported_offloads;
	uint64_t rx_selected_offloads;

	bool link_status;

	enum ena_regs_reset_reason_types reset_reason;

	struct rte_timer timer_wd;
	uint64_t timestamp_wd;
	uint64_t keep_alive_timeout;

	struct ena_stats_dev dev_stats;
	struct ena_stats_eni eni_stats;

	bool trigger_reset;

	bool wd_state;

	bool use_large_llq_hdr;
};

#endif /* _ENA_ETHDEV_H_ */
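
/* Illustrative sketch, not compiled (hence the #if 0): how the admin_lock
 * above is meant to serialize admin queue access when several threads may
 * request device statistics at once. The helper name is hypothetical;
 * ena_com_get_eni_stats() is used here only as an example of an admin
 * queue command.
 */
#if 0
static inline int
ena_example_copy_eni_stats(struct ena_adapter *adapter,
	struct ena_admin_eni_stats *stats)
{
	int rc;

	/* The admin queue itself is not safe for concurrent callers. */
	rte_spinlock_lock(&adapter->admin_lock);
	rc = ena_com_get_eni_stats(&adapter->ena_dev, stats);
	rte_spinlock_unlock(&adapter->admin_lock);

	return rc;
}
#endif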