/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#if defined(__cplusplus)
extern "C" {
#endif
#include "ena_com.h"

/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4

struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For regular queue, indicate the size of the header
	 * For LLQ, indicate the size of the pushed buffer
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	int max_bufs;
	u8 pkt_offset;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
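
/* Illustrative TX flow (a minimal sketch, not part of this header's API):
 * a hypothetical driver ring "tx_ring" holding an io_sq/io_cq pair and a
 * per-request "tx_info" table would typically combine the calls declared
 * above roughly as follows. All driver-side names here are assumptions.
 *
 *	struct ena_com_tx_ctx tx_ctx = {0};
 *	int nb_hw_desc, rc;
 *	u16 req_id;
 *
 *	tx_ctx.ena_bufs = tx_ring->bufs;	// hypothetical buffer array
 *	tx_ctx.num_bufs = nb_segs;
 *	tx_ctx.req_id = sw_req_id;
 *	tx_ctx.header_len = hdr_len;
 *
 *	// Leave room for the data descriptors plus a possible meta descriptor.
 *	if (!ena_com_sq_have_enough_space(tx_ring->io_sq, nb_segs + 1))
 *		return rc_no_space;		// defer until completions are reaped
 *
 *	rc = ena_com_prepare_tx(tx_ring->io_sq, &tx_ctx, &nb_hw_desc);
 *	if (unlikely(rc))
 *		return rc;
 *
 *	ena_com_write_sq_doorbell(tx_ring->io_sq);
 *
 *	// Later, when reaping TX completions:
 *	while (ena_com_tx_comp_req_id_get(tx_ring->io_cq, &req_id) == 0) {
 *		// release the buffer tracked under req_id (driver specific)
 *		ena_com_comp_ack(tx_ring->io_sq, tx_ring->tx_info[req_id].num_descs);
 *		ena_com_update_dev_comp_head(tx_ring->io_cq);
 *	}
 */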
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}

static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_q_entries(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. To reduce the
	 * calculation overhead, just subtract 2 lines from the free descs:
	 * one for the header line and one to compensate for the integer
	 * division rounding down.
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_q_entries(io_sq) > temp;
}
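
/* Worked example for the LLQ estimate above (illustrative numbers only):
 * with llq_info.descs_per_entry == 2 and required_buffers == 5,
 * temp = 5 / 2 + 2 = 4, so the check passes only while more than 4 queue
 * entries are free. The two extra entries cover the header line and the
 * rounding-down of the integer division, keeping the check conservative
 * without computing the exact LLQ entry layout.
 */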
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	u16 num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	if (llq_info->disable_meta_caching ||
	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	ena_trc_dbg("queue: %d num_descs: %d num_entries_needed: %d\n",
		    io_sq->qid, num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
		    io_sq->qid, tail);

	ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		ena_trc_dbg("reset available entries in tx burst for queue %d to %d\n",
			    io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	if (unlikely(io_cq->cq_head_db_reg)) {
		head = io_cq->head;
		unreported_comp = head - io_cq->last_head_update;
		need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

		if (unlikely(need_update)) {
			ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
				    io_cq->qid, head);
			ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
			io_cq->last_head_update = head;
		}
	}

	return 0;
}

static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected phase, it means that the device hasn't updated this
	 * completion yet.
	 */
	cdesc_phase = READ_ONCE16(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return ENA_COM_TRY_AGAIN;

	dma_rmb();

	*req_id = READ_ONCE16(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		ena_trc_err("Invalid req id %d\n", cdesc->req_id);
		return ENA_COM_INVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}

#if defined(__cplusplus)
}
#endif
#endif /* ENA_ETH_COM_H_ */