/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#if defined(__cplusplus)
extern "C" {
#endif
#include "ena_com.h"

/* we allow 2 DMA descriptors per LLQ entry */
#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE	(2 * sizeof(struct ena_eth_io_tx_desc))
#define ENA_LLQ_HEADER			(128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
#define ENA_LLQ_LARGE_HEADER		(256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
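/* Worked sizes (informational; assumes the common 16-byte TX descriptor
 * layout): the two reserved descriptors take 32 bytes per LLQ entry,
 * leaving 96 bytes of push-header space in a 128-byte entry and 224 bytes
 * in a 256-byte entry.
 */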

void ena_com_dump_single_rx_cdesc(struct ena_com_io_cq *io_cq,
				  struct ena_eth_io_rx_cdesc_base *desc);
void ena_com_dump_single_tx_cdesc(struct ena_com_io_cq *io_cq,
				  struct ena_eth_io_tx_cdesc *desc);
struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(struct ena_com_io_cq *io_cq);
struct ena_eth_io_rx_cdesc_base *ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx);
struct ena_eth_io_tx_cdesc *ena_com_tx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx);

struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, the header buffer is pushed to the device memory space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For a regular queue, indicates the size of the header.
	 * For LLQ, indicates the size of the pushed buffer.
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};
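/* Usage sketch (illustrative only; the field values and ordering below are
 * assumptions, not requirements of this header): a driver typically zeroes
 * the context, points ena_bufs at its own buffer array, and fills the
 * identifying fields before calling ena_com_prepare_tx().
 *
 *	struct ena_com_tx_ctx tx_ctx = {0};
 *
 *	tx_ctx.ena_bufs = bufs;		// driver-owned ena_com_buf array
 *	tx_ctx.num_bufs = nb_frags;
 *	tx_ctx.req_id = next_req_id;
 *	tx_ctx.header_len = hdr_len;	// pushed-header size when using LLQ
 */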

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	u16 max_bufs;
	u8 pkt_offset;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);

static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}

static inline u16 ena_com_used_q_entries(struct ena_com_io_sq *io_sq)
{
	return io_sq->tail - io_sq->next_to_comp;
}

static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
	return io_sq->q_depth - 1 - ena_com_used_q_entries(io_sq);
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_q_entries(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. To reduce the
	 * calculation overhead, just subtract 2 lines from the free descs
	 * (one for the header line and one to compensate for the rounding
	 * down of the division).
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_q_entries(io_sq) > temp;
}
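/* Illustrative TX-path check (a sketch under the assumption that the caller
 * maps one buffer per descriptor; queue-stop and retry handling are the
 * caller's responsibility):
 *
 *	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_descs)))
 *		return ENA_COM_NO_MEM;	// stop the queue / retry later
 */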

static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	u16 num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	if (llq_info->disable_meta_caching ||
	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "Queue: %d num_descs: %d num_entries_needed: %d\n",
		    io_sq->qid, num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}
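/* Sketch of the intended interplay (assumed typical usage, not mandated by
 * this header): when the remaining burst credit cannot cover the next
 * packet, the driver rings the doorbell first so the credit is reset
 * before the packet is prepared.
 *
 *	if (ena_com_is_doorbell_needed(io_sq, &tx_ctx))
 *		ena_com_write_sq_doorbell(io_sq);
 *
 *	rc = ena_com_prepare_tx(io_sq, &tx_ctx, &nb_hw_desc);
 */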

static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "Write submission queue doorbell for queue: %d tail: %d\n",
		    io_sq->qid, tail);

	ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Reset available entries in tx burst for queue %d to %d\n",
			    io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (ENA_FIELD_GET(numa_node,
					   ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK,
					   ENA_ZERO_SHIFT))
			    | ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}
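/* Note (based on typical driver usage, not enforced here): 'elem' is a
 * count of descriptors, not packets; callers usually accumulate the
 * descriptors reclaimed over several completions and ack them in one call.
 */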

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;
	u8 flags;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	flags = READ_ONCE8(cdesc->flags);

	/* When the current completion descriptor's phase doesn't match the
	 * expected phase, the device hasn't updated this completion yet.
	 */
	cdesc_phase = ENA_FIELD_GET(flags,
				    ENA_ETH_IO_TX_CDESC_PHASE_MASK,
				    ENA_ZERO_SHIFT);
	if (cdesc_phase != expected_phase)
		return ENA_COM_TRY_AGAIN;

	if (unlikely((flags & ENA_ETH_IO_TX_CDESC_MBZ6_MASK) &&
		     ena_com_get_cap(dev, ENA_ADMIN_CDESC_MBZ))) {
		ena_trc_err(dev,
			    "Corrupted TX descriptor on q_id: %d, req_id: %u\n",
			    io_cq->qid, cdesc->req_id);
		return ENA_COM_FAULT;
	}

	dma_rmb();

	*req_id = READ_ONCE16(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
			    "Invalid req id %d\n", cdesc->req_id);
		return ENA_COM_INVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}
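/* TX-completion sketch (illustrative; the per-request bookkeeping and the
 * descs_used_by() helper below are hypothetical, owned by the caller):
 *
 *	u16 req_id, total_done = 0;
 *
 *	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0) {
 *		// release the buffers tracked under req_id ...
 *		total_done += descs_used_by(req_id);	// hypothetical helper
 *	}
 *	ena_com_comp_ack(io_sq, total_done);
 */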

#if defined(__cplusplus)
}
#endif
#endif /* ENA_ETH_COM_H_ */