/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include "ena_eth_com.h"

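/* Return a pointer to the RX completion descriptor at the current CQ head,
 * or NULL if the device has not written it yet (phase bit mismatch). The
 * dma_rmb() orders the phase check before any read of the remaining fields.
 */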
struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = ENA_FIELD_GET(READ_ONCE32(cdesc->status),
				   ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK,
				   ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT);

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

void ena_com_dump_single_rx_cdesc(struct ena_com_io_cq *io_cq,
				  struct ena_eth_io_rx_cdesc_base *desc)
{
	if (desc) {
		uint32_t *desc_arr = (uint32_t *)desc;

		ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
			    "RX descriptor value[0x%08x 0x%08x 0x%08x 0x%08x] phase[%u] first[%u] last[%u] MBZ7[%u] MBZ17[%u]\n",
			    desc_arr[0], desc_arr[1], desc_arr[2], desc_arr[3],
			    ENA_FIELD_GET(desc->status, (uint32_t)ENA_ETH_IO_RX_DESC_PHASE_MASK,
					  0),
			    ENA_FIELD_GET(desc->status, (uint32_t)ENA_ETH_IO_RX_DESC_FIRST_MASK,
					  ENA_ETH_IO_RX_DESC_FIRST_SHIFT),
			    ENA_FIELD_GET(desc->status, (uint32_t)ENA_ETH_IO_RX_DESC_LAST_MASK,
					  ENA_ETH_IO_RX_DESC_LAST_SHIFT),
			    ENA_FIELD_GET(desc->status,
					  (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_MBZ7_MASK,
					  ENA_ETH_IO_RX_CDESC_BASE_MBZ7_SHIFT),
			    ENA_FIELD_GET(desc->status,
					  (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_MBZ17_MASK,
					  ENA_ETH_IO_RX_CDESC_BASE_MBZ17_SHIFT));
	}
}

void ena_com_dump_single_tx_cdesc(struct ena_com_io_cq *io_cq,
				  struct ena_eth_io_tx_cdesc *desc)
{
	if (desc) {
		uint32_t *desc_arr = (uint32_t *)desc;

		ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
			    "TX descriptor value[0x%08x 0x%08x] phase[%u] MBZ6[%u]\n",
			    desc_arr[0], desc_arr[1],
			    ENA_FIELD_GET(desc->flags, (uint32_t)ENA_ETH_IO_TX_CDESC_PHASE_MASK,
					  0),
			    ENA_FIELD_GET(desc->flags, (uint32_t)ENA_ETH_IO_TX_CDESC_MBZ6_MASK,
					  ENA_ETH_IO_TX_CDESC_MBZ6_SHIFT));
	}
}

struct ena_eth_io_tx_cdesc *ena_com_tx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);

	return (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		 idx * io_cq->cdesc_entry_size_in_bytes);
}

static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

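/* Copy a completed LLQ bounce buffer (one descriptor list entry) to device
 * memory at the current SQ tail, then advance the tail and flip the phase
 * bit on wrap-around. Fails with ENA_COM_NO_SPACE when the TX burst budget
 * is exhausted.
 */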
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Error: trying to send more packets than tx burst allows\n");
			return ENA_COM_NO_SPACE;
		}

		io_sq->entries_in_tx_burst_left--;
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Decreasing entries_in_tx_burst_left of queue %u to %u\n",
			    io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
				bounce_buffer,
				llq_info->desc_list_entry_size);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

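/* In LLQ mode, copy the packet header into the current bounce buffer right
 * after the reserved descriptor slots. No-op for host-memory placement.
 * Returns ENA_COM_FAULT if the header does not fit in the LLQ entry or the
 * bounce buffer is missing.
 */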
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Trying to write header larger than llq entry can accommodate\n");
		return ENA_COM_FAULT;
	}

	if (unlikely(!bounce_buffer)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Bounce buffer is NULL\n");
		return ENA_COM_FAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

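/* Return the next free descriptor slot inside the current LLQ bounce buffer
 * and advance the per-line bookkeeping, or NULL if no bounce buffer is set.
 */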
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

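/* Flush a partially filled LLQ bounce buffer to the device and switch to a
 * fresh, zeroed one. No-op for host-memory placement or when the current
 * buffer holds no descriptors.
 */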
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return ENA_COM_OK;

	/* bounce buffer was used, so write it and get a new one */
	if (likely(pkt_ctrl->idx)) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return ENA_COM_OK;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

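/* Advance the LLQ SQ past the current descriptor: when the current bounce
 * buffer line is full, write it to the device and start a new zeroed line,
 * recomputing how many descriptors the next line can hold.
 */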
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return ENA_COM_OK;
}

static int ena_com_sq_update_reqular_queue_tail(struct ena_com_io_sq *io_sq)
{
	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	return ena_com_sq_update_reqular_queue_tail(io_sq);
}

struct ena_eth_io_rx_cdesc_base *
ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		 idx * io_cq->cdesc_entry_size_in_bytes);
}

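/* Walk the RX completion descriptors of a single packet, starting at the CQ
 * head, until the "last" bit is seen. On success *first_cdesc_idx and
 * *num_descs describe the packet; if the packet is not fully completed yet,
 * *num_descs is set to 0 and the partial count is saved for the next call.
 * Returns ENA_COM_FAULT on a misplaced "first" bit or on set must-be-zero
 * (MBZ) bits when the device advertises that capability.
 */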
static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx,
				    u16 *num_descs)
{
	struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
	u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u32 last = 0;

	do {
		u32 status;

		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;
		status = READ_ONCE32(cdesc->status);

		if (unlikely(ENA_FIELD_GET(status,
					   ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK,
					   ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT) &&
			     count != 0)) {
			ena_trc_err(dev,
				    "First bit is on in descriptor #%u on q_id: %u, req_id: %u\n",
				    count, io_cq->qid, cdesc->req_id);
			return ENA_COM_FAULT;
		}

		if (unlikely((status & (ENA_ETH_IO_RX_CDESC_BASE_MBZ7_MASK |
					ENA_ETH_IO_RX_CDESC_BASE_MBZ17_MASK)) &&
			     ena_com_get_cap(dev, ENA_ADMIN_CDESC_MBZ))) {
			ena_trc_err(dev,
				    "Corrupted RX descriptor #%u on q_id: %u, req_id: %u\n",
				    count, io_cq->qid, cdesc->req_id);
			return ENA_COM_FAULT;
		}

		ena_com_cq_inc_head(io_cq);
		count++;
		last = ENA_FIELD_GET(status,
				     ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK,
				     ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT);
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		*num_descs = count;
		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
			    "ENA q_id: %u packets were completed. first desc idx %u descs# %u\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count = count;
		*num_descs = 0;
	}

	return ENA_COM_OK;
}

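/* Build a TX metadata descriptor (MSS, L3/L4 header layout) at the current
 * SQ tail and advance the tail.
 */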
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return ENA_COM_FAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |=
		ENA_FIELD_PREP((u32)ena_meta->mss,
			       ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK,
			       ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT);
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |=
		ENA_FIELD_PREP((ena_meta->mss >> 10),
			       ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK,
			       ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT);

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |=
		ENA_FIELD_PREP((u32)io_sq->phase,
			       ENA_ETH_IO_TX_META_DESC_PHASE_MASK,
			       ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT);

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |=
		ENA_FIELD_PREP(ena_meta->l3_hdr_offset,
			       ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK,
			       ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT);

	meta_desc->word2 |=
		ENA_FIELD_PREP((u32)ena_meta->l4_hdr_len,
			       ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK,
			       ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT);

	return ena_com_sq_update_tail(io_sq);
}

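/* Emit a metadata descriptor when needed: always when meta caching is
 * disabled, otherwise only when the metadata differs from the cached copy
 * (which is then refreshed). *have_meta reports whether one was written.
 */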
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When disable meta caching is set, don't bother to save the meta and
	 * compare it to the stored version, just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return ENA_COM_OK;
}

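/* Extract the L3/L4 protocol, checksum, hash and fragmentation flags from an
 * RX completion descriptor status word into the RX context.
 */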
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		ENA_FIELD_GET(cdesc->status,
			      ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK,
			      ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT);
	ena_rx_ctx->l3_csum_err =
		!!(ENA_FIELD_GET(cdesc->status,
				 ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK,
				 ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT));
	ena_rx_ctx->l4_csum_err =
		!!(ENA_FIELD_GET(cdesc->status,
				 ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK,
				 ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT));
	ena_rx_ctx->l4_csum_checked =
		!!(ENA_FIELD_GET(cdesc->status,
				 ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK,
				 ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT));
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		ENA_FIELD_GET(cdesc->status,
			      ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK,
			      ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT);
}

/*****************************************************************************/
/*****************************     API      *********************************/
/*****************************************************************************/

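/* Build the submission descriptors for one TX packet: an optional metadata
 * descriptor, the push header (LLQ only) and one buffer descriptor per
 * ena_buf. On success *nb_hw_desc returns how many SQ entries were consumed.
 */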
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
		 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");

	/* num_bufs + 1 for a potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Header size is too large %u max header: %u\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV
		     && !buffer_to_push)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Push header wasn't provided in LLQ mode\n");
		return ENA_COM_INVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (unlikely(rc))
			ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
				    "Failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ENA_FIELD_PREP((u32)header_len,
						    ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK,
						    ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT);

	desc->len_ctrl |= ENA_FIELD_PREP((u32)io_sq->phase,
					 ENA_ETH_IO_TX_DESC_PHASE_MASK,
					 ENA_ETH_IO_TX_DESC_PHASE_SHIFT);

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= ENA_FIELD_PREP((u32)ena_tx_ctx->req_id,
					  ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK,
					  ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT);

	desc->meta_ctrl |= ENA_FIELD_PREP(ena_tx_ctx->df,
					  ENA_ETH_IO_TX_DESC_DF_MASK,
					  ENA_ETH_IO_TX_DESC_DF_SHIFT);

	/* Bits 10-15 */
	desc->len_ctrl |= ENA_FIELD_PREP((ena_tx_ctx->req_id >> 10),
					 ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK,
					 ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT);

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= ENA_FIELD_PREP(ena_tx_ctx->tso_enable,
						  ENA_ETH_IO_TX_DESC_TSO_EN_MASK,
						  ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT);
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= ENA_FIELD_PREP(ena_tx_ctx->l4_proto,
						  ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK,
						  ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT);
		desc->meta_ctrl |= ENA_FIELD_PREP(ena_tx_ctx->l3_csum_enable,
						  ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK,
						  ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT);
		desc->meta_ctrl |= ENA_FIELD_PREP(ena_tx_ctx->l4_csum_enable,
						  ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK,
						  ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT);
		desc->meta_ctrl |= ENA_FIELD_PREP(ena_tx_ctx->l4_csum_partial,
						  ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK,
						  ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT);
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first buffer shares its descriptor with the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
					    "Failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return ENA_COM_FAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ENA_FIELD_PREP((u32)io_sq->phase,
							 ENA_ETH_IO_TX_DESC_PHASE_MASK,
							 ENA_ETH_IO_TX_DESC_PHASE_SHIFT);
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
			    "Failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

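/* Fetch one received packet from the completion queue: collect its
 * completion descriptors, fill ena_rx_ctx->ena_bufs with the length and
 * req_id of each buffer, advance the RX SQ head accounting and parse the
 * offload flags from the last descriptor. ena_rx_ctx->descs is 0 when no
 * complete packet is available.
 */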
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;
	int rc;

	ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 ena_com_io_cq_to_ena_dev(io_cq), "wrong Q type");

	rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc);
	if (unlikely(rc != ENA_COM_OK))
		return ENA_COM_FAULT;

	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "Fetch rx packet: queue %u completed desc: %u\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
			    "Too many RX cdescs (%u) > MAX(%u)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return ENA_COM_EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "Updating Queue %u, SQ head to: %u\n",
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last descriptor of the packet */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
		    "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);

	ena_rx_ctx->descs = nb_hw_desc;

	return 0;
}

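/* Post a single RX buffer to the RX submission queue with the given req_id.
 * Returns ENA_COM_NO_SPACE when the queue is full.
 */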
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return ENA_COM_NO_SPACE;

	/* virt_addr allocation success is checked before calling this function */
	desc = get_sq_desc_regular_queue(io_sq);

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
		     ENA_FIELD_GET(io_sq->phase,
				   ENA_ETH_IO_RX_DESC_PHASE_MASK,
				   ENA_ZERO_SHIFT);

	desc->req_id = req_id;

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "Adding single RX desc, Queue: %u, req_id: %u\n",
		    io_sq->qid, req_id);

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_reqular_queue_tail(io_sq);
}

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}