/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include "ena_eth_com.h"

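/* Return the Rx completion descriptor at the CQ head if its phase bit
 * matches the expected phase, or NULL when no new descriptor is available.
 * A dma_rmb() ensures the rest of the descriptor is read only after the
 * phase bit has been validated.
 */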
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

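/* Return a pointer to the SQ descriptor at the current (masked) tail of a
 * host-memory (regular placement) submission queue.
 */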
static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

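/* Copy a completed bounce buffer line to the device memory of an LLQ
 * submission queue, accounting for the tx burst budget when it is
 * supported, and advance the SQ tail (toggling the phase on wrap around).
 */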
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
						     u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;

	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			ena_trc_err("Error: trying to send more packets than tx burst allows\n");
			return ENA_COM_NO_SPACE;
		}

		io_sq->entries_in_tx_burst_left--;
		ena_trc_dbg("decreasing entries_in_tx_burst_left of queue %d to %d\n",
			    io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
				bounce_buffer,
				llq_info->desc_list_entry_size);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

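/* Copy the packet header into the current bounce buffer, right after the
 * descriptors that precede it. Only relevant for LLQ (device placement)
 * queues; a no-op for host placement.
 */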
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
						 u8 *header_src,
						 u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
		ena_trc_err("trying to write header larger than llq entry can accommodate\n");
		return ENA_COM_FAULT;
	}

	if (unlikely(!bounce_buffer)) {
		ena_trc_err("bounce buffer is NULL\n");
		return ENA_COM_FAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

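/* Return the next free descriptor slot inside the current bounce buffer of
 * an LLQ submission queue and account for it in the packet control.
 */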
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		ena_trc_err("bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

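/* Flush the current bounce buffer to the device if it holds any descriptors,
 * then reset the packet control for the next bounce buffer.
 * A no-op for host placement queues.
 */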
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return ENA_COM_OK;

	/* bounce buffer was used, so write it and get a new one */
	if (pkt_ctrl->idx) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err("failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return ENA_COM_OK;
}

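/* Return the next SQ descriptor, from the bounce buffer for LLQ queues or
 * directly from host memory for regular queues.
 */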
static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

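/* Advance an LLQ submission queue after a descriptor was written: when the
 * current bounce buffer line is full, write it to the device and start a
 * fresh one.
 */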
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc)) {
			ena_trc_err("failed to write bounce buffer to device\n");
			return rc;
		}

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return ENA_COM_OK;
}

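/* Advance the SQ tail after a descriptor was written, toggling the phase
 * bit on wrap around. LLQ queues are handled by their own helper.
 */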
static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return ENA_COM_OK;
}

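/* Translate an Rx completion descriptor index into a pointer inside the CQ
 * descriptor ring.
 */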
static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		idx * io_cq->cdesc_entry_size_in_bytes);
}

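/* Count the completion descriptors that belong to the next received packet.
 * Returns the number of descriptors and their start index once the LAST
 * descriptor has been seen, or 0 if the packet is not fully completed yet
 * (in which case the partial count is accumulated for the next call).
 */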
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
					   u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE32(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			    io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

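/* Build an extended Tx meta descriptor (MSS, L3/L4 header lengths and
 * offsets) in the next SQ slot and advance the queue tail.
 */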
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return ENA_COM_FAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

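/* Create a Tx meta descriptor when needed: always when meta caching is
 * disabled, otherwise only when the meta changed compared to the cached
 * copy. *have_meta reports whether a meta descriptor was actually created.
 */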
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When meta caching is disabled, don't bother saving the meta and
	 * comparing it to the stored version; just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		if (unlikely(!ena_tx_ctx->meta_valid))
			return ENA_COM_INVAL;

		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	} else if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	} else {
		*have_meta = false;
		return ENA_COM_OK;
	}
}

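/* Extract the Rx offload information (L3/L4 protocols, checksum results,
 * hash and fragmentation flag) from a completion descriptor into the Rx
 * context.
 */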
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
					struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash: %u frag: %d cdesc_status: %x\n",
		    ena_rx_ctx->l3_proto,
		    ena_rx_ctx->l4_proto,
		    ena_rx_ctx->l3_csum_err,
		    ena_rx_ctx->l4_csum_err,
		    ena_rx_ctx->hash,
		    ena_rx_ctx->frag,
		    cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

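/* Prepare a Tx packet for transmission: validate the available SQ space and
 * header size, push the header for LLQ queues, create a meta descriptor if
 * needed and then one Tx descriptor per buffer. On success *nb_hw_desc
 * holds the number of hardware descriptors that were consumed.
 */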
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
		 "wrong Q type");

	/* num_bufs + 1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		ena_trc_dbg("Not enough space in the tx queue\n");
		return ENA_COM_NO_MEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		ena_trc_err("header size is too large %d max header: %d\n",
			    header_len, io_sq->tx_max_header_size);
		return ENA_COM_INVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV
		     && !buffer_to_push)) {
		ena_trc_err("push header wasn't provided on LLQ mode\n");
		return ENA_COM_INVAL;
	}

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		ena_trc_err("failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		if (rc)
			ena_trc_err("failed to write buffers to LLQ\n");
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have a meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc)) {
				ena_trc_err("failed to update sq tail\n");
				return rc;
			}

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return ENA_COM_FAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc)) {
		ena_trc_err("failed to update sq tail of the last descriptor\n");
		return rc;
	}

	rc = ena_com_close_bounce_buffer(io_sq);
	if (rc)
		ena_trc_err("failed when closing bounce buffer\n");

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

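/* Retrieve the next received packet from the completion queue. Fills the
 * Rx context buffers (length and req_id per descriptor) and the offload
 * flags taken from the last descriptor, and reports the number of
 * completion descriptors the packet used (0 when no packet is available).
 */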
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;

	ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
		    io_cq->qid, nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
			    nb_hw_desc, ena_rx_ctx->max_bufs);
		return ENA_COM_NO_SPACE;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return ENA_COM_EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		    io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}

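/* Post a single Rx buffer to the Rx submission queue with the given req_id
 * and advance the queue tail.
 */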
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
		 "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return ENA_COM_NO_SPACE;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return ENA_COM_FAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		ENA_ETH_IO_RX_DESC_LAST_MASK |
		(io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
		ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

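/* Return true when the completion queue has no new descriptor to process. */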
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}