/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#if defined(__cplusplus)
extern "C" {
#endif
#include "ena_com.h"

/* we allow 2 DMA descriptors per LLQ entry */
#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE	(2 * sizeof(struct ena_eth_io_tx_desc))
#define ENA_LLQ_HEADER		(128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
#define ENA_LLQ_LARGE_HEADER	(256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
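
/* For reference: assuming the canonical 16-byte struct ena_eth_io_tx_desc
 * layout (an assumption, not enforced here), the descriptor chunk is
 * 32 bytes, leaving 96 bytes of header room in a regular 128-byte LLQ
 * entry and 224 bytes in a large 256-byte entry.
 */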

void ena_com_dump_single_rx_cdesc(struct ena_com_io_cq *io_cq,
				  struct ena_eth_io_rx_cdesc_base *desc);
void ena_com_dump_single_tx_cdesc(struct ena_com_io_cq *io_cq,
				  struct ena_eth_io_tx_cdesc *desc);
struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(struct ena_com_io_cq *io_cq);
struct ena_eth_io_rx_cdesc_base *ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx);
struct ena_eth_io_tx_cdesc *ena_com_tx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx);

struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For a regular queue, indicates the size of the header.
	 * For LLQ, indicates the size of the pushed buffer.
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	u16 max_bufs;
	u8 pkt_offset;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

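/* Illustrative TX submission sequence (a sketch of how a caller may combine
 * these helpers; buffer setup, mapping, and error handling are elided, and
 * bufs/nb_bufs/id are hypothetical caller-side variables):
 *
 *	struct ena_com_tx_ctx ctx = { 0 };
 *	int nb_hw_desc, rc;
 *
 *	ctx.ena_bufs = bufs;
 *	ctx.num_bufs = nb_bufs;
 *	ctx.req_id = id;
 *	if (ena_com_is_doorbell_needed(io_sq, &ctx))
 *		ena_com_write_sq_doorbell(io_sq);
 *	rc = ena_com_prepare_tx(io_sq, &ctx, &nb_hw_desc);
 */
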
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);

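/* Unmask interrupts for the IO completion queue by writing the interrupt
 * control value (unmask bit plus moderation intervals) to its unmask
 * register.
 */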
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}

static inline u16 ena_com_used_q_entries(struct ena_com_io_sq *io_sq)
{
	return io_sq->tail - io_sq->next_to_comp;
}

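/* One ring slot is deliberately left unused so a completely full SQ can be
 * distinguished from an empty one, hence the q_depth - 1 below.
 */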
static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
	return io_sq->q_depth - 1 - ena_com_used_q_entries(io_sq);
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_q_entries(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. To reduce
	 * the calculation overhead, just subtract 2 entries from the free
	 * count (one for the header line and one to compensate for the
	 * rounding-down division).
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_q_entries(io_sq) > temp;
}
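
/* Worked example (illustrative): with descs_per_entry == 2, a request for
 * 5 buffers yields temp = 5 / 2 + 2 = 4, so the check demands at least 5
 * free entries - a deliberate over-estimate of the roughly
 * ceil(5 / 2) = 3 entries actually consumed.
 */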

static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	u16 num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	if (llq_info->disable_meta_caching ||
	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "Queue: %d num_descs: %d num_entries_needed: %d\n",
		    io_sq->qid, num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
		    "Write submission queue doorbell for queue: %d tail: %d\n",
		    io_sq->qid, tail);

	ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
			    "Reset available entries in tx burst for queue %d to %d\n",
			    io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

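/* Hint the device which NUMA node the queue is processed on, so completion
 * writes land close to the consuming CPU; a no-op when the config register
 * was not mapped.
 */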
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (ENA_FIELD_GET(numa_node,
					   ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK,
					   ENA_ZERO_SHIFT))
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}
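
/* E.g. (illustrative) with q_depth == 8: the masked head runs 0..7 and the
 * expected phase flips on every wrap, so descriptors left over from the
 * previous lap around the ring fail the phase check in the reaping helper
 * below and are not mistaken for new completions.
 */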

static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;
	u8 flags;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	flags = READ_ONCE8(cdesc->flags);

	/* When the current completion descriptor phase isn't the same as the
	 * expected, it means that the device hasn't updated
	 * this completion yet.
	 */
	cdesc_phase = ENA_FIELD_GET(flags,
				    ENA_ETH_IO_TX_CDESC_PHASE_MASK,
				    ENA_ZERO_SHIFT);
	if (cdesc_phase != expected_phase)
		return ENA_COM_TRY_AGAIN;

	if (unlikely((flags & ENA_ETH_IO_TX_CDESC_MBZ6_MASK) &&
		      ena_com_get_cap(dev, ENA_ADMIN_CDESC_MBZ))) {
		ena_trc_err(dev,
			    "Corrupted TX descriptor on q_id: %d, req_id: %u\n",
			    io_cq->qid, cdesc->req_id);
		return ENA_COM_FAULT;
	}

	dma_rmb();

	*req_id = READ_ONCE16(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
			    "Invalid req id %d\n", cdesc->req_id);
		return ENA_COM_INVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}
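
/* Typical completion-reaping loop (a sketch; a real cleanup path also frees
 * the buffers tracked under req_id, and descs_used is a hypothetical
 * per-request descriptor count kept by the caller):
 *
 *	u16 req_id;
 *
 *	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0) {
 *		... release the packet recorded under req_id ...
 *		ena_com_comp_ack(io_sq, descs_used);
 *	}
 */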

#if defined(__cplusplus)
}
#endif
#endif /* ENA_ETH_COM_H_ */