xref: /dpdk/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c (revision e77506397fc8005c5129e22e9e2d15d5876790fd)
1576e7721SDanylo Vodopianov /*
2576e7721SDanylo Vodopianov  * SPDX-License-Identifier: BSD-3-Clause
3576e7721SDanylo Vodopianov  * Copyright(c) 2023 Napatech A/S
4576e7721SDanylo Vodopianov  */
5576e7721SDanylo Vodopianov 
6f2a3bf9eSSerhii Iliushyk #include <rte_common.h>
7576e7721SDanylo Vodopianov #include <unistd.h>
8576e7721SDanylo Vodopianov 
9576e7721SDanylo Vodopianov #include "ntos_drv.h"
10f0fe222eSDanylo Vodopianov #include "nt_util.h"
11576e7721SDanylo Vodopianov #include "ntnic_virt_queue.h"
12576e7721SDanylo Vodopianov #include "ntnic_mod_reg.h"
13576e7721SDanylo Vodopianov #include "ntlog.h"
14576e7721SDanylo Vodopianov 
/* Alignment of each virtqueue structure within its allocation (4 KiB). */
#define STRUCT_ALIGNMENT (4 * 1024LU)
/* Size of the rxvq[]/txvq[] queue-state tables. */
#define MAX_VIRT_QUEUES 128

/* Queue index used when programming the global RX/TX control registers. */
#define LAST_QUEUE 127
#define DISABLE 0
#define ENABLE 1
/* RX avail-monitor (AM), used-writer (UW) and queue enable/disable values. */
#define RX_AM_DISABLE DISABLE
#define RX_AM_ENABLE ENABLE
#define RX_UW_DISABLE DISABLE
#define RX_UW_ENABLE ENABLE
#define RX_Q_DISABLE DISABLE
#define RX_Q_ENABLE ENABLE
/* Poll-speed values handed to set_rx_control(). */
#define RX_AM_POLL_SPEED 5
#define RX_UW_POLL_SPEED 9
#define INIT_QUEUE 1

/* TX counterparts of the RX control values above. */
#define TX_AM_DISABLE DISABLE
#define TX_AM_ENABLE ENABLE
#define TX_UW_DISABLE DISABLE
#define TX_UW_ENABLE ENABLE
#define TX_Q_DISABLE DISABLE
#define TX_Q_ENABLE ENABLE
/* Poll-speed values handed to set_tx_control(). */
#define TX_AM_POLL_SPEED 5
#define TX_UW_POLL_SPEED 8

/* Split-ring avail flag telling the device not to send interrupts. */
#define VIRTQ_AVAIL_F_NO_INTERRUPT 1

/* Per-queue logging stub - compiled out in this build. */
#define vq_log_arg(vq, format, ...)

/*
 * Packed Ring helper macros
 */
/* 1 when the queue is a packed ring, 0 for a split ring. */
#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)

/* Packed-ring descriptor flags derived from the driver's avail wrap counter. */
#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
51f0fe222eSDanylo Vodopianov 
/*
 * Advance the packed-ring avail cursor by @num entries.
 * When next_avail wraps past queue_size, the avail wrap counter is
 * toggled, as required by the packed-ring wrap-count protocol.
 */
#define inc_avail(vq, num)                                                                        \
	do {                                                                                      \
		struct nthw_virt_queue *temp_vq = (vq);                                           \
		temp_vq->next_avail += (num);                                                     \
		if (temp_vq->next_avail >= temp_vq->queue_size) {                                 \
			temp_vq->next_avail -= temp_vq->queue_size;                               \
			temp_vq->avail_wrap_count ^= 1;                                           \
		}                                                                                 \
	} while (0)

/*
 * Advance the packed-ring used cursor by @num entries, toggling the
 * used wrap counter on wrap-around (mirror of inc_avail above).
 */
#define inc_used(vq, num) do { \
	struct nthw_virt_queue *temp_vq = (vq); \
	temp_vq->next_used += (num); \
	if (temp_vq->next_used >= temp_vq->queue_size) { \
		temp_vq->next_used -= temp_vq->queue_size; \
		temp_vq->used_wrap_count ^= 1; \
	} \
} while (0)
709c2e6e75SDanylo Vodopianov 
/*
 * Split-ring "available" ring. Packed so the in-memory layout matches
 * what the device expects exactly.
 */
struct __rte_packed_begin virtq_avail {
	uint16_t flags;
	uint16_t idx;
	uint16_t ring[];	/* Queue Size */
} __rte_packed_end;

/* One entry of the split-ring "used" ring. */
struct __rte_packed_begin virtq_used_elem {
	/* Index of start of used descriptor chain. */
	uint32_t id;
	/* Total length of the descriptor chain which was used (written to) */
	uint32_t len;
} __rte_packed_end;

/* Split-ring "used" ring, written by the device side. */
struct __rte_packed_begin virtq_used {
	uint16_t flags;
	uint16_t idx;
	struct virtq_used_elem ring[];	/* Queue Size */
} __rte_packed_end;

/*
 * Byte offsets of the used and descriptor structures inside one
 * contiguous, STRUCT_ALIGNMENT-aligned virtqueue allocation
 * (computed by dbs_calc_struct_layout()).
 */
struct virtq_struct_layout_s {
	size_t used_offset;
	size_t desc_offset;
};
94e13da07fSDanylo Vodopianov 
/* Lifecycle state of an entry in the rxvq[]/txvq[] tables. */
enum nthw_virt_queue_usage {
	NTHW_VIRTQ_UNUSED = 0,	/* slot is free */
	NTHW_VIRTQ_UNMANAGED,	/* in use; packet buffers owned by the caller */
	NTHW_VIRTQ_MANAGED	/* in use; driver frees p_virtual_addr on release */
};
100576e7721SDanylo Vodopianov 
/*
 * Per-queue state for one DBS virt-queue. The anonymous union holds
 * either split-ring or packed-ring bookkeeping, selected by vq_type.
 */
struct nthw_virt_queue {
	/* Pointers to virt-queue structs */
	union {
		struct {
			/* SPLIT virtqueue */
			struct virtq_avail *p_avail;
			struct virtq_used *p_used;
			struct virtq_desc *p_desc;
			/* Control variables for virt-queue structs */
			uint16_t am_idx;
			uint16_t used_idx;
			uint16_t cached_idx;
			uint16_t tx_descr_avail_idx;
		};
		struct {
			/* PACKED virtqueue */
			struct pvirtq_event_suppress *driver_event;
			struct pvirtq_event_suppress *device_event;
			struct pvirtq_desc *desc;
			/* Outstanding Tx descriptors: start index and count */
			struct {
				uint16_t next;
				uint16_t num;
			} outs;
			/*
			 * when in-order release used Tx packets from FPGA it may collapse
			 * into a batch. When getting new Tx buffers we may only need
			 * partial
			 */
			uint16_t next_avail;
			uint16_t next_used;
			uint16_t avail_wrap_count;
			uint16_t used_wrap_count;
		};
	};

	/* Array with packet buffers */
	struct nthw_memory_descriptor *p_virtual_addr;

	/* Queue configuration info */
	nthw_dbs_t *mp_nthw_dbs;

	enum nthw_virt_queue_usage usage;
	uint16_t irq_vector;
	uint16_t vq_type;	/* ring type; see PACKED() */
	uint16_t in_order;

	uint16_t queue_size;	/* number of ring entries */
	uint32_t index;		/* DBS queue index (slot in rxvq[]/txvq[]) */
	uint32_t am_enable;	/* current *_AM_ENABLE/*_AM_DISABLE state */
	uint32_t host_id;
	uint32_t port;	/* Only used by TX queues */
	uint32_t virtual_port;	/* Only used by TX queues */
	/*
	 * Only used by TX queues:
	 *   0: VirtIO-Net header (12 bytes).
	 *   1: Napatech DVIO0 descriptor (12 bytes).
	 */
	uint32_t header;
	/* Physical addresses programmed into the DBS AM/UW/DR registers */
	void *avail_struct_phys_addr;
	void *used_struct_phys_addr;
	void *desc_struct_phys_addr;
};
163576e7721SDanylo Vodopianov 
/*
 * Byte offsets of the driver/device event-suppression structures within
 * a packed-ring allocation.
 */
struct pvirtq_struct_layout_s {
	size_t driver_event_offset;
	size_t device_event_offset;
};

/* Queue-state tables, indexed by DBS queue number. */
static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
171576e7721SDanylo Vodopianov 
172576e7721SDanylo Vodopianov static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue, uint32_t start_idx,
173576e7721SDanylo Vodopianov 	uint32_t start_ptr)
174576e7721SDanylo Vodopianov {
175576e7721SDanylo Vodopianov 	uint32_t busy;
176576e7721SDanylo Vodopianov 	uint32_t init;
177576e7721SDanylo Vodopianov 	uint32_t dummy;
178576e7721SDanylo Vodopianov 
179576e7721SDanylo Vodopianov 	do {
180576e7721SDanylo Vodopianov 		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
181576e7721SDanylo Vodopianov 	} while (busy != 0);
182576e7721SDanylo Vodopianov 
183576e7721SDanylo Vodopianov 	set_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
184576e7721SDanylo Vodopianov 
185576e7721SDanylo Vodopianov 	do {
186576e7721SDanylo Vodopianov 		get_rx_init(p_nthw_dbs, &init, &dummy, &busy);
187576e7721SDanylo Vodopianov 	} while (busy != 0);
188576e7721SDanylo Vodopianov }
189576e7721SDanylo Vodopianov 
190576e7721SDanylo Vodopianov static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue, uint32_t start_idx,
191576e7721SDanylo Vodopianov 	uint32_t start_ptr)
192576e7721SDanylo Vodopianov {
193576e7721SDanylo Vodopianov 	uint32_t busy;
194576e7721SDanylo Vodopianov 	uint32_t init;
195576e7721SDanylo Vodopianov 	uint32_t dummy;
196576e7721SDanylo Vodopianov 
197576e7721SDanylo Vodopianov 	do {
198576e7721SDanylo Vodopianov 		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
199576e7721SDanylo Vodopianov 	} while (busy != 0);
200576e7721SDanylo Vodopianov 
201576e7721SDanylo Vodopianov 	set_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);
202576e7721SDanylo Vodopianov 
203576e7721SDanylo Vodopianov 	do {
204576e7721SDanylo Vodopianov 		get_tx_init(p_nthw_dbs, &init, &dummy, &busy);
205576e7721SDanylo Vodopianov 	} while (busy != 0);
206576e7721SDanylo Vodopianov }
207576e7721SDanylo Vodopianov 
/*
 * Probe and initialize the DBS module: allocate the module handle,
 * reset the module, init every RX/TX queue, and bring the global RX/TX
 * control up in stages (all off -> AM+UW on -> queues on).
 * Returns 0 on success, -1 if the handle cannot be allocated, or the
 * dbs_init() error code.  On failure mp_nthw_dbs stays NULL.
 */
static int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
{
	assert(p_fpga_info);

	nthw_fpga_t *const p_fpga = p_fpga_info->mp_fpga;
	nthw_dbs_t *p_nthw_dbs;
	int res = 0;
	uint32_t i;

	p_fpga_info->mp_nthw_dbs = NULL;

	p_nthw_dbs = nthw_dbs_new();

	if (p_nthw_dbs == NULL)
		return -1;

	res = dbs_init(NULL, p_fpga, 0);/* Check that DBS exists in FPGA */

	if (res) {
		free(p_nthw_dbs);
		return res;
	}

	res = dbs_init(p_nthw_dbs, p_fpga, 0);	/* Create DBS module */

	if (res) {
		free(p_nthw_dbs);
		return res;
	}

	p_fpga_info->mp_nthw_dbs = p_nthw_dbs;

	/* Mark every queue-state slot free before touching the hardware */
	for (i = 0; i < MAX_VIRT_QUEUES; ++i) {
		rxvq[i].usage = NTHW_VIRTQ_UNUSED;
		txvq[i].usage = NTHW_VIRTQ_UNUSED;
	}

	dbs_reset(p_nthw_dbs);

	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)
		dbs_init_rx_queue(p_nthw_dbs, i, 0, 0);

	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)
		dbs_init_tx_queue(p_nthw_dbs, i, 0, 0);

	/* Staged RX bring-up: all disabled, then AM+UW, then the queues */
	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED, RX_UW_DISABLE,
		RX_UW_POLL_SPEED, RX_Q_DISABLE);
	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED, RX_UW_ENABLE,
		RX_UW_POLL_SPEED, RX_Q_DISABLE);
	set_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED, RX_UW_ENABLE,
		RX_UW_POLL_SPEED, RX_Q_ENABLE);

	/* Same staged bring-up for TX */
	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED, TX_UW_DISABLE,
		TX_UW_POLL_SPEED, TX_Q_DISABLE);
	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED, TX_UW_ENABLE,
		TX_UW_POLL_SPEED, TX_Q_DISABLE);
	set_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED, TX_UW_ENABLE,
		TX_UW_POLL_SPEED, TX_Q_ENABLE);

	return 0;
}
269576e7721SDanylo Vodopianov 
270e13da07fSDanylo Vodopianov static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
271e13da07fSDanylo Vodopianov {
272e13da07fSDanylo Vodopianov 	/* + sizeof(uint16_t); ("avail->used_event" is not used) */
273e13da07fSDanylo Vodopianov 	size_t avail_mem = sizeof(struct virtq_avail) + queue_size * sizeof(uint16_t);
274e13da07fSDanylo Vodopianov 	size_t avail_mem_aligned = ((avail_mem % STRUCT_ALIGNMENT) == 0)
275e13da07fSDanylo Vodopianov 		? avail_mem
276e13da07fSDanylo Vodopianov 		: STRUCT_ALIGNMENT * (avail_mem / STRUCT_ALIGNMENT + 1);
277e13da07fSDanylo Vodopianov 
278e13da07fSDanylo Vodopianov 	/* + sizeof(uint16_t); ("used->avail_event" is not used) */
279e13da07fSDanylo Vodopianov 	size_t used_mem = sizeof(struct virtq_used) + queue_size * sizeof(struct virtq_used_elem);
280e13da07fSDanylo Vodopianov 	size_t used_mem_aligned = ((used_mem % STRUCT_ALIGNMENT) == 0)
281e13da07fSDanylo Vodopianov 		? used_mem
282e13da07fSDanylo Vodopianov 		: STRUCT_ALIGNMENT * (used_mem / STRUCT_ALIGNMENT + 1);
283e13da07fSDanylo Vodopianov 
284e13da07fSDanylo Vodopianov 	struct virtq_struct_layout_s virtq_layout;
285e13da07fSDanylo Vodopianov 	virtq_layout.used_offset = avail_mem_aligned;
286e13da07fSDanylo Vodopianov 	virtq_layout.desc_offset = avail_mem_aligned + used_mem_aligned;
287e13da07fSDanylo Vodopianov 
288e13da07fSDanylo Vodopianov 	return virtq_layout;
289e13da07fSDanylo Vodopianov }
290e13da07fSDanylo Vodopianov 
291e13da07fSDanylo Vodopianov static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
292e13da07fSDanylo Vodopianov 	uint16_t initial_avail_idx)
293e13da07fSDanylo Vodopianov {
294e13da07fSDanylo Vodopianov 	uint16_t i;
295e13da07fSDanylo Vodopianov 	struct virtq_avail *p_avail = (struct virtq_avail *)addr;
296e13da07fSDanylo Vodopianov 
297e13da07fSDanylo Vodopianov 	p_avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
298e13da07fSDanylo Vodopianov 	p_avail->idx = initial_avail_idx;
299e13da07fSDanylo Vodopianov 
300e13da07fSDanylo Vodopianov 	for (i = 0; i < queue_size; ++i)
301e13da07fSDanylo Vodopianov 		p_avail->ring[i] = i;
302e13da07fSDanylo Vodopianov }
303e13da07fSDanylo Vodopianov 
304e13da07fSDanylo Vodopianov static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
305e13da07fSDanylo Vodopianov {
306e13da07fSDanylo Vodopianov 	int i;
307e13da07fSDanylo Vodopianov 	struct virtq_used *p_used = (struct virtq_used *)addr;
308e13da07fSDanylo Vodopianov 
309e13da07fSDanylo Vodopianov 	p_used->flags = 1;
310e13da07fSDanylo Vodopianov 	p_used->idx = 0;
311e13da07fSDanylo Vodopianov 
312e13da07fSDanylo Vodopianov 	for (i = 0; i < queue_size; ++i) {
313e13da07fSDanylo Vodopianov 		p_used->ring[i].id = 0;
314e13da07fSDanylo Vodopianov 		p_used->ring[i].len = 0;
315e13da07fSDanylo Vodopianov 	}
316e13da07fSDanylo Vodopianov }
317e13da07fSDanylo Vodopianov 
318e13da07fSDanylo Vodopianov static void
319e13da07fSDanylo Vodopianov dbs_initialize_descriptor_struct(void *addr,
320e13da07fSDanylo Vodopianov 	struct nthw_memory_descriptor *packet_buffer_descriptors,
321e13da07fSDanylo Vodopianov 	uint16_t queue_size, uint16_t flgs)
322e13da07fSDanylo Vodopianov {
323e13da07fSDanylo Vodopianov 	if (packet_buffer_descriptors) {
324e13da07fSDanylo Vodopianov 		int i;
325e13da07fSDanylo Vodopianov 		struct virtq_desc *p_desc = (struct virtq_desc *)addr;
326e13da07fSDanylo Vodopianov 
327e13da07fSDanylo Vodopianov 		for (i = 0; i < queue_size; ++i) {
328e13da07fSDanylo Vodopianov 			p_desc[i].addr = (uint64_t)packet_buffer_descriptors[i].phys_addr;
329e13da07fSDanylo Vodopianov 			p_desc[i].len = packet_buffer_descriptors[i].len;
330e13da07fSDanylo Vodopianov 			p_desc[i].flags = flgs;
331e13da07fSDanylo Vodopianov 			p_desc[i].next = 0;
332e13da07fSDanylo Vodopianov 		}
333e13da07fSDanylo Vodopianov 	}
334e13da07fSDanylo Vodopianov }
335e13da07fSDanylo Vodopianov 
/*
 * Populate all three split-ring structures (avail, used, descriptors)
 * for one virtqueue in a single call.
 */
static void
dbs_initialize_virt_queue_structs(void *avail_struct_addr, void *used_struct_addr,
	void *desc_struct_addr,
	struct nthw_memory_descriptor *packet_buffer_descriptors,
	uint16_t queue_size, uint16_t initial_avail_idx, uint16_t flgs)
{
	dbs_initialize_avail_struct(avail_struct_addr, queue_size, initial_avail_idx);

	dbs_initialize_used_struct(used_struct_addr, queue_size);

	dbs_initialize_descriptor_struct(desc_struct_addr,
		packet_buffer_descriptors,
		queue_size,
		flgs);
}
347e13da07fSDanylo Vodopianov 
/*
 * Return floor(log2(qsize)) - the position of the highest set bit.
 * The result is programmed into DBS registers as the queue-size field
 * (the "qs" argument of set_rx_dr_data()/set_rx_uw_data()).
 * Returns 0 for qsize == 0: previously the trailing decrement
 * underflowed the unsigned counter and returned 0xFFFF.
 */
static uint16_t dbs_qsize_log2(uint16_t qsize)
{
	uint32_t qs = 0;

	/* Guard against underflow of the final decrement below */
	if (qsize == 0)
		return 0;

	while (qsize) {
		qsize = qsize >> 1;
		++qs;
	}

	--qs;
	return qs;
}
36067aee0a6SDanylo Vodopianov 
/*
 * Configure one RX virt-queue in the DBS module and record its state in
 * the rxvq[] table.  Programming order follows DSF00094: DR (descriptor
 * reader), UW (used writer), queue init, then AM (avail monitor).
 * Interrupts always start disabled; AM is armed immediately only when
 * irq_vector < 0 (interrupt-driven queues are armed later).
 * Returns a handle into rxvq[], or NULL if a DBS register write fails.
 */
static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
	uint32_t index,
	uint16_t start_idx,
	uint16_t start_ptr,
	void *avail_struct_phys_addr,
	void *used_struct_phys_addr,
	void *desc_struct_phys_addr,
	uint16_t queue_size,
	uint32_t host_id,
	uint32_t header,
	uint32_t vq_type,
	int irq_vector)
{
	uint32_t qs = dbs_qsize_log2(queue_size);
	uint32_t int_enable;
	uint32_t vec;
	uint32_t istk;

	/*
	 * Setup DBS module - DSF00094
	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
	 * DBS_RX_QUEUES entries.
	 */
	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr, host_id, qs, header,
			0) != 0) {
		return NULL;
	}

	/*
	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
	 *   DBS_RX_QUEUES entries.
	 *   Notice: We always start out with interrupts disabled (by setting the
	 *     "irq_vector" argument to -1). Queues that require interrupts will have
	 *     it enabled at a later time (after we have enabled vfio interrupts in
	 *     the kernel).
	 */
	int_enable = 0;
	vec = 0;
	istk = 0;
	NT_LOG_DBGX(DBG, NTNIC, "set_rx_uw_data int=0 irq_vector=%u", irq_vector);

	if (set_rx_uw_data(p_nthw_dbs, index,
			(uint64_t)used_struct_phys_addr,
			host_id, qs, 0, int_enable, vec, istk) != 0) {
		return NULL;
	}

	/*
	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
	 *  good idea to initialize all DBS_RX_QUEUES entries.
	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
	 *    at a later time (after we have enabled vfio interrupts in the kernel).
	 */
	if (irq_vector < 0) {
		if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
				RX_AM_DISABLE, host_id, 0,
				irq_vector >= 0 ? 1 : 0) != 0) {
			return NULL;
		}
	}

	/*
	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
	 *   DBS.RX_INIT register.
	 */
	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);

	/*
	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
	 *  good idea to initialize all DBS_RX_QUEUES entries.
	 */
	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr, RX_AM_ENABLE,
			host_id, 0, irq_vector >= 0 ? 1 : 0) != 0) {
		return NULL;
	}

	/* Save queue state */
	rxvq[index].usage = NTHW_VIRTQ_UNMANAGED;
	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
	rxvq[index].index = index;
	rxvq[index].queue_size = queue_size;
	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
	rxvq[index].host_id = host_id;
	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
	rxvq[index].vq_type = vq_type;
	rxvq[index].in_order = 0;	/* not used */
	rxvq[index].irq_vector = irq_vector;

	/* Return queue handle */
	return &rxvq[index];
}
455e13da07fSDanylo Vodopianov 
456f0fe222eSDanylo Vodopianov static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
457f0fe222eSDanylo Vodopianov 
458f0fe222eSDanylo Vodopianov static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
459f0fe222eSDanylo Vodopianov {
460f0fe222eSDanylo Vodopianov 	uint32_t busy;
461f0fe222eSDanylo Vodopianov 	uint32_t queue;
462f0fe222eSDanylo Vodopianov 	int err = 0;
463f0fe222eSDanylo Vodopianov 	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
464f0fe222eSDanylo Vodopianov 
465f0fe222eSDanylo Vodopianov 	do {
466f0fe222eSDanylo Vodopianov 		if (rx)
467f0fe222eSDanylo Vodopianov 			err = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);
468f0fe222eSDanylo Vodopianov 
469f0fe222eSDanylo Vodopianov 		else
470f0fe222eSDanylo Vodopianov 			err = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
471f0fe222eSDanylo Vodopianov 	} while (!err && busy);
472f0fe222eSDanylo Vodopianov 
473f0fe222eSDanylo Vodopianov 	return err;
474f0fe222eSDanylo Vodopianov }
475f0fe222eSDanylo Vodopianov 
/*
 * Let the FPGA drain one queue before release: issue the RX/TX idle
 * command and poll until the hardware reports the queue idle.
 * If the idle register is unsupported (-ENOTSUP), fall back to a fixed
 * 200 ms delay and report success.
 * Returns 0 on success, -1 on a register access failure.
 */
static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
{
	int err = 0;
	uint32_t idle = 0;
	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;

	/* Make sure no earlier idle command is still in flight */
	err = dbs_wait_on_busy(vq, &idle, rx);

	if (err) {
		if (err == -ENOTSUP) {
			/* No idle register: grace delay instead of polling */
			nt_os_wait_usec(200000);
			return 0;
		}

		return -1;
	}

	/* Request idle and re-poll until the queue reports idle */
	do {
		if (rx)
			err = set_rx_idle(p_nthw_dbs, 1, vq->index);

		else
			err = set_tx_idle(p_nthw_dbs, 1, vq->index);

		if (err)
			return -1;

		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
			return -1;

	} while (idle == 0);

	return 0;
}
510f0fe222eSDanylo Vodopianov 
511f0fe222eSDanylo Vodopianov static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
512f0fe222eSDanylo Vodopianov {
513f0fe222eSDanylo Vodopianov 	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
514f0fe222eSDanylo Vodopianov 
515f0fe222eSDanylo Vodopianov 	if (rxvq == NULL)
516f0fe222eSDanylo Vodopianov 		return -1;
517f0fe222eSDanylo Vodopianov 
518f0fe222eSDanylo Vodopianov 	/* Clear UW */
519f0fe222eSDanylo Vodopianov 	rxvq->used_struct_phys_addr = NULL;
520f0fe222eSDanylo Vodopianov 
521f0fe222eSDanylo Vodopianov 	if (set_rx_uw_data(p_nthw_dbs, rxvq->index, (uint64_t)rxvq->used_struct_phys_addr,
522f0fe222eSDanylo Vodopianov 			rxvq->host_id, 0, PACKED(rxvq->vq_type), 0, 0, 0) != 0) {
523f0fe222eSDanylo Vodopianov 		return -1;
524f0fe222eSDanylo Vodopianov 	}
525f0fe222eSDanylo Vodopianov 
526f0fe222eSDanylo Vodopianov 	/* Disable AM */
527f0fe222eSDanylo Vodopianov 	rxvq->am_enable = RX_AM_DISABLE;
528f0fe222eSDanylo Vodopianov 
529f0fe222eSDanylo Vodopianov 	if (set_rx_am_data(p_nthw_dbs,
530f0fe222eSDanylo Vodopianov 			rxvq->index,
531f0fe222eSDanylo Vodopianov 			(uint64_t)rxvq->avail_struct_phys_addr,
532f0fe222eSDanylo Vodopianov 			rxvq->am_enable,
533f0fe222eSDanylo Vodopianov 			rxvq->host_id,
534f0fe222eSDanylo Vodopianov 			PACKED(rxvq->vq_type),
535f0fe222eSDanylo Vodopianov 			0) != 0) {
536f0fe222eSDanylo Vodopianov 		return -1;
537f0fe222eSDanylo Vodopianov 	}
538f0fe222eSDanylo Vodopianov 
539f0fe222eSDanylo Vodopianov 	/* Let the FPGA finish packet processing */
540f0fe222eSDanylo Vodopianov 	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
541f0fe222eSDanylo Vodopianov 		return -1;
542f0fe222eSDanylo Vodopianov 
543f0fe222eSDanylo Vodopianov 	/* Clear rest of AM */
544f0fe222eSDanylo Vodopianov 	rxvq->avail_struct_phys_addr = NULL;
545f0fe222eSDanylo Vodopianov 	rxvq->host_id = 0;
546f0fe222eSDanylo Vodopianov 
547f0fe222eSDanylo Vodopianov 	if (set_rx_am_data(p_nthw_dbs,
548f0fe222eSDanylo Vodopianov 			rxvq->index,
549f0fe222eSDanylo Vodopianov 			(uint64_t)rxvq->avail_struct_phys_addr,
550f0fe222eSDanylo Vodopianov 			rxvq->am_enable,
551f0fe222eSDanylo Vodopianov 			rxvq->host_id,
552f0fe222eSDanylo Vodopianov 			PACKED(rxvq->vq_type),
553f0fe222eSDanylo Vodopianov 			0) != 0)
554f0fe222eSDanylo Vodopianov 		return -1;
555f0fe222eSDanylo Vodopianov 
556f0fe222eSDanylo Vodopianov 	/* Clear DR */
557f0fe222eSDanylo Vodopianov 	rxvq->desc_struct_phys_addr = NULL;
558f0fe222eSDanylo Vodopianov 
559f0fe222eSDanylo Vodopianov 	if (set_rx_dr_data(p_nthw_dbs,
560f0fe222eSDanylo Vodopianov 			rxvq->index,
561f0fe222eSDanylo Vodopianov 			(uint64_t)rxvq->desc_struct_phys_addr,
562f0fe222eSDanylo Vodopianov 			rxvq->host_id,
563f0fe222eSDanylo Vodopianov 			0,
564f0fe222eSDanylo Vodopianov 			rxvq->header,
565f0fe222eSDanylo Vodopianov 			PACKED(rxvq->vq_type)) != 0)
566f0fe222eSDanylo Vodopianov 		return -1;
567f0fe222eSDanylo Vodopianov 
568f0fe222eSDanylo Vodopianov 	/* Initialize queue */
569f0fe222eSDanylo Vodopianov 	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
570f0fe222eSDanylo Vodopianov 
571f0fe222eSDanylo Vodopianov 	/* Reset queue state */
572f0fe222eSDanylo Vodopianov 	rxvq->usage = NTHW_VIRTQ_UNUSED;
573f0fe222eSDanylo Vodopianov 	rxvq->mp_nthw_dbs = p_nthw_dbs;
574f0fe222eSDanylo Vodopianov 	rxvq->index = 0;
575f0fe222eSDanylo Vodopianov 	rxvq->queue_size = 0;
576f0fe222eSDanylo Vodopianov 
577f0fe222eSDanylo Vodopianov 	return 0;
578f0fe222eSDanylo Vodopianov }
579f0fe222eSDanylo Vodopianov 
580f0fe222eSDanylo Vodopianov static int nthw_release_mngd_rx_virt_queue(struct nthw_virt_queue *rxvq)
581f0fe222eSDanylo Vodopianov {
582f0fe222eSDanylo Vodopianov 	if (rxvq == NULL || rxvq->usage != NTHW_VIRTQ_MANAGED)
583f0fe222eSDanylo Vodopianov 		return -1;
584f0fe222eSDanylo Vodopianov 
585f0fe222eSDanylo Vodopianov 	if (rxvq->p_virtual_addr) {
586f0fe222eSDanylo Vodopianov 		free(rxvq->p_virtual_addr);
587f0fe222eSDanylo Vodopianov 		rxvq->p_virtual_addr = NULL;
588f0fe222eSDanylo Vodopianov 	}
589f0fe222eSDanylo Vodopianov 
590f0fe222eSDanylo Vodopianov 	return dbs_internal_release_rx_virt_queue(rxvq);
591f0fe222eSDanylo Vodopianov }
592f0fe222eSDanylo Vodopianov 
593f0fe222eSDanylo Vodopianov static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
594f0fe222eSDanylo Vodopianov {
595f0fe222eSDanylo Vodopianov 	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
596f0fe222eSDanylo Vodopianov 
597f0fe222eSDanylo Vodopianov 	if (txvq == NULL)
598f0fe222eSDanylo Vodopianov 		return -1;
599f0fe222eSDanylo Vodopianov 
600f0fe222eSDanylo Vodopianov 	/* Clear UW */
601f0fe222eSDanylo Vodopianov 	txvq->used_struct_phys_addr = NULL;
602f0fe222eSDanylo Vodopianov 
603f0fe222eSDanylo Vodopianov 	if (set_tx_uw_data(p_nthw_dbs, txvq->index, (uint64_t)txvq->used_struct_phys_addr,
604f0fe222eSDanylo Vodopianov 			txvq->host_id, 0, PACKED(txvq->vq_type), 0, 0, 0,
605f0fe222eSDanylo Vodopianov 			txvq->in_order) != 0) {
606f0fe222eSDanylo Vodopianov 		return -1;
607f0fe222eSDanylo Vodopianov 	}
608f0fe222eSDanylo Vodopianov 
609f0fe222eSDanylo Vodopianov 	/* Disable AM */
610f0fe222eSDanylo Vodopianov 	txvq->am_enable = TX_AM_DISABLE;
611f0fe222eSDanylo Vodopianov 
612f0fe222eSDanylo Vodopianov 	if (set_tx_am_data(p_nthw_dbs,
613f0fe222eSDanylo Vodopianov 			txvq->index,
614f0fe222eSDanylo Vodopianov 			(uint64_t)txvq->avail_struct_phys_addr,
615f0fe222eSDanylo Vodopianov 			txvq->am_enable,
616f0fe222eSDanylo Vodopianov 			txvq->host_id,
617f0fe222eSDanylo Vodopianov 			PACKED(txvq->vq_type),
618f0fe222eSDanylo Vodopianov 			0) != 0) {
619f0fe222eSDanylo Vodopianov 		return -1;
620f0fe222eSDanylo Vodopianov 	}
621f0fe222eSDanylo Vodopianov 
622f0fe222eSDanylo Vodopianov 	/* Let the FPGA finish packet processing */
623f0fe222eSDanylo Vodopianov 	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
624f0fe222eSDanylo Vodopianov 		return -1;
625f0fe222eSDanylo Vodopianov 
626f0fe222eSDanylo Vodopianov 	/* Clear rest of AM */
627f0fe222eSDanylo Vodopianov 	txvq->avail_struct_phys_addr = NULL;
628f0fe222eSDanylo Vodopianov 	txvq->host_id = 0;
629f0fe222eSDanylo Vodopianov 
630f0fe222eSDanylo Vodopianov 	if (set_tx_am_data(p_nthw_dbs,
631f0fe222eSDanylo Vodopianov 			txvq->index,
632f0fe222eSDanylo Vodopianov 			(uint64_t)txvq->avail_struct_phys_addr,
633f0fe222eSDanylo Vodopianov 			txvq->am_enable,
634f0fe222eSDanylo Vodopianov 			txvq->host_id,
635f0fe222eSDanylo Vodopianov 			PACKED(txvq->vq_type),
636f0fe222eSDanylo Vodopianov 			0) != 0) {
637f0fe222eSDanylo Vodopianov 		return -1;
638f0fe222eSDanylo Vodopianov 	}
639f0fe222eSDanylo Vodopianov 
640f0fe222eSDanylo Vodopianov 	/* Clear DR */
641f0fe222eSDanylo Vodopianov 	txvq->desc_struct_phys_addr = NULL;
642f0fe222eSDanylo Vodopianov 	txvq->port = 0;
643f0fe222eSDanylo Vodopianov 	txvq->header = 0;
644f0fe222eSDanylo Vodopianov 
645f0fe222eSDanylo Vodopianov 	if (set_tx_dr_data(p_nthw_dbs,
646f0fe222eSDanylo Vodopianov 			txvq->index,
647f0fe222eSDanylo Vodopianov 			(uint64_t)txvq->desc_struct_phys_addr,
648f0fe222eSDanylo Vodopianov 			txvq->host_id,
649f0fe222eSDanylo Vodopianov 			0,
650f0fe222eSDanylo Vodopianov 			txvq->port,
651f0fe222eSDanylo Vodopianov 			txvq->header,
652f0fe222eSDanylo Vodopianov 			PACKED(txvq->vq_type)) != 0) {
653f0fe222eSDanylo Vodopianov 		return -1;
654f0fe222eSDanylo Vodopianov 	}
655f0fe222eSDanylo Vodopianov 
656f0fe222eSDanylo Vodopianov 	/* Clear QP */
657f0fe222eSDanylo Vodopianov 	txvq->virtual_port = 0;
658f0fe222eSDanylo Vodopianov 
659f0fe222eSDanylo Vodopianov 	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) != 0)
660f0fe222eSDanylo Vodopianov 		return -1;
661f0fe222eSDanylo Vodopianov 
662f0fe222eSDanylo Vodopianov 	/* Initialize queue */
663f0fe222eSDanylo Vodopianov 	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
664f0fe222eSDanylo Vodopianov 
665f0fe222eSDanylo Vodopianov 	/* Reset queue state */
666f0fe222eSDanylo Vodopianov 	txvq->usage = NTHW_VIRTQ_UNUSED;
667f0fe222eSDanylo Vodopianov 	txvq->mp_nthw_dbs = p_nthw_dbs;
668f0fe222eSDanylo Vodopianov 	txvq->index = 0;
669f0fe222eSDanylo Vodopianov 	txvq->queue_size = 0;
670f0fe222eSDanylo Vodopianov 
671f0fe222eSDanylo Vodopianov 	return 0;
672f0fe222eSDanylo Vodopianov }
673f0fe222eSDanylo Vodopianov 
674f0fe222eSDanylo Vodopianov static int nthw_release_mngd_tx_virt_queue(struct nthw_virt_queue *txvq)
675f0fe222eSDanylo Vodopianov {
676f0fe222eSDanylo Vodopianov 	if (txvq == NULL || txvq->usage != NTHW_VIRTQ_MANAGED)
677f0fe222eSDanylo Vodopianov 		return -1;
678f0fe222eSDanylo Vodopianov 
679f0fe222eSDanylo Vodopianov 	if (txvq->p_virtual_addr) {
680f0fe222eSDanylo Vodopianov 		free(txvq->p_virtual_addr);
681f0fe222eSDanylo Vodopianov 		txvq->p_virtual_addr = NULL;
682f0fe222eSDanylo Vodopianov 	}
683f0fe222eSDanylo Vodopianov 
684f0fe222eSDanylo Vodopianov 	return dbs_internal_release_tx_virt_queue(txvq);
685f0fe222eSDanylo Vodopianov }
686f0fe222eSDanylo Vodopianov 
687e13da07fSDanylo Vodopianov static struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
688e13da07fSDanylo Vodopianov 	uint32_t index,
689e13da07fSDanylo Vodopianov 	uint16_t start_idx,
690e13da07fSDanylo Vodopianov 	uint16_t start_ptr,
691e13da07fSDanylo Vodopianov 	void *avail_struct_phys_addr,
692e13da07fSDanylo Vodopianov 	void *used_struct_phys_addr,
693e13da07fSDanylo Vodopianov 	void *desc_struct_phys_addr,
694e13da07fSDanylo Vodopianov 	uint16_t queue_size,
695e13da07fSDanylo Vodopianov 	uint32_t host_id,
696e13da07fSDanylo Vodopianov 	uint32_t port,
697e13da07fSDanylo Vodopianov 	uint32_t virtual_port,
698e13da07fSDanylo Vodopianov 	uint32_t header,
699e13da07fSDanylo Vodopianov 	uint32_t vq_type,
700e13da07fSDanylo Vodopianov 	int irq_vector,
701e13da07fSDanylo Vodopianov 	uint32_t in_order)
702e13da07fSDanylo Vodopianov {
70367aee0a6SDanylo Vodopianov 	uint32_t int_enable;
70467aee0a6SDanylo Vodopianov 	uint32_t vec;
70567aee0a6SDanylo Vodopianov 	uint32_t istk;
70667aee0a6SDanylo Vodopianov 	uint32_t qs = dbs_qsize_log2(queue_size);
70767aee0a6SDanylo Vodopianov 
70867aee0a6SDanylo Vodopianov 	/*
709f7b88165SDanylo Vodopianov 	 * Setup DBS module - DSF00094
710f7b88165SDanylo Vodopianov 	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
711f7b88165SDanylo Vodopianov 	 *    DBS_TX_QUEUES entries.
712f7b88165SDanylo Vodopianov 	 */
713f7b88165SDanylo Vodopianov 	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr, host_id, qs, port,
714f7b88165SDanylo Vodopianov 			header, 0) != 0) {
715f7b88165SDanylo Vodopianov 		return NULL;
716f7b88165SDanylo Vodopianov 	}
717f7b88165SDanylo Vodopianov 
718f7b88165SDanylo Vodopianov 	/*
71967aee0a6SDanylo Vodopianov 	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
72067aee0a6SDanylo Vodopianov 	 *    DBS_TX_QUEUES entries.
72167aee0a6SDanylo Vodopianov 	 *    Notice: We always start out with interrupts disabled (by setting the
72267aee0a6SDanylo Vodopianov 	 *            "irq_vector" argument to -1). Queues that require interrupts will have
72367aee0a6SDanylo Vodopianov 	 *             it enabled at a later time (after we have enabled vfio interrupts in the
72467aee0a6SDanylo Vodopianov 	 *             kernel).
72567aee0a6SDanylo Vodopianov 	 */
72667aee0a6SDanylo Vodopianov 	int_enable = 0;
72767aee0a6SDanylo Vodopianov 	vec = 0;
72867aee0a6SDanylo Vodopianov 	istk = 0;
72967aee0a6SDanylo Vodopianov 
73067aee0a6SDanylo Vodopianov 	if (set_tx_uw_data(p_nthw_dbs, index,
73167aee0a6SDanylo Vodopianov 			(uint64_t)used_struct_phys_addr,
73267aee0a6SDanylo Vodopianov 			host_id, qs, 0, int_enable, vec, istk, in_order) != 0) {
73367aee0a6SDanylo Vodopianov 		return NULL;
73467aee0a6SDanylo Vodopianov 	}
735e13da07fSDanylo Vodopianov 
736e13da07fSDanylo Vodopianov 	/*
73701e34ed9SDanylo Vodopianov 	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
73801e34ed9SDanylo Vodopianov 	 *    good idea to initialize all DBS_TX_QUEUES entries.
73901e34ed9SDanylo Vodopianov 	 */
74001e34ed9SDanylo Vodopianov 	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr, TX_AM_DISABLE,
74101e34ed9SDanylo Vodopianov 			host_id, 0, irq_vector >= 0 ? 1 : 0) != 0) {
74201e34ed9SDanylo Vodopianov 		return NULL;
74301e34ed9SDanylo Vodopianov 	}
74401e34ed9SDanylo Vodopianov 
74501e34ed9SDanylo Vodopianov 	/*
746e13da07fSDanylo Vodopianov 	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
747e13da07fSDanylo Vodopianov 	 *    DBS.TX_INIT register.
748e13da07fSDanylo Vodopianov 	 */
749e13da07fSDanylo Vodopianov 	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
750e13da07fSDanylo Vodopianov 
75101e34ed9SDanylo Vodopianov 	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
75201e34ed9SDanylo Vodopianov 		return NULL;
75301e34ed9SDanylo Vodopianov 
75401e34ed9SDanylo Vodopianov 	/*
75501e34ed9SDanylo Vodopianov 	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
75601e34ed9SDanylo Vodopianov 	 *    good idea to initialize all DBS_TX_QUEUES entries.
75701e34ed9SDanylo Vodopianov 	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
75801e34ed9SDanylo Vodopianov 	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
75901e34ed9SDanylo Vodopianov 	 *            enabled at a later time (after we have enabled vfio interrupts in the
76001e34ed9SDanylo Vodopianov 	 *            kernel).
76101e34ed9SDanylo Vodopianov 	 */
76201e34ed9SDanylo Vodopianov 	if (irq_vector < 0) {
76301e34ed9SDanylo Vodopianov 		if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
76401e34ed9SDanylo Vodopianov 				TX_AM_ENABLE, host_id, 0,
76501e34ed9SDanylo Vodopianov 				irq_vector >= 0 ? 1 : 0) != 0) {
76601e34ed9SDanylo Vodopianov 			return NULL;
76701e34ed9SDanylo Vodopianov 		}
76801e34ed9SDanylo Vodopianov 	}
76901e34ed9SDanylo Vodopianov 
770e13da07fSDanylo Vodopianov 	/* Save queue state */
771e13da07fSDanylo Vodopianov 	txvq[index].usage = NTHW_VIRTQ_UNMANAGED;
772e13da07fSDanylo Vodopianov 	txvq[index].mp_nthw_dbs = p_nthw_dbs;
773e13da07fSDanylo Vodopianov 	txvq[index].index = index;
774e13da07fSDanylo Vodopianov 	txvq[index].queue_size = queue_size;
775e13da07fSDanylo Vodopianov 	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
776e13da07fSDanylo Vodopianov 	txvq[index].host_id = host_id;
777e13da07fSDanylo Vodopianov 	txvq[index].port = port;
778e13da07fSDanylo Vodopianov 	txvq[index].virtual_port = virtual_port;
77901e34ed9SDanylo Vodopianov 	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
780f7b88165SDanylo Vodopianov 	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
781f7b88165SDanylo Vodopianov 	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
782e13da07fSDanylo Vodopianov 	txvq[index].vq_type = vq_type;
783e13da07fSDanylo Vodopianov 	txvq[index].in_order = in_order;
784e13da07fSDanylo Vodopianov 	txvq[index].irq_vector = irq_vector;
785e13da07fSDanylo Vodopianov 
786e13da07fSDanylo Vodopianov 	/* Return queue handle */
787e13da07fSDanylo Vodopianov 	return &txvq[index];
788e13da07fSDanylo Vodopianov }
789e13da07fSDanylo Vodopianov 
790e13da07fSDanylo Vodopianov static struct nthw_virt_queue *
791e13da07fSDanylo Vodopianov nthw_setup_mngd_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
792e13da07fSDanylo Vodopianov 	uint32_t index,
793e13da07fSDanylo Vodopianov 	uint32_t queue_size,
794e13da07fSDanylo Vodopianov 	uint32_t host_id,
795e13da07fSDanylo Vodopianov 	uint32_t header,
796e13da07fSDanylo Vodopianov 	struct nthw_memory_descriptor *p_virt_struct_area,
797e13da07fSDanylo Vodopianov 	struct nthw_memory_descriptor *p_packet_buffers,
798e13da07fSDanylo Vodopianov 	int irq_vector)
799e13da07fSDanylo Vodopianov {
800e13da07fSDanylo Vodopianov 	struct virtq_struct_layout_s virtq_struct_layout = dbs_calc_struct_layout(queue_size);
801e13da07fSDanylo Vodopianov 
802e13da07fSDanylo Vodopianov 	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
803e13da07fSDanylo Vodopianov 		(char *)p_virt_struct_area->virt_addr +
804e13da07fSDanylo Vodopianov 		virtq_struct_layout.used_offset,
805e13da07fSDanylo Vodopianov 		(char *)p_virt_struct_area->virt_addr +
806e13da07fSDanylo Vodopianov 		virtq_struct_layout.desc_offset,
807e13da07fSDanylo Vodopianov 		p_packet_buffers,
808e13da07fSDanylo Vodopianov 		(uint16_t)queue_size,
809e13da07fSDanylo Vodopianov 		p_packet_buffers ? (uint16_t)queue_size : 0,
810e13da07fSDanylo Vodopianov 		VIRTQ_DESC_F_WRITE /* Rx */);
811e13da07fSDanylo Vodopianov 
812e13da07fSDanylo Vodopianov 	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
813e13da07fSDanylo Vodopianov 	rxvq[index].p_used =
814e13da07fSDanylo Vodopianov 		(void *)((char *)p_virt_struct_area->virt_addr + virtq_struct_layout.used_offset);
815e13da07fSDanylo Vodopianov 	rxvq[index].p_desc =
816e13da07fSDanylo Vodopianov 		(void *)((char *)p_virt_struct_area->virt_addr + virtq_struct_layout.desc_offset);
817e13da07fSDanylo Vodopianov 
818e13da07fSDanylo Vodopianov 	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
819e13da07fSDanylo Vodopianov 	rxvq[index].used_idx = 0;
820e13da07fSDanylo Vodopianov 	rxvq[index].cached_idx = 0;
821e13da07fSDanylo Vodopianov 	rxvq[index].p_virtual_addr = NULL;
822e13da07fSDanylo Vodopianov 
823e13da07fSDanylo Vodopianov 	if (p_packet_buffers) {
824e13da07fSDanylo Vodopianov 		rxvq[index].p_virtual_addr = malloc(queue_size * sizeof(*p_packet_buffers));
825e13da07fSDanylo Vodopianov 		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
826e13da07fSDanylo Vodopianov 			queue_size * sizeof(*p_packet_buffers));
827e13da07fSDanylo Vodopianov 	}
828e13da07fSDanylo Vodopianov 
829e13da07fSDanylo Vodopianov 	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0, (void *)p_virt_struct_area->phys_addr,
830e13da07fSDanylo Vodopianov 		(char *)p_virt_struct_area->phys_addr +
831e13da07fSDanylo Vodopianov 		virtq_struct_layout.used_offset,
832e13da07fSDanylo Vodopianov 		(char *)p_virt_struct_area->phys_addr +
833e13da07fSDanylo Vodopianov 		virtq_struct_layout.desc_offset,
834e13da07fSDanylo Vodopianov 		(uint16_t)queue_size, host_id, header, SPLIT_RING, irq_vector);
835e13da07fSDanylo Vodopianov 
836e13da07fSDanylo Vodopianov 	rxvq[index].usage = NTHW_VIRTQ_MANAGED;
837e13da07fSDanylo Vodopianov 
838e13da07fSDanylo Vodopianov 	return &rxvq[index];
839e13da07fSDanylo Vodopianov }
840e13da07fSDanylo Vodopianov 
841e13da07fSDanylo Vodopianov static struct nthw_virt_queue *
842e13da07fSDanylo Vodopianov nthw_setup_mngd_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
843e13da07fSDanylo Vodopianov 	uint32_t index,
844e13da07fSDanylo Vodopianov 	uint32_t queue_size,
845e13da07fSDanylo Vodopianov 	uint32_t host_id,
846e13da07fSDanylo Vodopianov 	uint32_t port,
847e13da07fSDanylo Vodopianov 	uint32_t virtual_port,
848e13da07fSDanylo Vodopianov 	uint32_t header,
849e13da07fSDanylo Vodopianov 	int irq_vector,
850e13da07fSDanylo Vodopianov 	uint32_t in_order,
851e13da07fSDanylo Vodopianov 	struct nthw_memory_descriptor *p_virt_struct_area,
852e13da07fSDanylo Vodopianov 	struct nthw_memory_descriptor *p_packet_buffers)
853e13da07fSDanylo Vodopianov {
854e13da07fSDanylo Vodopianov 	struct virtq_struct_layout_s virtq_struct_layout = dbs_calc_struct_layout(queue_size);
855e13da07fSDanylo Vodopianov 
856e13da07fSDanylo Vodopianov 	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
857e13da07fSDanylo Vodopianov 		(char *)p_virt_struct_area->virt_addr +
858e13da07fSDanylo Vodopianov 		virtq_struct_layout.used_offset,
859e13da07fSDanylo Vodopianov 		(char *)p_virt_struct_area->virt_addr +
860e13da07fSDanylo Vodopianov 		virtq_struct_layout.desc_offset,
861e13da07fSDanylo Vodopianov 		p_packet_buffers,
862e13da07fSDanylo Vodopianov 		(uint16_t)queue_size,
863e13da07fSDanylo Vodopianov 		0,
864e13da07fSDanylo Vodopianov 		0 /* Tx */);
865e13da07fSDanylo Vodopianov 
866e13da07fSDanylo Vodopianov 	txvq[index].p_avail = p_virt_struct_area->virt_addr;
867e13da07fSDanylo Vodopianov 	txvq[index].p_used =
868e13da07fSDanylo Vodopianov 		(void *)((char *)p_virt_struct_area->virt_addr + virtq_struct_layout.used_offset);
869e13da07fSDanylo Vodopianov 	txvq[index].p_desc =
870e13da07fSDanylo Vodopianov 		(void *)((char *)p_virt_struct_area->virt_addr + virtq_struct_layout.desc_offset);
871e13da07fSDanylo Vodopianov 	txvq[index].queue_size = (uint16_t)queue_size;
872e13da07fSDanylo Vodopianov 	txvq[index].am_idx = 0;
873e13da07fSDanylo Vodopianov 	txvq[index].used_idx = 0;
874e13da07fSDanylo Vodopianov 	txvq[index].cached_idx = 0;
875e13da07fSDanylo Vodopianov 	txvq[index].p_virtual_addr = NULL;
876e13da07fSDanylo Vodopianov 
877e13da07fSDanylo Vodopianov 	txvq[index].tx_descr_avail_idx = 0;
878e13da07fSDanylo Vodopianov 
879e13da07fSDanylo Vodopianov 	if (p_packet_buffers) {
880e13da07fSDanylo Vodopianov 		txvq[index].p_virtual_addr = malloc(queue_size * sizeof(*p_packet_buffers));
881e13da07fSDanylo Vodopianov 		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
882e13da07fSDanylo Vodopianov 			queue_size * sizeof(*p_packet_buffers));
883e13da07fSDanylo Vodopianov 	}
884e13da07fSDanylo Vodopianov 
885e13da07fSDanylo Vodopianov 	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0, (void *)p_virt_struct_area->phys_addr,
886e13da07fSDanylo Vodopianov 		(char *)p_virt_struct_area->phys_addr +
887e13da07fSDanylo Vodopianov 		virtq_struct_layout.used_offset,
888e13da07fSDanylo Vodopianov 		(char *)p_virt_struct_area->phys_addr +
889e13da07fSDanylo Vodopianov 		virtq_struct_layout.desc_offset,
890e13da07fSDanylo Vodopianov 		(uint16_t)queue_size, host_id, port, virtual_port, header,
891e13da07fSDanylo Vodopianov 		SPLIT_RING, irq_vector, in_order);
892e13da07fSDanylo Vodopianov 
893e13da07fSDanylo Vodopianov 	txvq[index].usage = NTHW_VIRTQ_MANAGED;
894e13da07fSDanylo Vodopianov 
895e13da07fSDanylo Vodopianov 	return &txvq[index];
896e13da07fSDanylo Vodopianov }
897e13da07fSDanylo Vodopianov 
898e13da07fSDanylo Vodopianov /*
899af300887SDanylo Vodopianov  * Packed Ring
900af300887SDanylo Vodopianov  */
901af300887SDanylo Vodopianov static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
902af300887SDanylo Vodopianov 	struct pvirtq_struct_layout_s *pvirtq_layout,
903af300887SDanylo Vodopianov 	struct nthw_memory_descriptor *p_virt_struct_area,
904af300887SDanylo Vodopianov 	struct nthw_memory_descriptor *p_packet_buffers,
905af300887SDanylo Vodopianov 	uint16_t flags,
906af300887SDanylo Vodopianov 	int rx)
907af300887SDanylo Vodopianov {
908af300887SDanylo Vodopianov 	/* page aligned */
909af300887SDanylo Vodopianov 	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
910af300887SDanylo Vodopianov 	assert(p_packet_buffers);
911af300887SDanylo Vodopianov 
912af300887SDanylo Vodopianov 	/* clean canvas */
913af300887SDanylo Vodopianov 	memset(p_virt_struct_area->virt_addr, 0,
914af300887SDanylo Vodopianov 		sizeof(struct pvirtq_desc) * vq->queue_size +
915af300887SDanylo Vodopianov 		sizeof(struct pvirtq_event_suppress) * 2 + sizeof(int) * vq->queue_size);
916af300887SDanylo Vodopianov 
917af300887SDanylo Vodopianov 	pvirtq_layout->device_event_offset = sizeof(struct pvirtq_desc) * vq->queue_size;
918af300887SDanylo Vodopianov 	pvirtq_layout->driver_event_offset =
919af300887SDanylo Vodopianov 		pvirtq_layout->device_event_offset + sizeof(struct pvirtq_event_suppress);
920af300887SDanylo Vodopianov 
921af300887SDanylo Vodopianov 	vq->desc = p_virt_struct_area->virt_addr;
922af300887SDanylo Vodopianov 	vq->device_event = (void *)((uintptr_t)vq->desc + pvirtq_layout->device_event_offset);
923af300887SDanylo Vodopianov 	vq->driver_event = (void *)((uintptr_t)vq->desc + pvirtq_layout->driver_event_offset);
924af300887SDanylo Vodopianov 
925af300887SDanylo Vodopianov 	vq->next_avail = 0;
926af300887SDanylo Vodopianov 	vq->next_used = 0;
927af300887SDanylo Vodopianov 	vq->avail_wrap_count = 1;
928af300887SDanylo Vodopianov 	vq->used_wrap_count = 1;
929af300887SDanylo Vodopianov 
930af300887SDanylo Vodopianov 	/*
931af300887SDanylo Vodopianov 	 * Only possible if FPGA always delivers in-order
932af300887SDanylo Vodopianov 	 * Buffer ID used is the index in the p_packet_buffers array
933af300887SDanylo Vodopianov 	 */
934af300887SDanylo Vodopianov 	unsigned int i;
935af300887SDanylo Vodopianov 	struct pvirtq_desc *p_desc = vq->desc;
936af300887SDanylo Vodopianov 
937af300887SDanylo Vodopianov 	for (i = 0; i < vq->queue_size; i++) {
938af300887SDanylo Vodopianov 		if (rx) {
939af300887SDanylo Vodopianov 			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
940af300887SDanylo Vodopianov 			p_desc[i].len = p_packet_buffers[i].len;
941af300887SDanylo Vodopianov 		}
942af300887SDanylo Vodopianov 
943af300887SDanylo Vodopianov 		p_desc[i].id = i;
944af300887SDanylo Vodopianov 		p_desc[i].flags = flags;
945af300887SDanylo Vodopianov 	}
946af300887SDanylo Vodopianov 
947af300887SDanylo Vodopianov 	if (rx)
948af300887SDanylo Vodopianov 		vq->avail_wrap_count ^= 1;	/* filled up available buffers for Rx */
949af300887SDanylo Vodopianov 	else
950af300887SDanylo Vodopianov 		vq->used_wrap_count ^= 1;	/* pre-fill free buffer IDs */
951af300887SDanylo Vodopianov 
952af300887SDanylo Vodopianov 	if (vq->queue_size == 0)
953af300887SDanylo Vodopianov 		return -1;	/* don't allocate memory with size of 0 bytes */
954af300887SDanylo Vodopianov 
955af300887SDanylo Vodopianov 	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
956af300887SDanylo Vodopianov 
957af300887SDanylo Vodopianov 	if (vq->p_virtual_addr == NULL)
958af300887SDanylo Vodopianov 		return -1;
959af300887SDanylo Vodopianov 
960af300887SDanylo Vodopianov 	memcpy(vq->p_virtual_addr, p_packet_buffers, vq->queue_size * sizeof(*p_packet_buffers));
961af300887SDanylo Vodopianov 
962af300887SDanylo Vodopianov 	/* Not used yet by FPGA - make sure we disable */
963af300887SDanylo Vodopianov 	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
964af300887SDanylo Vodopianov 
965af300887SDanylo Vodopianov 	return 0;
966af300887SDanylo Vodopianov }
967af300887SDanylo Vodopianov 
968af300887SDanylo Vodopianov static struct nthw_virt_queue *
969af300887SDanylo Vodopianov nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
970af300887SDanylo Vodopianov 	uint32_t index,
971af300887SDanylo Vodopianov 	uint32_t queue_size,
972af300887SDanylo Vodopianov 	uint32_t host_id,
973af300887SDanylo Vodopianov 	uint32_t header,
974af300887SDanylo Vodopianov 	struct nthw_memory_descriptor *p_virt_struct_area,
975af300887SDanylo Vodopianov 	struct nthw_memory_descriptor *p_packet_buffers,
976af300887SDanylo Vodopianov 	int irq_vector)
977af300887SDanylo Vodopianov {
978af300887SDanylo Vodopianov 	struct pvirtq_struct_layout_s pvirtq_layout;
979af300887SDanylo Vodopianov 	struct nthw_virt_queue *vq = &rxvq[index];
980af300887SDanylo Vodopianov 	/* Set size and setup packed vq ring */
981af300887SDanylo Vodopianov 	vq->queue_size = queue_size;
982af300887SDanylo Vodopianov 
983af300887SDanylo Vodopianov 	/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */
984af300887SDanylo Vodopianov 	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout, p_virt_struct_area,
985af300887SDanylo Vodopianov 			p_packet_buffers,
986af300887SDanylo Vodopianov 			VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
987af300887SDanylo Vodopianov 		return NULL;
988af300887SDanylo Vodopianov 
989af300887SDanylo Vodopianov 	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000, 0,	/* start wrap ring counter as 1 */
990af300887SDanylo Vodopianov 		(void *)((uintptr_t)p_virt_struct_area->phys_addr +
991af300887SDanylo Vodopianov 			pvirtq_layout.driver_event_offset),
992af300887SDanylo Vodopianov 		(void *)((uintptr_t)p_virt_struct_area->phys_addr +
993af300887SDanylo Vodopianov 			pvirtq_layout.device_event_offset),
994af300887SDanylo Vodopianov 		p_virt_struct_area->phys_addr, (uint16_t)queue_size, host_id,
995af300887SDanylo Vodopianov 		header, PACKED_RING, irq_vector);
996af300887SDanylo Vodopianov 
997af300887SDanylo Vodopianov 	vq->usage = NTHW_VIRTQ_MANAGED;
998af300887SDanylo Vodopianov 	return vq;
999af300887SDanylo Vodopianov }
1000af300887SDanylo Vodopianov 
1001af300887SDanylo Vodopianov static struct nthw_virt_queue *
1002af300887SDanylo Vodopianov nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
1003af300887SDanylo Vodopianov 	uint32_t index,
1004af300887SDanylo Vodopianov 	uint32_t queue_size,
1005af300887SDanylo Vodopianov 	uint32_t host_id,
1006af300887SDanylo Vodopianov 	uint32_t port,
1007af300887SDanylo Vodopianov 	uint32_t virtual_port,
1008af300887SDanylo Vodopianov 	uint32_t header,
1009af300887SDanylo Vodopianov 	int irq_vector,
1010af300887SDanylo Vodopianov 	uint32_t in_order,
1011af300887SDanylo Vodopianov 	struct nthw_memory_descriptor *p_virt_struct_area,
1012af300887SDanylo Vodopianov 	struct nthw_memory_descriptor *p_packet_buffers)
1013af300887SDanylo Vodopianov {
1014af300887SDanylo Vodopianov 	struct pvirtq_struct_layout_s pvirtq_layout;
1015af300887SDanylo Vodopianov 	struct nthw_virt_queue *vq = &txvq[index];
1016af300887SDanylo Vodopianov 	/* Set size and setup packed vq ring */
1017af300887SDanylo Vodopianov 	vq->queue_size = queue_size;
1018af300887SDanylo Vodopianov 
1019af300887SDanylo Vodopianov 	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout, p_virt_struct_area,
1020af300887SDanylo Vodopianov 			p_packet_buffers, 0, 0) != 0)
1021af300887SDanylo Vodopianov 		return NULL;
1022af300887SDanylo Vodopianov 
1023af300887SDanylo Vodopianov 	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000, 0,	/* start wrap ring counter as 1 */
1024af300887SDanylo Vodopianov 		(void *)((uintptr_t)p_virt_struct_area->phys_addr +
1025af300887SDanylo Vodopianov 			pvirtq_layout.driver_event_offset),
1026af300887SDanylo Vodopianov 		(void *)((uintptr_t)p_virt_struct_area->phys_addr +
1027af300887SDanylo Vodopianov 			pvirtq_layout.device_event_offset),
1028af300887SDanylo Vodopianov 		p_virt_struct_area->phys_addr, (uint16_t)queue_size, host_id,
1029af300887SDanylo Vodopianov 		port, virtual_port, header, PACKED_RING, irq_vector, in_order);
1030af300887SDanylo Vodopianov 
1031af300887SDanylo Vodopianov 	vq->usage = NTHW_VIRTQ_MANAGED;
1032af300887SDanylo Vodopianov 	return vq;
1033af300887SDanylo Vodopianov }
1034af300887SDanylo Vodopianov 
1035af300887SDanylo Vodopianov /*
1036e13da07fSDanylo Vodopianov  * Create a Managed Rx Virt Queue
1037e13da07fSDanylo Vodopianov  *
1038e13da07fSDanylo Vodopianov  * Notice: The queue will be created with interrupts disabled.
1039e13da07fSDanylo Vodopianov  *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
1040e13da07fSDanylo Vodopianov  *   afterwards.
1041e13da07fSDanylo Vodopianov  */
1042e13da07fSDanylo Vodopianov static struct nthw_virt_queue *
1043e13da07fSDanylo Vodopianov nthw_setup_mngd_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
1044e13da07fSDanylo Vodopianov 	uint32_t index,
1045e13da07fSDanylo Vodopianov 	uint32_t queue_size,
1046e13da07fSDanylo Vodopianov 	uint32_t host_id,
1047e13da07fSDanylo Vodopianov 	uint32_t header,
1048e13da07fSDanylo Vodopianov 	struct nthw_memory_descriptor *p_virt_struct_area,
1049e13da07fSDanylo Vodopianov 	struct nthw_memory_descriptor *p_packet_buffers,
1050e13da07fSDanylo Vodopianov 	uint32_t vq_type,
1051e13da07fSDanylo Vodopianov 	int irq_vector)
1052e13da07fSDanylo Vodopianov {
1053e13da07fSDanylo Vodopianov 	switch (vq_type) {
1054e13da07fSDanylo Vodopianov 	case SPLIT_RING:
1055e13da07fSDanylo Vodopianov 		return nthw_setup_mngd_rx_virt_queue_split(p_nthw_dbs, index, queue_size,
1056e13da07fSDanylo Vodopianov 				host_id, header, p_virt_struct_area,
1057e13da07fSDanylo Vodopianov 				p_packet_buffers, irq_vector);
1058e13da07fSDanylo Vodopianov 
1059af300887SDanylo Vodopianov 	case PACKED_RING:
1060af300887SDanylo Vodopianov 		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs, index, queue_size,
1061af300887SDanylo Vodopianov 				host_id, header, p_virt_struct_area,
1062af300887SDanylo Vodopianov 				p_packet_buffers, irq_vector);
1063af300887SDanylo Vodopianov 
1064e13da07fSDanylo Vodopianov 	default:
1065e13da07fSDanylo Vodopianov 		break;
1066e13da07fSDanylo Vodopianov 	}
1067e13da07fSDanylo Vodopianov 
1068e13da07fSDanylo Vodopianov 	return NULL;
1069e13da07fSDanylo Vodopianov }
1070e13da07fSDanylo Vodopianov 
1071e13da07fSDanylo Vodopianov /*
1072e13da07fSDanylo Vodopianov  * Create a Managed Tx Virt Queue
1073e13da07fSDanylo Vodopianov  *
1074e13da07fSDanylo Vodopianov  * Notice: The queue will be created with interrupts disabled.
1075e13da07fSDanylo Vodopianov  *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
1076e13da07fSDanylo Vodopianov  *   afterwards.
1077e13da07fSDanylo Vodopianov  */
1078e13da07fSDanylo Vodopianov static struct nthw_virt_queue *
1079e13da07fSDanylo Vodopianov nthw_setup_mngd_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
1080e13da07fSDanylo Vodopianov 	uint32_t index,
1081e13da07fSDanylo Vodopianov 	uint32_t queue_size,
1082e13da07fSDanylo Vodopianov 	uint32_t host_id,
1083e13da07fSDanylo Vodopianov 	uint32_t port,
1084e13da07fSDanylo Vodopianov 	uint32_t virtual_port,
1085e13da07fSDanylo Vodopianov 	uint32_t header,
1086e13da07fSDanylo Vodopianov 	struct nthw_memory_descriptor *p_virt_struct_area,
1087e13da07fSDanylo Vodopianov 	struct nthw_memory_descriptor *p_packet_buffers,
1088e13da07fSDanylo Vodopianov 	uint32_t vq_type,
1089e13da07fSDanylo Vodopianov 	int irq_vector,
1090e13da07fSDanylo Vodopianov 	uint32_t in_order)
1091e13da07fSDanylo Vodopianov {
1092e13da07fSDanylo Vodopianov 	switch (vq_type) {
1093e13da07fSDanylo Vodopianov 	case SPLIT_RING:
1094e13da07fSDanylo Vodopianov 		return nthw_setup_mngd_tx_virt_queue_split(p_nthw_dbs, index, queue_size,
1095e13da07fSDanylo Vodopianov 				host_id, port, virtual_port, header,
1096e13da07fSDanylo Vodopianov 				irq_vector, in_order,
1097e13da07fSDanylo Vodopianov 				p_virt_struct_area,
1098e13da07fSDanylo Vodopianov 				p_packet_buffers);
1099e13da07fSDanylo Vodopianov 
1100af300887SDanylo Vodopianov 	case PACKED_RING:
1101af300887SDanylo Vodopianov 		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index, queue_size,
1102af300887SDanylo Vodopianov 				host_id, port, virtual_port, header,
1103af300887SDanylo Vodopianov 				irq_vector, in_order,
1104af300887SDanylo Vodopianov 				p_virt_struct_area,
1105af300887SDanylo Vodopianov 				p_packet_buffers);
1106af300887SDanylo Vodopianov 
1107e13da07fSDanylo Vodopianov 	default:
1108e13da07fSDanylo Vodopianov 		break;
1109e13da07fSDanylo Vodopianov 	}
1110e13da07fSDanylo Vodopianov 
1111e13da07fSDanylo Vodopianov 	return NULL;
1112e13da07fSDanylo Vodopianov }
1113e13da07fSDanylo Vodopianov 
/*
 * Harvest up to n descriptor entries worth of received packets from an RX queue.
 *
 * rxvq:    RX virt queue to harvest
 * n:       maximum number of entries to place in rp[]
 * rp:      out - one address/length entry per buffer segment
 * nb_pkts: out - number of whole packets harvested
 *
 * Returns the number of rp[] entries (segments) filled; this can exceed
 * *nb_pkts when a packet spans several buffers. Only whole packets are
 * handed out - a packet whose segments do not all fit in rp[] is left
 * on the ring for the next call.
 */
static uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq,
	uint16_t n,
	struct nthw_received_packets *rp,
	uint16_t *nb_pkts)
{
	uint16_t segs = 0;
	uint16_t pkts = 0;

	if (rxvq->vq_type == SPLIT_RING) {
		uint16_t i;
		/* Entries seen at the last used-index snapshot but not yet consumed */
		uint16_t entries_ready = (uint16_t)(rxvq->cached_idx - rxvq->used_idx);

		if (entries_ready < n) {
			/* Look for more packets: refresh snapshot of the HW used index */
			rxvq->cached_idx = rxvq->p_used->idx;
			entries_ready = (uint16_t)(rxvq->cached_idx - rxvq->used_idx);

			if (entries_ready == 0) {
				*nb_pkts = 0;
				return 0;
			}

			if (n > entries_ready)
				n = entries_ready;
		}

		/*
		 * Give packets - make sure all packets are whole packets.
		 * Valid because queue_size is always 2^n
		 */
		const uint16_t queue_mask = (uint16_t)(rxvq->queue_size - 1);
		/* All RX buffers are assumed the size of the first descriptor's buffer */
		const uint32_t buf_len = rxvq->p_desc[0].len;

		uint16_t used = rxvq->used_idx;

		for (i = 0; i < n; ++i) {
			uint32_t id = rxvq->p_used->ring[used & queue_mask].id;
			rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
			rp[i].len = rxvq->p_used->ring[used & queue_mask].len;

			/* Capture length taken from the packet header written by HW */
			uint32_t pkt_len = ((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;

			if (pkt_len > buf_len) {
				/* segmented */
				int nbsegs = (pkt_len + buf_len - 1) / buf_len;

				if (((int)i + nbsegs) > n) {
					/* don't have enough segments - break out */
					break;
				}

				int ii;

				/* Collect the remaining segments of this jumbo packet */
				for (ii = 1; ii < nbsegs; ii++) {
					++i;
					id = rxvq->p_used->ring[(used + ii) & queue_mask].id;
					rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
					rp[i].len =
						rxvq->p_used->ring[(used + ii) & queue_mask].len;
				}

				used += nbsegs;

			} else {
				++used;
			}

			pkts++;
			segs = i + 1;
		}

		/* Commit consumption so released buffers can be re-posted */
		rxvq->used_idx = used;

	} else if (rxvq->vq_type == PACKED_RING) {
		/* This requires in-order behavior from FPGA */
		int i;

		for (i = 0; i < n; i++) {
			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];

			uint16_t flags = desc->flags;
			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);

			/* Descriptor belongs to us when AVAIL and USED both match the wrap count */
			if (avail != rxvq->used_wrap_count || used != rxvq->used_wrap_count)
				break;

			rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
			rp[pkts].len = desc->len;
			pkts++;

			inc_used(rxvq, 1);
		}

		/* Packed-ring path returns one segment per packet */
		segs = pkts;
	}

	*nb_pkts = pkts;
	return segs;
}
12149c2e6e75SDanylo Vodopianov 
/*
 * Put RX buffers back onto the ring so hardware can reuse them:
 * split ring - publish them via the Avail Ring index;
 * packed ring - re-post them as available descriptors.
 */
static void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n)
{
	if (rxvq->vq_type == SPLIT_RING) {
		/* Advance the shadow avail index and publish it to the shared ring */
		rxvq->am_idx = (uint16_t)(rxvq->am_idx + n);
		rxvq->p_avail->idx = rxvq->am_idx;

	} else if (rxvq->vq_type == PACKED_RING) {
		int i;
		/*
		 * Defer flags update on first segment - due to serialization towards HW and
		 * when jumbo segments are added
		 */

		uint16_t first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) | used_flag_inv(rxvq);
		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];

		uint32_t len = rxvq->p_virtual_addr[0].len;	/* all same size */

		/* Optimization point: use in-order release */

		for (i = 0; i < n; i++) {
			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_avail];

			/* Buffer id equals ring position - relies on in-order use */
			desc->id = rxvq->next_avail;
			desc->addr = (uint64_t)rxvq->p_virtual_addr[desc->id].phys_addr;
			desc->len = len;

			/* First descriptor's flags are written last (below) to publish the batch */
			if (i)
				desc->flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
					used_flag_inv(rxvq);

			inc_avail(rxvq, 1);
		}

		/*
		 * Barrier before handing the first descriptor to HW.
		 * NOTE(review): rte_rmb() is used here (mirroring the TX path); confirm
		 * a write barrier is not required to order the descriptor stores above
		 * against the flags store.
		 */
		rte_rmb();
		first_desc->flags = first_flags;
	}
}
1256f0fe222eSDanylo Vodopianov 
/*
 * Reserve up to n TX descriptors for the caller to fill.
 *
 * txvq:        TX virt queue to take descriptors from
 * n:           number of descriptors requested
 * first_idx:   out - ring index of the first reserved descriptor
 * cvq:         out - ring type and pointer to the descriptor array
 * p_virt_addr: out - table of per-buffer virtual/physical addresses
 *
 * Returns the number of descriptors actually available (may be less than n),
 * or 0 for an unknown ring type.
 */
static uint16_t nthw_get_tx_packets(struct nthw_virt_queue *txvq,
	uint16_t n,
	uint16_t *first_idx,
	struct nthw_cvirtq_desc *cvq,
	struct nthw_memory_descriptor **p_virt_addr)
{
	int m = 0;
	uint16_t queue_mask =
		(uint16_t)(txvq->queue_size - 1);	/* Valid because queue_size is always 2^n */
	*p_virt_addr = txvq->p_virtual_addr;

	if (txvq->vq_type == SPLIT_RING) {
		cvq->s = txvq->p_desc;
		cvq->vq_type = SPLIT_RING;

		*first_idx = txvq->tx_descr_avail_idx;

		uint16_t entries_used =
			(uint16_t)((txvq->tx_descr_avail_idx - txvq->cached_idx) & queue_mask);
		uint16_t entries_ready = (uint16_t)(txvq->queue_size - 1 - entries_used);

		vq_log_arg(txvq,
			"ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i",
			n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used, entries_ready,
			txvq->p_used->idx);

		if (entries_ready < n) {
			/*
			 * Look for more packets.
			 * Using the used_idx in the avail ring since they are held synchronous
			 * because of in-order
			 */
			txvq->cached_idx =
				txvq->p_avail->ring[(txvq->p_used->idx - 1) & queue_mask];

			vq_log_arg(txvq, "Update: get cachedidx %i (used_idx-1 %i)",
				txvq->cached_idx, (txvq->p_used->idx - 1) & queue_mask);
			entries_used =
				(uint16_t)((txvq->tx_descr_avail_idx - txvq->cached_idx)
				& queue_mask);
			entries_ready = (uint16_t)(txvq->queue_size - 1 - entries_used);
			vq_log_arg(txvq, "new used: %i, ready %i", entries_used, entries_ready);

			if (n > entries_ready)
				n = entries_ready;
		}

	} else if (txvq->vq_type == PACKED_RING) {
		int i;

		cvq->p = txvq->desc;
		cvq->vq_type = PACKED_RING;

		/* First hand out descriptors already reclaimed on a previous call */
		if (txvq->outs.num) {
			*first_idx = txvq->outs.next;
			uint16_t num = min(n, txvq->outs.num);
			txvq->outs.next = (txvq->outs.next + num) & queue_mask;
			txvq->outs.num -= num;

			if (n == num)
				return n;

			/* Partially satisfied - still need (n - num) from the ring */
			m = num;
			n -= num;

		} else {
			*first_idx = txvq->next_used;
		}

		/* iterate the ring - this requires in-order behavior from FPGA */
		for (i = 0; i < n; i++) {
			struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];

			uint16_t flags = desc->flags;
			uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
			uint8_t used = !!(flags & VIRTQ_DESC_F_USED);

			if (avail != txvq->used_wrap_count || used != txvq->used_wrap_count) {
				n = i;
				break;
			}

			/* desc->id may point past next_used - several descriptors completed at once */
			uint16_t incr = (desc->id - txvq->next_used) & queue_mask;
			i += incr;
			inc_used(txvq, incr + 1);
		}

		if (i > n) {
			/* Reclaimed more than requested - stash the surplus for the next call */
			int outs_num = i - n;
			txvq->outs.next = (txvq->next_used - outs_num) & queue_mask;
			txvq->outs.num = outs_num;
		}

	} else {
		return 0;
	}

	return m + n;
}
13569c2e6e75SDanylo Vodopianov 
/*
 * Hand n filled TX packets over to hardware for transmission.
 * n_segs[i] holds the number of descriptors (segments) used by packet i.
 */
static void nthw_release_tx_packets(struct nthw_virt_queue *txvq, uint16_t n, uint16_t n_segs[])
{
	int i;

	if (txvq->vq_type == SPLIT_RING) {
		/* Valid because queue_size is always 2^n */
		uint16_t queue_mask = (uint16_t)(txvq->queue_size - 1);

		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i", n, txvq->am_idx,
			txvq->tx_descr_avail_idx);

		/* One avail-ring slot per packet, pointing at the packet's first descriptor */
		for (i = 0; i < n; i++) {
			int idx = txvq->am_idx & queue_mask;
			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
			/* Advance by the packet's segment count (descriptors consumed in order) */
			txvq->tx_descr_avail_idx =
				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
			txvq->am_idx++;
		}

		/* Make sure the ring has been updated before HW reads index update */
		rte_mb();
		txvq->p_avail->idx = txvq->am_idx;
		vq_log_arg(txvq, "new avail idx %i, descr_idx %i", txvq->p_avail->idx,
			txvq->tx_descr_avail_idx);

	} else if (txvq->vq_type == PACKED_RING) {
		/*
		 * Defer flags update on first segment - due to serialization towards HW and
		 * when jumbo segments are added
		 */

		uint16_t first_flags = avail_flag(txvq) | used_flag_inv(txvq);
		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];

		for (i = 0; i < n; i++) {
			struct pvirtq_desc *desc = &txvq->desc[txvq->next_avail];

			/* Buffer id equals ring position - relies on in-order use */
			desc->id = txvq->next_avail;
			desc->addr = (uint64_t)txvq->p_virtual_addr[desc->id].phys_addr;

			if (i)
				/* bitwise-or here because next flags may already have been setup
				 */
				desc->flags |= avail_flag(txvq) | used_flag_inv(txvq);

			inc_avail(txvq, 1);
		}

		/* Proper read barrier before FPGA may see first flags */
		rte_rmb();
		first_desc->flags = first_flags;
	}
}
1410f0fe222eSDanylo Vodopianov 
/*
 * Scatter-gather virt-queue operation table registered with the driver core
 * via sg_init()/register_sg_ops(); entries map directly to the static
 * implementations in this file.
 */
static struct sg_ops_s sg_ops = {
	.nthw_setup_rx_virt_queue = nthw_setup_rx_virt_queue,
	.nthw_setup_tx_virt_queue = nthw_setup_tx_virt_queue,
	.nthw_setup_mngd_rx_virt_queue = nthw_setup_mngd_rx_virt_queue,
	.nthw_release_mngd_rx_virt_queue = nthw_release_mngd_rx_virt_queue,
	.nthw_setup_mngd_tx_virt_queue = nthw_setup_mngd_tx_virt_queue,
	.nthw_release_mngd_tx_virt_queue = nthw_release_mngd_tx_virt_queue,
	.nthw_get_rx_packets = nthw_get_rx_packets,
	.nthw_release_rx_packets = nthw_release_rx_packets,
	.nthw_get_tx_packets = nthw_get_tx_packets,
	.nthw_release_tx_packets = nthw_release_tx_packets,
	.nthw_virt_queue_init = nthw_virt_queue_init
};
1424576e7721SDanylo Vodopianov 
/*
 * Register the scatter-gather virt-queue operations (sg_ops) with the
 * module registry, making them available to the rest of the driver.
 */
void sg_init(void)
{
	NT_LOG(INF, NTNIC, "SG ops initialized");
	register_sg_ops(&sg_ops);
}
1430