/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <unistd.h>
#include <math.h>
#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "nicvf_plat.h"

struct nicvf_reg_info {
	uint32_t offset;
	const char *name;
};

#define NICVF_REG_POLL_ITER_NR   (10)
#define NICVF_REG_POLL_DELAY_US  (2000)
#define NICVF_REG_INFO(reg) {reg, #reg}

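/*
 * NICVF_REG_INFO relies on the preprocessor stringize operator, so
 * NICVF_REG_INFO(NIC_VF_CFG) expands to {NIC_VF_CFG, "NIC_VF_CFG"},
 * keeping each table entry's offset and printable name in sync.
 */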
static const struct nicvf_reg_info nicvf_reg_tbl[] = {
	NICVF_REG_INFO(NIC_VF_CFG),
	NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
	NICVF_REG_INFO(NIC_VF_INT),
	NICVF_REG_INFO(NIC_VF_INT_W1S),
	NICVF_REG_INFO(NIC_VF_ENA_W1C),
	NICVF_REG_INFO(NIC_VF_ENA_W1S),
	NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
	NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
};

static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
	{NIC_VNIC_RSS_KEY_0_4 + 0,  "NIC_VNIC_RSS_KEY_0"},
	{NIC_VNIC_RSS_KEY_0_4 + 8,  "NIC_VNIC_RSS_KEY_1"},
	{NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
	{NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
	{NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
	{NIC_VNIC_TX_STAT_0_4 + 0,  "NIC_VNIC_STAT_TX_OCTS"},
	{NIC_VNIC_TX_STAT_0_4 + 8,  "NIC_VNIC_STAT_TX_UCAST"},
	{NIC_VNIC_TX_STAT_0_4 + 16, "NIC_VNIC_STAT_TX_BCAST"},
	{NIC_VNIC_TX_STAT_0_4 + 24, "NIC_VNIC_STAT_TX_MCAST"},
	{NIC_VNIC_TX_STAT_0_4 + 32, "NIC_VNIC_STAT_TX_DROP"},
	{NIC_VNIC_RX_STAT_0_13 + 0,  "NIC_VNIC_STAT_RX_OCTS"},
	{NIC_VNIC_RX_STAT_0_13 + 8,  "NIC_VNIC_STAT_RX_UCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
	{NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
	{NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
	{NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
	{NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
	{NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
	{NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
};

static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
};

static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
	NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
	NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
	NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
};

static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
};

static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
};

int
nicvf_base_init(struct nicvf *nic)
{
	nic->hwcap = 0;
	if (nic->subsystem_device_id == 0)
		return NICVF_ERR_BASE_INIT;

	if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF)
		nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;

	if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN81XX_NICVF)
		nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;

	if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN83XX_NICVF)
		nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2 |
				NICVF_CAP_DISABLE_APAD;

	return NICVF_OK;
}

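/*
 * The hwcap bits set above gate optional code paths elsewhere in the
 * driver. A minimal usage sketch (illustrative caller, not part of
 * this file):
 *
 *	if (nicvf_base_init(nic) == NICVF_OK &&
 *	    (nic->hwcap & NICVF_CAP_DISABLE_APAD))
 *		nicvf_apad_config(nic, false);
 */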
/* Dump registers on stdout if data is NULL, otherwise copy them into data */
int
nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
{
	uint32_t i, q;
	bool dump_stdout;

	dump_stdout = (data == NULL);

	for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
		if (dump_stdout)
			nicvf_log("%24s  = 0x%" PRIx64 "\n",
				nicvf_reg_tbl[i].name,
				nicvf_reg_read(nic, nicvf_reg_tbl[i].offset));
		else
			*data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);

	for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
		if (dump_stdout)
			nicvf_log("%24s  = 0x%" PRIx64 "\n",
				nicvf_multi_reg_tbl[i].name,
				nicvf_reg_read(nic,
					nicvf_multi_reg_tbl[i].offset));
		else
			*data++ = nicvf_reg_read(nic,
					nicvf_multi_reg_tbl[i].offset);

	for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
			if (dump_stdout)
				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
					nicvf_qset_cq_reg_tbl[i].name, q,
					nicvf_queue_reg_read(nic,
					nicvf_qset_cq_reg_tbl[i].offset, q));
			else
				*data++ = nicvf_queue_reg_read(nic,
					nicvf_qset_cq_reg_tbl[i].offset, q);

	for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
			if (dump_stdout)
				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
					nicvf_qset_rq_reg_tbl[i].name, q,
					nicvf_queue_reg_read(nic,
					nicvf_qset_rq_reg_tbl[i].offset, q));
			else
				*data++ = nicvf_queue_reg_read(nic,
					nicvf_qset_rq_reg_tbl[i].offset, q);

	for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
			if (dump_stdout)
				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
					nicvf_qset_sq_reg_tbl[i].name, q,
					nicvf_queue_reg_read(nic,
					nicvf_qset_sq_reg_tbl[i].offset, q));
			else
				*data++ = nicvf_queue_reg_read(nic,
					nicvf_qset_sq_reg_tbl[i].offset, q);

	for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
			if (dump_stdout)
				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
					nicvf_qset_rbdr_reg_tbl[i].name, q,
					nicvf_queue_reg_read(nic,
					nicvf_qset_rbdr_reg_tbl[i].offset, q));
			else
				*data++ = nicvf_queue_reg_read(nic,
					nicvf_qset_rbdr_reg_tbl[i].offset, q);
	return 0;
}

int
nicvf_reg_get_count(void)
{
	int nr_regs;

	nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
	nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
			MAX_CMP_QUEUES_PER_QS;
	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
			MAX_RCV_QUEUES_PER_QS;
	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
			MAX_SND_QUEUES_PER_QS;
	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
			MAX_RCV_BUF_DESC_RINGS_PER_QS;

	return nr_regs;
}

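/*
 * Usage sketch (illustrative, not part of the driver): snapshot every
 * register into a caller-owned buffer sized via nicvf_reg_get_count().
 *
 *	uint64_t *regs = calloc(nicvf_reg_get_count(), sizeof(uint64_t));
 *
 *	if (regs != NULL)
 *		nicvf_reg_dump(nic, regs);
 */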
static int
nicvf_qset_config_internal(struct nicvf *nic, bool enable)
{
	int ret;
	struct pf_qs_cfg pf_qs_cfg = {.value = 0};

	pf_qs_cfg.ena = enable ? 1 : 0;
	pf_qs_cfg.vnic = nic->vf_id;
	ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
	return ret ? NICVF_ERR_SET_QS : 0;
}

/* Requests PF to assign and enable Qset */
int
nicvf_qset_config(struct nicvf *nic)
{
	/* Enable Qset */
	return nicvf_qset_config_internal(nic, true);
}

int
nicvf_qset_reclaim(struct nicvf *nic)
{
	/* Disable Qset */
	return nicvf_qset_config_internal(nic, false);
}

static int
cmpfunc(const void *a, const void *b)
{
	uint32_t x = *(const uint32_t *)a;
	uint32_t y = *(const uint32_t *)b;

	/* Compare without unsigned subtraction wraparound */
	return (x > y) - (x < y);
}

static uint32_t
nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
{
	uint32_t i;

	qsort(list, entries, sizeof(uint32_t), cmpfunc);
	for (i = 0; i < entries; i++)
		if (val <= list[i])
			break;
	/* Not in the list */
	if (i >= entries)
		return 0;
	else
		return list[i];
}

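/*
 * Example: the nicvf_qsize_*_roundup() helpers below use this to round
 * a requested ring size up to the nearest supported one. Assuming the
 * SND_QUEUE_SZ_* names reflect their entry counts, a request of 3000
 * entries returns SND_QUEUE_SZ_4K, the smallest entry >= 3000; a
 * request larger than the biggest entry yields 0.
 */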
static void
nicvf_handle_qset_err_intr(struct nicvf *nic)
{
	uint16_t qidx;
	uint64_t status;

	nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
	nicvf_reg_dump(nic, NULL);

	for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
		status = nicvf_queue_reg_read(
				nic, NIC_QSET_CQ_0_7_STATUS, qidx);
		if (!(status & NICVF_CQ_ERR_MASK))
			continue;

		if (status & NICVF_CQ_WR_FULL)
			nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
		if (status & NICVF_CQ_WR_DISABLE)
			nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
		if (status & NICVF_CQ_WR_FAULT)
			nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
	}

	for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
		status = nicvf_queue_reg_read(
				nic, NIC_QSET_SQ_0_7_STATUS, qidx);
		if (!(status & NICVF_SQ_ERR_MASK))
			continue;

		if (status & NICVF_SQ_ERR_STOPPED)
			nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
		if (status & NICVF_SQ_ERR_SEND)
			nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
		if (status & NICVF_SQ_ERR_DPE)
			nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
		nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
	}

	for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
		status = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_STATUS0, qidx);
		status &= NICVF_RBDR_FIFO_STATE_MASK;
		status >>= NICVF_RBDR_FIFO_STATE_SHIFT;

		if (status == RBDR_FIFO_STATE_FAIL)
			nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
	}

	nicvf_disable_all_interrupts(nic);
	abort();
}

/*
 * Handle the "mbox" and "queue-set error" interrupts that the poll mode
 * driver is interested in.
 * This function is not re-entrant; the caller must provide proper
 * serialization.
 */
int
nicvf_reg_poll_interrupts(struct nicvf *nic)
{
	int msg = 0;
	uint64_t intr;

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	if (intr & NICVF_INTR_MBOX_MASK) {
		nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
		msg = nicvf_handle_mbx_intr(nic);
	}
	if (intr & NICVF_INTR_QS_ERR_MASK) {
		nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
		nicvf_handle_qset_err_intr(nic);
	}
	return msg;
}

static int
nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
		    uint32_t bit_pos, uint32_t bits, uint64_t val)
{
	uint64_t bit_mask;
	uint64_t reg_val;
	int timeout = NICVF_REG_POLL_ITER_NR;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, offset, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return NICVF_OK;
		nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
		timeout--;
	}
	return NICVF_ERR_REG_POLL;
}

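/*
 * nicvf_qset_poll_reg() polls a bit field for up to
 * NICVF_REG_POLL_ITER_NR * NICVF_REG_POLL_DELAY_US = 10 * 2000 us,
 * i.e. 20 ms. For instance, the RBDR teardown below waits for the two
 * FIFO state bits at position 62 of NIC_QSET_RBDR_0_1_STATUS0 to read
 * 0x00:
 *
 *	nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
 *			    62, 2, 0x00);
 */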
int
nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
{
	uint64_t status;
	int timeout = NICVF_REG_POLL_ITER_NR;
	struct nicvf_rbdr *rbdr = nic->rbdr;

	/* Save head and tail pointers for freeing up buffers */
	if (rbdr) {
		rbdr->head = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
		rbdr->tail = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
		rbdr->next_tail = rbdr->tail;
	}

	/* Reset RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
				NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
				62, 2, 0x00))
		return NICVF_ERR_RBDR_DISABLE;

	/* Wait until the two 32-bit prefetch counters in PRFCH_STATUS agree */
	while (1) {
		status = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_PRFCH_STATUS, qidx);
		if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
			break;
		nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
		timeout--;
		if (!timeout)
			return NICVF_ERR_RBDR_PREFETCH;
	}

	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
			NICVF_RBDR_RESET);
	if (nicvf_qset_poll_reg(nic, qidx,
			NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return NICVF_ERR_RBDR_RESET1;

	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_qset_poll_reg(nic, qidx,
			NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return NICVF_ERR_RBDR_RESET2;

	return NICVF_OK;
}

static int
nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
{
	int val;

	val = nicvf_log2_u32(len) - len_shift;

	assert(val >= NICVF_QSIZE_MIN_VAL);
	assert(val <= NICVF_QSIZE_MAX_VAL);
	return val;
}

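/*
 * Worked example for the encoding above, assuming RBDR_SIZE_SHIFT is 13
 * (an 8K-entry ring being the smallest supported size): an RBDR with
 * qlen_mask + 1 == 8192 entries encodes as
 * nicvf_qsize_regbit(8192, RBDR_SIZE_SHIFT) = 13 - 13 = 0.
 */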
int
nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
{
	int ret;
	uint64_t head, tail;
	struct nicvf_rbdr *rbdr = nic->rbdr;
	struct rbdr_cfg rbdr_cfg = {.value = 0};

	ret = nicvf_qset_rbdr_reclaim(nic, qidx);
	if (ret)
		return ret;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);

	/* Enable RBDR & set queue size */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
						RBDR_SIZE_SHIFT);
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->buffsz / 128;

	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);

	/* Verify proper RBDR reset */
	head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);

	if (head | tail)
		return NICVF_ERR_RBDR_RESET;

	return NICVF_OK;
}

uint32_t
nicvf_qsize_rbdr_roundup(uint32_t val)
{
	uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
			RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
			RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
			RBDR_QUEUE_SZ_512K};
	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

int
nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
			  uint16_t ridx, rbdr_pool_get_handler handler,
			  uint32_t max_buffs)
{
	struct rbdr_entry_t *desc, *desc0;
	struct nicvf_rbdr *rbdr = nic->rbdr;
	uint32_t count;
	nicvf_iova_addr_t phy;

	assert(rbdr != NULL);
	desc = rbdr->desc;
	count = 0;
	/* Don't fill beyond the max number of descriptors */
	while (count < rbdr->qlen_mask) {
		if (count >= max_buffs)
			break;
		desc0 = desc + count;
		phy = handler(dev, nic);
		if (phy) {
			desc0->full_addr = phy;
			count++;
		} else {
			break;
		}
	}
	nicvf_smp_wmb();
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
	rbdr->tail = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
	rbdr->next_tail = rbdr->tail;
	nicvf_smp_rmb();
	return 0;
}

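/*
 * A minimal sketch of an rbdr_pool_get_handler (names hypothetical):
 * it must return the IOVA of a fresh receive buffer, or 0 when the
 * pool is exhausted, which stops the precharge loop above.
 *
 *	static nicvf_iova_addr_t
 *	example_pool_get(void *dev, struct nicvf *nic)
 *	{
 *		void *buf = example_alloc_rxbuf(dev);
 *
 *		return buf ? example_buf_iova(dev, buf) : 0;
 *	}
 */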
int
nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
{
	return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
}

int
nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
{
	uint64_t head, tail;
	struct sq_cfg sq_cfg;

	sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);

	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);

	/* Check if SQ is stopped */
	if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
				NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
		return NICVF_ERR_SQ_DISABLE;

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	if (head | tail)
		return NICVF_ERR_SQ_RESET;

	return 0;
}

int
nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
{
	int ret;
	struct sq_cfg sq_cfg = {.value = 0};

	ret = nicvf_qset_sq_reclaim(nic, qidx);
	if (ret)
		return ret;

	/* Send a mailbox msg to PF to config SQ */
	if (nicvf_mbox_sq_config(nic, qidx))
		return NICVF_ERR_SQ_PF_CFG;

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);

	/* Enable send queue & set queue size */
	sq_cfg.cq_limit = 0;
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);

	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);

	return 0;
}

uint32_t
nicvf_qsize_sq_roundup(uint32_t val)
{
	uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
			SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
			SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
			SND_QUEUE_SZ_64K};
	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

int
nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
{
	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
	return nicvf_mbox_rq_sync(nic);
}

int
nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
{
	struct pf_rq_cfg pf_rq_cfg = {.value = 0};
	struct rq_cfg rq_cfg = {.value = 0};

	if (nicvf_qset_rq_reclaim(nic, qidx))
		return NICVF_ERR_RQ_CLAIM;

	pf_rq_cfg.strip_pre_l2 = 0;
	/* First cache line of RBDR data will be allocated into L2C */
	pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
	pf_rq_cfg.cq_qs = nic->vf_id;
	pf_rq_cfg.cq_idx = qidx;
	pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
	pf_rq_cfg.rbdr_cont_idx = 0;
	pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
	pf_rq_cfg.rbdr_strt_idx = 0;

	/* Send a mailbox msg to PF to config RQ */
	if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
		return NICVF_ERR_RQ_PF_CFG;

	/* Select Rx backpressure */
	if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
		return NICVF_ERR_RQ_BP_CFG;

	/* Send a mailbox msg to PF to config RQ drop */
	if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
		return NICVF_ERR_RQ_DROP_CFG;

	/* Enable receive queue */
	rq_cfg.ena = 1;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);

	return 0;
}

int
nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
{
	uint64_t tail, head;

	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
		return NICVF_ERR_CQ_DISABLE;

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
	tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
	head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
	if (head | tail)
		return NICVF_ERR_CQ_RESET;

	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	return 0;
}
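
/*
 * Note: the hardware HEAD/TAIL registers hold byte offsets, so the
 * reclaim helpers shift them down by the descriptor size to obtain
 * indexes: >> 9 for 512-byte CQ entries, >> 4 for 16-byte SQ
 * subdescriptors and >> 3 for 8-byte RBDR entries.
 */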

int
nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
{
	int ret;
	struct cq_cfg cq_cfg = {.value = 0};

	ret = nicvf_qset_cq_reclaim(nic, qidx);
	if (ret)
		return ret;

	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);

	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	/* Writes of CQE will be allocated into L2C */
	cq_cfg.caching = 1;
	cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	return 0;
}

uint32_t
nicvf_qsize_cq_roundup(uint32_t val)
{
	uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
			CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
			CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
			CMP_QUEUE_SZ_64K};
	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

void
nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
{
	uint64_t val;

	val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
	if (enable)
		val |= (STRIP_FIRST_VLAN << 25);
	else
		val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);

	nic->vlan_strip = enable;
	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
}

void
nicvf_first_skip_config(struct nicvf *nic, uint8_t num_dwords)
{
	uint64_t val;

	val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
	val &= ~(0xfULL);
	val |= (num_dwords & 0xf);

	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
}

void
nicvf_apad_config(struct nicvf *nic, bool enable)
{
	uint64_t val;

	/* APAD always enabled in this device */
	if (!(nic->hwcap & NICVF_CAP_DISABLE_APAD))
		return;

	val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
	if (enable)
		val &= ~(1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);
	else
		val |= (1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);

	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
}

void
nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
{
	int idx;
	uint64_t addr, val;
	uint64_t *keyptr = (uint64_t *)key;

	addr = NIC_VNIC_RSS_KEY_0_4;
	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		val = nicvf_cpu_to_be_64(*keyptr);
		nicvf_reg_write(nic, addr, val);
		addr += sizeof(uint64_t);
		keyptr++;
	}
}
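
/*
 * Both key accessors assume the caller's buffer holds the full
 * RSS_HASH_KEY_BYTE_SIZE bytes: RSS_HASH_KEY_SIZE counts 64-bit key
 * registers, not bytes, so the loops walk the key eight bytes at a
 * time.
 */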

void
nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
{
	int idx;
	uint64_t addr, val;
	uint64_t *keyptr = (uint64_t *)key;

	addr = NIC_VNIC_RSS_KEY_0_4;
	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		val = nicvf_reg_read(nic, addr);
		*keyptr = nicvf_be_to_cpu_64(val);
		addr += sizeof(uint64_t);
		keyptr++;
	}
}

void
nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
{
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
}

uint64_t
nicvf_rss_get_cfg(struct nicvf *nic)
{
	return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
}

int
nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
{
	uint32_t idx;
	struct nicvf_rss_reta_info *rss = &nic->rss_info;

	/* Result will be stored in nic->rss_info.rss_size */
	if (nicvf_mbox_get_rss_size(nic))
		return NICVF_ERR_RSS_GET_SZ;

	assert(rss->rss_size > 0);
	rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);
	for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
		rss->ind_tbl[idx] = tbl[idx];

	if (nicvf_mbox_config_rss(nic))
		return NICVF_ERR_RSS_TBL_UPDATE;

	return NICVF_OK;
}

int
nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
{
	uint32_t idx;
	struct nicvf_rss_reta_info *rss = &nic->rss_info;

	/* Result will be stored in nic->rss_info.rss_size */
	if (nicvf_mbox_get_rss_size(nic))
		return NICVF_ERR_RSS_GET_SZ;

	assert(rss->rss_size > 0);
	rss->hash_bits = (uint8_t)nicvf_log2_u32(rss->rss_size);

	for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
		tbl[idx] = rss->ind_tbl[idx];

	return NICVF_OK;
}

int
nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg)
{
	uint32_t idx;
	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
	uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
	};

	if (nic->cpi_alg != CPI_ALG_NONE)
		return -EINVAL;

	if (cfg == 0)
		return -EINVAL;

	/* Update default RSS key and cfg */
	nicvf_rss_set_key(nic, default_key);
	nicvf_rss_set_cfg(nic, cfg);

	/* Update default RSS RETA */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		default_reta[idx] = idx % qcnt;

	return nicvf_rss_reta_update(nic, default_reta,
			NIC_MAX_RSS_IDR_TBL_SIZE);
}
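
/*
 * Example: with qcnt == 4 the default RETA above becomes
 * 0, 1, 2, 3, 0, 1, 2, 3, ... for all NIC_MAX_RSS_IDR_TBL_SIZE entries,
 * spreading flows evenly across the four receive queues.
 */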

int
nicvf_rss_term(struct nicvf *nic)
{
	uint32_t idx;
	uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];

	nicvf_rss_set_cfg(nic, 0);
	/* Redirect the output to the 0th queue */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		disable_rss[idx] = 0;

	return nicvf_rss_reta_update(nic, disable_rss,
			NIC_MAX_RSS_IDR_TBL_SIZE);
}

int
nicvf_loopback_config(struct nicvf *nic, bool enable)
{
	if (enable && nic->loopback_supported == 0)
		return NICVF_ERR_LOOPBACK_CFG;

	return nicvf_mbox_loopback_config(nic, enable);
}

void
nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
{
	stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
	stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
}

void
nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
		       uint16_t qidx)
{
	qstats->q_rx_bytes =
		nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
	qstats->q_rx_packets =
		nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
}

void
nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
		       uint16_t qidx)
{
	qstats->q_tx_bytes =
		nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
	qstats->q_tx_packets =
		nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);
}
919