xref: /dpdk/drivers/net/hns3/hns3_cmd.c (revision feb4548ffd80bf249239d99bf9053ecf78f815d1)
1737f30e1SWei Hu (Xavier) /* SPDX-License-Identifier: BSD-3-Clause
253e6f86cSMin Hu (Connor)  * Copyright(c) 2018-2021 HiSilicon Limited.
3737f30e1SWei Hu (Xavier)  */
4737f30e1SWei Hu (Xavier) 
5df96fd0dSBruce Richardson #include <ethdev_pci.h>
6737f30e1SWei Hu (Xavier) #include <rte_io.h>
7737f30e1SWei Hu (Xavier) 
8a4c7152dSHuisong Li #include "hns3_common.h"
9737f30e1SWei Hu (Xavier) #include "hns3_regs.h"
102790c646SWei Hu (Xavier) #include "hns3_intr.h"
11737f30e1SWei Hu (Xavier) #include "hns3_logs.h"
12737f30e1SWei Hu (Xavier) 
13737f30e1SWei Hu (Xavier) static int
14737f30e1SWei Hu (Xavier) hns3_ring_space(struct hns3_cmq_ring *ring)
15737f30e1SWei Hu (Xavier) {
16737f30e1SWei Hu (Xavier) 	int ntu = ring->next_to_use;
17737f30e1SWei Hu (Xavier) 	int ntc = ring->next_to_clean;
18737f30e1SWei Hu (Xavier) 	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
19737f30e1SWei Hu (Xavier) 
20737f30e1SWei Hu (Xavier) 	return ring->desc_num - used - 1;
21737f30e1SWei Hu (Xavier) }
22737f30e1SWei Hu (Xavier) 
23737f30e1SWei Hu (Xavier) static bool
24737f30e1SWei Hu (Xavier) is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
25737f30e1SWei Hu (Xavier) {
26737f30e1SWei Hu (Xavier) 	int ntu = ring->next_to_use;
27737f30e1SWei Hu (Xavier) 	int ntc = ring->next_to_clean;
28737f30e1SWei Hu (Xavier) 
29737f30e1SWei Hu (Xavier) 	if (ntu > ntc)
30737f30e1SWei Hu (Xavier) 		return head >= ntc && head <= ntu;
31737f30e1SWei Hu (Xavier) 
32737f30e1SWei Hu (Xavier) 	return head >= ntc || head <= ntu;
33737f30e1SWei Hu (Xavier) }
34737f30e1SWei Hu (Xavier) 
35737f30e1SWei Hu (Xavier) /*
36737f30e1SWei Hu (Xavier)  * hns3_allocate_dma_mem - Specific memory alloc for command function.
37737f30e1SWei Hu (Xavier)  * Malloc a memzone, which is a contiguous portion of physical memory identified
38737f30e1SWei Hu (Xavier)  * by a name.
39737f30e1SWei Hu (Xavier)  * @ring: pointer to the ring structure
40737f30e1SWei Hu (Xavier)  * @size: size of memory requested
41737f30e1SWei Hu (Xavier)  * @alignment: what to align the allocation to
42737f30e1SWei Hu (Xavier)  */
43737f30e1SWei Hu (Xavier) static int
44737f30e1SWei Hu (Xavier) hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
45737f30e1SWei Hu (Xavier) 		      uint64_t size, uint32_t alignment)
46737f30e1SWei Hu (Xavier) {
47e12a0166STyler Retzlaff 	static RTE_ATOMIC(uint64_t) hns3_dma_memzone_id;
48737f30e1SWei Hu (Xavier) 	const struct rte_memzone *mz = NULL;
49737f30e1SWei Hu (Xavier) 	char z_name[RTE_MEMZONE_NAMESIZE];
50737f30e1SWei Hu (Xavier) 
51d07fc02fSDavid Marchand 	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
52e12a0166STyler Retzlaff 		rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1, rte_memory_order_relaxed));
53737f30e1SWei Hu (Xavier) 	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
54737f30e1SWei Hu (Xavier) 					 RTE_MEMZONE_IOVA_CONTIG, alignment,
55737f30e1SWei Hu (Xavier) 					 RTE_PGSIZE_2M);
56737f30e1SWei Hu (Xavier) 	if (mz == NULL)
57737f30e1SWei Hu (Xavier) 		return -ENOMEM;
58737f30e1SWei Hu (Xavier) 
59737f30e1SWei Hu (Xavier) 	ring->buf_size = size;
60737f30e1SWei Hu (Xavier) 	ring->desc = mz->addr;
61737f30e1SWei Hu (Xavier) 	ring->desc_dma_addr = mz->iova;
62737f30e1SWei Hu (Xavier) 	ring->zone = (const void *)mz;
635608b547SHuisong Li 	hns3_dbg(hw, "cmd ring memzone name: %s", mz->name);
64737f30e1SWei Hu (Xavier) 
65737f30e1SWei Hu (Xavier) 	return 0;
66737f30e1SWei Hu (Xavier) }
67737f30e1SWei Hu (Xavier) 
68737f30e1SWei Hu (Xavier) static void
695608b547SHuisong Li hns3_free_dma_mem(struct hns3_cmq_ring *ring)
70737f30e1SWei Hu (Xavier) {
71737f30e1SWei Hu (Xavier) 	rte_memzone_free((const struct rte_memzone *)ring->zone);
72737f30e1SWei Hu (Xavier) 	ring->buf_size = 0;
73737f30e1SWei Hu (Xavier) 	ring->desc = NULL;
74737f30e1SWei Hu (Xavier) 	ring->desc_dma_addr = 0;
75737f30e1SWei Hu (Xavier) 	ring->zone = NULL;
76737f30e1SWei Hu (Xavier) }
77737f30e1SWei Hu (Xavier) 
78737f30e1SWei Hu (Xavier) static int
79737f30e1SWei Hu (Xavier) hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
80737f30e1SWei Hu (Xavier) {
81737f30e1SWei Hu (Xavier) 	int size  = ring->desc_num * sizeof(struct hns3_cmd_desc);
82737f30e1SWei Hu (Xavier) 
83737f30e1SWei Hu (Xavier) 	if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
84737f30e1SWei Hu (Xavier) 		hns3_err(hw, "allocate dma mem failed");
85737f30e1SWei Hu (Xavier) 		return -ENOMEM;
86737f30e1SWei Hu (Xavier) 	}
87737f30e1SWei Hu (Xavier) 
88737f30e1SWei Hu (Xavier) 	return 0;
89737f30e1SWei Hu (Xavier) }
90737f30e1SWei Hu (Xavier) 
91737f30e1SWei Hu (Xavier) static void
925608b547SHuisong Li hns3_free_cmd_desc(__rte_unused struct hns3_hw *hw, struct hns3_cmq_ring *ring)
93737f30e1SWei Hu (Xavier) {
94737f30e1SWei Hu (Xavier) 	if (ring->desc)
955608b547SHuisong Li 		hns3_free_dma_mem(ring);
96737f30e1SWei Hu (Xavier) }
97737f30e1SWei Hu (Xavier) 
98737f30e1SWei Hu (Xavier) static int
99737f30e1SWei Hu (Xavier) hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
100737f30e1SWei Hu (Xavier) {
101737f30e1SWei Hu (Xavier) 	struct hns3_cmq_ring *ring =
102737f30e1SWei Hu (Xavier) 		(ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
103737f30e1SWei Hu (Xavier) 	int ret;
104737f30e1SWei Hu (Xavier) 
105737f30e1SWei Hu (Xavier) 	ring->ring_type = ring_type;
106737f30e1SWei Hu (Xavier) 	ring->hw = hw;
107737f30e1SWei Hu (Xavier) 
108737f30e1SWei Hu (Xavier) 	ret = hns3_alloc_cmd_desc(hw, ring);
109737f30e1SWei Hu (Xavier) 	if (ret)
110737f30e1SWei Hu (Xavier) 		hns3_err(hw, "descriptor %s alloc error %d",
111737f30e1SWei Hu (Xavier) 			 (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
112737f30e1SWei Hu (Xavier) 
113737f30e1SWei Hu (Xavier) 	return ret;
114737f30e1SWei Hu (Xavier) }
115737f30e1SWei Hu (Xavier) 
116737f30e1SWei Hu (Xavier) void
117737f30e1SWei Hu (Xavier) hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
118737f30e1SWei Hu (Xavier) {
119737f30e1SWei Hu (Xavier) 	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
120737f30e1SWei Hu (Xavier) 	if (is_read)
121737f30e1SWei Hu (Xavier) 		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
122737f30e1SWei Hu (Xavier) 	else
123737f30e1SWei Hu (Xavier) 		desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
124737f30e1SWei Hu (Xavier) }
125737f30e1SWei Hu (Xavier) 
126737f30e1SWei Hu (Xavier) void
127737f30e1SWei Hu (Xavier) hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
128737f30e1SWei Hu (Xavier) 			  enum hns3_opcode_type opcode, bool is_read)
129737f30e1SWei Hu (Xavier) {
130737f30e1SWei Hu (Xavier) 	memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
131737f30e1SWei Hu (Xavier) 	desc->opcode = rte_cpu_to_le_16(opcode);
132737f30e1SWei Hu (Xavier) 	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
133737f30e1SWei Hu (Xavier) 
134737f30e1SWei Hu (Xavier) 	if (is_read)
135737f30e1SWei Hu (Xavier) 		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
136737f30e1SWei Hu (Xavier) }
137737f30e1SWei Hu (Xavier) 
138737f30e1SWei Hu (Xavier) static void
139737f30e1SWei Hu (Xavier) hns3_cmd_clear_regs(struct hns3_hw *hw)
140737f30e1SWei Hu (Xavier) {
141737f30e1SWei Hu (Xavier) 	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
142737f30e1SWei Hu (Xavier) 	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
143737f30e1SWei Hu (Xavier) 	hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
144737f30e1SWei Hu (Xavier) 	hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
145737f30e1SWei Hu (Xavier) 	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
146737f30e1SWei Hu (Xavier) 	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
147737f30e1SWei Hu (Xavier) 	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
148737f30e1SWei Hu (Xavier) 	hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
149737f30e1SWei Hu (Xavier) 	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
150737f30e1SWei Hu (Xavier) 	hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
151737f30e1SWei Hu (Xavier) }
152737f30e1SWei Hu (Xavier) 
153737f30e1SWei Hu (Xavier) static void
154737f30e1SWei Hu (Xavier) hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
155737f30e1SWei Hu (Xavier) {
156737f30e1SWei Hu (Xavier) 	uint64_t dma = ring->desc_dma_addr;
157737f30e1SWei Hu (Xavier) 
158737f30e1SWei Hu (Xavier) 	if (ring->ring_type == HNS3_TYPE_CSQ) {
159737f30e1SWei Hu (Xavier) 		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
160737f30e1SWei Hu (Xavier) 			       lower_32_bits(dma));
161737f30e1SWei Hu (Xavier) 		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
162737f30e1SWei Hu (Xavier) 			       upper_32_bits(dma));
163737f30e1SWei Hu (Xavier) 		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
164737f30e1SWei Hu (Xavier) 			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
165737f30e1SWei Hu (Xavier) 			       HNS3_NIC_SW_RST_RDY);
166737f30e1SWei Hu (Xavier) 		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
167737f30e1SWei Hu (Xavier) 		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
168737f30e1SWei Hu (Xavier) 	} else {
169737f30e1SWei Hu (Xavier) 		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
170737f30e1SWei Hu (Xavier) 			       lower_32_bits(dma));
171737f30e1SWei Hu (Xavier) 		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
172737f30e1SWei Hu (Xavier) 			       upper_32_bits(dma));
173737f30e1SWei Hu (Xavier) 		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
174737f30e1SWei Hu (Xavier) 			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
175737f30e1SWei Hu (Xavier) 		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
176737f30e1SWei Hu (Xavier) 		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
177737f30e1SWei Hu (Xavier) 	}
178737f30e1SWei Hu (Xavier) }
179737f30e1SWei Hu (Xavier) 
/* Program both command-queue rings into hardware. The CSQ is configured
 * first; its depth register write also raises HNS3_NIC_SW_RST_RDY
 * (see hns3_cmd_config_regs()).
 */
static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
	hns3_cmd_config_regs(&hw->cmq.csq);
	hns3_cmd_config_regs(&hw->cmq.crq);
}
186737f30e1SWei Hu (Xavier) 
187737f30e1SWei Hu (Xavier) static int
188737f30e1SWei Hu (Xavier) hns3_cmd_csq_clean(struct hns3_hw *hw)
189737f30e1SWei Hu (Xavier) {
190737f30e1SWei Hu (Xavier) 	struct hns3_cmq_ring *csq = &hw->cmq.csq;
191737f30e1SWei Hu (Xavier) 	uint32_t head;
19232040ae3SHongbo Zheng 	uint32_t addr;
193737f30e1SWei Hu (Xavier) 	int clean;
194737f30e1SWei Hu (Xavier) 
195737f30e1SWei Hu (Xavier) 	head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
19632040ae3SHongbo Zheng 	addr = hns3_read_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG);
19732040ae3SHongbo Zheng 	if (!is_valid_csq_clean_head(csq, head) || addr == 0) {
19832040ae3SHongbo Zheng 		hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
199737f30e1SWei Hu (Xavier) 			 csq->next_to_use, csq->next_to_clean);
200a1f381adSChengwen Feng 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
201e12a0166STyler Retzlaff 			rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
202e12a0166STyler Retzlaff 					 rte_memory_order_relaxed);
203a1f381adSChengwen Feng 			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
204a1f381adSChengwen Feng 		}
2052790c646SWei Hu (Xavier) 
206737f30e1SWei Hu (Xavier) 		return -EIO;
207737f30e1SWei Hu (Xavier) 	}
208737f30e1SWei Hu (Xavier) 
209737f30e1SWei Hu (Xavier) 	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
210737f30e1SWei Hu (Xavier) 	csq->next_to_clean = head;
211737f30e1SWei Hu (Xavier) 	return clean;
212737f30e1SWei Hu (Xavier) }
213737f30e1SWei Hu (Xavier) 
214737f30e1SWei Hu (Xavier) static int
215737f30e1SWei Hu (Xavier) hns3_cmd_csq_done(struct hns3_hw *hw)
216737f30e1SWei Hu (Xavier) {
217737f30e1SWei Hu (Xavier) 	uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
218737f30e1SWei Hu (Xavier) 
219737f30e1SWei Hu (Xavier) 	return head == hw->cmq.csq.next_to_use;
220737f30e1SWei Hu (Xavier) }
221737f30e1SWei Hu (Xavier) 
222737f30e1SWei Hu (Xavier) static bool
223737f30e1SWei Hu (Xavier) hns3_is_special_opcode(uint16_t opcode)
224737f30e1SWei Hu (Xavier) {
225737f30e1SWei Hu (Xavier) 	/*
226737f30e1SWei Hu (Xavier) 	 * These commands have several descriptors,
227737f30e1SWei Hu (Xavier) 	 * and use the first one to save opcode and return value.
228737f30e1SWei Hu (Xavier) 	 */
229737f30e1SWei Hu (Xavier) 	uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
230737f30e1SWei Hu (Xavier) 				  HNS3_OPC_STATS_32_BIT,
231737f30e1SWei Hu (Xavier) 				  HNS3_OPC_STATS_MAC,
232737f30e1SWei Hu (Xavier) 				  HNS3_OPC_STATS_MAC_ALL,
233737f30e1SWei Hu (Xavier) 				  HNS3_OPC_QUERY_32_BIT_REG,
2341c1eb759SHongbo Zheng 				  HNS3_OPC_QUERY_64_BIT_REG,
2351c1eb759SHongbo Zheng 				  HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT,
2361c1eb759SHongbo Zheng 				  HNS3_OPC_QUERY_CLEAR_PF_RAS_INT,
2371c1eb759SHongbo Zheng 				  HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT,
2381c1eb759SHongbo Zheng 				  HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT,
2391c1eb759SHongbo Zheng 				  HNS3_OPC_QUERY_ALL_ERR_INFO,};
240737f30e1SWei Hu (Xavier) 	uint32_t i;
241737f30e1SWei Hu (Xavier) 
24277d1f6b1SChengwen Feng 	for (i = 0; i < RTE_DIM(spec_opcode); i++)
243737f30e1SWei Hu (Xavier) 		if (spec_opcode[i] == opcode)
244737f30e1SWei Hu (Xavier) 			return true;
245737f30e1SWei Hu (Xavier) 
246737f30e1SWei Hu (Xavier) 	return false;
247737f30e1SWei Hu (Xavier) }
248737f30e1SWei Hu (Xavier) 
249737f30e1SWei Hu (Xavier) static int
250737f30e1SWei Hu (Xavier) hns3_cmd_convert_err_code(uint16_t desc_ret)
251737f30e1SWei Hu (Xavier) {
252040c4fb8SLijun Ou 	static const struct {
253040c4fb8SLijun Ou 		uint16_t imp_errcode;
254040c4fb8SLijun Ou 		int linux_errcode;
255040c4fb8SLijun Ou 	} hns3_cmdq_status[] = {
256040c4fb8SLijun Ou 		{HNS3_CMD_EXEC_SUCCESS, 0},
257040c4fb8SLijun Ou 		{HNS3_CMD_NO_AUTH, -EPERM},
258040c4fb8SLijun Ou 		{HNS3_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
259040c4fb8SLijun Ou 		{HNS3_CMD_QUEUE_FULL, -EXFULL},
260040c4fb8SLijun Ou 		{HNS3_CMD_NEXT_ERR, -ENOSR},
261040c4fb8SLijun Ou 		{HNS3_CMD_UNEXE_ERR, -ENOTBLK},
262040c4fb8SLijun Ou 		{HNS3_CMD_PARA_ERR, -EINVAL},
263040c4fb8SLijun Ou 		{HNS3_CMD_RESULT_ERR, -ERANGE},
264040c4fb8SLijun Ou 		{HNS3_CMD_TIMEOUT, -ETIME},
265040c4fb8SLijun Ou 		{HNS3_CMD_HILINK_ERR, -ENOLINK},
266040c4fb8SLijun Ou 		{HNS3_CMD_QUEUE_ILLEGAL, -ENXIO},
267040c4fb8SLijun Ou 		{HNS3_CMD_INVALID, -EBADR},
268040c4fb8SLijun Ou 		{HNS3_CMD_ROH_CHECK_FAIL, -EINVAL}
269040c4fb8SLijun Ou 	};
270040c4fb8SLijun Ou 
271040c4fb8SLijun Ou 	uint32_t i;
272040c4fb8SLijun Ou 
27377d1f6b1SChengwen Feng 	for (i = 0; i < RTE_DIM(hns3_cmdq_status); i++)
274040c4fb8SLijun Ou 		if (hns3_cmdq_status[i].imp_errcode == desc_ret)
275040c4fb8SLijun Ou 			return hns3_cmdq_status[i].linux_errcode;
276040c4fb8SLijun Ou 
2774f4ee5b7SChengwen Feng 	return -EREMOTEIO;
278737f30e1SWei Hu (Xavier) }
279737f30e1SWei Hu (Xavier) 
280737f30e1SWei Hu (Xavier) static int
281737f30e1SWei Hu (Xavier) hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
282737f30e1SWei Hu (Xavier) 			    struct hns3_cmd_desc *desc, int num, int ntc)
283737f30e1SWei Hu (Xavier) {
284737f30e1SWei Hu (Xavier) 	uint16_t opcode, desc_ret;
285737f30e1SWei Hu (Xavier) 	int current_ntc = ntc;
286737f30e1SWei Hu (Xavier) 	int handle;
287737f30e1SWei Hu (Xavier) 
288737f30e1SWei Hu (Xavier) 	opcode = rte_le_to_cpu_16(desc[0].opcode);
289737f30e1SWei Hu (Xavier) 	for (handle = 0; handle < num; handle++) {
290737f30e1SWei Hu (Xavier) 		/* Get the result of hardware write back */
291737f30e1SWei Hu (Xavier) 		desc[handle] = hw->cmq.csq.desc[current_ntc];
292737f30e1SWei Hu (Xavier) 
293737f30e1SWei Hu (Xavier) 		current_ntc++;
294737f30e1SWei Hu (Xavier) 		if (current_ntc == hw->cmq.csq.desc_num)
295737f30e1SWei Hu (Xavier) 			current_ntc = 0;
296737f30e1SWei Hu (Xavier) 	}
297737f30e1SWei Hu (Xavier) 
298737f30e1SWei Hu (Xavier) 	if (likely(!hns3_is_special_opcode(opcode)))
299737f30e1SWei Hu (Xavier) 		desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
300737f30e1SWei Hu (Xavier) 	else
301737f30e1SWei Hu (Xavier) 		desc_ret = rte_le_to_cpu_16(desc[0].retval);
302737f30e1SWei Hu (Xavier) 
303737f30e1SWei Hu (Xavier) 	hw->cmq.last_status = desc_ret;
304737f30e1SWei Hu (Xavier) 	return hns3_cmd_convert_err_code(desc_ret);
305737f30e1SWei Hu (Xavier) }
306737f30e1SWei Hu (Xavier) 
307737f30e1SWei Hu (Xavier) static int hns3_cmd_poll_reply(struct hns3_hw *hw)
308737f30e1SWei Hu (Xavier) {
3092790c646SWei Hu (Xavier) 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
310737f30e1SWei Hu (Xavier) 	uint32_t timeout = 0;
311737f30e1SWei Hu (Xavier) 
312737f30e1SWei Hu (Xavier) 	do {
313737f30e1SWei Hu (Xavier) 		if (hns3_cmd_csq_done(hw))
314737f30e1SWei Hu (Xavier) 			return 0;
315737f30e1SWei Hu (Xavier) 
316e12a0166STyler Retzlaff 		if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
317737f30e1SWei Hu (Xavier) 			hns3_err(hw,
318737f30e1SWei Hu (Xavier) 				 "Don't wait for reply because of disable_cmd");
319737f30e1SWei Hu (Xavier) 			return -EBUSY;
320737f30e1SWei Hu (Xavier) 		}
321737f30e1SWei Hu (Xavier) 
3222790c646SWei Hu (Xavier) 		if (is_reset_pending(hns)) {
3232790c646SWei Hu (Xavier) 			hns3_err(hw, "Don't wait for reply because of reset pending");
3242790c646SWei Hu (Xavier) 			return -EIO;
3252790c646SWei Hu (Xavier) 		}
3262790c646SWei Hu (Xavier) 
327737f30e1SWei Hu (Xavier) 		rte_delay_us(1);
328737f30e1SWei Hu (Xavier) 		timeout++;
329737f30e1SWei Hu (Xavier) 	} while (timeout < hw->cmq.tx_timeout);
330737f30e1SWei Hu (Xavier) 	hns3_err(hw, "Wait for reply timeout");
3311a028f2bSHongbo Zheng 	return -ETIME;
332737f30e1SWei Hu (Xavier) }
333737f30e1SWei Hu (Xavier) 
334737f30e1SWei Hu (Xavier) /*
335737f30e1SWei Hu (Xavier)  * hns3_cmd_send - send command to command queue
336737f30e1SWei Hu (Xavier)  *
3374f4ee5b7SChengwen Feng  * @param hw
3384f4ee5b7SChengwen Feng  *   pointer to the hw struct
3394f4ee5b7SChengwen Feng  * @param desc
3404f4ee5b7SChengwen Feng  *   prefilled descriptor for describing the command
3414f4ee5b7SChengwen Feng  * @param num
3424f4ee5b7SChengwen Feng  *   the number of descriptors to be sent
3434f4ee5b7SChengwen Feng  * @return
3444f4ee5b7SChengwen Feng  *   - -EBUSY if detect device is in resetting
3454f4ee5b7SChengwen Feng  *   - -EIO   if detect cmd csq corrupted (due to reset) or
3464f4ee5b7SChengwen Feng  *            there is reset pending
3474f4ee5b7SChengwen Feng  *   - -ENOMEM/-ETIME/...(Non-Zero) if other error case
3484f4ee5b7SChengwen Feng  *   - Zero   if operation completed successfully
3494f4ee5b7SChengwen Feng  *
3504f4ee5b7SChengwen Feng  * Note -BUSY/-EIO only used in reset case
3514f4ee5b7SChengwen Feng  *
3524f4ee5b7SChengwen Feng  * Note this is the main send command for command queue, it
353737f30e1SWei Hu (Xavier)  * sends the queue, cleans the queue, etc
354737f30e1SWei Hu (Xavier)  */
355737f30e1SWei Hu (Xavier) int
356737f30e1SWei Hu (Xavier) hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
357737f30e1SWei Hu (Xavier) {
358737f30e1SWei Hu (Xavier) 	struct hns3_cmd_desc *desc_to_use;
359737f30e1SWei Hu (Xavier) 	int handle = 0;
360737f30e1SWei Hu (Xavier) 	int retval;
361737f30e1SWei Hu (Xavier) 	uint32_t ntc;
362737f30e1SWei Hu (Xavier) 
363e12a0166STyler Retzlaff 	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
364737f30e1SWei Hu (Xavier) 		return -EBUSY;
365737f30e1SWei Hu (Xavier) 
366737f30e1SWei Hu (Xavier) 	rte_spinlock_lock(&hw->cmq.csq.lock);
367737f30e1SWei Hu (Xavier) 
368737f30e1SWei Hu (Xavier) 	/* Clean the command send queue */
369737f30e1SWei Hu (Xavier) 	retval = hns3_cmd_csq_clean(hw);
370737f30e1SWei Hu (Xavier) 	if (retval < 0) {
371737f30e1SWei Hu (Xavier) 		rte_spinlock_unlock(&hw->cmq.csq.lock);
372737f30e1SWei Hu (Xavier) 		return retval;
373737f30e1SWei Hu (Xavier) 	}
374737f30e1SWei Hu (Xavier) 
375737f30e1SWei Hu (Xavier) 	if (num > hns3_ring_space(&hw->cmq.csq)) {
376737f30e1SWei Hu (Xavier) 		rte_spinlock_unlock(&hw->cmq.csq.lock);
377737f30e1SWei Hu (Xavier) 		return -ENOMEM;
378737f30e1SWei Hu (Xavier) 	}
379737f30e1SWei Hu (Xavier) 
380737f30e1SWei Hu (Xavier) 	/*
381737f30e1SWei Hu (Xavier) 	 * Record the location of desc in the ring for this time
382737f30e1SWei Hu (Xavier) 	 * which will be use for hardware to write back
383737f30e1SWei Hu (Xavier) 	 */
384737f30e1SWei Hu (Xavier) 	ntc = hw->cmq.csq.next_to_use;
385737f30e1SWei Hu (Xavier) 
386737f30e1SWei Hu (Xavier) 	while (handle < num) {
387737f30e1SWei Hu (Xavier) 		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
388737f30e1SWei Hu (Xavier) 		*desc_to_use = desc[handle];
389737f30e1SWei Hu (Xavier) 		(hw->cmq.csq.next_to_use)++;
390737f30e1SWei Hu (Xavier) 		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
391737f30e1SWei Hu (Xavier) 			hw->cmq.csq.next_to_use = 0;
392737f30e1SWei Hu (Xavier) 		handle++;
393737f30e1SWei Hu (Xavier) 	}
394737f30e1SWei Hu (Xavier) 
395737f30e1SWei Hu (Xavier) 	/* Write to hardware */
396737f30e1SWei Hu (Xavier) 	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);
397737f30e1SWei Hu (Xavier) 
398737f30e1SWei Hu (Xavier) 	/*
399737f30e1SWei Hu (Xavier) 	 * If the command is sync, wait for the firmware to write back,
400737f30e1SWei Hu (Xavier) 	 * if multi descriptors to be sent, use the first one to check.
401737f30e1SWei Hu (Xavier) 	 */
402737f30e1SWei Hu (Xavier) 	if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
403737f30e1SWei Hu (Xavier) 		retval = hns3_cmd_poll_reply(hw);
404737f30e1SWei Hu (Xavier) 		if (!retval)
405737f30e1SWei Hu (Xavier) 			retval = hns3_cmd_get_hardware_reply(hw, desc, num,
406737f30e1SWei Hu (Xavier) 							     ntc);
407737f30e1SWei Hu (Xavier) 	}
408737f30e1SWei Hu (Xavier) 
409737f30e1SWei Hu (Xavier) 	rte_spinlock_unlock(&hw->cmq.csq.lock);
410737f30e1SWei Hu (Xavier) 	return retval;
411737f30e1SWei Hu (Xavier) }
412737f30e1SWei Hu (Xavier) 
41370791213SChengwen Feng static const char *
41470791213SChengwen Feng hns3_get_caps_name(uint32_t caps_id)
41570791213SChengwen Feng {
41670791213SChengwen Feng 	const struct {
41770791213SChengwen Feng 		enum HNS3_CAPS_BITS caps;
41870791213SChengwen Feng 		const char *name;
41970791213SChengwen Feng 	} dev_caps[] = {
42070791213SChengwen Feng 		{ HNS3_CAPS_FD_QUEUE_REGION_B, "fd_queue_region" },
42170791213SChengwen Feng 		{ HNS3_CAPS_PTP_B,             "ptp"             },
4226393fc0bSDongdong Liu 		{ HNS3_CAPS_SIMPLE_BD_B,       "simple_bd"       },
423fe9c27a3SMin Hu (Connor) 		{ HNS3_CAPS_TX_PUSH_B,         "tx_push"         },
42470791213SChengwen Feng 		{ HNS3_CAPS_PHY_IMP_B,         "phy_imp"         },
42570791213SChengwen Feng 		{ HNS3_CAPS_TQP_TXRX_INDEP_B,  "tqp_txrx_indep"  },
42670791213SChengwen Feng 		{ HNS3_CAPS_HW_PAD_B,          "hw_pad"          },
42770791213SChengwen Feng 		{ HNS3_CAPS_STASH_B,           "stash"           },
42870791213SChengwen Feng 		{ HNS3_CAPS_UDP_TUNNEL_CSUM_B, "udp_tunnel_csum" },
42970791213SChengwen Feng 		{ HNS3_CAPS_RAS_IMP_B,         "ras_imp"         },
430fc18d1b4SHuisong Li 		{ HNS3_CAPS_RXD_ADV_LAYOUT_B,  "rxd_adv_layout"  },
43175e413b7SHuisong Li 		{ HNS3_CAPS_TM_B,              "tm_capability"   },
43275e413b7SHuisong Li 		{ HNS3_CAPS_FC_AUTO_B,         "fc_autoneg"      }
43370791213SChengwen Feng 	};
43470791213SChengwen Feng 	uint32_t i;
43570791213SChengwen Feng 
43670791213SChengwen Feng 	for (i = 0; i < RTE_DIM(dev_caps); i++) {
43770791213SChengwen Feng 		if (dev_caps[i].caps == caps_id)
43870791213SChengwen Feng 			return dev_caps[i].name;
43970791213SChengwen Feng 	}
44070791213SChengwen Feng 
44170791213SChengwen Feng 	return "unknown";
44270791213SChengwen Feng }
44370791213SChengwen Feng 
44470791213SChengwen Feng static void
44570791213SChengwen Feng hns3_mask_capability(struct hns3_hw *hw,
44670791213SChengwen Feng 		     struct hns3_query_version_cmd *cmd)
44770791213SChengwen Feng {
44870791213SChengwen Feng #define MAX_CAPS_BIT	64
44970791213SChengwen Feng 
45070791213SChengwen Feng 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
45170791213SChengwen Feng 	uint64_t caps_org, caps_new, caps_masked;
45270791213SChengwen Feng 	uint32_t i;
45370791213SChengwen Feng 
45470791213SChengwen Feng 	if (hns->dev_caps_mask == 0)
45570791213SChengwen Feng 		return;
45670791213SChengwen Feng 
45770791213SChengwen Feng 	memcpy(&caps_org, &cmd->caps[0], sizeof(caps_org));
45870791213SChengwen Feng 	caps_org = rte_le_to_cpu_64(caps_org);
45970791213SChengwen Feng 	caps_new = caps_org ^ (caps_org & hns->dev_caps_mask);
46070791213SChengwen Feng 	caps_masked = caps_org ^ caps_new;
46170791213SChengwen Feng 	caps_new = rte_cpu_to_le_64(caps_new);
46270791213SChengwen Feng 	memcpy(&cmd->caps[0], &caps_new, sizeof(caps_new));
46370791213SChengwen Feng 
46470791213SChengwen Feng 	for (i = 0; i < MAX_CAPS_BIT; i++) {
46570791213SChengwen Feng 		if (!(caps_masked & BIT_ULL(i)))
46670791213SChengwen Feng 			continue;
4677be78d02SJosh Soref 		hns3_info(hw, "mask capability: id-%u, name-%s.",
46870791213SChengwen Feng 			  i, hns3_get_caps_name(i));
46970791213SChengwen Feng 	}
47070791213SChengwen Feng }
47170791213SChengwen Feng 
472fb5e9069SChengwen Feng static void
473fb5e9069SChengwen Feng hns3_parse_capability(struct hns3_hw *hw,
47443875235SWei Hu (Xavier) 		      struct hns3_query_version_cmd *cmd)
47543875235SWei Hu (Xavier) {
47643875235SWei Hu (Xavier) 	uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);
47743875235SWei Hu (Xavier) 
478f8e7fcbfSChengwen Feng 	if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
479f8e7fcbfSChengwen Feng 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
480f8e7fcbfSChengwen Feng 			     1);
48155c038c2SChengwen Feng 	if (hns3_get_bit(caps, HNS3_CAPS_PTP_B)) {
48255c038c2SChengwen Feng 		/*
48355c038c2SChengwen Feng 		 * PTP depends on special packet type reported by hardware which
48455c038c2SChengwen Feng 		 * enabled rxd advanced layout, so if the hardware doesn't
48555c038c2SChengwen Feng 		 * support rxd advanced layout, driver should ignore the PTP
48655c038c2SChengwen Feng 		 * capability.
48755c038c2SChengwen Feng 		 */
48855c038c2SChengwen Feng 		if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
48943875235SWei Hu (Xavier) 			hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
49055c038c2SChengwen Feng 		else
49155c038c2SChengwen Feng 			hns3_warn(hw, "ignore PTP capability due to lack of "
49255c038c2SChengwen Feng 				  "rxd advanced layout capability.");
49355c038c2SChengwen Feng 	}
4946393fc0bSDongdong Liu 	if (hns3_get_bit(caps, HNS3_CAPS_SIMPLE_BD_B))
4956393fc0bSDongdong Liu 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_SIMPLE_BD_B, 1);
496fe9c27a3SMin Hu (Connor) 	if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
497fe9c27a3SMin Hu (Connor) 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
49843875235SWei Hu (Xavier) 	if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
4992e4859f3SHuisong Li 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
50043875235SWei Hu (Xavier) 	if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
501fa29fe45SChengchang Tang 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
50243875235SWei Hu (Xavier) 	if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
50343875235SWei Hu (Xavier) 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
504fb5e9069SChengwen Feng 	if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
505fb5e9069SChengwen Feng 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
506fb5e9069SChengwen Feng 			     1);
507d0ab89e6SChengchang Tang 	if (hns3_get_bit(caps, HNS3_CAPS_UDP_TUNNEL_CSUM_B))
508d0ab89e6SChengchang Tang 		hns3_set_bit(hw->capability,
509d0ab89e6SChengchang Tang 				HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B, 1);
5101c1eb759SHongbo Zheng 	if (hns3_get_bit(caps, HNS3_CAPS_RAS_IMP_B))
5111c1eb759SHongbo Zheng 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RAS_IMP_B, 1);
512fc18d1b4SHuisong Li 	if (hns3_get_bit(caps, HNS3_CAPS_TM_B))
513fc18d1b4SHuisong Li 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TM_B, 1);
51475e413b7SHuisong Li 	if (hns3_get_bit(caps, HNS3_CAPS_FC_AUTO_B))
51575e413b7SHuisong Li 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FC_AUTO_B, 1);
516a4b2c681SHuisong Li 	if (hns3_get_bit(caps, HNS3_CAPS_GRO_B))
517a4b2c681SHuisong Li 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_GRO_B, 1);
51843875235SWei Hu (Xavier) }
51943875235SWei Hu (Xavier) 
/* Build the little-endian API-capability word the driver advertises to
 * firmware in the query-version command; currently only the flexible
 * RSS table capability bit is reported.
 */
static uint32_t
hns3_build_api_caps(void)
{
	uint32_t api_caps = 0;

	hns3_set_bit(api_caps, HNS3_API_CAP_FLEX_RSS_TBL_B, 1);

	return rte_cpu_to_le_32(api_caps);
}
5290fce2c46SLijun Ou 
530ac61c444SHuisong Li static void
531ac61c444SHuisong Li hns3_set_dcb_capability(struct hns3_hw *hw)
532ac61c444SHuisong Li {
533ac61c444SHuisong Li 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
534ac61c444SHuisong Li 	struct rte_pci_device *pci_dev;
535ac61c444SHuisong Li 	struct rte_eth_dev *eth_dev;
536ac61c444SHuisong Li 	uint16_t device_id;
537ac61c444SHuisong Li 
538ac61c444SHuisong Li 	if (hns->is_vf)
539ac61c444SHuisong Li 		return;
540ac61c444SHuisong Li 
541ac61c444SHuisong Li 	eth_dev = &rte_eth_devices[hw->data->port_id];
542ac61c444SHuisong Li 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
543ac61c444SHuisong Li 	device_id = pci_dev->id.device_id;
544ac61c444SHuisong Li 
545ac61c444SHuisong Li 	if (device_id == HNS3_DEV_ID_25GE_RDMA ||
546ac61c444SHuisong Li 	    device_id == HNS3_DEV_ID_50GE_RDMA ||
547ac61c444SHuisong Li 	    device_id == HNS3_DEV_ID_100G_RDMA_MACSEC ||
548*feb4548fSDengdui Huang 	    device_id == HNS3_DEV_ID_200G_RDMA)
549ac61c444SHuisong Li 		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);
550ac61c444SHuisong Li }
551ac61c444SHuisong Li 
/* Set capabilities the firmware may not report itself: DCB (decided by
 * PCI device id) and, on pre-HIP09 revisions, GRO.
 */
static void
hns3_set_default_capability(struct hns3_hw *hw)
{
	hns3_set_dcb_capability(hw);

	/*
	 * The firmware of the network engines with HIP08 do not report some
	 * capabilities, like GRO. Set default capabilities for it.
	 */
	if (hw->revision < PCI_REVISION_ID_HIP09_A)
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_GRO_B, 1);
}
564a4b2c681SHuisong Li 
565a32eaf43SChengchang Tang static int
56643875235SWei Hu (Xavier) hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
567737f30e1SWei Hu (Xavier) {
568737f30e1SWei Hu (Xavier) 	struct hns3_query_version_cmd *resp;
569737f30e1SWei Hu (Xavier) 	struct hns3_cmd_desc desc;
570737f30e1SWei Hu (Xavier) 	int ret;
571737f30e1SWei Hu (Xavier) 
572737f30e1SWei Hu (Xavier) 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1);
573737f30e1SWei Hu (Xavier) 	resp = (struct hns3_query_version_cmd *)desc.data;
5740fce2c46SLijun Ou 	resp->api_caps = hns3_build_api_caps();
575737f30e1SWei Hu (Xavier) 
576737f30e1SWei Hu (Xavier) 	/* Initialize the cmd function */
577737f30e1SWei Hu (Xavier) 	ret = hns3_cmd_send(hw, &desc, 1);
57843875235SWei Hu (Xavier) 	if (ret)
579737f30e1SWei Hu (Xavier) 		return ret;
58043875235SWei Hu (Xavier) 
58143875235SWei Hu (Xavier) 	hw->fw_version = rte_le_to_cpu_32(resp->firmware);
582ac61c444SHuisong Li 
583a4b2c681SHuisong Li 	hns3_set_default_capability(hw);
584ac61c444SHuisong Li 
58570791213SChengwen Feng 	/*
58670791213SChengwen Feng 	 * Make sure mask the capability before parse capability because it
58770791213SChengwen Feng 	 * may overwrite resp's data.
58870791213SChengwen Feng 	 */
58970791213SChengwen Feng 	hns3_mask_capability(hw, resp);
59043875235SWei Hu (Xavier) 	hns3_parse_capability(hw, resp);
59143875235SWei Hu (Xavier) 
59243875235SWei Hu (Xavier) 	return 0;
593737f30e1SWei Hu (Xavier) }
594737f30e1SWei Hu (Xavier) 
595737f30e1SWei Hu (Xavier) int
596737f30e1SWei Hu (Xavier) hns3_cmd_init_queue(struct hns3_hw *hw)
597737f30e1SWei Hu (Xavier) {
598737f30e1SWei Hu (Xavier) 	int ret;
599737f30e1SWei Hu (Xavier) 
600737f30e1SWei Hu (Xavier) 	/* Setup the lock for command queue */
601737f30e1SWei Hu (Xavier) 	rte_spinlock_init(&hw->cmq.csq.lock);
602737f30e1SWei Hu (Xavier) 	rte_spinlock_init(&hw->cmq.crq.lock);
603737f30e1SWei Hu (Xavier) 
604737f30e1SWei Hu (Xavier) 	/*
605737f30e1SWei Hu (Xavier) 	 * Clear up all command register,
606737f30e1SWei Hu (Xavier) 	 * in case there are some residual values
607737f30e1SWei Hu (Xavier) 	 */
608737f30e1SWei Hu (Xavier) 	hns3_cmd_clear_regs(hw);
609737f30e1SWei Hu (Xavier) 
610737f30e1SWei Hu (Xavier) 	/* Setup the queue entries for use cmd queue */
611737f30e1SWei Hu (Xavier) 	hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
612737f30e1SWei Hu (Xavier) 	hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
613737f30e1SWei Hu (Xavier) 
614737f30e1SWei Hu (Xavier) 	/* Setup Tx write back timeout */
615737f30e1SWei Hu (Xavier) 	hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;
616737f30e1SWei Hu (Xavier) 
617737f30e1SWei Hu (Xavier) 	/* Setup queue rings */
618737f30e1SWei Hu (Xavier) 	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
619737f30e1SWei Hu (Xavier) 	if (ret) {
620737f30e1SWei Hu (Xavier) 		PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
621737f30e1SWei Hu (Xavier) 		return ret;
622737f30e1SWei Hu (Xavier) 	}
623737f30e1SWei Hu (Xavier) 
624737f30e1SWei Hu (Xavier) 	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
625737f30e1SWei Hu (Xavier) 	if (ret) {
626737f30e1SWei Hu (Xavier) 		PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
627737f30e1SWei Hu (Xavier) 		goto err_crq;
628737f30e1SWei Hu (Xavier) 	}
629737f30e1SWei Hu (Xavier) 
630737f30e1SWei Hu (Xavier) 	return 0;
631737f30e1SWei Hu (Xavier) 
632737f30e1SWei Hu (Xavier) err_crq:
633737f30e1SWei Hu (Xavier) 	hns3_free_cmd_desc(hw, &hw->cmq.csq);
634737f30e1SWei Hu (Xavier) 
635737f30e1SWei Hu (Xavier) 	return ret;
636737f30e1SWei Hu (Xavier) }
637737f30e1SWei Hu (Xavier) 
6382192c428SHuisong Li static void
6392192c428SHuisong Li hns3_update_dev_lsc_cap(struct hns3_hw *hw, int fw_compact_cmd_result)
6402192c428SHuisong Li {
6412192c428SHuisong Li 	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
6422192c428SHuisong Li 
6432192c428SHuisong Li 	if (hw->adapter_state != HNS3_NIC_UNINITIALIZED)
6442192c428SHuisong Li 		return;
6452192c428SHuisong Li 
6462192c428SHuisong Li 	if (fw_compact_cmd_result != 0) {
6472192c428SHuisong Li 		/*
6482192c428SHuisong Li 		 * If fw_compact_cmd_result is not zero, it means firmware don't
6492192c428SHuisong Li 		 * support link status change interrupt.
6502192c428SHuisong Li 		 * Framework already set RTE_ETH_DEV_INTR_LSC bit because driver
6512192c428SHuisong Li 		 * declared RTE_PCI_DRV_INTR_LSC in drv_flags. It need to clear
6522192c428SHuisong Li 		 * the RTE_ETH_DEV_INTR_LSC capability when detect firmware
6532192c428SHuisong Li 		 * don't support link status change interrupt.
6542192c428SHuisong Li 		 */
6552192c428SHuisong Li 		dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
6562192c428SHuisong Li 	}
6572192c428SHuisong Li }
6582192c428SHuisong Li 
65975e413b7SHuisong Li static void
66075e413b7SHuisong Li hns3_set_fc_autoneg_cap(struct hns3_adapter *hns, int fw_compact_cmd_result)
66175e413b7SHuisong Li {
66275e413b7SHuisong Li 	struct hns3_hw *hw = &hns->hw;
66375e413b7SHuisong Li 	struct hns3_mac *mac = &hw->mac;
66475e413b7SHuisong Li 
66575e413b7SHuisong Li 	if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) {
66675e413b7SHuisong Li 		hns->pf.support_fc_autoneg = true;
66775e413b7SHuisong Li 		return;
66875e413b7SHuisong Li 	}
66975e413b7SHuisong Li 
67075e413b7SHuisong Li 	/*
67175e413b7SHuisong Li 	 * Flow control auto-negotiation requires the cooperation of the driver
67275e413b7SHuisong Li 	 * and firmware.
67375e413b7SHuisong Li 	 */
67475e413b7SHuisong Li 	hns->pf.support_fc_autoneg = (hns3_dev_get_support(hw, FC_AUTO) &&
67575e413b7SHuisong Li 					fw_compact_cmd_result == 0) ?
67675e413b7SHuisong Li 					true : false;
67775e413b7SHuisong Li }
67875e413b7SHuisong Li 
6792192c428SHuisong Li static int
6802192c428SHuisong Li hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result)
6812192c428SHuisong Li {
68275e413b7SHuisong Li 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
68375e413b7SHuisong Li 
684efcaa81eSChengchang Tang 	if (result != 0 && hns3_dev_get_support(hw, COPPER)) {
6852192c428SHuisong Li 		hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.",
6862192c428SHuisong Li 			 result);
6872192c428SHuisong Li 		return result;
6882192c428SHuisong Li 	}
6892192c428SHuisong Li 
6902192c428SHuisong Li 	hns3_update_dev_lsc_cap(hw, result);
69175e413b7SHuisong Li 	hns3_set_fc_autoneg_cap(hns, result);
6922192c428SHuisong Li 
6932192c428SHuisong Li 	return 0;
6942192c428SHuisong Li }
6952192c428SHuisong Li 
6962192c428SHuisong Li static int
6972192c428SHuisong Li hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
6982192c428SHuisong Li {
6992192c428SHuisong Li 	struct hns3_firmware_compat_cmd *req;
7002192c428SHuisong Li 	struct hns3_cmd_desc desc;
7012192c428SHuisong Li 	uint32_t compat = 0;
7022192c428SHuisong Li 
7032192c428SHuisong Li 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false);
7042192c428SHuisong Li 	req = (struct hns3_firmware_compat_cmd *)desc.data;
7052192c428SHuisong Li 
7062192c428SHuisong Li 	if (is_init) {
7072192c428SHuisong Li 		hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
7082192c428SHuisong Li 		hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
709f7b60b53SJie Hai 		hns3_set_bit(compat, HNS3_LLRS_FEC_EN_B, 1);
710efcaa81eSChengchang Tang 		if (hns3_dev_get_support(hw, COPPER))
7112192c428SHuisong Li 			hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1);
71275e413b7SHuisong Li 		if (hns3_dev_get_support(hw, FC_AUTO))
71375e413b7SHuisong Li 			hns3_set_bit(compat, HNS3_MAC_FC_AUTONEG_EN_B, 1);
7142192c428SHuisong Li 	}
7152192c428SHuisong Li 	req->compat = rte_cpu_to_le_32(compat);
7162192c428SHuisong Li 
7172192c428SHuisong Li 	return hns3_cmd_send(hw, &desc, 1);
7182192c428SHuisong Li }
7192192c428SHuisong Li 
720737f30e1SWei Hu (Xavier) int
721737f30e1SWei Hu (Xavier) hns3_cmd_init(struct hns3_hw *hw)
722737f30e1SWei Hu (Xavier) {
7232192c428SHuisong Li 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
724bd5b8673SWei Hu (Xavier) 	uint32_t version;
725737f30e1SWei Hu (Xavier) 	int ret;
726737f30e1SWei Hu (Xavier) 
727737f30e1SWei Hu (Xavier) 	rte_spinlock_lock(&hw->cmq.csq.lock);
728737f30e1SWei Hu (Xavier) 	rte_spinlock_lock(&hw->cmq.crq.lock);
729737f30e1SWei Hu (Xavier) 
730737f30e1SWei Hu (Xavier) 	hw->cmq.csq.next_to_clean = 0;
731737f30e1SWei Hu (Xavier) 	hw->cmq.csq.next_to_use = 0;
732737f30e1SWei Hu (Xavier) 	hw->cmq.crq.next_to_clean = 0;
733737f30e1SWei Hu (Xavier) 	hw->cmq.crq.next_to_use = 0;
734737f30e1SWei Hu (Xavier) 	hns3_cmd_init_regs(hw);
735737f30e1SWei Hu (Xavier) 
736737f30e1SWei Hu (Xavier) 	rte_spinlock_unlock(&hw->cmq.crq.lock);
737737f30e1SWei Hu (Xavier) 	rte_spinlock_unlock(&hw->cmq.csq.lock);
738737f30e1SWei Hu (Xavier) 
7392790c646SWei Hu (Xavier) 	/*
7402790c646SWei Hu (Xavier) 	 * Check if there is new reset pending, because the higher level
7412790c646SWei Hu (Xavier) 	 * reset may happen when lower level reset is being processed.
7422790c646SWei Hu (Xavier) 	 */
7432790c646SWei Hu (Xavier) 	if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
7442790c646SWei Hu (Xavier) 		PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
7452790c646SWei Hu (Xavier) 		ret = -EBUSY;
7462790c646SWei Hu (Xavier) 		goto err_cmd_init;
7472790c646SWei Hu (Xavier) 	}
748e12a0166STyler Retzlaff 	rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
749737f30e1SWei Hu (Xavier) 
75043875235SWei Hu (Xavier) 	ret = hns3_cmd_query_firmware_version_and_capability(hw);
751737f30e1SWei Hu (Xavier) 	if (ret) {
752737f30e1SWei Hu (Xavier) 		PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
753737f30e1SWei Hu (Xavier) 		goto err_cmd_init;
754737f30e1SWei Hu (Xavier) 	}
755737f30e1SWei Hu (Xavier) 
75643875235SWei Hu (Xavier) 	version = hw->fw_version;
757bd5b8673SWei Hu (Xavier) 	PMD_INIT_LOG(INFO, "The firmware version is %lu.%lu.%lu.%lu",
758bd5b8673SWei Hu (Xavier) 		     hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
759bd5b8673SWei Hu (Xavier) 				    HNS3_FW_VERSION_BYTE3_S),
760bd5b8673SWei Hu (Xavier) 		     hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
761bd5b8673SWei Hu (Xavier) 				    HNS3_FW_VERSION_BYTE2_S),
762bd5b8673SWei Hu (Xavier) 		     hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
763bd5b8673SWei Hu (Xavier) 				    HNS3_FW_VERSION_BYTE1_S),
764bd5b8673SWei Hu (Xavier) 		     hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
765bd5b8673SWei Hu (Xavier) 				    HNS3_FW_VERSION_BYTE0_S));
766737f30e1SWei Hu (Xavier) 
7672192c428SHuisong Li 	if (hns->is_vf)
7682192c428SHuisong Li 		return 0;
7692192c428SHuisong Li 
7702192c428SHuisong Li 	/*
7717be78d02SJosh Soref 	 * Requiring firmware to enable some features, fiber port can still
7722192c428SHuisong Li 	 * work without it, but copper port can't work because the firmware
7732192c428SHuisong Li 	 * fails to take over the PHY.
7742192c428SHuisong Li 	 */
7752192c428SHuisong Li 	ret = hns3_firmware_compat_config(hw, true);
7762192c428SHuisong Li 	if (ret)
7772192c428SHuisong Li 		PMD_INIT_LOG(WARNING, "firmware compatible features not "
7782192c428SHuisong Li 			     "supported, ret = %d.", ret);
7792192c428SHuisong Li 
7802192c428SHuisong Li 	/*
7812192c428SHuisong Li 	 * Perform some corresponding operations based on the firmware
7822192c428SHuisong Li 	 * compatibility configuration result.
7832192c428SHuisong Li 	 */
7842192c428SHuisong Li 	ret = hns3_apply_fw_compat_cmd_result(hw, ret);
7852192c428SHuisong Li 	if (ret)
7862192c428SHuisong Li 		goto err_cmd_init;
7872192c428SHuisong Li 
788737f30e1SWei Hu (Xavier) 	return 0;
789737f30e1SWei Hu (Xavier) 
790737f30e1SWei Hu (Xavier) err_cmd_init:
791e12a0166STyler Retzlaff 	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
792737f30e1SWei Hu (Xavier) 	return ret;
793737f30e1SWei Hu (Xavier) }
794737f30e1SWei Hu (Xavier) 
795737f30e1SWei Hu (Xavier) static void
796737f30e1SWei Hu (Xavier) hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
797737f30e1SWei Hu (Xavier) {
798737f30e1SWei Hu (Xavier) 	rte_spinlock_lock(&ring->lock);
799737f30e1SWei Hu (Xavier) 
800737f30e1SWei Hu (Xavier) 	hns3_free_cmd_desc(hw, ring);
801737f30e1SWei Hu (Xavier) 
802737f30e1SWei Hu (Xavier) 	rte_spinlock_unlock(&ring->lock);
803737f30e1SWei Hu (Xavier) }
804737f30e1SWei Hu (Xavier) 
805737f30e1SWei Hu (Xavier) void
806737f30e1SWei Hu (Xavier) hns3_cmd_destroy_queue(struct hns3_hw *hw)
807737f30e1SWei Hu (Xavier) {
808737f30e1SWei Hu (Xavier) 	hns3_destroy_queue(hw, &hw->cmq.csq);
809737f30e1SWei Hu (Xavier) 	hns3_destroy_queue(hw, &hw->cmq.crq);
810737f30e1SWei Hu (Xavier) }
811737f30e1SWei Hu (Xavier) 
812737f30e1SWei Hu (Xavier) void
813737f30e1SWei Hu (Xavier) hns3_cmd_uninit(struct hns3_hw *hw)
814737f30e1SWei Hu (Xavier) {
8152192c428SHuisong Li 	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
8162192c428SHuisong Li 
8172192c428SHuisong Li 	if (!hns->is_vf)
8182192c428SHuisong Li 		(void)hns3_firmware_compat_config(hw, false);
8192192c428SHuisong Li 
820e12a0166STyler Retzlaff 	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
8215ef63df1SChengchang Tang 
8225ef63df1SChengchang Tang 	/*
8235ef63df1SChengchang Tang 	 * A delay is added to ensure that the register cleanup operations
8245ef63df1SChengchang Tang 	 * will not be performed concurrently with the firmware command and
8255ef63df1SChengchang Tang 	 * ensure that all the reserved commands are executed.
8265ef63df1SChengchang Tang 	 * Concurrency may occur in two scenarios: asynchronous command and
8275ef63df1SChengchang Tang 	 * timeout command. If the command fails to be executed due to busy
8285ef63df1SChengchang Tang 	 * scheduling, the command will be processed in the next scheduling
8295ef63df1SChengchang Tang 	 * of the firmware.
8305ef63df1SChengchang Tang 	 */
8315ef63df1SChengchang Tang 	rte_delay_ms(HNS3_CMDQ_CLEAR_WAIT_TIME);
8325ef63df1SChengchang Tang 
833737f30e1SWei Hu (Xavier) 	rte_spinlock_lock(&hw->cmq.csq.lock);
834737f30e1SWei Hu (Xavier) 	rte_spinlock_lock(&hw->cmq.crq.lock);
835737f30e1SWei Hu (Xavier) 	hns3_cmd_clear_regs(hw);
836737f30e1SWei Hu (Xavier) 	rte_spinlock_unlock(&hw->cmq.crq.lock);
837737f30e1SWei Hu (Xavier) 	rte_spinlock_unlock(&hw->cmq.csq.lock);
838737f30e1SWei Hu (Xavier) }
839