xref: /dpdk/drivers/net/hns3/hns3_cmd.c (revision c9902a15bd005b6d4fe072cf7b60fe4ee679155f)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <ethdev_pci.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"

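/*
 * Return the number of free descriptor slots in the ring. One slot is kept
 * unused so that a full ring can be distinguished from an empty one.
 */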
static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

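/*
 * Check that the head pointer reported by hardware lies between
 * next_to_clean and next_to_use, taking ring wrap-around into account.
 */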
static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

/*
 * hns3_allocate_dma_mem - DMA memory allocation for the command queue.
 * Reserve a memzone, i.e. a contiguous portion of physical memory identified
 * by a name.
 * @hw: pointer to the hw structure
 * @ring: pointer to the ring structure
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
		      uint64_t size, uint32_t alignment)
{
	static uint64_t hns3_dma_memzone_id;
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
		__atomic_fetch_add(&hns3_dma_memzone_id, 1, __ATOMIC_RELAXED));
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG, alignment,
					 RTE_PGSIZE_2M);
	if (mz == NULL)
		return -ENOMEM;

	ring->buf_size = size;
	ring->desc = mz->addr;
	ring->desc_dma_addr = mz->iova;
	ring->zone = (const void *)mz;
	hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
		 mz->name, ring->desc_dma_addr);

	return 0;
}

static void
hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
		 ((const struct rte_memzone *)ring->zone)->name,
		 ring->desc_dma_addr);
	rte_memzone_free((const struct rte_memzone *)ring->zone);
	ring->buf_size = 0;
	ring->desc = NULL;
	ring->desc_dma_addr = 0;
	ring->zone = NULL;
}

static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns3_cmd_desc);

	if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
		hns3_err(hw, "allocate dma mem failed");
		return -ENOMEM;
	}

	return 0;
}

static void
hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	if (ring->desc)
		hns3_free_dma_mem(hw, ring);
}

static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
	struct hns3_cmq_ring *ring =
		(ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->hw = hw;

	ret = hns3_alloc_cmd_desc(hw, ring);
	if (ret)
		hns3_err(hw, "descriptor %s alloc error %d",
			 (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

	return ret;
}

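/*
 * Re-arm an already filled descriptor for resubmission: reset its flag field
 * and set or clear the WR bit depending on whether the command is a read.
 */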
void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
	else
		desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}

void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
			  enum hns3_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
	desc->opcode = rte_cpu_to_le_16(opcode);
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}

static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}

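/*
 * Program the ring base address (split into low/high 32 bits), the depth
 * and the head/tail pointers of the CSQ or CRQ into the hardware registers.
 */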
static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
	uint64_t dma = ring->desc_dma_addr;

	if (ring->ring_type == HNS3_TYPE_CSQ) {
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
			       HNS3_NIC_SW_RST_RDY);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	} else {
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
	}
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
	hns3_cmd_config_regs(&hw->cmq.csq);
	hns3_cmd_config_regs(&hw->cmq.crq);
}

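/*
 * Reclaim the CSQ descriptors that the firmware has consumed, based on the
 * hardware head pointer. Return the number of cleaned descriptors, or -EIO
 * (scheduling a delayed reset in the primary process) if the reported head
 * is invalid.
 */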
static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
	struct hns3_cmq_ring *csq = &hw->cmq.csq;
	uint32_t head;
	uint32_t addr;
	int clean;

	head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
	addr = hns3_read_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG);
	if (!is_valid_csq_clean_head(csq, head) || addr == 0) {
		hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
			 csq->next_to_use, csq->next_to_clean);
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			__atomic_store_n(&hw->reset.disable_cmd, 1,
					 __ATOMIC_RELAXED);
			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
		}

		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

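/* The CSQ is done when the hardware head has caught up with next_to_use. */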
static int
hns3_cmd_csq_done(struct hns3_hw *hw)
{
	uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
	/*
	 * These commands have several descriptors,
	 * and use the first one to save opcode and return value.
	 */
	uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
				  HNS3_OPC_STATS_32_BIT,
				  HNS3_OPC_STATS_MAC,
				  HNS3_OPC_STATS_MAC_ALL,
				  HNS3_OPC_QUERY_32_BIT_REG,
				  HNS3_OPC_QUERY_64_BIT_REG,
				  HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT,
				  HNS3_OPC_QUERY_CLEAR_PF_RAS_INT,
				  HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT,
				  HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT,
				  HNS3_OPC_QUERY_ALL_ERR_INFO,};
	uint32_t i;

	for (i = 0; i < RTE_DIM(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

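/* Map a firmware (IMP) return code to a Linux-style error code. */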
static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
	static const struct {
		uint16_t imp_errcode;
		int linux_errcode;
	} hns3_cmdq_status[] = {
		{HNS3_CMD_EXEC_SUCCESS, 0},
		{HNS3_CMD_NO_AUTH, -EPERM},
		{HNS3_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
		{HNS3_CMD_QUEUE_FULL, -EXFULL},
		{HNS3_CMD_NEXT_ERR, -ENOSR},
		{HNS3_CMD_UNEXE_ERR, -ENOTBLK},
		{HNS3_CMD_PARA_ERR, -EINVAL},
		{HNS3_CMD_RESULT_ERR, -ERANGE},
		{HNS3_CMD_TIMEOUT, -ETIME},
		{HNS3_CMD_HILINK_ERR, -ENOLINK},
		{HNS3_CMD_QUEUE_ILLEGAL, -ENXIO},
		{HNS3_CMD_INVALID, -EBADR},
		{HNS3_CMD_ROH_CHECK_FAIL, -EINVAL}
	};

	uint32_t i;

	for (i = 0; i < RTE_DIM(hns3_cmdq_status); i++)
		if (hns3_cmdq_status[i].imp_errcode == desc_ret)
			return hns3_cmdq_status[i].linux_errcode;

	return -EREMOTEIO;
}

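/*
 * Copy the descriptors written back by hardware, starting at ring index ntc,
 * into the caller's desc array and convert the firmware return value into
 * an error code.
 */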
static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
			    struct hns3_cmd_desc *desc, int num, int ntc)
{
	uint16_t opcode, desc_ret;
	int current_ntc = ntc;
	int handle;

	opcode = rte_le_to_cpu_16(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		/* Get the result of hardware write back */
		desc[handle] = hw->cmq.csq.desc[current_ntc];

		current_ntc++;
		if (current_ntc == hw->cmq.csq.desc_num)
			current_ntc = 0;
	}

	if (likely(!hns3_is_special_opcode(opcode)))
		desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
	else
		desc_ret = rte_le_to_cpu_16(desc[0].retval);

	hw->cmq.last_status = desc_ret;
	return hns3_cmd_convert_err_code(desc_ret);
}

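/*
 * Busy-wait up to hw->cmq.tx_timeout microseconds for the firmware to
 * process the CSQ, aborting early if commands are disabled or a reset is
 * pending.
 */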
static int hns3_cmd_poll_reply(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t timeout = 0;

	do {
		if (hns3_cmd_csq_done(hw))
			return 0;

		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
			hns3_err(hw,
				 "Don't wait for reply because of disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hns3_err(hw, "Don't wait for reply because of reset pending");
			return -EIO;
		}

		rte_delay_us(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
	hns3_err(hw, "Wait for reply timeout");
	return -ETIME;
}

/*
 * hns3_cmd_send - send command to the command queue
 *
 * @param hw
 *   pointer to the hw struct
 * @param desc
 *   prefilled descriptor(s) describing the command
 * @param num
 *   the number of descriptors to be sent
 * @return
 *   - -EBUSY if the device is resetting
 *   - -EIO   if the command send queue is corrupted (due to reset) or
 *            a reset is pending
 *   - -ENOMEM/-ETIME/...(non-zero) on other errors
 *   - Zero   if the operation completed successfully
 *
 * Note: -EBUSY/-EIO are only used in the reset case.
 *
 * Note: this is the main send routine for the command queue; it posts
 * descriptors to the queue, cleans the queue, and waits for the reply.
 */
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
	struct hns3_cmd_desc *desc_to_use;
	int handle = 0;
	int retval;
	uint32_t ntc;

	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
		return -EBUSY;

	rte_spinlock_lock(&hw->cmq.csq.lock);

	/* Clean the command send queue */
	retval = hns3_cmd_csq_clean(hw);
	if (retval < 0) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return retval;
	}

	if (num > hns3_ring_space(&hw->cmq.csq)) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return -ENOMEM;
	}

	/*
	 * Record the location of the descriptors in the ring for this command,
	 * which will be used when reading back the hardware reply.
	 */
	ntc = hw->cmq.csq.next_to_use;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

	/*
	 * If the command is synchronous, wait for the firmware write back.
	 * If multiple descriptors are sent, use the first one to check.
	 */
	if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
		retval = hns3_cmd_poll_reply(hw);
		if (!retval)
			retval = hns3_cmd_get_hardware_reply(hw, desc, num,
							     ntc);
	}

	rte_spinlock_unlock(&hw->cmq.csq.lock);
	return retval;
}
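
/*
 * Illustrative usage of hns3_cmd_send() (a sketch mirroring
 * hns3_cmd_query_firmware_version_and_capability() below, not additional
 * driver code):
 *
 *	struct hns3_cmd_desc desc;
 *	int ret;
 *
 *	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
 *	ret = hns3_cmd_send(hw, &desc, 1);
 *	if (ret)
 *		return ret;
 */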

static const char *
hns3_get_caps_name(uint32_t caps_id)
{
	const struct {
		enum HNS3_CAPS_BITS caps;
		const char *name;
	} dev_caps[] = {
		{ HNS3_CAPS_FD_QUEUE_REGION_B, "fd_queue_region" },
		{ HNS3_CAPS_PTP_B,             "ptp"             },
		{ HNS3_CAPS_TX_PUSH_B,         "tx_push"         },
		{ HNS3_CAPS_PHY_IMP_B,         "phy_imp"         },
		{ HNS3_CAPS_TQP_TXRX_INDEP_B,  "tqp_txrx_indep"  },
		{ HNS3_CAPS_HW_PAD_B,          "hw_pad"          },
		{ HNS3_CAPS_STASH_B,           "stash"           },
		{ HNS3_CAPS_UDP_TUNNEL_CSUM_B, "udp_tunnel_csum" },
		{ HNS3_CAPS_RAS_IMP_B,         "ras_imp"         },
		{ HNS3_CAPS_RXD_ADV_LAYOUT_B,  "rxd_adv_layout"  },
		{ HNS3_CAPS_TM_B,              "tm_capability"   }
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(dev_caps); i++) {
		if (dev_caps[i].caps == caps_id)
			return dev_caps[i].name;
	}

	return "unknown";
}

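/*
 * Clear the capability bits selected by hns->dev_caps_mask from the firmware
 * reply and log each capability that gets masked off.
 */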
static void
hns3_mask_capability(struct hns3_hw *hw,
		     struct hns3_query_version_cmd *cmd)
{
#define MAX_CAPS_BIT	64

	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint64_t caps_org, caps_new, caps_masked;
	uint32_t i;

	if (hns->dev_caps_mask == 0)
		return;

	memcpy(&caps_org, &cmd->caps[0], sizeof(caps_org));
	caps_org = rte_le_to_cpu_64(caps_org);
	caps_new = caps_org ^ (caps_org & hns->dev_caps_mask);
	caps_masked = caps_org ^ caps_new;
	caps_new = rte_cpu_to_le_64(caps_new);
	memcpy(&cmd->caps[0], &caps_new, sizeof(caps_new));

	for (i = 0; i < MAX_CAPS_BIT; i++) {
		if (!(caps_masked & BIT_ULL(i)))
			continue;
		hns3_info(hw, "mask capability: id-%u, name-%s.",
			  i, hns3_get_caps_name(i));
	}
}

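/*
 * Translate the firmware-reported capability bits into the driver's
 * hw->capability flags.
 */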
static void
hns3_parse_capability(struct hns3_hw *hw,
		      struct hns3_query_version_cmd *cmd)
{
	uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);

	if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_PTP_B)) {
		/*
		 * PTP depends on a special packet type reported by hardware
		 * which is only available with the rxd advanced layout, so if
		 * the hardware doesn't support rxd advanced layout, the driver
		 * should ignore the PTP capability.
		 */
		if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
			hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
		else
			hns3_warn(hw, "ignore PTP capability due to lack of "
				  "rxd advanced layout capability.");
	}
	if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_UDP_TUNNEL_CSUM_B))
		hns3_set_bit(hw->capability,
				HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_RAS_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RAS_IMP_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TM_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TM_B, 1);
}

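/* Report the API capabilities the driver supports to the firmware. */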
static uint32_t
hns3_build_api_caps(void)
{
	uint32_t api_caps = 0;

	hns3_set_bit(api_caps, HNS3_API_CAP_FLEX_RSS_TBL_B, 1);

	return rte_cpu_to_le_32(api_caps);
}

static int
hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
{
	struct hns3_query_version_cmd *resp;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1);
	resp = (struct hns3_query_version_cmd *)desc.data;
	resp->api_caps = hns3_build_api_caps();

	/* Query the firmware version and capability */
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	hw->fw_version = rte_le_to_cpu_32(resp->firmware);
	/*
	 * Make sure the capabilities are masked before they are parsed,
	 * because masking overwrites resp's data.
	 */
	hns3_mask_capability(hw, resp);
	hns3_parse_capability(hw, resp);

	return 0;
}

int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
	int ret;

	/* Set up the locks for the command queues */
	rte_spinlock_init(&hw->cmq.csq.lock);
	rte_spinlock_init(&hw->cmq.crq.lock);

	/*
	 * Clear all command registers
	 * in case there are residual values.
	 */
	hns3_cmd_clear_regs(hw);

	/* Set up the number of queue entries used by the command queues */
	hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
	hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

	/* Set up the Tx write back timeout */
	hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

	/* Set up the queue rings */
	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
		return ret;
	}

	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
		goto err_crq;
	}

	return 0;

err_crq:
	hns3_free_cmd_desc(hw, &hw->cmq.csq);

	return ret;
}

static void
hns3_update_dev_lsc_cap(struct hns3_hw *hw, int fw_compact_cmd_result)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];

	if (hw->adapter_state != HNS3_NIC_UNINITIALIZED)
		return;

	if (fw_compact_cmd_result != 0) {
		/*
		 * If fw_compact_cmd_result is non-zero, the firmware doesn't
		 * support the link status change interrupt.
		 * The framework has already set the RTE_ETH_DEV_INTR_LSC bit
		 * because the driver declared RTE_PCI_DRV_INTR_LSC in
		 * drv_flags. The RTE_ETH_DEV_INTR_LSC capability needs to be
		 * cleared when the firmware is detected not to support the
		 * link status change interrupt.
		 */
		dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
	}
}

static int
hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result)
{
	if (result != 0 && hns3_dev_copper_supported(hw)) {
		hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.",
			 result);
		return result;
	}

	hns3_update_dev_lsc_cap(hw, result);

	return 0;
}

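/*
 * Negotiate compatible features with the firmware: when is_init is true,
 * enable link event reporting and, on copper ports, let the firmware drive
 * the PHY; when is_init is false, clear the configuration again.
 */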
static int
hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
{
	struct hns3_firmware_compat_cmd *req;
	struct hns3_cmd_desc desc;
	uint32_t compat = 0;

#if defined(RTE_HNS3_ONLY_1630_FPGA)
	/*
	 * If the PHY driver of the IMP is not configured (as indicated by the
	 * reserved register), the driver will use a temporary PHY driver.
	 */
	struct rte_pci_device *pci_dev;
	struct rte_eth_dev *eth_dev;
	uint8_t revision;
	int ret;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	/* Get the PCI revision id */
	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
				  HNS3_PCI_REVISION_ID);
	if (ret != HNS3_PCI_REVISION_ID_LEN) {
		PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
			     ret);
		return -EIO;
	}
	if (revision == PCI_REVISION_ID_HIP09_A) {
		struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);

		if (hns3_dev_copper_supported(hw) == 0 || pf->is_tmp_phy) {
			PMD_INIT_LOG(ERR, "***use temp phy driver in dpdk***");
			pf->is_tmp_phy = true;
			hns3_set_bit(hw->capability,
				     HNS3_DEV_SUPPORT_COPPER_B, 1);
			return 0;
		}

		PMD_INIT_LOG(ERR, "***use phy driver in imp***");
	}
#endif

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false);
	req = (struct hns3_firmware_compat_cmd *)desc.data;

	if (is_init) {
		hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
		hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
		if (hns3_dev_copper_supported(hw))
			hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1);
	}
	req->compat = rte_cpu_to_le_32(compat);

	return hns3_cmd_send(hw, &desc, 1);
}

int
hns3_cmd_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t version;
	int ret;

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);

	hw->cmq.csq.next_to_clean = 0;
	hw->cmq.csq.next_to_use = 0;
	hw->cmq.crq.next_to_clean = 0;
	hw->cmq.crq.next_to_use = 0;
	hw->mbx_resp.head = 0;
	hw->mbx_resp.tail = 0;
	hw->mbx_resp.lost = 0;
	hns3_cmd_init_regs(hw);

	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);

	/*
	 * Check if there is a new reset pending, because a higher level
	 * reset may happen while a lower level reset is being processed.
	 */
	if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
		PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
		ret = -EBUSY;
		goto err_cmd_init;
	}
	__atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);

	ret = hns3_cmd_query_firmware_version_and_capability(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
		goto err_cmd_init;
	}

	version = hw->fw_version;
	PMD_INIT_LOG(INFO, "The firmware version is %lu.%lu.%lu.%lu",
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				    HNS3_FW_VERSION_BYTE3_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				    HNS3_FW_VERSION_BYTE2_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				    HNS3_FW_VERSION_BYTE1_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				    HNS3_FW_VERSION_BYTE0_S));

	if (hns->is_vf)
		return 0;

	/*
	 * Ask the firmware to enable some features. A fiber port can still
	 * work without them, but a copper port can't work because the
	 * firmware fails to take over the PHY.
	 */
	ret = hns3_firmware_compat_config(hw, true);
	if (ret)
		PMD_INIT_LOG(WARNING, "firmware compatible features not "
			     "supported, ret = %d.", ret);

	/*
	 * Perform some corresponding operations based on the firmware
	 * compatibility configuration result.
	 */
	ret = hns3_apply_fw_compat_cmd_result(hw, ret);
	if (ret)
		goto err_cmd_init;

	return 0;

err_cmd_init:
	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	return ret;
}

static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	rte_spinlock_lock(&ring->lock);

	hns3_free_cmd_desc(hw, ring);

	rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
	hns3_destroy_queue(hw, &hw->cmq.csq);
	hns3_destroy_queue(hw, &hw->cmq.crq);
}

void
hns3_cmd_uninit(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);

	if (!hns->is_vf)
		(void)hns3_firmware_compat_config(hw, false);

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);

	/*
	 * A delay is added to ensure that the register cleanup operations
	 * will not be performed concurrently with firmware commands and that
	 * all in-flight commands have been executed.
	 * Concurrency may occur in two scenarios: asynchronous commands and
	 * timed-out commands. If a command fails to be executed due to busy
	 * scheduling, it will be processed in the next scheduling run of the
	 * firmware.
	 */
	rte_delay_ms(HNS3_CMDQ_CLEAR_WAIT_TIME);

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);
	hns3_cmd_clear_regs(hw);
	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);
}
807