/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <ethdev_pci.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"

#define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ)

#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)

static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}
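
/*
 * Worked example for the two helpers above (illustrative numbers only, not
 * real hardware state): with desc_num = 256, next_to_clean = 250 and
 * next_to_use = 5 the ring has wrapped, so
 *	used  = (5 - 250 + 256) % 256 = 11
 *	space = 256 - 11 - 1 = 244
 * and a CSQ head reported by hardware is accepted by
 * is_valid_csq_clean_head() only if head >= 250 or head <= 5, i.e. it lies
 * in the wrapped window between next_to_clean and next_to_use.
 */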

/*
 * hns3_allocate_dma_mem - Specific memory alloc for command function.
 * Reserve a memzone, which is a contiguous portion of physical memory
 * identified by a name.
 * @hw: pointer to the hardware structure
 * @ring: pointer to the ring structure
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
		      uint64_t size, uint32_t alignment)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG, alignment,
					 RTE_PGSIZE_2M);
	if (mz == NULL)
		return -ENOMEM;

	ring->buf_size = size;
	ring->desc = mz->addr;
	ring->desc_dma_addr = mz->iova;
	ring->zone = (const void *)mz;
	hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
		 mz->name, ring->desc_dma_addr);

	return 0;
}

static void
hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
		 ((const struct rte_memzone *)ring->zone)->name,
		 ring->desc_dma_addr);
	rte_memzone_free((const struct rte_memzone *)ring->zone);
	ring->buf_size = 0;
	ring->desc = NULL;
	ring->desc_dma_addr = 0;
	ring->zone = NULL;
}

static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	int size  = ring->desc_num * sizeof(struct hns3_cmd_desc);

	if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
		hns3_err(hw, "allocate dma mem failed");
		return -ENOMEM;
	}

	return 0;
}

static void
hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	if (ring->desc)
		hns3_free_dma_mem(hw, ring);
}

static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
	struct hns3_cmq_ring *ring =
		(ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->hw = hw;

	ret = hns3_alloc_cmd_desc(hw, ring);
	if (ret)
		hns3_err(hw, "descriptor %s alloc error %d",
			    (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

	return ret;
}

void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
	else
		desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}

void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
			  enum hns3_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
	desc->opcode = rte_cpu_to_le_16(opcode);
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}
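
/*
 * Typical caller-side sketch for the two helpers above (illustrative only,
 * with simplified error handling; HNS3_OPC_QUERY_FW_VER is one opcode this
 * file actually sends):
 *
 *	struct hns3_cmd_desc desc;
 *	int ret;
 *
 *	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
 *	ret = hns3_cmd_send(hw, &desc, 1);
 *	if (ret)
 *		hns3_err(hw, "query firmware version failed, ret = %d", ret);
 *
 * hns3_cmd_reuse_desc() is used when a previously prepared descriptor is
 * sent again: its data area is kept and only the flag word is rebuilt.
 */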

static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}

static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
	uint64_t dma = ring->desc_dma_addr;

	if (ring->ring_type == HNS3_TYPE_CSQ) {
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
			       HNS3_NIC_SW_RST_RDY);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	} else {
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
	}
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
	hns3_cmd_config_regs(&hw->cmq.csq);
	hns3_cmd_config_regs(&hw->cmq.crq);
}

static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
	struct hns3_cmq_ring *csq = &hw->cmq.csq;
	uint32_t head;
	uint32_t addr;
	int clean;

	head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
	addr = hns3_read_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG);
	if (!is_valid_csq_clean_head(csq, head) || addr == 0) {
		hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
			 csq->next_to_use, csq->next_to_clean);
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			__atomic_store_n(&hw->reset.disable_cmd, 1,
					 __ATOMIC_RELAXED);
			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
		}

		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

static int
hns3_cmd_csq_done(struct hns3_hw *hw)
{
	uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
	/*
	 * These commands consist of several descriptors and use the first
	 * one to hold the opcode and return value.
	 */
	uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
				  HNS3_OPC_STATS_32_BIT,
				  HNS3_OPC_STATS_MAC,
				  HNS3_OPC_STATS_MAC_ALL,
				  HNS3_OPC_QUERY_32_BIT_REG,
				  HNS3_OPC_QUERY_64_BIT_REG,
				  HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT,
				  HNS3_OPC_QUERY_CLEAR_PF_RAS_INT,
				  HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT,
				  HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT,
				  HNS3_OPC_QUERY_ALL_ERR_INFO,};
	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}
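
/*
 * Sketch of how a multi-descriptor command such as HNS3_OPC_QUERY_64_BIT_REG
 * is typically issued (illustrative only; HNS3_CMD_FLAG_NEXT is assumed to
 * be the continuation flag from the command header, and error handling is
 * omitted):
 *
 *	struct hns3_cmd_desc desc[3];
 *	int i;
 *
 *	for (i = 0; i < 3; i++) {
 *		hns3_cmd_setup_basic_desc(&desc[i],
 *					  HNS3_OPC_QUERY_64_BIT_REG, true);
 *		if (i < 2)
 *			desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
 *	}
 *	(void)hns3_cmd_send(hw, desc, 3);
 *
 * Because the opcode is "special", hns3_cmd_get_hardware_reply() reads the
 * return value from desc[0] instead of the last descriptor.
 */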

static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
	static const struct {
		uint16_t imp_errcode;
		int linux_errcode;
	} hns3_cmdq_status[] = {
		{HNS3_CMD_EXEC_SUCCESS, 0},
		{HNS3_CMD_NO_AUTH, -EPERM},
		{HNS3_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
		{HNS3_CMD_QUEUE_FULL, -EXFULL},
		{HNS3_CMD_NEXT_ERR, -ENOSR},
		{HNS3_CMD_UNEXE_ERR, -ENOTBLK},
		{HNS3_CMD_PARA_ERR, -EINVAL},
		{HNS3_CMD_RESULT_ERR, -ERANGE},
		{HNS3_CMD_TIMEOUT, -ETIME},
		{HNS3_CMD_HILINK_ERR, -ENOLINK},
		{HNS3_CMD_QUEUE_ILLEGAL, -ENXIO},
		{HNS3_CMD_INVALID, -EBADR},
		{HNS3_CMD_ROH_CHECK_FAIL, -EINVAL}
	};

	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(hns3_cmdq_status); i++)
		if (hns3_cmdq_status[i].imp_errcode == desc_ret)
			return hns3_cmdq_status[i].linux_errcode;

	return -EREMOTEIO;
}

static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
			    struct hns3_cmd_desc *desc, int num, int ntc)
{
	uint16_t opcode, desc_ret;
	int current_ntc = ntc;
	int handle;

	opcode = rte_le_to_cpu_16(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		/* Get the result of hardware write back */
		desc[handle] = hw->cmq.csq.desc[current_ntc];

		current_ntc++;
		if (current_ntc == hw->cmq.csq.desc_num)
			current_ntc = 0;
	}

	if (likely(!hns3_is_special_opcode(opcode)))
		desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
	else
		desc_ret = rte_le_to_cpu_16(desc[0].retval);

	hw->cmq.last_status = desc_ret;
	return hns3_cmd_convert_err_code(desc_ret);
}

static int
hns3_cmd_poll_reply(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t timeout = 0;

	do {
		if (hns3_cmd_csq_done(hw))
			return 0;

		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
			hns3_err(hw,
				 "Don't wait for reply because of disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hns3_err(hw, "Don't wait for reply because of reset pending");
			return -EIO;
		}

		rte_delay_us(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
	hns3_err(hw, "Wait for reply timeout");
	return -ETIME;
}

/*
 * hns3_cmd_send - send command to the command queue
 *
 * @param hw
 *   pointer to the hw struct
 * @param desc
 *   prefilled descriptor(s) describing the command
 * @param num
 *   the number of descriptors to be sent
 * @return
 *   - -EBUSY if the device is resetting
 *   - -EIO   if the command send queue is corrupted (due to reset) or
 *            a reset is pending
 *   - -ENOMEM/-ETIME/...(non-zero) on other errors
 *   - Zero   if the operation completed successfully
 *
 * Note: -EBUSY/-EIO are only used in the reset case.
 *
 * Note: this is the main send function for the command queue; it posts the
 * descriptors to the CSQ, cleans the queue and, for synchronous commands,
 * waits for the firmware reply.
 */
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
	struct hns3_cmd_desc *desc_to_use;
	int handle = 0;
	int retval;
	uint32_t ntc;

	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
		return -EBUSY;

	rte_spinlock_lock(&hw->cmq.csq.lock);

	/* Clean the command send queue */
	retval = hns3_cmd_csq_clean(hw);
	if (retval < 0) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return retval;
	}

	if (num > hns3_ring_space(&hw->cmq.csq)) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return -ENOMEM;
	}

	/*
	 * Record the location of the descriptors in the ring for this time,
	 * which will be used for the hardware write back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

	/*
	 * If the command is synchronous, wait for the firmware write back.
	 * If multiple descriptors are sent, use the flag of the first one to
	 * check.
	 */
	if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
		retval = hns3_cmd_poll_reply(hw);
		if (!retval)
			retval = hns3_cmd_get_hardware_reply(hw, desc, num,
							     ntc);
	}

	rte_spinlock_unlock(&hw->cmq.csq.lock);
	return retval;
}
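
/*
 * Caller-side sketch of a synchronous "write" command (illustrative only;
 * the opcode and request layout are the ones used later in this file by
 * hns3_firmware_compat_config(), and error handling is simplified):
 *
 *	struct hns3_firmware_compat_cmd *req;
 *	struct hns3_cmd_desc desc;
 *	int ret;
 *
 *	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false);
 *	req = (struct hns3_firmware_compat_cmd *)desc.data;
 *	req->compat = rte_cpu_to_le_32(0);
 *	ret = hns3_cmd_send(hw, &desc, 1);
 *
 * A negative return value is either one of the reset-related codes described
 * above or the firmware status translated by hns3_cmd_convert_err_code().
 */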

static void
hns3_parse_capability(struct hns3_hw *hw,
		      struct hns3_query_version_cmd *cmd)
{
	uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);

	if (hns3_get_bit(caps, HNS3_CAPS_UDP_GSO_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_UDP_GSO_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_PTP_B)) {
		/*
		 * PTP depends on a special packet type reported by hardware
		 * that is only available when the rxd advanced layout is
		 * enabled, so if the hardware doesn't support rxd advanced
		 * layout, the driver should ignore the PTP capability.
		 */
		if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
			hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
		else
			hns3_warn(hw, "ignore PTP capability due to lack of "
				  "rxd advanced layout capability.");
	}
	if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_UDP_TUNNEL_CSUM_B))
		hns3_set_bit(hw->capability,
				HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_RAS_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RAS_IMP_B, 1);
}
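
/*
 * The bits set above are consumed elsewhere in the driver through small
 * helpers; hns3_dev_copper_supported(), used later in this file, presumably
 * wraps a test equivalent to the sketch below (illustrative only):
 *
 *	if (hns3_get_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B))
 *		... copper/PHY-specific handling ...
 */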

static uint32_t
hns3_build_api_caps(void)
{
	uint32_t api_caps = 0;

	hns3_set_bit(api_caps, HNS3_API_CAP_FLEX_RSS_TBL_B, 1);

	return rte_cpu_to_le_32(api_caps);
}

static int
hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
{
	struct hns3_query_version_cmd *resp;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1);
	resp = (struct hns3_query_version_cmd *)desc.data;
	resp->api_caps = hns3_build_api_caps();

	/* Send the query command to the firmware */
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	hw->fw_version = rte_le_to_cpu_32(resp->firmware);
	hns3_parse_capability(hw, resp);

	return 0;
}

int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
	int ret;

	/* Setup the lock for the command queues */
	rte_spinlock_init(&hw->cmq.csq.lock);
	rte_spinlock_init(&hw->cmq.crq.lock);

	/*
	 * Clear all the command registers in case there are
	 * residual values.
	 */
	hns3_cmd_clear_regs(hw);

	/* Setup the queue entries used by the command queues */
	hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
	hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
		return ret;
	}

	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
		goto err_crq;
	}

	return 0;

err_crq:
	hns3_free_cmd_desc(hw, &hw->cmq.csq);

	return ret;
}

static void
hns3_update_dev_lsc_cap(struct hns3_hw *hw, int fw_compact_cmd_result)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];

	if (hw->adapter_state != HNS3_NIC_UNINITIALIZED)
		return;

	if (fw_compact_cmd_result != 0) {
		/*
		 * If fw_compact_cmd_result is not zero, the firmware doesn't
		 * support the link status change interrupt.
		 * The framework already set the RTE_ETH_DEV_INTR_LSC bit
		 * because the driver declared RTE_PCI_DRV_INTR_LSC in
		 * drv_flags. The RTE_ETH_DEV_INTR_LSC capability needs to be
		 * cleared when the firmware is detected not to support the
		 * link status change interrupt.
		 */
		dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
	}
}

static int
hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result)
{
	if (result != 0 && hns3_dev_copper_supported(hw)) {
		hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.",
			 result);
		return result;
	}

	hns3_update_dev_lsc_cap(hw, result);

	return 0;
}

static int
hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
{
	struct hns3_firmware_compat_cmd *req;
	struct hns3_cmd_desc desc;
	uint32_t compat = 0;

#if defined(RTE_HNS3_ONLY_1630_FPGA)
	/*
	 * If the resv reg indicates that the PHY driver of IMP is not
	 * configured, the driver will use a temporary PHY driver.
	 */
	struct rte_pci_device *pci_dev;
	struct rte_eth_dev *eth_dev;
	uint8_t revision;
	int ret;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	/* Get PCI revision id */
	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
				  HNS3_PCI_REVISION_ID);
	if (ret != HNS3_PCI_REVISION_ID_LEN) {
		PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
			     ret);
		return -EIO;
	}
	if (revision == PCI_REVISION_ID_HIP09_A) {
		struct hns3_pf *pf = HNS3_DEV_HW_TO_PF(hw);

		if (hns3_dev_copper_supported(hw) == 0 || pf->is_tmp_phy) {
			PMD_INIT_LOG(ERR, "***use temp phy driver in dpdk***");
			pf->is_tmp_phy = true;
			hns3_set_bit(hw->capability,
				     HNS3_DEV_SUPPORT_COPPER_B, 1);
			return 0;
		}

		PMD_INIT_LOG(ERR, "***use phy driver in imp***");
	}
#endif

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false);
	req = (struct hns3_firmware_compat_cmd *)desc.data;

	if (is_init) {
		hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
		hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
		if (hns3_dev_copper_supported(hw))
			hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1);
	}
	req->compat = rte_cpu_to_le_32(compat);

	return hns3_cmd_send(hw, &desc, 1);
}

int
hns3_cmd_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t version;
	int ret;

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);

	hw->cmq.csq.next_to_clean = 0;
	hw->cmq.csq.next_to_use = 0;
	hw->cmq.crq.next_to_clean = 0;
	hw->cmq.crq.next_to_use = 0;
	hw->mbx_resp.head = 0;
	hw->mbx_resp.tail = 0;
	hw->mbx_resp.lost = 0;
	hns3_cmd_init_regs(hw);

	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);

	/*
	 * Check if there is a new reset pending, because a higher level
	 * reset may happen while a lower level reset is being processed.
	 */
	if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
		PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
		ret = -EBUSY;
		goto err_cmd_init;
	}
	__atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED);

	ret = hns3_cmd_query_firmware_version_and_capability(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
		goto err_cmd_init;
	}

	version = hw->fw_version;
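	/*
	 * Illustrative example (not a value read from hardware): assuming the
	 * BYTE3..BYTE0 fields are the four bytes of the 32-bit version word,
	 * most significant byte first, a value of 0x01080206 would be printed
	 * below as "1.8.2.6".
	 */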
	PMD_INIT_LOG(INFO, "The firmware version is %lu.%lu.%lu.%lu",
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				    HNS3_FW_VERSION_BYTE3_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				    HNS3_FW_VERSION_BYTE2_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				    HNS3_FW_VERSION_BYTE1_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				    HNS3_FW_VERSION_BYTE0_S));

	if (hns->is_vf)
		return 0;

	/*
	 * Ask the firmware to enable some features. A fiber port can still
	 * work without them, but a copper port can't, because the firmware
	 * fails to take over the PHY.
	 */
	ret = hns3_firmware_compat_config(hw, true);
	if (ret)
		PMD_INIT_LOG(WARNING, "firmware compatible features not "
			     "supported, ret = %d.", ret);

	/*
	 * Perform some corresponding operations based on the firmware
	 * compatibility configuration result.
	 */
	ret = hns3_apply_fw_compat_cmd_result(hw, ret);
	if (ret)
		goto err_cmd_init;

	return 0;

err_cmd_init:
	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
	return ret;
}

static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	rte_spinlock_lock(&ring->lock);

	hns3_free_cmd_desc(hw, ring);

	rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
	hns3_destroy_queue(hw, &hw->cmq.csq);
	hns3_destroy_queue(hw, &hw->cmq.crq);
}

void
hns3_cmd_uninit(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);

	if (!hns->is_vf)
		(void)hns3_firmware_compat_config(hw, false);

	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);

	/*
	 * A delay is added to ensure that the register cleanup operations
	 * will not be performed concurrently with the firmware command and
	 * ensure that all the reserved commands are executed.
	 * Concurrency may occur in two scenarios: asynchronous command and
	 * timeout command. If the command fails to be executed due to busy
	 * scheduling, the command will be processed in the next scheduling
	 * of the firmware.
	 */
	rte_delay_ms(HNS3_CMDQ_CLEAR_WAIT_TIME);

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);
	hns3_cmd_clear_regs(hw);
	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);
}