/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <ethdev_pci.h>
#include <rte_io.h>

#include "hns3_common.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"

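/*
 * Free slots left in a command ring. One slot is deliberately kept
 * unused so that a full ring can be distinguished from an empty one.
 * Worked example (hypothetical values, not taken from this file): with
 * desc_num = 1024, next_to_use = 10 and next_to_clean = 1020,
 * used = (10 - 1020 + 1024) % 1024 = 14, so 1024 - 14 - 1 = 1009
 * descriptors can still be posted.
 */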
static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

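/*
 * Sanity-check the CSQ head reported by hardware: it must lie inside
 * the window of descriptors currently owned by hardware, taking ring
 * wrap-around into account. For instance (hypothetical values), with
 * ntc = 1020, ntu = 10 and a 1024-entry ring, the valid heads are
 * 1020..1023 and 0..10.
 */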
static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

/*
 * hns3_allocate_dma_mem - memory allocation dedicated to the command queue.
 * Reserve a memzone, which is a named, IOVA-contiguous portion of physical
 * memory.
 * @hw: pointer to the hw structure
 * @ring: pointer to the ring structure
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
		      uint64_t size, uint32_t alignment)
{
	static RTE_ATOMIC(uint64_t) hns3_dma_memzone_id;
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64,
		rte_atomic_fetch_add_explicit(&hns3_dma_memzone_id, 1, rte_memory_order_relaxed));
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG, alignment,
					 RTE_PGSIZE_2M);
	if (mz == NULL)
		return -ENOMEM;

	ring->buf_size = size;
	ring->desc = mz->addr;
	ring->desc_dma_addr = mz->iova;
	ring->zone = (const void *)mz;
	hns3_dbg(hw, "cmd ring memzone name: %s", mz->name);

	return 0;
}

static void
hns3_free_dma_mem(struct hns3_cmq_ring *ring)
{
	rte_memzone_free((const struct rte_memzone *)ring->zone);
	ring->buf_size = 0;
	ring->desc = NULL;
	ring->desc_dma_addr = 0;
	ring->zone = NULL;
}

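/*
 * The DMA area holds desc_num fixed-size command descriptors. As an
 * illustration only (figures not taken from this file): 1024
 * descriptors of 32 bytes each would occupy 32 KiB of IOVA-contiguous
 * memory, aligned to HNS3_CMD_DESC_ALIGNMENT.
 */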
static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns3_cmd_desc);

	if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
		hns3_err(hw, "allocate dma mem failed");
		return -ENOMEM;
	}

	return 0;
}

static void
hns3_free_cmd_desc(__rte_unused struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	if (ring->desc)
		hns3_free_dma_mem(ring);
}

static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
	struct hns3_cmq_ring *ring =
		(ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->hw = hw;

	ret = hns3_alloc_cmd_desc(hw, ring);
	if (ret)
		hns3_err(hw, "descriptor %s alloc error %d",
			 (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

	return ret;
}

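/*
 * Re-arm a descriptor for resubmission: reset the flag word to
 * IN | NO_INTR and set or clear the WR bit depending on the transfer
 * direction. The opcode and data words are left untouched, so the
 * same descriptor can be reissued without rebuilding it.
 */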
void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
	else
		desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}

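/*
 * Illustrative call pattern (a sketch, not code from this file; the
 * opcode shown is one of those used further down):
 *
 *	struct hns3_cmd_desc desc;
 *	int ret;
 *
 *	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
 *	ret = hns3_cmd_send(hw, &desc, 1);
 *
 * With is_read set, the WR flag tells the firmware to write its reply
 * back into the same descriptor slot.
 */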
void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
			  enum hns3_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
	desc->opcode = rte_cpu_to_le_16(opcode);
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}

static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}

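/*
 * Program the base address and depth of a ring into hardware. The depth
 * register takes desc_num shifted down by HNS3_NIC_CMQ_DESC_NUM_S; on
 * the CSQ side HNS3_NIC_SW_RST_RDY is OR-ed in as well, which appears
 * to tell the firmware that the driver side is ready again after a
 * software reset (a reading inferred from the flag's name, not stated
 * in this file).
 */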
static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
	uint64_t dma = ring->desc_dma_addr;

	if (ring->ring_type == HNS3_TYPE_CSQ) {
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
			       HNS3_NIC_SW_RST_RDY);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	} else {
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
	}
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
	hns3_cmd_config_regs(&hw->cmq.csq);
	hns3_cmd_config_regs(&hw->cmq.crq);
}

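/*
 * Reclaim CSQ descriptors that the firmware has consumed. Worked
 * example (hypothetical values): with desc_num = 1024, next_to_clean =
 * 1020 and a reported head of 4, clean = (4 - 1020 + 1024) % 1024 = 8
 * descriptors are reclaimed across the wrap-around.
 */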
static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
	struct hns3_cmq_ring *csq = &hw->cmq.csq;
	uint32_t head;
	uint32_t addr;
	int clean;

	head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
	addr = hns3_read_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG);
	if (!is_valid_csq_clean_head(csq, head) || addr == 0) {
		hns3_err(hw, "wrong cmd addr(%0x) head (%u, %u-%u)", addr, head,
			 csq->next_to_use, csq->next_to_clean);
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_atomic_store_explicit(&hw->reset.disable_cmd, 1,
					 rte_memory_order_relaxed);
			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
		}

		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

static int
hns3_cmd_csq_done(struct hns3_hw *hw)
{
	uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
	/*
	 * These commands have several descriptors,
	 * and use the first one to save opcode and return value.
	 */
	static const uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
					       HNS3_OPC_STATS_32_BIT,
					       HNS3_OPC_STATS_MAC,
					       HNS3_OPC_STATS_MAC_ALL,
					       HNS3_OPC_QUERY_32_BIT_REG,
					       HNS3_OPC_QUERY_64_BIT_REG,
					       HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT,
					       HNS3_OPC_QUERY_CLEAR_PF_RAS_INT,
					       HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT,
					       HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT,
					       HNS3_OPC_QUERY_ALL_ERR_INFO};
	uint32_t i;

	for (i = 0; i < RTE_DIM(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
	static const struct {
		uint16_t imp_errcode;
		int linux_errcode;
	} hns3_cmdq_status[] = {
		{HNS3_CMD_EXEC_SUCCESS, 0},
		{HNS3_CMD_NO_AUTH, -EPERM},
		{HNS3_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
		{HNS3_CMD_QUEUE_FULL, -EXFULL},
		{HNS3_CMD_NEXT_ERR, -ENOSR},
		{HNS3_CMD_UNEXE_ERR, -ENOTBLK},
		{HNS3_CMD_PARA_ERR, -EINVAL},
		{HNS3_CMD_RESULT_ERR, -ERANGE},
		{HNS3_CMD_TIMEOUT, -ETIME},
		{HNS3_CMD_HILINK_ERR, -ENOLINK},
		{HNS3_CMD_QUEUE_ILLEGAL, -ENXIO},
		{HNS3_CMD_INVALID, -EBADR},
		{HNS3_CMD_ROH_CHECK_FAIL, -EINVAL}
	};

	uint32_t i;

	for (i = 0; i < RTE_DIM(hns3_cmdq_status); i++)
		if (hns3_cmdq_status[i].imp_errcode == desc_ret)
			return hns3_cmdq_status[i].linux_errcode;

	return -EREMOTEIO;
}

static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
			    struct hns3_cmd_desc *desc, int num, int ntc)
{
	uint16_t opcode, desc_ret;
	int current_ntc = ntc;
	int handle;

	opcode = rte_le_to_cpu_16(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		/* Get the result of hardware write back */
		desc[handle] = hw->cmq.csq.desc[current_ntc];

		current_ntc++;
		if (current_ntc == hw->cmq.csq.desc_num)
			current_ntc = 0;
	}

	if (likely(!hns3_is_special_opcode(opcode)))
		desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
	else
		desc_ret = rte_le_to_cpu_16(desc[0].retval);

	hw->cmq.last_status = desc_ret;
	return hns3_cmd_convert_err_code(desc_ret);
}

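/*
 * Busy-wait until the firmware has consumed everything posted to the
 * CSQ. The loop polls once per microsecond, so the total budget is
 * hw->cmq.tx_timeout microseconds; it bails out early if commands are
 * disabled or a reset becomes pending.
 */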
static int
hns3_cmd_poll_reply(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t timeout = 0;

	do {
		if (hns3_cmd_csq_done(hw))
			return 0;

		if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed)) {
			hns3_err(hw,
				 "Don't wait for reply because of disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hns3_err(hw, "Don't wait for reply because of reset pending");
			return -EIO;
		}

		rte_delay_us(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
	hns3_err(hw, "Wait for reply timeout");
	return -ETIME;
}

/*
 * hns3_cmd_send - send commands to the command queue
 *
 * @param hw
 *   pointer to the hw struct
 * @param desc
 *   prefilled descriptor(s) describing the command
 * @param num
 *   the number of descriptors to be sent
 * @return
 *   - -EBUSY if the device is resetting
 *   - -EIO   if the cmd csq is corrupted (due to reset) or
 *            a reset is pending
 *   - -ENOMEM/-ETIME/...(non-zero) on other errors
 *   - Zero   if the operation completed successfully
 *
 * Note: -EBUSY/-EIO are only used in the reset case.
 *
 * Note: this is the main send routine for the command queue; it cleans
 * the queue, posts the descriptors, and collects the firmware's reply.
 */
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
	struct hns3_cmd_desc *desc_to_use;
	int handle = 0;
	int retval;
	uint32_t ntc;

	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
		return -EBUSY;

	rte_spinlock_lock(&hw->cmq.csq.lock);

	/* Clean the command send queue */
	retval = hns3_cmd_csq_clean(hw);
	if (retval < 0) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return retval;
	}

	if (num > hns3_ring_space(&hw->cmq.csq)) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return -ENOMEM;
	}

	/*
	 * Record where this batch of descriptors starts in the ring; the
	 * hardware will write its results back to these slots.
	 */
	ntc = hw->cmq.csq.next_to_use;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

	/*
	 * If the command is synchronous, wait for the firmware to write
	 * back; when multiple descriptors are sent, the first one carries
	 * the flags to check.
	 */
	if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
		retval = hns3_cmd_poll_reply(hw);
		if (!retval)
			retval = hns3_cmd_get_hardware_reply(hw, desc, num,
							     ntc);
	}

	rte_spinlock_unlock(&hw->cmq.csq.lock);
	return retval;
}

static const char *
hns3_get_caps_name(uint32_t caps_id)
{
	const struct {
		enum HNS3_CAPS_BITS caps;
		const char *name;
	} dev_caps[] = {
		{ HNS3_CAPS_FD_QUEUE_REGION_B, "fd_queue_region" },
		{ HNS3_CAPS_PTP_B,             "ptp"             },
		{ HNS3_CAPS_SIMPLE_BD_B,       "simple_bd"       },
		{ HNS3_CAPS_TX_PUSH_B,         "tx_push"         },
		{ HNS3_CAPS_PHY_IMP_B,         "phy_imp"         },
		{ HNS3_CAPS_TQP_TXRX_INDEP_B,  "tqp_txrx_indep"  },
		{ HNS3_CAPS_HW_PAD_B,          "hw_pad"          },
		{ HNS3_CAPS_STASH_B,           "stash"           },
		{ HNS3_CAPS_UDP_TUNNEL_CSUM_B, "udp_tunnel_csum" },
		{ HNS3_CAPS_RAS_IMP_B,         "ras_imp"         },
		{ HNS3_CAPS_RXD_ADV_LAYOUT_B,  "rxd_adv_layout"  },
		{ HNS3_CAPS_TM_B,              "tm_capability"   },
		{ HNS3_CAPS_FC_AUTO_B,         "fc_autoneg"      }
	};
	uint32_t i;

	for (i = 0; i < RTE_DIM(dev_caps); i++) {
		if (dev_caps[i].caps == caps_id)
			return dev_caps[i].name;
	}

	return "unknown";
}

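/*
 * Strip user-masked bits from the capability word reported by firmware.
 * The two XORs below reduce to plain bit operations: caps_new =
 * caps_org & ~mask and caps_masked = caps_org & mask. Worked example
 * (hypothetical values): caps_org = 0b1011 with mask = 0b0010 yields
 * caps_new = 0b1001 and caps_masked = 0b0010.
 */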
static void
hns3_mask_capability(struct hns3_hw *hw,
		     struct hns3_query_version_cmd *cmd)
{
#define MAX_CAPS_BIT	64

	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint64_t caps_org, caps_new, caps_masked;
	uint32_t i;

	if (hns->dev_caps_mask == 0)
		return;

	memcpy(&caps_org, &cmd->caps[0], sizeof(caps_org));
	caps_org = rte_le_to_cpu_64(caps_org);
	caps_new = caps_org ^ (caps_org & hns->dev_caps_mask);
	caps_masked = caps_org ^ caps_new;
	caps_new = rte_cpu_to_le_64(caps_new);
	memcpy(&cmd->caps[0], &caps_new, sizeof(caps_new));

	for (i = 0; i < MAX_CAPS_BIT; i++) {
		if (!(caps_masked & BIT_ULL(i)))
			continue;
		hns3_info(hw, "mask capability: id-%u, name-%s.",
			  i, hns3_get_caps_name(i));
	}
}

static void
hns3_parse_capability(struct hns3_hw *hw,
		      struct hns3_query_version_cmd *cmd)
{
	uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);

	if (hns3_get_bit(caps, HNS3_CAPS_FD_QUEUE_REGION_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_PTP_B)) {
		/*
		 * PTP depends on a special packet type that the hardware
		 * reports only when the RXD advanced layout is enabled, so
		 * if the hardware doesn't support the RXD advanced layout,
		 * the driver must ignore the PTP capability.
		 */
		if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
			hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
		else
			hns3_warn(hw, "ignore PTP capability due to lack of "
				  "rxd advanced layout capability.");
	}
	if (hns3_get_bit(caps, HNS3_CAPS_SIMPLE_BD_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_SIMPLE_BD_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_RXD_ADV_LAYOUT_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
			     1);
	if (hns3_get_bit(caps, HNS3_CAPS_UDP_TUNNEL_CSUM_B))
		hns3_set_bit(hw->capability,
				HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_RAS_IMP_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RAS_IMP_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_TM_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TM_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_FC_AUTO_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_FC_AUTO_B, 1);
	if (hns3_get_bit(caps, HNS3_CAPS_GRO_B))
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_GRO_B, 1);
}

static uint32_t
hns3_build_api_caps(void)
{
	uint32_t api_caps = 0;

	hns3_set_bit(api_caps, HNS3_API_CAP_FLEX_RSS_TBL_B, 1);

	return rte_cpu_to_le_32(api_caps);
}

static void
hns3_set_dcb_capability(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct rte_pci_device *pci_dev;
	struct rte_eth_dev *eth_dev;
	uint16_t device_id;

	if (hns->is_vf)
		return;

	eth_dev = &rte_eth_devices[hw->data->port_id];
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	device_id = pci_dev->id.device_id;

	if (device_id == HNS3_DEV_ID_25GE_RDMA ||
	    device_id == HNS3_DEV_ID_50GE_RDMA ||
	    device_id == HNS3_DEV_ID_100G_RDMA_MACSEC ||
	    device_id == HNS3_DEV_ID_200G_RDMA ||
	    device_id == HNS3_DEV_ID_100G_ROH ||
	    device_id == HNS3_DEV_ID_200G_ROH)
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1);
}

static void
hns3_set_default_capability(struct hns3_hw *hw)
{
	hns3_set_dcb_capability(hw);

	/*
	 * The firmware of network engines based on HIP08 does not report
	 * some capabilities, such as GRO. Set default capabilities for it.
	 */
	if (hw->revision < PCI_REVISION_ID_HIP09_A)
		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_GRO_B, 1);
}

static int
hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
{
	struct hns3_query_version_cmd *resp;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
	resp = (struct hns3_query_version_cmd *)desc.data;
	resp->api_caps = hns3_build_api_caps();

	/* Query the firmware version and capabilities */
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	hw->fw_version = rte_le_to_cpu_32(resp->firmware);

	hns3_set_default_capability(hw);

	/*
	 * Make sure to mask the capabilities before parsing them, because
	 * masking overwrites resp's data.
	 */
	hns3_mask_capability(hw, resp);
	hns3_parse_capability(hw, resp);

	return 0;
}

int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
	int ret;

	/* Set up the locks for the command queues */
	rte_spinlock_init(&hw->cmq.csq.lock);
	rte_spinlock_init(&hw->cmq.crq.lock);

	/*
	 * Clear all command registers in case residual values
	 * were left behind.
	 */
	hns3_cmd_clear_regs(hw);

	/* Set the number of entries used by the command queues */
	hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
	hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

	/* Set the Tx write-back timeout */
	hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

	/* Set up the queue rings */
	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
		return ret;
	}

	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
		goto err_crq;
	}

	return 0;

err_crq:
	hns3_free_cmd_desc(hw, &hw->cmq.csq);

	return ret;
}

static void
hns3_update_dev_lsc_cap(struct hns3_hw *hw, int fw_compat_cmd_result)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];

	if (hw->adapter_state != HNS3_NIC_UNINITIALIZED)
		return;

	if (fw_compat_cmd_result != 0) {
		/*
		 * A non-zero fw_compat_cmd_result means the firmware doesn't
		 * support the link status change interrupt.
		 * The framework already set the RTE_ETH_DEV_INTR_LSC bit
		 * because the driver declared RTE_PCI_DRV_INTR_LSC in
		 * drv_flags, so the RTE_ETH_DEV_INTR_LSC capability must be
		 * cleared when the firmware is detected not to support the
		 * link status change interrupt.
		 */
		dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
	}
}

static void
hns3_set_fc_autoneg_cap(struct hns3_adapter *hns, int fw_compat_cmd_result)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_mac *mac = &hw->mac;

	if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) {
		hns->pf.support_fc_autoneg = true;
		return;
	}

	/*
	 * Flow control auto-negotiation requires the cooperation of the
	 * driver and the firmware.
	 */
	hns->pf.support_fc_autoneg = hns3_dev_get_support(hw, FC_AUTO) &&
				     fw_compat_cmd_result == 0;
}

static int
hns3_apply_fw_compat_cmd_result(struct hns3_hw *hw, int result)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);

	if (result != 0 && hns3_dev_get_support(hw, COPPER)) {
		hns3_err(hw, "firmware fails to initialize the PHY, ret = %d.",
			 result);
		return result;
	}

	hns3_update_dev_lsc_cap(hw, result);
	hns3_set_fc_autoneg_cap(hns, result);

	return 0;
}

static int
hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init)
{
	struct hns3_firmware_compat_cmd *req;
	struct hns3_cmd_desc desc;
	uint32_t compat = 0;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false);
	req = (struct hns3_firmware_compat_cmd *)desc.data;

	if (is_init) {
		hns3_set_bit(compat, HNS3_LINK_EVENT_REPORT_EN_B, 1);
		hns3_set_bit(compat, HNS3_NCSI_ERROR_REPORT_EN_B, 0);
		hns3_set_bit(compat, HNS3_LLRS_FEC_EN_B, 1);
		if (hns3_dev_get_support(hw, COPPER))
			hns3_set_bit(compat, HNS3_FIRMWARE_PHY_DRIVER_EN_B, 1);
		if (hns3_dev_get_support(hw, FC_AUTO))
			hns3_set_bit(compat, HNS3_MAC_FC_AUTONEG_EN_B, 1);
	}
	req->compat = rte_cpu_to_le_32(compat);

	return hns3_cmd_send(hw, &desc, 1);
}

int
hns3_cmd_init(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t version;
	int ret;

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);

	hw->cmq.csq.next_to_clean = 0;
	hw->cmq.csq.next_to_use = 0;
	hw->cmq.crq.next_to_clean = 0;
	hw->cmq.crq.next_to_use = 0;
	hns3_cmd_init_regs(hw);

	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);

	/*
	 * Check whether a new reset is pending: a higher-level reset may be
	 * triggered while a lower-level reset is still being processed.
	 */
	if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
		PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
		ret = -EBUSY;
		goto err_cmd_init;
	}
	rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);

	ret = hns3_cmd_query_firmware_version_and_capability(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
		goto err_cmd_init;
	}

	version = hw->fw_version;
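	/*
	 * The version word packs four byte-sized fields, highest byte
	 * first; e.g. a raw value of 0x01080200 (hypothetical) is printed
	 * as "1.8.2.0".
	 */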
	PMD_INIT_LOG(INFO, "The firmware version is %lu.%lu.%lu.%lu",
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				    HNS3_FW_VERSION_BYTE3_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				    HNS3_FW_VERSION_BYTE2_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				    HNS3_FW_VERSION_BYTE1_S),
		     hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				    HNS3_FW_VERSION_BYTE0_S));

	if (hns->is_vf)
		return 0;

	/*
	 * Ask the firmware to enable some features. A fiber port can still
	 * work without them, but a copper port cannot, because the firmware
	 * would fail to take over the PHY.
	 */
	ret = hns3_firmware_compat_config(hw, true);
	if (ret)
		PMD_INIT_LOG(WARNING, "firmware compatible features not "
			     "supported, ret = %d.", ret);

	/*
	 * Apply the result of the firmware compatibility configuration:
	 * update the LSC and flow-control autonegotiation capabilities
	 * accordingly.
	 */
	ret = hns3_apply_fw_compat_cmd_result(hw, ret);
	if (ret)
		goto err_cmd_init;

	return 0;

err_cmd_init:
	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
	return ret;
}

static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	rte_spinlock_lock(&ring->lock);

	hns3_free_cmd_desc(hw, ring);

	rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
	hns3_destroy_queue(hw, &hw->cmq.csq);
	hns3_destroy_queue(hw, &hw->cmq.crq);
}

void
hns3_cmd_uninit(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);

	if (!hns->is_vf)
		(void)hns3_firmware_compat_config(hw, false);

	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);

	/*
	 * A delay is added to ensure that the register cleanup below does
	 * not run concurrently with firmware command processing, and that
	 * all outstanding commands have been executed. Concurrency may
	 * occur in two scenarios: an asynchronous command, or a command
	 * that timed out. If a command could not be executed because the
	 * firmware was busy, it will be processed in the firmware's next
	 * scheduling cycle.
	 */
	rte_delay_ms(HNS3_CMDQ_CLEAR_WAIT_TIME);

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);
	hns3_cmd_clear_regs(hw);
	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);
}
841