xref: /dpdk/drivers/net/hns3/hns3_cmd.c (revision 9e991f217fc8719e38a812dc280dba5f84db9f59)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>
#include <inttypes.h>
#include <unistd.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_intr.h"
#include "hns3_logs.h"

#define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ)

#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)

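/*
 * Number of free descriptors left in the ring. One slot is always kept
 * unused so that a full ring can be distinguished from an empty one.
 */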
static int
hns3_ring_space(struct hns3_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

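/*
 * Check that the head index reported by hardware lies between the
 * driver's next_to_clean and next_to_use indexes, taking ring
 * wrap-around into account.
 */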
static bool
is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

/*
 * hns3_allocate_dma_mem - Specific memory allocation for the command function.
 * Reserve a memzone, which is a contiguous portion of physical memory
 * identified by a name.
 * @hw: pointer to the hw structure
 * @ring: pointer to the ring structure
 * @size: size of the memory requested
 * @alignment: what to align the allocation to
 */
static int
hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
		      uint64_t size, uint32_t alignment)
{
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG, alignment,
					 RTE_PGSIZE_2M);
	if (mz == NULL)
		return -ENOMEM;

	ring->buf_size = size;
	ring->desc = mz->addr;
	ring->desc_dma_addr = mz->iova;
	ring->zone = (const void *)mz;
	hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
		 mz->name, ring->desc_dma_addr);

	return 0;
}

static void
hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
		 ((const struct rte_memzone *)ring->zone)->name,
		 ring->desc_dma_addr);
	rte_memzone_free((const struct rte_memzone *)ring->zone);
	ring->buf_size = 0;
	ring->desc = NULL;
	ring->desc_dma_addr = 0;
	ring->zone = NULL;
}

static int
hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	int size  = ring->desc_num * sizeof(struct hns3_cmd_desc);

	if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
		hns3_err(hw, "allocate dma mem failed");
		return -ENOMEM;
	}

	return 0;
}

static void
hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	if (ring->desc)
		hns3_free_dma_mem(hw, ring);
}

static int
hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
{
	struct hns3_cmq_ring *ring =
		(ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->hw = hw;

	ret = hns3_alloc_cmd_desc(hw, ring);
	if (ret)
		hns3_err(hw, "descriptor %s alloc error %d",
			    (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);

	return ret;
}

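/*
 * Reset the flag field of a descriptor so that it can be submitted again:
 * restore the default flags (HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN) and
 * set the WR bit for a read (query) command or clear it for a write.
 */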
void
hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
{
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
	else
		desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
}

void
hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
			  enum hns3_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
	desc->opcode = rte_cpu_to_le_16(opcode);
	desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
}

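/* Zero the CSQ and CRQ base address, depth, head and tail registers. */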
static void
hns3_cmd_clear_regs(struct hns3_hw *hw)
{
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
	hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
}

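/*
 * Program the DMA base address and depth of a command ring into the
 * corresponding CSQ or CRQ registers and reset its head and tail pointers.
 * For the CSQ, the depth register also carries HNS3_NIC_SW_RST_RDY, which
 * presumably signals to the firmware that the driver is ready after reset.
 */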
static void
hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
{
	uint64_t dma = ring->desc_dma_addr;

	if (ring->ring_type == HNS3_TYPE_CSQ) {
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
			       HNS3_NIC_SW_RST_RDY);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
	} else {
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
			       lower_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
			       upper_32_bits(dma));
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
			       ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
		hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
	}
}

static void
hns3_cmd_init_regs(struct hns3_hw *hw)
{
	hns3_cmd_config_regs(&hw->cmq.csq);
	hns3_cmd_config_regs(&hw->cmq.crq);
}

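/*
 * Reclaim the CSQ descriptors that hardware has already processed, based on
 * the head index read from HNS3_CMDQ_TX_HEAD_REG. Return the number of
 * descriptors cleaned, or -EIO if the reported head is invalid, in which
 * case command use is disabled and a delayed reset is scheduled (primary
 * process only).
 */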
static int
hns3_cmd_csq_clean(struct hns3_hw *hw)
{
	struct hns3_cmq_ring *csq = &hw->cmq.csq;
	uint32_t head;
	int clean;

	head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

	if (!is_valid_csq_clean_head(csq, head)) {
		hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
			    csq->next_to_use, csq->next_to_clean);
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_atomic16_set(&hw->reset.disable_cmd, 1);
			hns3_schedule_delayed_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
		}

		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

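/* The CSQ is done when the hardware head index catches up with next_to_use. */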
static int
hns3_cmd_csq_done(struct hns3_hw *hw)
{
	uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool
hns3_is_special_opcode(uint16_t opcode)
{
	/*
	 * These commands have several descriptors,
	 * and use the first one to save opcode and return value.
	 */
	uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
				  HNS3_OPC_STATS_32_BIT,
				  HNS3_OPC_STATS_MAC,
				  HNS3_OPC_STATS_MAC_ALL,
				  HNS3_OPC_QUERY_32_BIT_REG,
				  HNS3_OPC_QUERY_64_BIT_REG};
	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

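/* Map a command return code reported by firmware to a negative errno value. */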
static int
hns3_cmd_convert_err_code(uint16_t desc_ret)
{
	switch (desc_ret) {
	case HNS3_CMD_EXEC_SUCCESS:
		return 0;
	case HNS3_CMD_NO_AUTH:
		return -EPERM;
	case HNS3_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case HNS3_CMD_QUEUE_FULL:
		return -EXFULL;
	case HNS3_CMD_NEXT_ERR:
		return -ENOSR;
	case HNS3_CMD_UNEXE_ERR:
		return -ENOTBLK;
	case HNS3_CMD_PARA_ERR:
		return -EINVAL;
	case HNS3_CMD_RESULT_ERR:
		return -ERANGE;
	case HNS3_CMD_TIMEOUT:
		return -ETIME;
	case HNS3_CMD_HILINK_ERR:
		return -ENOLINK;
	case HNS3_CMD_QUEUE_ILLEGAL:
		return -ENXIO;
	case HNS3_CMD_INVALID:
		return -EBADR;
	default:
		return -EIO;
	}
}

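/*
 * Copy the descriptors written back by hardware, starting at ring index
 * ntc, into the caller's buffer and translate the return code. Most
 * commands report their status in the last descriptor; the special
 * multi-descriptor commands keep it in the first one.
 */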
static int
hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
			    struct hns3_cmd_desc *desc, int num, int ntc)
{
	uint16_t opcode, desc_ret;
	int current_ntc = ntc;
	int handle;

	opcode = rte_le_to_cpu_16(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		/* Get the result of hardware write back */
		desc[handle] = hw->cmq.csq.desc[current_ntc];

		current_ntc++;
		if (current_ntc == hw->cmq.csq.desc_num)
			current_ntc = 0;
	}

	if (likely(!hns3_is_special_opcode(opcode)))
		desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
	else
		desc_ret = rte_le_to_cpu_16(desc[0].retval);

	hw->cmq.last_status = desc_ret;
	return hns3_cmd_convert_err_code(desc_ret);
}

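/*
 * Busy-wait in 1 us steps until the CSQ write back completes, commands are
 * disabled, a reset is pending, or the configured timeout expires.
 */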
static int
hns3_cmd_poll_reply(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	uint32_t timeout = 0;

	do {
		if (hns3_cmd_csq_done(hw))
			return 0;

		if (rte_atomic16_read(&hw->reset.disable_cmd)) {
			hns3_err(hw,
				 "Don't wait for reply because of disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hns3_err(hw, "Don't wait for reply because of reset pending");
			return -EIO;
		}

		rte_delay_us(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
	hns3_err(hw, "Wait for reply timeout");
	return -ETIME;
}

/*
 * hns3_cmd_send - send command to the command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor(s) describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue. It posts the
 * descriptors to the CSQ, cleans the queue, and waits for the reply
 * when the command is synchronous.
 */
int
hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
{
	struct hns3_cmd_desc *desc_to_use;
	int handle = 0;
	int retval;
	uint32_t ntc;

	if (rte_atomic16_read(&hw->reset.disable_cmd))
		return -EBUSY;

	rte_spinlock_lock(&hw->cmq.csq.lock);

	/* Clean the command send queue */
	retval = hns3_cmd_csq_clean(hw);
	if (retval < 0) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return retval;
	}

	if (num > hns3_ring_space(&hw->cmq.csq)) {
		rte_spinlock_unlock(&hw->cmq.csq.lock);
		return -ENOMEM;
	}

	/*
	 * Record the location of the descriptors in the ring for this
	 * request, which will be used when hardware writes them back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);

	/*
	 * If the command is synchronous, wait for the firmware to write back.
	 * If multiple descriptors are sent, use the first one to check.
	 */
	if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
		retval = hns3_cmd_poll_reply(hw);
		if (!retval)
			retval = hns3_cmd_get_hardware_reply(hw, desc, num,
							     ntc);
	}

	rte_spinlock_unlock(&hw->cmq.csq.lock);
	return retval;
}

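/* Query the firmware version and return it through 'version'. */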
static int
hns3_cmd_query_firmware_version(struct hns3_hw *hw, uint32_t *version)
{
	struct hns3_query_version_cmd *resp;
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
	resp = (struct hns3_query_version_cmd *)desc.data;

	/* Send the query command to firmware */
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret == 0)
		*version = rte_le_to_cpu_32(resp->firmware);

	return ret;
}

int
hns3_cmd_init_queue(struct hns3_hw *hw)
{
	int ret;

	/* Setup the locks for the command queues */
	rte_spinlock_init(&hw->cmq.csq.lock);
	rte_spinlock_init(&hw->cmq.crq.lock);

	/*
	 * Clear all the command registers,
	 * in case there are some residual values
	 */
	hns3_cmd_clear_regs(hw);

	/* Setup the number of queue entries for the command queues */
	hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
	hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
		return ret;
	}

	ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
	if (ret) {
		PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
		goto err_crq;
	}

	return 0;

err_crq:
	hns3_free_cmd_desc(hw, &hw->cmq.csq);

	return ret;
}

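/*
 * (Re)initialize the command queues: reset the software ring indexes and
 * mailbox response state and program the queue registers, then query the
 * firmware version. If a new reset is pending, commands remain disabled
 * and -EBUSY is returned.
 */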
int
hns3_cmd_init(struct hns3_hw *hw)
{
	int ret;

	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);

	hw->cmq.csq.next_to_clean = 0;
	hw->cmq.csq.next_to_use = 0;
	hw->cmq.crq.next_to_clean = 0;
	hw->cmq.crq.next_to_use = 0;
	hw->mbx_resp.head = 0;
	hw->mbx_resp.tail = 0;
	hw->mbx_resp.lost = 0;
	hns3_cmd_init_regs(hw);

	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);

	/*
	 * Check if there is a new reset pending, because a higher level
	 * reset may happen while a lower level reset is being processed.
	 */
	if (is_reset_pending(HNS3_DEV_HW_TO_ADAPTER(hw))) {
		PMD_INIT_LOG(ERR, "New reset pending, keep disable cmd");
		ret = -EBUSY;
		goto err_cmd_init;
	}
	rte_atomic16_clear(&hw->reset.disable_cmd);

	ret = hns3_cmd_query_firmware_version(hw, &hw->fw_version);
	if (ret) {
		PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
		goto err_cmd_init;
	}

	PMD_INIT_LOG(INFO, "The firmware version is %08x", hw->fw_version);

	return 0;

err_cmd_init:
	hns3_cmd_uninit(hw);
	return ret;
}

static void
hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
{
	rte_spinlock_lock(&ring->lock);

	hns3_free_cmd_desc(hw, ring);

	rte_spinlock_unlock(&ring->lock);
}

void
hns3_cmd_destroy_queue(struct hns3_hw *hw)
{
	hns3_destroy_queue(hw, &hw->cmq.csq);
	hns3_destroy_queue(hw, &hw->cmq.crq);
}

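/* Disable further command submission and clear the command queue registers. */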
void
hns3_cmd_uninit(struct hns3_hw *hw)
{
	rte_spinlock_lock(&hw->cmq.csq.lock);
	rte_spinlock_lock(&hw->cmq.crq.lock);
	rte_atomic16_set(&hw->reset.disable_cmd, 1);
	hns3_cmd_clear_regs(hw);
	rte_spinlock_unlock(&hw->cmq.crq.lock);
	rte_spinlock_unlock(&hw->cmq.csq.lock);
}
551