/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2024 ZTE Corporation
 */

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <bus_pci_driver.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_eal_paging.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_pci.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_spinlock.h>
#include <rte_branch_prediction.h>

#include "gdtc_rawdev.h"

/*
 * User field layout:
 * ep_id bits[15:12], vfunc_num bits[11:4], func_num bits[3:1], vfunc_active bit[0]
 * host ep_id: 5~8, ZF ep_id: 9
 */
#define ZXDH_GDMA_ZF_USER                       0x9000      /* ep4 pf0 */
#define ZXDH_GDMA_PF_NUM_SHIFT                  1
#define ZXDH_GDMA_VF_NUM_SHIFT                  4
#define ZXDH_GDMA_EP_ID_SHIFT                   12
#define ZXDH_GDMA_VF_EN                         1
#define ZXDH_GDMA_EPID_OFFSET                   5
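/*
 * Illustrative encoding (example values only, not taken from hardware):
 * port 0, pf 1, vf 3 gives
 * user = ((0 + ZXDH_GDMA_EPID_OFFSET) << 12) | (1 << 1) | ((3 - 1) << 4) | ZXDH_GDMA_VF_EN = 0x5023.
 * ZXDH_GDMA_ZF_USER (0x9000) is ep_id 9 (the ZF side) with the pf/vf fields cleared.
 */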

/* Register offset */
#define ZXDH_GDMA_BASE_OFFSET                   0x100000
#define ZXDH_GDMA_EXT_ADDR_OFFSET               0x218
#define ZXDH_GDMA_SAR_LOW_OFFSET                0x200
#define ZXDH_GDMA_DAR_LOW_OFFSET                0x204
#define ZXDH_GDMA_SAR_HIGH_OFFSET               0x234
#define ZXDH_GDMA_DAR_HIGH_OFFSET               0x238
#define ZXDH_GDMA_XFERSIZE_OFFSET               0x208
#define ZXDH_GDMA_CONTROL_OFFSET                0x230
#define ZXDH_GDMA_TC_STATUS_OFFSET              0x0
#define ZXDH_GDMA_STATUS_CLEAN_OFFSET           0x80
#define ZXDH_GDMA_LLI_L_OFFSET                  0x21c
#define ZXDH_GDMA_LLI_H_OFFSET                  0x220
#define ZXDH_GDMA_CHAN_CONTINUE_OFFSET          0x224
#define ZXDH_GDMA_TC_CNT_OFFSET                 0x23c
#define ZXDH_GDMA_LLI_USER_OFFSET               0x228

/* Control register */
#define ZXDH_GDMA_CHAN_ENABLE                   0x1
#define ZXDH_GDMA_CHAN_DISABLE                  0
#define ZXDH_GDMA_SOFT_CHAN                     0x2
#define ZXDH_GDMA_TC_INTR_ENABLE                0x10
#define ZXDH_GDMA_ALL_INTR_ENABLE               0x30
#define ZXDH_GDMA_SBS_SHIFT                     6           /* src burst size */
#define ZXDH_GDMA_SBL_SHIFT                     9           /* src burst length */
#define ZXDH_GDMA_DBS_SHIFT                     13          /* dest burst size */
#define ZXDH_GDMA_BURST_SIZE_MIN                0x1         /* 1 byte */
#define ZXDH_GDMA_BURST_SIZE_MEDIUM             0x4         /* 4 words */
#define ZXDH_GDMA_BURST_SIZE_MAX                0x6         /* 16 words */
#define ZXDH_GDMA_DEFAULT_BURST_LEN             0xf         /* 16 beats */
#define ZXDH_GDMA_TC_CNT_ENABLE                 (1 << 27)
#define ZXDH_GDMA_CHAN_FORCE_CLOSE              (1 << 31)

/* TC count & Error interrupt status register */
#define ZXDH_GDMA_SRC_LLI_ERR                   (1 << 16)
#define ZXDH_GDMA_SRC_DATA_ERR                  (1 << 17)
#define ZXDH_GDMA_DST_ADDR_ERR                  (1 << 18)
#define ZXDH_GDMA_ERR_STATUS                    (1 << 19)
#define ZXDH_GDMA_ERR_INTR_ENABLE               (1 << 20)
#define ZXDH_GDMA_TC_CNT_CLEAN                  (1)

#define ZXDH_GDMA_CHAN_SHIFT                    0x80
#define ZXDH_GDMA_LINK_END_NODE                 (1 << 30)
#define ZXDH_GDMA_CHAN_CONTINUE                 (1)

#define LOW32_MASK                              0xffffffff
#define LOW16_MASK                              0xffff

#define ZXDH_GDMA_TC_CNT_MAX                    0x10000

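/* Address of the idx-th buffer descriptor in a ring starting at addr, cast to type t */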
#define IDX_TO_ADDR(addr, idx, t) \
	((t)((uintptr_t)(addr) + (idx) * sizeof(struct zxdh_gdma_buff_desc)))

static int zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id);
static int zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id);

char zxdh_gdma_driver_name[] = "rawdev_zxdh_gdma";
char dev_name[] = "zxdh_gdma";

static inline struct zxdh_gdma_rawdev *
zxdh_gdma_rawdev_get_priv(const struct rte_rawdev *rawdev)
{
	return rawdev->dev_private;
}

static inline struct zxdh_gdma_queue *
zxdh_gdma_get_queue(struct rte_rawdev *dev, uint16_t queue_id)
{
	struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);

	if (queue_id >= ZXDH_GDMA_TOTAL_CHAN_NUM) {
		ZXDH_PMD_LOG(ERR, "queue id %d is invalid", queue_id);
		return NULL;
	}

	return &(gdmadev->vqs[queue_id]);
}

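/*
 * Channel registers live at BAR0 + ZXDH_GDMA_BASE_OFFSET; each channel owns a
 * ZXDH_GDMA_CHAN_SHIFT (0x80) byte window, so a register is addressed as
 * offset + queue_id * ZXDH_GDMA_CHAN_SHIFT relative to gdmadev->base_addr.
 */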
static uint32_t
zxdh_gdma_read_reg(struct rte_rawdev *dev, uint16_t queue_id, uint32_t offset)
{
	struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	uint32_t addr = 0;
	uint32_t val = 0;

	addr = offset + queue_id * ZXDH_GDMA_CHAN_SHIFT;
	val = *(uint32_t *)(gdmadev->base_addr + addr);

	return val;
}

static void
zxdh_gdma_write_reg(struct rte_rawdev *dev, uint16_t queue_id, uint32_t offset, uint32_t val)
{
	struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	uint32_t addr = 0;

	addr = offset + queue_id * ZXDH_GDMA_CHAN_SHIFT;
	*(uint32_t *)(gdmadev->base_addr + addr) = val;
}

static int
zxdh_gdma_rawdev_info_get(struct rte_rawdev *dev,
		__rte_unused rte_rawdev_obj_t dev_info,
		__rte_unused size_t dev_info_size)
{
	if (dev == NULL)
		return -EINVAL;

	return 0;
}

static int
zxdh_gdma_rawdev_configure(const struct rte_rawdev *dev,
		rte_rawdev_obj_t config,
		size_t config_size)
{
	struct zxdh_gdma_config *gdma_config = NULL;

	if ((dev == NULL) ||
		(config == NULL) ||
		(config_size != sizeof(struct zxdh_gdma_config)))
		return -EINVAL;

	gdma_config = (struct zxdh_gdma_config *)config;
	if (gdma_config->max_vqs > ZXDH_GDMA_TOTAL_CHAN_NUM) {
		ZXDH_PMD_LOG(ERR, "gdma supports up to %d queues", ZXDH_GDMA_TOTAL_CHAN_NUM);
		return -EINVAL;
	}

	return 0;
}

static int
zxdh_gdma_rawdev_start(struct rte_rawdev *dev)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;

	if (dev == NULL)
		return -EINVAL;

	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->device_state = ZXDH_GDMA_DEV_RUNNING;

	return 0;
}

static void
zxdh_gdma_rawdev_stop(struct rte_rawdev *dev)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;

	if (dev == NULL)
		return;

	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;
}

static int
zxdh_gdma_rawdev_reset(struct rte_rawdev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	return 0;
}

static int
zxdh_gdma_rawdev_close(struct rte_rawdev *dev)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_queue *queue = NULL;
	uint16_t queue_id = 0;

	if (dev == NULL)
		return -EINVAL;

	for (queue_id = 0; queue_id < ZXDH_GDMA_TOTAL_CHAN_NUM; queue_id++) {
		queue = zxdh_gdma_get_queue(dev, queue_id);
		if ((queue == NULL) || (queue->enable == 0))
			continue;

		zxdh_gdma_queue_free(dev, queue_id);
	}
	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;

	return 0;
}

static int
zxdh_gdma_rawdev_queue_setup(struct rte_rawdev *dev,
		uint16_t queue_id,
		rte_rawdev_obj_t queue_conf,
		size_t conf_size)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_queue *queue = NULL;
	struct zxdh_gdma_queue_config *qconfig = NULL;
	struct zxdh_gdma_rbp *rbp = NULL;
	uint16_t i = 0;
	uint8_t is_txq = 0;
	uint32_t src_user = 0;
	uint32_t dst_user = 0;

	if (dev == NULL)
		return -EINVAL;

	if ((queue_conf == NULL) || (conf_size != sizeof(struct zxdh_gdma_queue_config)))
		return -EINVAL;

	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	qconfig = (struct zxdh_gdma_queue_config *)queue_conf;

	for (i = 0; i < ZXDH_GDMA_TOTAL_CHAN_NUM; i++) {
		if (gdmadev->vqs[i].enable == 0)
			break;
	}
	if (i >= ZXDH_GDMA_TOTAL_CHAN_NUM) {
		ZXDH_PMD_LOG(ERR, "Failed to setup queue, no avail queues");
		return -1;
	}
	queue_id = i;
	if (zxdh_gdma_queue_init(dev, queue_id) != 0) {
		ZXDH_PMD_LOG(ERR, "Failed to init queue");
		return -1;
	}
	queue = &(gdmadev->vqs[queue_id]);

	rbp = qconfig->rbp;
	if ((rbp->srbp != 0) && (rbp->drbp == 0)) {
		is_txq = 0;
		dst_user = ZXDH_GDMA_ZF_USER;
		src_user = ((rbp->spfid << ZXDH_GDMA_PF_NUM_SHIFT) |
				((rbp->sportid + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));

		if (rbp->svfid != 0)
			src_user |= (ZXDH_GDMA_VF_EN |
					((rbp->svfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));

		ZXDH_PMD_LOG(DEBUG, "rxq->qidx:%d setup src_user(ep:%d pf:%d vf:%d) success",
				queue_id, (uint8_t)rbp->sportid, (uint8_t)rbp->spfid,
				(uint8_t)rbp->svfid);
	} else if ((rbp->srbp == 0) && (rbp->drbp != 0)) {
		is_txq = 1;
		src_user = ZXDH_GDMA_ZF_USER;
		dst_user = ((rbp->dpfid << ZXDH_GDMA_PF_NUM_SHIFT) |
				((rbp->dportid + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));

		if (rbp->dvfid != 0)
			dst_user |= (ZXDH_GDMA_VF_EN |
					((rbp->dvfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));

		ZXDH_PMD_LOG(DEBUG, "txq->qidx:%d setup dst_user(ep:%d pf:%d vf:%d) success",
				queue_id, (uint8_t)rbp->dportid, (uint8_t)rbp->dpfid,
				(uint8_t)rbp->dvfid);
	} else {
		ZXDH_PMD_LOG(ERR, "Failed to setup queue, srbp/drbp is invalid");
		return -EINVAL;
	}
	queue->is_txq = is_txq;

	/* setup queue user info */
	queue->user = (src_user & LOW16_MASK) | (dst_user << 16);

	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_EXT_ADDR_OFFSET, queue->user);
	gdmadev->used_num++;

	return queue_id;
}

static int
zxdh_gdma_rawdev_queue_release(struct rte_rawdev *dev, uint16_t queue_id)
{
	struct zxdh_gdma_queue *queue = NULL;

	if (dev == NULL)
		return -EINVAL;

	queue = zxdh_gdma_get_queue(dev, queue_id);
	if ((queue == NULL) || (queue->enable == 0))
		return -EINVAL;

	zxdh_gdma_queue_free(dev, queue_id);

	return 0;
}

static int
zxdh_gdma_rawdev_get_attr(struct rte_rawdev *dev,
				__rte_unused const char *attr_name,
				uint64_t *attr_value)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_attr *gdma_attr = NULL;

	if ((dev == NULL) || (attr_value == NULL))
		return -EINVAL;

	gdmadev   = zxdh_gdma_rawdev_get_priv(dev);
	gdma_attr = (struct zxdh_gdma_attr *)attr_value;
	gdma_attr->num_hw_queues = gdmadev->used_num;

	return 0;
}

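/*
 * Compose the CONTROL register value: channel enable, soft channel mode and the
 * default burst size/length; optionally enable the transfer-complete counter.
 */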
static inline void
zxdh_gdma_control_cal(uint32_t *val, uint8_t tc_enable)
{
	*val = (ZXDH_GDMA_CHAN_ENABLE |
			ZXDH_GDMA_SOFT_CHAN |
			(ZXDH_GDMA_DEFAULT_BURST_LEN << ZXDH_GDMA_SBL_SHIFT) |
			(ZXDH_GDMA_BURST_SIZE_MAX << ZXDH_GDMA_SBS_SHIFT) |
			(ZXDH_GDMA_BURST_SIZE_MAX << ZXDH_GDMA_DBS_SHIFT));

	if (tc_enable != 0)
		*val |= ZXDH_GDMA_TC_CNT_ENABLE;
}

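/*
 * Derive the 32-bit user field for a job: the lower 16 bits carry the source
 * endpoint, the upper 16 bits the destination. Jobs without a direction flag
 * fall back to the user value programmed at queue setup.
 */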
static inline uint32_t
zxdh_gdma_user_get(struct zxdh_gdma_queue *queue, struct zxdh_gdma_job *job)
{
	uint32_t src_user = 0;
	uint32_t dst_user = 0;

	if ((job->flags & ZXDH_GDMA_JOB_DIR_MASK) == 0) {
		ZXDH_PMD_LOG(DEBUG, "job flags:0x%x default user:0x%x",
				job->flags, queue->user);
		return queue->user;
	} else if ((job->flags & ZXDH_GDMA_JOB_DIR_TX) != 0) {
		src_user = ZXDH_GDMA_ZF_USER;
		dst_user = ((job->pf_id << ZXDH_GDMA_PF_NUM_SHIFT) |
				((job->ep_id + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));

		if (job->vf_id != 0)
			dst_user |= (ZXDH_GDMA_VF_EN |
					((job->vf_id - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
	} else {
		dst_user = ZXDH_GDMA_ZF_USER;
		src_user = ((job->pf_id << ZXDH_GDMA_PF_NUM_SHIFT) |
				((job->ep_id + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));

		if (job->vf_id != 0)
			src_user |= (ZXDH_GDMA_VF_EN |
					((job->vf_id - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
	}
	ZXDH_PMD_LOG(DEBUG, "job flags:0x%x ep_id:%u, pf_id:%u, vf_id:%u, user:0x%x",
			job->flags, job->ep_id, job->pf_id, job->vf_id,
			(src_user & LOW16_MASK) | (dst_user << 16));

	return (src_user & LOW16_MASK) | (dst_user << 16);
}

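/*
 * Fill the next buffer descriptor in the hardware ring. A non-NULL job produces
 * a data descriptor chained to the following slot; a NULL job appends the empty
 * terminator descriptor and, once the queue is running, re-links the previous
 * terminator so the hardware can continue down the list.
 */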
static inline void
zxdh_gdma_fill_bd(struct zxdh_gdma_queue *queue, struct zxdh_gdma_job *job)
{
	struct zxdh_gdma_buff_desc *bd = NULL;
	uint32_t val = 0;
	uint64_t next_bd_addr = 0;
	uint16_t avail_idx = 0;

	avail_idx = queue->ring.avail_idx;
	bd = &(queue->ring.desc[avail_idx]);
	memset(bd, 0, sizeof(struct zxdh_gdma_buff_desc));

	/* data bd */
	if (job != NULL) {
		zxdh_gdma_control_cal(&val, 1);
		next_bd_addr   = IDX_TO_ADDR(queue->ring.ring_mem,
				(avail_idx + 1) % ZXDH_GDMA_RING_SIZE, uint64_t);
		bd->SrcAddr_L  = job->src & LOW32_MASK;
		bd->DstAddr_L  = job->dest & LOW32_MASK;
		bd->SrcAddr_H  = (job->src >> 32) & LOW32_MASK;
		bd->DstAddr_H  = (job->dest >> 32) & LOW32_MASK;
		bd->Xpara      = job->len;
		bd->ExtAddr    = zxdh_gdma_user_get(queue, job);
		bd->LLI_Addr_L = (next_bd_addr >> 6) & LOW32_MASK;
		bd->LLI_Addr_H = next_bd_addr >> 38;
		bd->LLI_User   = ZXDH_GDMA_ZF_USER;
		bd->Control    = val;
	} else {
		zxdh_gdma_control_cal(&val, 0);
		next_bd_addr   = IDX_TO_ADDR(queue->ring.ring_mem, avail_idx, uint64_t);
		bd->ExtAddr    = queue->user;
		bd->LLI_User   = ZXDH_GDMA_ZF_USER;
		bd->Control    = val;
		bd->LLI_Addr_L = (next_bd_addr >> 6) & LOW32_MASK;
		bd->LLI_Addr_H = (next_bd_addr >> 38) | ZXDH_GDMA_LINK_END_NODE;
		if (queue->flag != 0) {
			bd = IDX_TO_ADDR(queue->ring.desc,
					queue->ring.last_avail_idx,
					struct zxdh_gdma_buff_desc*);
			next_bd_addr = IDX_TO_ADDR(queue->ring.ring_mem,
					(queue->ring.last_avail_idx + 1) % ZXDH_GDMA_RING_SIZE,
					uint64_t);
			bd->LLI_Addr_L  = (next_bd_addr >> 6) & LOW32_MASK;
			bd->LLI_Addr_H  = next_bd_addr >> 38;
			rte_wmb();
			bd->LLI_Addr_H &= ~ZXDH_GDMA_LINK_END_NODE;
		}
		/* Record the index of empty bd for dynamic chaining */
		queue->ring.last_avail_idx = avail_idx;
	}

	if (++avail_idx >= ZXDH_GDMA_RING_SIZE)
		avail_idx -= ZXDH_GDMA_RING_SIZE;

	queue->ring.avail_idx = avail_idx;
}

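/*
 * Enqueue up to count jobs on the queue given in context: build one data
 * descriptor per job plus a trailing empty descriptor, then kick the hardware,
 * either by programming the LLI base registers on first use or by writing
 * CHAN_CONTINUE afterwards. Returns the number of jobs actually accepted.
 */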
static int
zxdh_gdma_rawdev_enqueue_bufs(struct rte_rawdev *dev,
				__rte_unused struct rte_rawdev_buf **buffers,
				uint32_t count,
				rte_rawdev_obj_t context)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_queue *queue = NULL;
	struct zxdh_gdma_enqdeq *e_context = NULL;
	struct zxdh_gdma_job *job = NULL;
	uint16_t queue_id = 0;
	uint32_t val = 0;
	uint16_t i = 0;
	uint16_t free_cnt = 0;

	if (dev == NULL)
		return -EINVAL;

	if (unlikely((count < 1) || (context == NULL)))
		return -EINVAL;

	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	if (gdmadev->device_state == ZXDH_GDMA_DEV_STOPPED) {
		ZXDH_PMD_LOG(ERR, "gdma dev is stopped");
		return 0;
	}

	e_context = (struct zxdh_gdma_enqdeq *)context;
	queue_id = e_context->vq_id;
	queue = zxdh_gdma_get_queue(dev, queue_id);
	if ((queue == NULL) || (queue->enable == 0))
		return -EINVAL;

	free_cnt = queue->sw_ring.free_cnt;
	if (free_cnt == 0) {
		ZXDH_PMD_LOG(ERR, "queue %u is full, enq_idx:%u deq_idx:%u used_idx:%u",
				queue_id, queue->sw_ring.enq_idx,
				queue->sw_ring.deq_idx, queue->sw_ring.used_idx);
		return 0;
	} else if (free_cnt < count) {
		ZXDH_PMD_LOG(DEBUG, "job num %u > free_cnt, change to %u", count, free_cnt);
		count = free_cnt;
	}

	rte_spinlock_lock(&queue->enqueue_lock);

	/* Build bd list, the last bd is empty bd */
	for (i = 0; i < count; i++) {
		job = e_context->job[i];
		zxdh_gdma_fill_bd(queue, job);
	}
	zxdh_gdma_fill_bd(queue, NULL);

	if (unlikely(queue->flag == 0)) {
		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_L_OFFSET,
				(queue->ring.ring_mem >> 6) & LOW32_MASK);
		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_H_OFFSET,
				queue->ring.ring_mem >> 38);
		/* Start hardware handling */
		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_XFERSIZE_OFFSET, 0);
		zxdh_gdma_control_cal(&val, 0);
		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);
		queue->flag = 1;
	} else {
		val = ZXDH_GDMA_CHAN_CONTINUE;
		zxdh_gdma_write_reg(dev, queue->vq_id, ZXDH_GDMA_CHAN_CONTINUE_OFFSET, val);
	}

	/* job enqueue */
	for (i = 0; i < count; i++) {
		queue->sw_ring.job[queue->sw_ring.enq_idx] = e_context->job[i];
		if (++queue->sw_ring.enq_idx >= queue->queue_size)
			queue->sw_ring.enq_idx -= queue->queue_size;

		free_cnt--;
	}
	queue->sw_ring.free_cnt = free_cnt;
	queue->sw_ring.pend_cnt += count;
	rte_spinlock_unlock(&queue->enqueue_lock);

	return count;
}

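/*
 * Advance the software ring used index by cnt completed jobs; when the hardware
 * reported a data error, mark the most recently completed job as failed.
 */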
static inline void
zxdh_gdma_used_idx_update(struct zxdh_gdma_queue *queue, uint16_t cnt, uint8_t data_bd_err)
{
	uint16_t idx = 0;

	if (queue->sw_ring.used_idx + cnt < queue->queue_size)
		queue->sw_ring.used_idx += cnt;
	else
		queue->sw_ring.used_idx = queue->sw_ring.used_idx + cnt - queue->queue_size;

	if (data_bd_err == 1) {
		/* Update job status, the last job status is error */
		if (queue->sw_ring.used_idx == 0)
			idx = queue->queue_size - 1;
		else
			idx = queue->sw_ring.used_idx - 1;

		queue->sw_ring.job[idx]->status = 1;
	}
}

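/*
 * Poll the TC count register to learn how many descriptors completed since the
 * last call, recover the channel if an error status is set, then hand back up
 * to count finished jobs through context. Returns the number of jobs dequeued.
 */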
static int
zxdh_gdma_rawdev_dequeue_bufs(struct rte_rawdev *dev,
		__rte_unused struct rte_rawdev_buf **buffers,
		uint32_t count,
		rte_rawdev_obj_t context)
{
	struct zxdh_gdma_queue *queue = NULL;
	struct zxdh_gdma_enqdeq *e_context = NULL;
	uint16_t queue_id = 0;
	uint32_t val = 0;
	uint16_t tc_cnt = 0;
	uint16_t diff_cnt = 0;
	uint16_t i = 0;
	uint16_t bd_idx = 0;
	uint64_t next_bd_addr = 0;
	uint8_t data_bd_err = 0;

	if ((dev == NULL) || (context == NULL))
		return -EINVAL;

	e_context = (struct zxdh_gdma_enqdeq *)context;
	queue_id = e_context->vq_id;
	queue = zxdh_gdma_get_queue(dev, queue_id);
	if ((queue == NULL) || (queue->enable == 0))
		return -EINVAL;

	if (queue->sw_ring.pend_cnt == 0)
		goto deq_job;

	/* Get data transmit count */
	val = zxdh_gdma_read_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET);
	tc_cnt = val & LOW16_MASK;
	if (tc_cnt >= queue->tc_cnt)
		diff_cnt = tc_cnt - queue->tc_cnt;
	else
		diff_cnt = tc_cnt + ZXDH_GDMA_TC_CNT_MAX - queue->tc_cnt;

	queue->tc_cnt = tc_cnt;

	/* Data transmit error, channel stopped */
	if ((val & ZXDH_GDMA_ERR_STATUS) != 0) {
		next_bd_addr  = zxdh_gdma_read_reg(dev, queue_id, ZXDH_GDMA_LLI_L_OFFSET);
		next_bd_addr |= ((uint64_t)zxdh_gdma_read_reg(dev, queue_id,
				ZXDH_GDMA_LLI_H_OFFSET) << 32);
		next_bd_addr  = next_bd_addr << 6;
		bd_idx = (next_bd_addr - queue->ring.ring_mem) / sizeof(struct zxdh_gdma_buff_desc);
		if ((val & ZXDH_GDMA_SRC_DATA_ERR) || (val & ZXDH_GDMA_DST_ADDR_ERR)) {
			diff_cnt++;
			data_bd_err = 1;
		}
		ZXDH_PMD_LOG(INFO, "queue%d is err(0x%x) next_bd_idx:%u ll_addr:0x%"PRIx64" def user:0x%x",
				queue_id, val, bd_idx, next_bd_addr, queue->user);

		ZXDH_PMD_LOG(INFO, "Clean up error status");
		val = ZXDH_GDMA_ERR_STATUS | ZXDH_GDMA_ERR_INTR_ENABLE;
		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET, val);

		ZXDH_PMD_LOG(INFO, "Restart channel");
		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_XFERSIZE_OFFSET, 0);
		zxdh_gdma_control_cal(&val, 0);
		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);
	}

	if (diff_cnt != 0) {
		zxdh_gdma_used_idx_update(queue, diff_cnt, data_bd_err);
		queue->sw_ring.deq_cnt += diff_cnt;
		queue->sw_ring.pend_cnt -= diff_cnt;
	}

deq_job:
	if (queue->sw_ring.deq_cnt == 0)
		return 0;
	else if (queue->sw_ring.deq_cnt < count)
		count = queue->sw_ring.deq_cnt;

	queue->sw_ring.deq_cnt -= count;

	for (i = 0; i < count; i++) {
		e_context->job[i] = queue->sw_ring.job[queue->sw_ring.deq_idx];
		queue->sw_ring.job[queue->sw_ring.deq_idx] = NULL;
		if (++queue->sw_ring.deq_idx >= queue->queue_size)
			queue->sw_ring.deq_idx -= queue->queue_size;
	}
	queue->sw_ring.free_cnt += count;

	return count;
}

static const struct rte_rawdev_ops zxdh_gdma_rawdev_ops = {
	.dev_info_get = zxdh_gdma_rawdev_info_get,
	.dev_configure = zxdh_gdma_rawdev_configure,
	.dev_start = zxdh_gdma_rawdev_start,
	.dev_stop = zxdh_gdma_rawdev_stop,
	.dev_close = zxdh_gdma_rawdev_close,
	.dev_reset = zxdh_gdma_rawdev_reset,

	.queue_setup = zxdh_gdma_rawdev_queue_setup,
	.queue_release = zxdh_gdma_rawdev_queue_release,

	.attr_get = zxdh_gdma_rawdev_get_attr,

	.enqueue_bufs = zxdh_gdma_rawdev_enqueue_bufs,
	.dequeue_bufs = zxdh_gdma_rawdev_dequeue_bufs,
};

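/*
 * Allocate the software job ring and the IOVA-contiguous descriptor ring for a
 * channel, then force the hardware channel closed, clear its error/TC status
 * and program the LLI user field.
 */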
static int
zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id)
{
	char name[RTE_MEMZONE_NAMESIZE];
	struct zxdh_gdma_queue *queue = NULL;
	const struct rte_memzone *mz = NULL;
	uint32_t size = 0;
	int ret = 0;

	queue = zxdh_gdma_get_queue(dev, queue_id);
	if (queue == NULL)
		return -EINVAL;

	queue->enable = 1;
	queue->vq_id  = queue_id;
	queue->flag   = 0;
	queue->tc_cnt = 0;

	/* Init sw_ring */
	queue->sw_ring.job = rte_calloc(NULL, queue->queue_size, sizeof(struct zxdh_gdma_job *), 0);
	if (queue->sw_ring.job == NULL) {
		ZXDH_PMD_LOG(ERR, "can not allocate sw_ring");
		ret = -ENOMEM;
		goto free_queue;
	}

	/* Cache at most size-1 jobs in the ring to avoid overwriting descriptors the hardware may prefetch */
	queue->sw_ring.free_cnt = queue->queue_size - 1;
	queue->sw_ring.deq_cnt  = 0;
	queue->sw_ring.pend_cnt = 0;
	queue->sw_ring.enq_idx  = 0;
	queue->sw_ring.deq_idx  = 0;
	queue->sw_ring.used_idx = 0;

	/* Init ring */
	snprintf(name, RTE_MEMZONE_NAMESIZE, "gdma_vq%d_ring", queue_id);
	size = ZXDH_GDMA_RING_SIZE * sizeof(struct zxdh_gdma_buff_desc);
	mz = rte_memzone_reserve_aligned(name, size, rte_socket_id(),
			RTE_MEMZONE_IOVA_CONTIG, size);
	if (mz == NULL) {
		if (rte_errno == EEXIST)
			mz = rte_memzone_lookup(name);
		if (mz == NULL) {
			ZXDH_PMD_LOG(ERR, "can not allocate ring %s", name);
			ret = -ENOMEM;
			goto free_queue;
		}
	}
	memset(mz->addr, 0, size);
	queue->ring.ring_mz   = mz;
	queue->ring.desc      = (struct zxdh_gdma_buff_desc *)(mz->addr);
	queue->ring.ring_mem  = mz->iova;
	queue->ring.avail_idx = 0;
	ZXDH_PMD_LOG(INFO, "queue%u ring phy addr:0x%"PRIx64" virt addr:%p",
			queue_id, mz->iova, mz->addr);

	/* Configure the hardware channel to the initial state */
	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET,
			ZXDH_GDMA_CHAN_FORCE_CLOSE);
	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET,
			ZXDH_GDMA_ERR_INTR_ENABLE | ZXDH_GDMA_ERR_STATUS | ZXDH_GDMA_TC_CNT_CLEAN);
	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_USER_OFFSET,
			ZXDH_GDMA_ZF_USER);

	return 0;

free_queue:
	zxdh_gdma_queue_free(dev, queue_id);
	return ret;
}

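/*
 * Disable the hardware channel and release the software ring and descriptor
 * memzone that back a queue, resetting its bookkeeping state.
 */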
static int
zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_queue *queue = NULL;
	uint32_t val = 0;

	queue = zxdh_gdma_get_queue(dev, queue_id);
	if (queue == NULL)
		return -EINVAL;

	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->used_num--;

	/* disable gdma channel */
	val = ZXDH_GDMA_CHAN_FORCE_CLOSE;
	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);

	queue->enable           = 0;
	queue->is_txq           = 0;
	queue->flag             = 0;
	queue->user             = 0;
	queue->tc_cnt           = 0;
	queue->ring.avail_idx   = 0;
	queue->sw_ring.free_cnt = 0;
	queue->sw_ring.deq_cnt  = 0;
	queue->sw_ring.pend_cnt = 0;
	queue->sw_ring.enq_idx  = 0;
	queue->sw_ring.deq_idx  = 0;
	queue->sw_ring.used_idx = 0;
	rte_free(queue->sw_ring.job);
	rte_memzone_free(queue->ring.ring_mz);

	return 0;
}

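/*
 * Map PCI BAR0 (resource0 in sysfs) of the device read/write so the GDMA
 * registers can be accessed directly from user space.
 */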
static int
zxdh_gdma_map_resource(struct rte_pci_device *dev)
{
	int fd = -1;
	char devname[PATH_MAX];
	void *mapaddr = NULL;
	struct rte_pci_addr *loc;

	loc = &dev->addr;
	snprintf(devname, sizeof(devname), "%s/" PCI_PRI_FMT "/resource0",
		rte_pci_get_sysfs_path(),
		loc->domain, loc->bus, loc->devid,
		loc->function);

	fd = open(devname, O_RDWR);
	if (fd < 0) {
		ZXDH_PMD_LOG(ERR, "Cannot open %s: %s", devname, strerror(errno));
		return -1;
	}

	/* Map the PCI memory resource of device */
	mapaddr = rte_mem_map(NULL, (size_t)dev->mem_resource[0].len,
			RTE_PROT_READ | RTE_PROT_WRITE,
			RTE_MAP_SHARED, fd, 0);
	if (mapaddr == NULL) {
		ZXDH_PMD_LOG(ERR, "cannot map resource(%d, 0x%zx): %s (%p)",
				fd, (size_t)dev->mem_resource[0].len,
				rte_strerror(rte_errno), mapaddr);
		close(fd);
		return -1;
	}

	close(fd);
	dev->mem_resource[0].addr = mapaddr;

	return 0;
}

static void
zxdh_gdma_unmap_resource(void *requested_addr, size_t size)
{
	if (requested_addr == NULL)
		return;

	/* Unmap the PCI memory resource of device */
	if (rte_mem_unmap(requested_addr, size))
		ZXDH_PMD_LOG(ERR, "cannot mem unmap(%p, %#zx): %s",
				requested_addr, size, rte_strerror(rte_errno));
	else
		ZXDH_PMD_LOG(DEBUG, "PCI memory unmapped at %p", requested_addr);
}

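/*
 * PCI probe: map BAR0, allocate the rawdev with its private zxdh_gdma_rawdev
 * area and initialise every channel slot as disabled with the default queue
 * size.
 */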
static int
zxdh_gdma_rawdev_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	struct rte_rawdev *dev = NULL;
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_queue *queue = NULL;
	uint8_t i = 0;
	int ret;

	if (pci_dev->mem_resource[0].phys_addr == 0) {
		ZXDH_PMD_LOG(ERR, "PCI bar0 resource is invalid");
		return -1;
	}

	ret = zxdh_gdma_map_resource(pci_dev);
	if (ret != 0) {
		ZXDH_PMD_LOG(ERR, "Failed to mmap pci device(%s)", pci_dev->name);
		return -1;
	}
	ZXDH_PMD_LOG(INFO, "%s bar0 0x%"PRIx64" mapped at %p",
			pci_dev->name, pci_dev->mem_resource[0].phys_addr,
			pci_dev->mem_resource[0].addr);

	dev = rte_rawdev_pmd_allocate(dev_name, sizeof(struct zxdh_gdma_rawdev), rte_socket_id());
	if (dev == NULL) {
		ZXDH_PMD_LOG(ERR, "Unable to allocate gdma rawdev");
		goto err_out;
	}
	ZXDH_PMD_LOG(INFO, "Init %s on NUMA node %d, dev_id is %d",
			dev_name, rte_socket_id(), dev->dev_id);

	dev->dev_ops = &zxdh_gdma_rawdev_ops;
	dev->device = &pci_dev->device;
	dev->driver_name = zxdh_gdma_driver_name;
	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;
	gdmadev->rawdev = dev;
	gdmadev->queue_num = ZXDH_GDMA_TOTAL_CHAN_NUM;
	gdmadev->used_num = 0;
	gdmadev->base_addr = (uintptr_t)pci_dev->mem_resource[0].addr + ZXDH_GDMA_BASE_OFFSET;

	for (i = 0; i < ZXDH_GDMA_TOTAL_CHAN_NUM; i++) {
		queue = &(gdmadev->vqs[i]);
		queue->enable = 0;
		queue->queue_size = ZXDH_GDMA_QUEUE_SIZE;
		rte_spinlock_init(&(queue->enqueue_lock));
	}

	return 0;

err_out:
	zxdh_gdma_unmap_resource(pci_dev->mem_resource[0].addr,
			(size_t)pci_dev->mem_resource[0].len);
	return -1;
}

static int
zxdh_gdma_rawdev_remove(struct rte_pci_device *pci_dev)
{
	struct rte_rawdev *dev = NULL;
	int ret = 0;

	dev = rte_rawdev_pmd_get_named_dev(dev_name);
	if (dev == NULL)
		return -EINVAL;

	/* rte_rawdev_close is called by pmd_release */
	ret = rte_rawdev_pmd_release(dev);
	if (ret != 0) {
		ZXDH_PMD_LOG(ERR, "Device cleanup failed");
		return -1;
	}

	zxdh_gdma_unmap_resource(pci_dev->mem_resource[0].addr,
			(size_t)pci_dev->mem_resource[0].len);

	ZXDH_PMD_LOG(DEBUG, "rawdev %s remove done!", dev_name);

	return ret;
}

static const struct rte_pci_id zxdh_gdma_rawdev_map[] = {
	{ RTE_PCI_DEVICE(ZXDH_GDMA_VENDORID, ZXDH_GDMA_DEVICEID) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver zxdh_gdma_rawdev_pmd = {
	.id_table = zxdh_gdma_rawdev_map,
	.drv_flags = 0,
	.probe = zxdh_gdma_rawdev_probe,
	.remove = zxdh_gdma_rawdev_remove,
};

RTE_PMD_REGISTER_PCI(zxdh_gdma_rawdev_pci_driver, zxdh_gdma_rawdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(zxdh_gdma_rawdev_pci_driver, zxdh_gdma_rawdev_map);
RTE_LOG_REGISTER_DEFAULT(zxdh_gdma_rawdev_logtype, NOTICE);