xref: /dpdk/drivers/raw/gdtc/gdtc_rawdev.c (revision 81c6bacb0cdc43bc90d72e66968777653fbc4e31)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2024 ZTE Corporation
3  */
4 
5 #include <errno.h>
6 #include <fcntl.h>
7 #include <inttypes.h>
8 #include <limits.h>
9 #include <stdio.h>
10 #include <string.h>
11 #include <unistd.h>
12 
13 #include <bus_pci_driver.h>
14 #include <rte_atomic.h>
15 #include <rte_common.h>
16 #include <rte_dev.h>
17 #include <rte_eal_paging.h>
18 #include <rte_errno.h>
19 #include <rte_lcore.h>
20 #include <rte_log.h>
21 #include <rte_malloc.h>
22 #include <rte_memzone.h>
23 #include <rte_pci.h>
24 #include <rte_rawdev.h>
25 #include <rte_rawdev_pmd.h>
26 #include <rte_spinlock.h>
27 #include <rte_branch_prediction.h>
28 
29 #include "gdtc_rawdev.h"
30 
31 /*
32  * User define:
33  * ep_id-bit[15:12] vfunc_num-bit[11:4] func_num-bit[3:1] vfunc_active-bit0
34  * host ep_id:5~8   zf ep_id:9
35  */
36 #define ZXDH_GDMA_ZF_USER                       0x9000      /* ep4 pf0 */
37 #define ZXDH_GDMA_PF_NUM_SHIFT                  1
38 #define ZXDH_GDMA_VF_NUM_SHIFT                  4
39 #define ZXDH_GDMA_EP_ID_SHIFT                   12
40 #define ZXDH_GDMA_VF_EN                         1
41 #define ZXDH_GDMA_EPID_OFFSET                   5
42 
43 /* Register offset */
44 #define ZXDH_GDMA_BASE_OFFSET                   0x100000
45 #define ZXDH_GDMA_EXT_ADDR_OFFSET               0x218
46 #define ZXDH_GDMA_SAR_LOW_OFFSET                0x200
47 #define ZXDH_GDMA_DAR_LOW_OFFSET                0x204
48 #define ZXDH_GDMA_SAR_HIGH_OFFSET               0x234
49 #define ZXDH_GDMA_DAR_HIGH_OFFSET               0x238
50 #define ZXDH_GDMA_XFERSIZE_OFFSET               0x208
51 #define ZXDH_GDMA_CONTROL_OFFSET                0x230
52 #define ZXDH_GDMA_TC_STATUS_OFFSET              0x0
53 #define ZXDH_GDMA_STATUS_CLEAN_OFFSET           0x80
54 #define ZXDH_GDMA_LLI_L_OFFSET                  0x21c
55 #define ZXDH_GDMA_LLI_H_OFFSET                  0x220
56 #define ZXDH_GDMA_CHAN_CONTINUE_OFFSET          0x224
57 #define ZXDH_GDMA_TC_CNT_OFFSET                 0x23c
58 #define ZXDH_GDMA_LLI_USER_OFFSET               0x228
59 
60 /* Control register */
61 #define ZXDH_GDMA_CHAN_ENABLE                   0x1
62 #define ZXDH_GDMA_CHAN_DISABLE                  0
63 #define ZXDH_GDMA_SOFT_CHAN                     0x2
64 #define ZXDH_GDMA_TC_INTR_ENABLE                0x10
65 #define ZXDH_GDMA_ALL_INTR_ENABLE               0x30
66 #define ZXDH_GDMA_SBS_SHIFT                     6           /* src burst size */
67 #define ZXDH_GDMA_SBL_SHIFT                     9           /* src burst length */
68 #define ZXDH_GDMA_DBS_SHIFT                     13          /* dest burst size */
69 #define ZXDH_GDMA_BURST_SIZE_MIN                0x1         /* 1 byte */
70 #define ZXDH_GDMA_BURST_SIZE_MEDIUM             0x4         /* 4 word */
71 #define ZXDH_GDMA_BURST_SIZE_MAX                0x6         /* 16 word */
72 #define ZXDH_GDMA_DEFAULT_BURST_LEN             0xf         /* 16 beats */
73 #define ZXDH_GDMA_TC_CNT_ENABLE                 (1 << 27)
74 #define ZXDH_GDMA_CHAN_FORCE_CLOSE              (1 << 31)
75 
76 /* TC count & Error interrupt status register */
77 #define ZXDH_GDMA_SRC_LLI_ERR                   (1 << 16)
78 #define ZXDH_GDMA_SRC_DATA_ERR                  (1 << 17)
79 #define ZXDH_GDMA_DST_ADDR_ERR                  (1 << 18)
80 #define ZXDH_GDMA_ERR_STATUS                    (1 << 19)
81 #define ZXDH_GDMA_ERR_INTR_ENABLE               (1 << 20)
82 #define ZXDH_GDMA_TC_CNT_CLEAN                  (1)
83 
84 #define ZXDH_GDMA_CHAN_SHIFT                    0x80
85 #define ZXDH_GDMA_LINK_END_NODE                 (1 << 30)
86 #define ZXDH_GDMA_CHAN_CONTINUE                 (1)
87 
88 #define LOW32_MASK                              0xffffffff
89 #define LOW16_MASK                              0xffff
90 
91 #define ZXDH_GDMA_TC_CNT_MAX                    0x10000
92 
93 #define IDX_TO_ADDR(addr, idx, t) \
94 	((t)((uintptr_t)(addr) + (idx) * sizeof(struct zxdh_gdma_buff_desc)))
95 
96 static int zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id);
97 static int zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id);
98 
99 char zxdh_gdma_driver_name[] = "rawdev_zxdh_gdma";
100 char dev_name[] = "zxdh_gdma";
101 
102 static inline struct zxdh_gdma_rawdev *
103 zxdh_gdma_rawdev_get_priv(const struct rte_rawdev *rawdev)
104 {
105 	return rawdev->dev_private;
106 }
107 
108 static inline struct zxdh_gdma_queue *
109 zxdh_gdma_get_queue(struct rte_rawdev *dev, uint16_t queue_id)
110 {
111 	struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);
112 
113 	if (queue_id >= ZXDH_GDMA_TOTAL_CHAN_NUM) {
114 		ZXDH_PMD_LOG(ERR, "queue id %d is invalid", queue_id);
115 		return NULL;
116 	}
117 
118 	return &(gdmadev->vqs[queue_id]);
119 }
120 
121 static uint32_t
122 zxdh_gdma_read_reg(struct rte_rawdev *dev, uint16_t queue_id, uint32_t offset)
123 {
124 	struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);
125 	uint32_t addr = 0;
126 	uint32_t val = 0;
127 
128 	addr = offset + queue_id * ZXDH_GDMA_CHAN_SHIFT;
129 	val = *(uint32_t *)(gdmadev->base_addr + addr);
130 
131 	return val;
132 }
133 
134 static void
135 zxdh_gdma_write_reg(struct rte_rawdev *dev, uint16_t queue_id, uint32_t offset, uint32_t val)
136 {
137 	struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);
138 	uint32_t addr = 0;
139 
140 	addr = offset + queue_id * ZXDH_GDMA_CHAN_SHIFT;
141 	*(uint32_t *)(gdmadev->base_addr + addr) = val;
142 }
143 
144 static int
145 zxdh_gdma_rawdev_info_get(struct rte_rawdev *dev,
146 		__rte_unused rte_rawdev_obj_t dev_info,
147 		__rte_unused size_t dev_info_size)
148 {
149 	if (dev == NULL)
150 		return -EINVAL;
151 
152 	return 0;
153 }
154 
155 static int
156 zxdh_gdma_rawdev_configure(const struct rte_rawdev *dev,
157 		rte_rawdev_obj_t config,
158 		size_t config_size)
159 {
160 	struct zxdh_gdma_config *gdma_config = NULL;
161 
162 	if ((dev == NULL) ||
163 		(config == NULL) ||
164 		(config_size != sizeof(struct zxdh_gdma_config)))
165 		return -EINVAL;
166 
167 	gdma_config = (struct zxdh_gdma_config *)config;
168 	if (gdma_config->max_vqs > ZXDH_GDMA_TOTAL_CHAN_NUM) {
169 		ZXDH_PMD_LOG(ERR, "gdma supports up to %d queues", ZXDH_GDMA_TOTAL_CHAN_NUM);
170 		return -EINVAL;
171 	}
172 
173 	return 0;
174 }
175 
176 static int
177 zxdh_gdma_rawdev_start(struct rte_rawdev *dev)
178 {
179 	struct zxdh_gdma_rawdev *gdmadev = NULL;
180 
181 	if (dev == NULL)
182 		return -EINVAL;
183 
184 	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
185 	gdmadev->device_state = ZXDH_GDMA_DEV_RUNNING;
186 
187 	return 0;
188 }
189 
190 static void
191 zxdh_gdma_rawdev_stop(struct rte_rawdev *dev)
192 {
193 	struct zxdh_gdma_rawdev *gdmadev = NULL;
194 
195 	if (dev == NULL)
196 		return;
197 
198 	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
199 	gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;
200 }
201 
202 static int
203 zxdh_gdma_rawdev_reset(struct rte_rawdev *dev)
204 {
205 	if (dev == NULL)
206 		return -EINVAL;
207 
208 	return 0;
209 }
210 
211 static int
212 zxdh_gdma_rawdev_close(struct rte_rawdev *dev)
213 {
214 	struct zxdh_gdma_rawdev *gdmadev = NULL;
215 	struct zxdh_gdma_queue *queue = NULL;
216 	uint16_t queue_id = 0;
217 
218 	if (dev == NULL)
219 		return -EINVAL;
220 
221 	for (queue_id = 0; queue_id < ZXDH_GDMA_TOTAL_CHAN_NUM; queue_id++) {
222 		queue = zxdh_gdma_get_queue(dev, queue_id);
223 		if ((queue == NULL) || (queue->enable == 0))
224 			continue;
225 
226 		zxdh_gdma_queue_free(dev, queue_id);
227 	}
228 	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
229 	gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;
230 
231 	return 0;
232 }
233 
234 static int
235 zxdh_gdma_rawdev_queue_setup(struct rte_rawdev *dev,
236 		uint16_t queue_id,
237 		rte_rawdev_obj_t queue_conf,
238 		size_t conf_size)
239 {
240 	struct zxdh_gdma_rawdev *gdmadev = NULL;
241 	struct zxdh_gdma_queue *queue = NULL;
242 	struct zxdh_gdma_queue_config *qconfig = NULL;
243 	struct zxdh_gdma_rbp *rbp = NULL;
244 	uint16_t i = 0;
245 	uint8_t is_txq = 0;
246 	uint32_t src_user = 0;
247 	uint32_t dst_user = 0;
248 
249 	if (dev == NULL)
250 		return -EINVAL;
251 
252 	if ((queue_conf == NULL) || (conf_size != sizeof(struct zxdh_gdma_queue_config)))
253 		return -EINVAL;
254 
255 	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
256 	qconfig = (struct zxdh_gdma_queue_config *)queue_conf;
257 
258 	for (i = 0; i < ZXDH_GDMA_TOTAL_CHAN_NUM; i++) {
259 		if (gdmadev->vqs[i].enable == 0)
260 			break;
261 	}
262 	if (i >= ZXDH_GDMA_TOTAL_CHAN_NUM) {
263 		ZXDH_PMD_LOG(ERR, "Failed to setup queue, no avail queues");
264 		return -1;
265 	}
266 	queue_id = i;
267 	if (zxdh_gdma_queue_init(dev, queue_id) != 0) {
268 		ZXDH_PMD_LOG(ERR, "Failed to init queue");
269 		return -1;
270 	}
271 	queue = &(gdmadev->vqs[queue_id]);
272 
273 	rbp = qconfig->rbp;
274 	if ((rbp->srbp != 0) && (rbp->drbp == 0)) {
275 		is_txq = 0;
276 		dst_user = ZXDH_GDMA_ZF_USER;
277 		src_user = ((rbp->spfid << ZXDH_GDMA_PF_NUM_SHIFT) |
278 				((rbp->sportid + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));
279 
280 		if (rbp->svfid != 0)
281 			src_user |= (ZXDH_GDMA_VF_EN |
282 					((rbp->svfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
283 
284 		ZXDH_PMD_LOG(DEBUG, "rxq->qidx:%d setup src_user(ep:%d pf:%d vf:%d) success",
285 				queue_id, (uint8_t)rbp->sportid, (uint8_t)rbp->spfid,
286 				(uint8_t)rbp->svfid);
287 	} else if ((rbp->srbp == 0) && (rbp->drbp != 0)) {
288 		is_txq = 1;
289 		src_user = ZXDH_GDMA_ZF_USER;
290 		dst_user = ((rbp->dpfid << ZXDH_GDMA_PF_NUM_SHIFT) |
291 				((rbp->dportid + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));
292 
293 		if (rbp->dvfid != 0)
294 			dst_user |= (ZXDH_GDMA_VF_EN |
295 					((rbp->dvfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
296 
297 		ZXDH_PMD_LOG(DEBUG, "txq->qidx:%d setup dst_user(ep:%d pf:%d vf:%d) success",
298 				queue_id, (uint8_t)rbp->dportid, (uint8_t)rbp->dpfid,
299 				(uint8_t)rbp->dvfid);
300 	} else {
301 		ZXDH_PMD_LOG(ERR, "Failed to setup queue, srbp/drbp is invalid");
302 		return -EINVAL;
303 	}
304 	queue->is_txq = is_txq;
305 
306 	/* setup queue user info */
307 	queue->user = (src_user & LOW16_MASK) | (dst_user << 16);
308 
309 	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_EXT_ADDR_OFFSET, queue->user);
310 	gdmadev->used_num++;
311 
312 	return queue_id;
313 }
314 
315 static int
316 zxdh_gdma_rawdev_queue_release(struct rte_rawdev *dev, uint16_t queue_id)
317 {
318 	struct zxdh_gdma_queue *queue = NULL;
319 
320 	if (dev == NULL)
321 		return -EINVAL;
322 
323 	queue = zxdh_gdma_get_queue(dev, queue_id);
324 	if ((queue == NULL) || (queue->enable == 0))
325 		return -EINVAL;
326 
327 	zxdh_gdma_queue_free(dev, queue_id);
328 
329 	return 0;
330 }
331 
332 static int
333 zxdh_gdma_rawdev_get_attr(struct rte_rawdev *dev,
334 				__rte_unused const char *attr_name,
335 				uint64_t *attr_value)
336 {
337 	struct zxdh_gdma_rawdev *gdmadev = NULL;
338 	struct zxdh_gdma_attr *gdma_attr = NULL;
339 
340 	if ((dev == NULL) || (attr_value == NULL))
341 		return -EINVAL;
342 
343 	gdmadev   = zxdh_gdma_rawdev_get_priv(dev);
344 	gdma_attr = (struct zxdh_gdma_attr *)attr_value;
345 	gdma_attr->num_hw_queues = gdmadev->used_num;
346 
347 	return 0;
348 }
349 
350 static inline void
351 zxdh_gdma_control_cal(uint32_t *val, uint8_t tc_enable)
352 {
353 	*val = (ZXDH_GDMA_CHAN_ENABLE |
354 			ZXDH_GDMA_SOFT_CHAN |
355 			(ZXDH_GDMA_DEFAULT_BURST_LEN << ZXDH_GDMA_SBL_SHIFT) |
356 			(ZXDH_GDMA_BURST_SIZE_MAX << ZXDH_GDMA_SBS_SHIFT) |
357 			(ZXDH_GDMA_BURST_SIZE_MAX << ZXDH_GDMA_DBS_SHIFT));
358 
359 	if (tc_enable != 0)
360 		*val |= ZXDH_GDMA_TC_CNT_ENABLE;
361 }
362 
363 static inline uint32_t
364 zxdh_gdma_user_get(struct zxdh_gdma_queue *queue, struct zxdh_gdma_job *job)
365 {
366 	uint32_t src_user = 0;
367 	uint32_t dst_user = 0;
368 
369 	if ((job->flags & ZXDH_GDMA_JOB_DIR_MASK) == 0) {
370 		ZXDH_PMD_LOG(DEBUG, "job flags:0x%x default user:0x%x",
371 				job->flags, queue->user);
372 		return queue->user;
373 	} else if ((job->flags & ZXDH_GDMA_JOB_DIR_TX) != 0) {
374 		src_user = ZXDH_GDMA_ZF_USER;
375 		dst_user = ((job->pf_id << ZXDH_GDMA_PF_NUM_SHIFT) |
376 				((job->ep_id + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));
377 
378 		if (job->vf_id != 0)
379 			dst_user |= (ZXDH_GDMA_VF_EN |
380 					((job->vf_id - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
381 	} else {
382 		dst_user = ZXDH_GDMA_ZF_USER;
383 		src_user = ((job->pf_id << ZXDH_GDMA_PF_NUM_SHIFT) |
384 				((job->ep_id + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));
385 
386 		if (job->vf_id != 0)
387 			src_user |= (ZXDH_GDMA_VF_EN |
388 					((job->vf_id - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
389 	}
390 	ZXDH_PMD_LOG(DEBUG, "job flags:0x%x ep_id:%u, pf_id:%u, vf_id:%u, user:0x%x",
391 			job->flags, job->ep_id, job->pf_id, job->vf_id,
392 			(src_user & LOW16_MASK) | (dst_user << 16));
393 
394 	return (src_user & LOW16_MASK) | (dst_user << 16);
395 }
396 
397 static inline void
398 zxdh_gdma_fill_bd(struct zxdh_gdma_queue *queue, struct zxdh_gdma_job *job)
399 {
400 	struct zxdh_gdma_buff_desc *bd = NULL;
401 	uint32_t val = 0;
402 	uint64_t next_bd_addr = 0;
403 	uint16_t avail_idx = 0;
404 
405 	avail_idx = queue->ring.avail_idx;
406 	bd = &(queue->ring.desc[avail_idx]);
407 	memset(bd, 0, sizeof(struct zxdh_gdma_buff_desc));
408 
409 	/* data bd */
410 	if (job != NULL) {
411 		zxdh_gdma_control_cal(&val, 1);
412 		next_bd_addr   = IDX_TO_ADDR(queue->ring.ring_mem,
413 				(avail_idx + 1) % ZXDH_GDMA_RING_SIZE, uint64_t);
414 		bd->SrcAddr_L  = job->src & LOW32_MASK;
415 		bd->DstAddr_L  = job->dest & LOW32_MASK;
416 		bd->SrcAddr_H  = (job->src >> 32) & LOW32_MASK;
417 		bd->DstAddr_H  = (job->dest >> 32) & LOW32_MASK;
418 		bd->Xpara      = job->len;
419 		bd->ExtAddr    = zxdh_gdma_user_get(queue, job);
420 		bd->LLI_Addr_L = (next_bd_addr >> 6) & LOW32_MASK;
421 		bd->LLI_Addr_H = next_bd_addr >> 38;
422 		bd->LLI_User   = ZXDH_GDMA_ZF_USER;
423 		bd->Control    = val;
424 	} else {
425 		zxdh_gdma_control_cal(&val, 0);
426 		next_bd_addr   = IDX_TO_ADDR(queue->ring.ring_mem, avail_idx, uint64_t);
427 		bd->ExtAddr    = queue->user;
428 		bd->LLI_User   = ZXDH_GDMA_ZF_USER;
429 		bd->Control    = val;
430 		bd->LLI_Addr_L = (next_bd_addr >> 6) & LOW32_MASK;
431 		bd->LLI_Addr_H = (next_bd_addr >> 38) | ZXDH_GDMA_LINK_END_NODE;
432 		if (queue->flag != 0) {
433 			bd = IDX_TO_ADDR(queue->ring.desc,
434 					queue->ring.last_avail_idx,
435 					struct zxdh_gdma_buff_desc*);
436 			next_bd_addr = IDX_TO_ADDR(queue->ring.ring_mem,
437 					(queue->ring.last_avail_idx + 1) % ZXDH_GDMA_RING_SIZE,
438 					uint64_t);
439 			bd->LLI_Addr_L  = (next_bd_addr >> 6) & LOW32_MASK;
440 			bd->LLI_Addr_H  = next_bd_addr >> 38;
441 			rte_wmb();
442 			bd->LLI_Addr_H &= ~ZXDH_GDMA_LINK_END_NODE;
443 		}
444 		/* Record the index of empty bd for dynamic chaining */
445 		queue->ring.last_avail_idx = avail_idx;
446 	}
447 
448 	if (++avail_idx >= ZXDH_GDMA_RING_SIZE)
449 		avail_idx -= ZXDH_GDMA_RING_SIZE;
450 
451 	queue->ring.avail_idx = avail_idx;
452 }
453 
454 static int
455 zxdh_gdma_rawdev_enqueue_bufs(struct rte_rawdev *dev,
456 				__rte_unused struct rte_rawdev_buf **buffers,
457 				uint32_t count,
458 				rte_rawdev_obj_t context)
459 {
460 	struct zxdh_gdma_rawdev *gdmadev = NULL;
461 	struct zxdh_gdma_queue *queue = NULL;
462 	struct zxdh_gdma_enqdeq *e_context = NULL;
463 	struct zxdh_gdma_job *job = NULL;
464 	uint16_t queue_id = 0;
465 	uint32_t val = 0;
466 	uint16_t i = 0;
467 	uint16_t free_cnt = 0;
468 
469 	if (dev == NULL)
470 		return -EINVAL;
471 
472 	if (unlikely((count < 1) || (context == NULL)))
473 		return -EINVAL;
474 
475 	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
476 	if (gdmadev->device_state == ZXDH_GDMA_DEV_STOPPED) {
477 		ZXDH_PMD_LOG(ERR, "gdma dev is stop");
478 		return 0;
479 	}
480 
481 	e_context = (struct zxdh_gdma_enqdeq *)context;
482 	queue_id = e_context->vq_id;
483 	queue = zxdh_gdma_get_queue(dev, queue_id);
484 	if ((queue == NULL) || (queue->enable == 0))
485 		return -EINVAL;
486 
487 	free_cnt = queue->sw_ring.free_cnt;
488 	if (free_cnt == 0) {
489 		ZXDH_PMD_LOG(ERR, "queue %u is full, enq_idx:%u deq_idx:%u used_idx:%u",
490 				queue_id, queue->sw_ring.enq_idx,
491 				queue->sw_ring.deq_idx, queue->sw_ring.used_idx);
492 		return 0;
493 	} else if (free_cnt < count) {
494 		ZXDH_PMD_LOG(DEBUG, "job num %u > free_cnt, change to %u", count, free_cnt);
495 		count = free_cnt;
496 	}
497 
498 	rte_spinlock_lock(&queue->enqueue_lock);
499 
500 	/* Build bd list, the last bd is empty bd */
501 	for (i = 0; i < count; i++) {
502 		job = e_context->job[i];
503 		zxdh_gdma_fill_bd(queue, job);
504 	}
505 	zxdh_gdma_fill_bd(queue, NULL);
506 
507 	if (unlikely(queue->flag == 0)) {
508 		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_L_OFFSET,
509 				(queue->ring.ring_mem >> 6) & LOW32_MASK);
510 		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_H_OFFSET,
511 				queue->ring.ring_mem >> 38);
512 		/* Start hardware handling */
513 		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_XFERSIZE_OFFSET, 0);
514 		zxdh_gdma_control_cal(&val, 0);
515 		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);
516 		queue->flag = 1;
517 	} else {
518 		val = ZXDH_GDMA_CHAN_CONTINUE;
519 		zxdh_gdma_write_reg(dev, queue->vq_id, ZXDH_GDMA_CHAN_CONTINUE_OFFSET, val);
520 	}
521 
522     /* job enqueue */
523 	for (i = 0; i < count; i++) {
524 		queue->sw_ring.job[queue->sw_ring.enq_idx] = e_context->job[i];
525 		if (++queue->sw_ring.enq_idx >= queue->queue_size)
526 			queue->sw_ring.enq_idx -= queue->queue_size;
527 
528 		free_cnt--;
529 	}
530 	queue->sw_ring.free_cnt = free_cnt;
531 	queue->sw_ring.pend_cnt += count;
532 	rte_spinlock_unlock(&queue->enqueue_lock);
533 
534 	return count;
535 }
536 
537 static inline void
538 zxdh_gdma_used_idx_update(struct zxdh_gdma_queue *queue, uint16_t cnt, uint8_t data_bd_err)
539 {
540 	uint16_t idx = 0;
541 
542 	if (queue->sw_ring.used_idx + cnt < queue->queue_size)
543 		queue->sw_ring.used_idx += cnt;
544 	else
545 		queue->sw_ring.used_idx = queue->sw_ring.used_idx + cnt - queue->queue_size;
546 
547 	if (data_bd_err == 1) {
548 		/* Update job status, the last job status is error */
549 		if (queue->sw_ring.used_idx == 0)
550 			idx = queue->queue_size - 1;
551 		else
552 			idx = queue->sw_ring.used_idx - 1;
553 
554 		queue->sw_ring.job[idx]->status = 1;
555 	}
556 }
557 
558 static int
559 zxdh_gdma_rawdev_dequeue_bufs(struct rte_rawdev *dev,
560 		__rte_unused struct rte_rawdev_buf **buffers,
561 		uint32_t count,
562 		rte_rawdev_obj_t context)
563 {
564 	struct zxdh_gdma_queue *queue = NULL;
565 	struct zxdh_gdma_enqdeq *e_context = NULL;
566 	uint16_t queue_id = 0;
567 	uint32_t val = 0;
568 	uint16_t tc_cnt = 0;
569 	uint16_t diff_cnt = 0;
570 	uint16_t i = 0;
571 	uint16_t bd_idx = 0;
572 	uint64_t next_bd_addr = 0;
573 	uint8_t data_bd_err = 0;
574 
575 	if ((dev == NULL) || (context == NULL))
576 		return -EINVAL;
577 
578 	e_context = (struct zxdh_gdma_enqdeq *)context;
579 	queue_id = e_context->vq_id;
580 	queue = zxdh_gdma_get_queue(dev, queue_id);
581 	if ((queue == NULL) || (queue->enable == 0))
582 		return -EINVAL;
583 
584 	if (queue->sw_ring.pend_cnt == 0)
585 		goto deq_job;
586 
587 	/* Get data transmit count */
588 	val = zxdh_gdma_read_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET);
589 	tc_cnt = val & LOW16_MASK;
590 	if (tc_cnt >= queue->tc_cnt)
591 		diff_cnt = tc_cnt - queue->tc_cnt;
592 	else
593 		diff_cnt = tc_cnt + ZXDH_GDMA_TC_CNT_MAX - queue->tc_cnt;
594 
595 	queue->tc_cnt = tc_cnt;
596 
597 	/* Data transmit error, channel stopped */
598 	if ((val & ZXDH_GDMA_ERR_STATUS) != 0) {
599 		next_bd_addr  = zxdh_gdma_read_reg(dev, queue_id, ZXDH_GDMA_LLI_L_OFFSET);
600 		next_bd_addr |= ((uint64_t)zxdh_gdma_read_reg(dev, queue_id,
601 				ZXDH_GDMA_LLI_H_OFFSET) << 32);
602 		next_bd_addr  = next_bd_addr << 6;
603 		bd_idx = (next_bd_addr - queue->ring.ring_mem) / sizeof(struct zxdh_gdma_buff_desc);
604 		if ((val & ZXDH_GDMA_SRC_DATA_ERR) || (val & ZXDH_GDMA_DST_ADDR_ERR)) {
605 			diff_cnt++;
606 			data_bd_err = 1;
607 		}
608 		ZXDH_PMD_LOG(INFO, "queue%d is err(0x%x) next_bd_idx:%u ll_addr:0x%"PRIx64" def user:0x%x",
609 				queue_id, val, bd_idx, next_bd_addr, queue->user);
610 
611 		ZXDH_PMD_LOG(INFO, "Clean up error status");
612 		val = ZXDH_GDMA_ERR_STATUS | ZXDH_GDMA_ERR_INTR_ENABLE;
613 		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET, val);
614 
615 		ZXDH_PMD_LOG(INFO, "Restart channel");
616 		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_XFERSIZE_OFFSET, 0);
617 		zxdh_gdma_control_cal(&val, 0);
618 		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);
619 	}
620 
621 	if (diff_cnt != 0) {
622 		zxdh_gdma_used_idx_update(queue, diff_cnt, data_bd_err);
623 		queue->sw_ring.deq_cnt += diff_cnt;
624 		queue->sw_ring.pend_cnt -= diff_cnt;
625 	}
626 
627 deq_job:
628 	if (queue->sw_ring.deq_cnt == 0)
629 		return 0;
630 	else if (queue->sw_ring.deq_cnt < count)
631 		count = queue->sw_ring.deq_cnt;
632 
633 	queue->sw_ring.deq_cnt -= count;
634 
635 	for (i = 0; i < count; i++) {
636 		e_context->job[i] = queue->sw_ring.job[queue->sw_ring.deq_idx];
637 		queue->sw_ring.job[queue->sw_ring.deq_idx] = NULL;
638 		if (++queue->sw_ring.deq_idx >= queue->queue_size)
639 			queue->sw_ring.deq_idx -= queue->queue_size;
640 	}
641 	queue->sw_ring.free_cnt += count;
642 
643 	return count;
644 }
645 
646 static const struct rte_rawdev_ops zxdh_gdma_rawdev_ops = {
647 	.dev_info_get = zxdh_gdma_rawdev_info_get,
648 	.dev_configure = zxdh_gdma_rawdev_configure,
649 	.dev_start = zxdh_gdma_rawdev_start,
650 	.dev_stop = zxdh_gdma_rawdev_stop,
651 	.dev_close = zxdh_gdma_rawdev_close,
652 	.dev_reset = zxdh_gdma_rawdev_reset,
653 
654 	.queue_setup = zxdh_gdma_rawdev_queue_setup,
655 	.queue_release = zxdh_gdma_rawdev_queue_release,
656 
657 	.attr_get = zxdh_gdma_rawdev_get_attr,
658 
659 	.enqueue_bufs = zxdh_gdma_rawdev_enqueue_bufs,
660 	.dequeue_bufs = zxdh_gdma_rawdev_dequeue_bufs,
661 };
662 
663 static int
664 zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id)
665 {
666 	char name[RTE_MEMZONE_NAMESIZE];
667 	struct zxdh_gdma_queue *queue = NULL;
668 	const struct rte_memzone *mz = NULL;
669 	uint32_t size = 0;
670 	int ret = 0;
671 
672 	queue = zxdh_gdma_get_queue(dev, queue_id);
673 	if (queue == NULL)
674 		return -EINVAL;
675 
676 	queue->enable = 1;
677 	queue->vq_id  = queue_id;
678 	queue->flag   = 0;
679 	queue->tc_cnt = 0;
680 
681 	/* Init sw_ring */
682 	queue->sw_ring.job = rte_calloc(NULL, queue->queue_size, sizeof(struct zxdh_gdma_job *), 0);
683 	if (queue->sw_ring.job == NULL) {
684 		ZXDH_PMD_LOG(ERR, "can not allocate sw_ring");
685 		ret = -ENOMEM;
686 		goto free_queue;
687 	}
688 
689 	/* Cache up to size-1 job in the ring to prevent overwriting hardware prefetching */
690 	queue->sw_ring.free_cnt = queue->queue_size - 1;
691 	queue->sw_ring.deq_cnt  = 0;
692 	queue->sw_ring.pend_cnt = 0;
693 	queue->sw_ring.enq_idx  = 0;
694 	queue->sw_ring.deq_idx  = 0;
695 	queue->sw_ring.used_idx = 0;
696 
697 	/* Init ring */
698 	snprintf(name, RTE_MEMZONE_NAMESIZE, "gdma_vq%d_ring", queue_id);
699 	size = ZXDH_GDMA_RING_SIZE * sizeof(struct zxdh_gdma_buff_desc);
700 	mz = rte_memzone_reserve_aligned(name, size, rte_socket_id(),
701 			RTE_MEMZONE_IOVA_CONTIG, size);
702 	if (mz == NULL) {
703 		if (rte_errno == EEXIST)
704 			mz = rte_memzone_lookup(name);
705 		if (mz == NULL) {
706 			ZXDH_PMD_LOG(ERR, "can not allocate ring %s", name);
707 			ret = -ENOMEM;
708 			goto free_queue;
709 		}
710 	}
711 	memset(mz->addr, 0, size);
712 	queue->ring.ring_mz   = mz;
713 	queue->ring.desc      = (struct zxdh_gdma_buff_desc *)(mz->addr);
714 	queue->ring.ring_mem  = mz->iova;
715 	queue->ring.avail_idx = 0;
716 	ZXDH_PMD_LOG(INFO, "queue%u ring phy addr:0x%"PRIx64" virt addr:%p",
717 			queue_id, mz->iova, mz->addr);
718 
719 	/* Configure the hardware channel to the initial state */
720 	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET,
721 			ZXDH_GDMA_CHAN_FORCE_CLOSE);
722 	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET,
723 			ZXDH_GDMA_ERR_INTR_ENABLE | ZXDH_GDMA_ERR_STATUS | ZXDH_GDMA_TC_CNT_CLEAN);
724 	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_USER_OFFSET,
725 			ZXDH_GDMA_ZF_USER);
726 
727 	return 0;
728 
729 free_queue:
730 	zxdh_gdma_queue_free(dev, queue_id);
731 	return ret;
732 }
733 
734 static int
735 zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id)
736 {
737 	struct zxdh_gdma_rawdev *gdmadev = NULL;
738 	struct zxdh_gdma_queue *queue = NULL;
739 	uint32_t val = 0;
740 
741 	queue = zxdh_gdma_get_queue(dev, queue_id);
742 	if (queue == NULL)
743 		return -EINVAL;
744 
745 	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
746 	gdmadev->used_num--;
747 
748 	/* disable gdma channel */
749 	val = ZXDH_GDMA_CHAN_FORCE_CLOSE;
750 	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);
751 
752 	queue->enable           = 0;
753 	queue->is_txq           = 0;
754 	queue->flag             = 0;
755 	queue->user             = 0;
756 	queue->tc_cnt           = 0;
757 	queue->ring.avail_idx   = 0;
758 	queue->sw_ring.free_cnt = 0;
759 	queue->sw_ring.deq_cnt  = 0;
760 	queue->sw_ring.pend_cnt = 0;
761 	queue->sw_ring.enq_idx  = 0;
762 	queue->sw_ring.deq_idx  = 0;
763 	queue->sw_ring.used_idx = 0;
764 	rte_free(queue->sw_ring.job);
765 	rte_memzone_free(queue->ring.ring_mz);
766 
767 	return 0;
768 }
769 
770 static int
771 zxdh_gdma_map_resource(struct rte_pci_device *dev)
772 {
773 	int fd = -1;
774 	char devname[PATH_MAX];
775 	void *mapaddr = NULL;
776 	struct rte_pci_addr *loc;
777 
778 	loc = &dev->addr;
779 	snprintf(devname, sizeof(devname), "%s/" PCI_PRI_FMT "/resource0",
780 		rte_pci_get_sysfs_path(),
781 		loc->domain, loc->bus, loc->devid,
782 		loc->function);
783 
784 		fd = open(devname, O_RDWR);
785 		if (fd < 0) {
786 			ZXDH_PMD_LOG(ERR, "Cannot open %s: %s", devname, strerror(errno));
787 			return -1;
788 		}
789 
790 	/* Map the PCI memory resource of device */
791 	mapaddr = rte_mem_map(NULL, (size_t)dev->mem_resource[0].len,
792 			RTE_PROT_READ | RTE_PROT_WRITE,
793 			RTE_MAP_SHARED, fd, 0);
794 	if (mapaddr == NULL) {
795 		ZXDH_PMD_LOG(ERR, "cannot map resource(%d, 0x%zx): %s (%p)",
796 				fd, (size_t)dev->mem_resource[0].len,
797 				rte_strerror(rte_errno), mapaddr);
798 		close(fd);
799 		return -1;
800 	}
801 
802 	close(fd);
803 	dev->mem_resource[0].addr = mapaddr;
804 
805 	return 0;
806 }
807 
808 static void
809 zxdh_gdma_unmap_resource(void *requested_addr, size_t size)
810 {
811 	if (requested_addr == NULL)
812 		return;
813 
814 	/* Unmap the PCI memory resource of device */
815 	if (rte_mem_unmap(requested_addr, size))
816 		ZXDH_PMD_LOG(ERR, "cannot mem unmap(%p, %#zx): %s",
817 				requested_addr, size, rte_strerror(rte_errno));
818 	else
819 		ZXDH_PMD_LOG(DEBUG, "PCI memory unmapped at %p", requested_addr);
820 }
821 
822 static int
823 zxdh_gdma_rawdev_probe(struct rte_pci_driver *pci_drv __rte_unused,
824 		struct rte_pci_device *pci_dev)
825 {
826 	struct rte_rawdev *dev = NULL;
827 	struct zxdh_gdma_rawdev *gdmadev = NULL;
828 	struct zxdh_gdma_queue *queue = NULL;
829 	uint8_t i = 0;
830 	int ret;
831 
832 	if (pci_dev->mem_resource[0].phys_addr == 0) {
833 		ZXDH_PMD_LOG(ERR, "PCI bar0 resource is invalid");
834 		return -1;
835 	}
836 
837 	ret = zxdh_gdma_map_resource(pci_dev);
838 	if (ret != 0) {
839 		ZXDH_PMD_LOG(ERR, "Failed to mmap pci device(%s)", pci_dev->name);
840 		return -1;
841 	}
842 	ZXDH_PMD_LOG(INFO, "%s bar0 0x%"PRIx64" mapped at %p",
843 			pci_dev->name, pci_dev->mem_resource[0].phys_addr,
844 			pci_dev->mem_resource[0].addr);
845 
846 	dev = rte_rawdev_pmd_allocate(dev_name, sizeof(struct zxdh_gdma_rawdev), rte_socket_id());
847 	if (dev == NULL) {
848 		ZXDH_PMD_LOG(ERR, "Unable to allocate gdma rawdev");
849 		goto err_out;
850 	}
851 	ZXDH_PMD_LOG(INFO, "Init %s on NUMA node %d, dev_id is %d",
852 			dev_name, rte_socket_id(), dev->dev_id);
853 
854 	dev->dev_ops = &zxdh_gdma_rawdev_ops;
855 	dev->device = &pci_dev->device;
856 	dev->driver_name = zxdh_gdma_driver_name;
857 	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
858 	gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;
859 	gdmadev->rawdev = dev;
860 	gdmadev->queue_num = ZXDH_GDMA_TOTAL_CHAN_NUM;
861 	gdmadev->used_num = 0;
862 	gdmadev->base_addr = (uintptr_t)pci_dev->mem_resource[0].addr + ZXDH_GDMA_BASE_OFFSET;
863 
864 	for (i = 0; i < ZXDH_GDMA_TOTAL_CHAN_NUM; i++) {
865 		queue = &(gdmadev->vqs[i]);
866 		queue->enable = 0;
867 		queue->queue_size = ZXDH_GDMA_QUEUE_SIZE;
868 		rte_spinlock_init(&(queue->enqueue_lock));
869 	}
870 
871 	return 0;
872 
873 err_out:
874 	zxdh_gdma_unmap_resource(pci_dev->mem_resource[0].addr,
875 			(size_t)pci_dev->mem_resource[0].len);
876 	return -1;
877 }
878 
879 static int
880 zxdh_gdma_rawdev_remove(struct rte_pci_device *pci_dev)
881 {
882 	struct rte_rawdev *dev = NULL;
883 	int ret = 0;
884 
885 	dev = rte_rawdev_pmd_get_named_dev(dev_name);
886 	if (dev == NULL)
887 		return -EINVAL;
888 
889 	/* rte_rawdev_close is called by pmd_release */
890 	ret = rte_rawdev_pmd_release(dev);
891 	if (ret != 0) {
892 		ZXDH_PMD_LOG(ERR, "Device cleanup failed");
893 		return -1;
894 	}
895 
896 	zxdh_gdma_unmap_resource(pci_dev->mem_resource[0].addr,
897 			(size_t)pci_dev->mem_resource[0].len);
898 
899 	ZXDH_PMD_LOG(DEBUG, "rawdev %s remove done!", dev_name);
900 
901 	return ret;
902 }
903 
904 static const struct rte_pci_id zxdh_gdma_rawdev_map[] = {
905 	{ RTE_PCI_DEVICE(ZXDH_GDMA_VENDORID, ZXDH_GDMA_DEVICEID) },
906 	{ .vendor_id = 0, /* sentinel */ },
907 };
908 
909 static struct rte_pci_driver zxdh_gdma_rawdev_pmd = {
910 	.id_table = zxdh_gdma_rawdev_map,
911 	.drv_flags = 0,
912 	.probe = zxdh_gdma_rawdev_probe,
913 	.remove = zxdh_gdma_rawdev_remove,
914 };
915 
916 RTE_PMD_REGISTER_PCI(zxdh_gdma_rawdev_pci_driver, zxdh_gdma_rawdev_pmd);
917 RTE_PMD_REGISTER_PCI_TABLE(zxdh_gdma_rawdev_pci_driver, zxdh_gdma_rawdev_map);
918 RTE_LOG_REGISTER_DEFAULT(zxdh_gdma_rawdev_logtype, NOTICE);
919