/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2024 ZTE Corporation
 */

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <bus_pci_driver.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_eal_paging.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_pci.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_spinlock.h>

#include "gdtc_rawdev.h"

/*
 * User field layout:
 * ep_id: bits [15:12], vfunc_num: bits [11:4], func_num: bits [3:1], vfunc_active: bit 0
 * host ep_id: 5~8, zf ep_id: 9
 */
#define ZXDH_GDMA_ZF_USER                       0x9000      /* ep4 pf0 */
#define ZXDH_GDMA_PF_NUM_SHIFT                  1
#define ZXDH_GDMA_VF_NUM_SHIFT                  4
#define ZXDH_GDMA_EP_ID_SHIFT                   12
#define ZXDH_GDMA_VF_EN                         1
#define ZXDH_GDMA_EPID_OFFSET                   5

/* Register offset */
#define ZXDH_GDMA_BASE_OFFSET                   0x100000
#define ZXDH_GDMA_EXT_ADDR_OFFSET               0x218
#define ZXDH_GDMA_SAR_LOW_OFFSET                0x200
#define ZXDH_GDMA_DAR_LOW_OFFSET                0x204
#define ZXDH_GDMA_SAR_HIGH_OFFSET               0x234
#define ZXDH_GDMA_DAR_HIGH_OFFSET               0x238
#define ZXDH_GDMA_XFERSIZE_OFFSET               0x208
#define ZXDH_GDMA_CONTROL_OFFSET                0x230
#define ZXDH_GDMA_TC_STATUS_OFFSET              0x0
#define ZXDH_GDMA_STATUS_CLEAN_OFFSET           0x80
#define ZXDH_GDMA_LLI_L_OFFSET                  0x21c
#define ZXDH_GDMA_LLI_H_OFFSET                  0x220
#define ZXDH_GDMA_CHAN_CONTINUE_OFFSET          0x224
#define ZXDH_GDMA_TC_CNT_OFFSET                 0x23c
#define ZXDH_GDMA_LLI_USER_OFFSET               0x228

/* Control register */
#define ZXDH_GDMA_CHAN_ENABLE                   0x1
#define ZXDH_GDMA_CHAN_DISABLE                  0
#define ZXDH_GDMA_SOFT_CHAN                     0x2
#define ZXDH_GDMA_TC_INTR_ENABLE                0x10
#define ZXDH_GDMA_ALL_INTR_ENABLE               0x30
#define ZXDH_GDMA_SBS_SHIFT                     6           /* src burst size */
#define ZXDH_GDMA_SBL_SHIFT                     9           /* src burst length */
#define ZXDH_GDMA_DBS_SHIFT                     13          /* dest burst size */
#define ZXDH_GDMA_BURST_SIZE_MIN                0x1         /* 1 byte */
#define ZXDH_GDMA_BURST_SIZE_MEDIUM             0x4         /* 4 words */
#define ZXDH_GDMA_BURST_SIZE_MAX                0x6         /* 16 words */
#define ZXDH_GDMA_DEFAULT_BURST_LEN             0xf         /* 16 beats */
#define ZXDH_GDMA_TC_CNT_ENABLE                 (1u << 27)
#define ZXDH_GDMA_CHAN_FORCE_CLOSE              (1u << 31)

/* TC count & Error interrupt status register */
#define ZXDH_GDMA_SRC_LLI_ERR                   (1 << 16)
#define ZXDH_GDMA_SRC_DATA_ERR                  (1 << 17)
#define ZXDH_GDMA_DST_ADDR_ERR                  (1 << 18)
#define ZXDH_GDMA_ERR_STATUS                    (1 << 19)
#define ZXDH_GDMA_ERR_INTR_ENABLE               (1 << 20)
#define ZXDH_GDMA_TC_CNT_CLEAN                  (1)

#define ZXDH_GDMA_CHAN_SHIFT                    0x80
#define ZXDH_GDMA_LINK_END_NODE                 (1 << 30)
#define ZXDH_GDMA_CHAN_CONTINUE                 (1)

#define LOW32_MASK                              0xffffffff
#define LOW16_MASK                              0xffff

#define IDX_TO_ADDR(addr, idx, t) \
	((t)((uintptr_t)(addr) + (idx) * sizeof(struct zxdh_gdma_buff_desc)))

static int zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id);
static int zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id);

char zxdh_gdma_driver_name[] = "rawdev_zxdh_gdma";
char dev_name[] = "zxdh_gdma";

static inline struct zxdh_gdma_rawdev *
zxdh_gdma_rawdev_get_priv(const struct rte_rawdev *rawdev)
{
	return rawdev->dev_private;
}

static inline struct zxdh_gdma_queue *
zxdh_gdma_get_queue(struct rte_rawdev *dev, uint16_t queue_id)
{
	struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);

	if (queue_id >= ZXDH_GDMA_TOTAL_CHAN_NUM) {
		ZXDH_PMD_LOG(ERR, "queue id %d is invalid", queue_id);
		return NULL;
	}

	return &(gdmadev->vqs[queue_id]);
}

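/*
 * Channel registers are laid out back to back: each of the
 * ZXDH_GDMA_TOTAL_CHAN_NUM channels owns a 0x80-byte register window
 * (ZXDH_GDMA_CHAN_SHIFT), so a per-queue register is addressed as
 * base_addr + offset + queue_id * ZXDH_GDMA_CHAN_SHIFT.
 */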
static void
zxdh_gdma_write_reg(struct rte_rawdev *dev, uint16_t queue_id, uint32_t offset, uint32_t val)
{
	struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	uint32_t addr = 0;

	addr = offset + queue_id * ZXDH_GDMA_CHAN_SHIFT;
	*(uint32_t *)(gdmadev->base_addr + addr) = val;
}

static int
zxdh_gdma_rawdev_info_get(struct rte_rawdev *dev,
		__rte_unused rte_rawdev_obj_t dev_info,
		__rte_unused size_t dev_info_size)
{
	if (dev == NULL)
		return -EINVAL;

	return 0;
}

static int
zxdh_gdma_rawdev_configure(const struct rte_rawdev *dev,
		rte_rawdev_obj_t config,
		size_t config_size)
{
	struct zxdh_gdma_config *gdma_config = NULL;

	if ((dev == NULL) ||
		(config == NULL) ||
		(config_size != sizeof(struct zxdh_gdma_config)))
		return -EINVAL;

	gdma_config = (struct zxdh_gdma_config *)config;
	if (gdma_config->max_vqs > ZXDH_GDMA_TOTAL_CHAN_NUM) {
		ZXDH_PMD_LOG(ERR, "gdma supports up to %d queues", ZXDH_GDMA_TOTAL_CHAN_NUM);
		return -EINVAL;
	}

	return 0;
}

static int
zxdh_gdma_rawdev_start(struct rte_rawdev *dev)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;

	if (dev == NULL)
		return -EINVAL;

	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->device_state = ZXDH_GDMA_DEV_RUNNING;

	return 0;
}

static void
zxdh_gdma_rawdev_stop(struct rte_rawdev *dev)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;

	if (dev == NULL)
		return;

	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;
}

static int
zxdh_gdma_rawdev_reset(struct rte_rawdev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	return 0;
}

static int
zxdh_gdma_rawdev_close(struct rte_rawdev *dev)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_queue *queue = NULL;
	uint16_t queue_id = 0;

	if (dev == NULL)
		return -EINVAL;

	for (queue_id = 0; queue_id < ZXDH_GDMA_TOTAL_CHAN_NUM; queue_id++) {
		queue = zxdh_gdma_get_queue(dev, queue_id);
		if ((queue == NULL) || (queue->enable == 0))
			continue;

		zxdh_gdma_queue_free(dev, queue_id);
	}
	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;

	return 0;
}

static int
zxdh_gdma_rawdev_queue_setup(struct rte_rawdev *dev,
		uint16_t queue_id,
		rte_rawdev_obj_t queue_conf,
		size_t conf_size)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_queue *queue = NULL;
	struct zxdh_gdma_queue_config *qconfig = NULL;
	struct zxdh_gdma_rbp *rbp = NULL;
	uint16_t i = 0;
	uint8_t is_txq = 0;
	uint32_t src_user = 0;
	uint32_t dst_user = 0;

	if (dev == NULL)
		return -EINVAL;

	if ((queue_conf == NULL) || (conf_size != sizeof(struct zxdh_gdma_queue_config)))
		return -EINVAL;

	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	qconfig = (struct zxdh_gdma_queue_config *)queue_conf;

	for (i = 0; i < ZXDH_GDMA_TOTAL_CHAN_NUM; i++) {
		if (gdmadev->vqs[i].enable == 0)
			break;
	}
	if (i >= ZXDH_GDMA_TOTAL_CHAN_NUM) {
		ZXDH_PMD_LOG(ERR, "Failed to setup queue, no available queues");
		return -1;
	}
	queue_id = i;
	if (zxdh_gdma_queue_init(dev, queue_id) != 0) {
		ZXDH_PMD_LOG(ERR, "Failed to init queue");
		return -1;
	}
	queue = &(gdmadev->vqs[queue_id]);

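	/*
	 * Exactly one of srbp/drbp must be set: srbp selects an rx queue
	 * (remote EP -> local ZF side) and drbp selects a tx queue
	 * (local ZF side -> remote EP). The remote end of the transfer is
	 * described by the ep/pf/vf ids carried in the rbp structure.
	 */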
	rbp = qconfig->rbp;
	if ((rbp->srbp != 0) && (rbp->drbp == 0)) {
		is_txq = 0;
		dst_user = ZXDH_GDMA_ZF_USER;
		src_user = ((rbp->spfid << ZXDH_GDMA_PF_NUM_SHIFT) |
				((rbp->sportid + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));

		if (rbp->svfid != 0)
			src_user |= (ZXDH_GDMA_VF_EN |
					((rbp->svfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));

		ZXDH_PMD_LOG(DEBUG, "rxq->qidx:%d setup src_user(ep:%d pf:%d vf:%d) success",
				queue_id, (uint8_t)rbp->sportid, (uint8_t)rbp->spfid,
				(uint8_t)rbp->svfid);
	} else if ((rbp->srbp == 0) && (rbp->drbp != 0)) {
		is_txq = 1;
		src_user = ZXDH_GDMA_ZF_USER;
		dst_user = ((rbp->dpfid << ZXDH_GDMA_PF_NUM_SHIFT) |
				((rbp->dportid + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));

		if (rbp->dvfid != 0)
			dst_user |= (ZXDH_GDMA_VF_EN |
					((rbp->dvfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));

		ZXDH_PMD_LOG(DEBUG, "txq->qidx:%d setup dst_user(ep:%d pf:%d vf:%d) success",
				queue_id, (uint8_t)rbp->dportid, (uint8_t)rbp->dpfid,
				(uint8_t)rbp->dvfid);
	} else {
		ZXDH_PMD_LOG(ERR, "Failed to setup queue, srbp/drbp is invalid");
		return -EINVAL;
	}
	queue->is_txq = is_txq;

	/* setup queue user info */
	queue->user = (src_user & LOW16_MASK) | (dst_user << 16);

	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_EXT_ADDR_OFFSET, queue->user);
	gdmadev->used_num++;

	return queue_id;
}

static int
zxdh_gdma_rawdev_queue_release(struct rte_rawdev *dev, uint16_t queue_id)
{
	struct zxdh_gdma_queue *queue = NULL;

	if (dev == NULL)
		return -EINVAL;

	queue = zxdh_gdma_get_queue(dev, queue_id);
	if ((queue == NULL) || (queue->enable == 0))
		return -EINVAL;

	zxdh_gdma_queue_free(dev, queue_id);

	return 0;
}

static int
zxdh_gdma_rawdev_get_attr(struct rte_rawdev *dev,
				__rte_unused const char *attr_name,
				uint64_t *attr_value)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_attr *gdma_attr = NULL;

	if ((dev == NULL) || (attr_value == NULL))
		return -EINVAL;

	gdmadev   = zxdh_gdma_rawdev_get_priv(dev);
	gdma_attr = (struct zxdh_gdma_attr *)attr_value;
	gdma_attr->num_hw_queues = gdmadev->used_num;

	return 0;
}

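/*
 * Compose the per-descriptor control word: channel enable, soft channel
 * mode, default burst length and maximum burst size for both source and
 * destination, optionally with the transfer-complete counter enabled.
 */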
static inline void
zxdh_gdma_control_cal(uint32_t *val, uint8_t tc_enable)
{
	*val = (ZXDH_GDMA_CHAN_ENABLE |
			ZXDH_GDMA_SOFT_CHAN |
			(ZXDH_GDMA_DEFAULT_BURST_LEN << ZXDH_GDMA_SBL_SHIFT) |
			(ZXDH_GDMA_BURST_SIZE_MAX << ZXDH_GDMA_SBS_SHIFT) |
			(ZXDH_GDMA_BURST_SIZE_MAX << ZXDH_GDMA_DBS_SHIFT));

	if (tc_enable != 0)
		*val |= ZXDH_GDMA_TC_CNT_ENABLE;
}

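/*
 * Build the 32-bit user word for a job: the source user id goes into the
 * low 16 bits and the destination user id into the high 16 bits. When the
 * job carries no direction flag, the user word configured at queue setup
 * time is used instead.
 */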
static inline uint32_t
zxdh_gdma_user_get(struct zxdh_gdma_queue *queue, struct zxdh_gdma_job *job)
{
	uint32_t src_user = 0;
	uint32_t dst_user = 0;

	if ((job->flags & ZXDH_GDMA_JOB_DIR_MASK) == 0) {
		ZXDH_PMD_LOG(DEBUG, "job flags:0x%x default user:0x%x",
				job->flags, queue->user);
		return queue->user;
	} else if ((job->flags & ZXDH_GDMA_JOB_DIR_TX) != 0) {
		src_user = ZXDH_GDMA_ZF_USER;
		dst_user = ((job->pf_id << ZXDH_GDMA_PF_NUM_SHIFT) |
				((job->ep_id + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));

		if (job->vf_id != 0)
			dst_user |= (ZXDH_GDMA_VF_EN |
					((job->vf_id - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
	} else {
		dst_user = ZXDH_GDMA_ZF_USER;
		src_user = ((job->pf_id << ZXDH_GDMA_PF_NUM_SHIFT) |
				((job->ep_id + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));

		if (job->vf_id != 0)
			src_user |= (ZXDH_GDMA_VF_EN |
					((job->vf_id - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
	}
	ZXDH_PMD_LOG(DEBUG, "job flags:0x%x ep_id:%u, pf_id:%u, vf_id:%u, user:0x%x",
			job->flags, job->ep_id, job->pf_id, job->vf_id,
			(src_user & LOW16_MASK) | (dst_user << 16));

	return (src_user & LOW16_MASK) | (dst_user << 16);
}

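/*
 * Fill one buffer descriptor in the hardware ring. A data descriptor
 * (job != NULL) copies the job parameters and links to the next slot.
 * An empty descriptor (job == NULL) terminates the list with
 * ZXDH_GDMA_LINK_END_NODE; if the channel is already running, the
 * previously recorded empty descriptor is re-linked to the new
 * descriptors and its end-node flag is cleared so the hardware keeps
 * walking the list (dynamic chaining).
 */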
static inline void
zxdh_gdma_fill_bd(struct zxdh_gdma_queue *queue, struct zxdh_gdma_job *job)
{
	struct zxdh_gdma_buff_desc *bd = NULL;
	uint32_t val = 0;
	uint64_t next_bd_addr = 0;
	uint16_t avail_idx = 0;

	avail_idx = queue->ring.avail_idx;
	bd = &(queue->ring.desc[avail_idx]);
	memset(bd, 0, sizeof(struct zxdh_gdma_buff_desc));

	/* data bd */
	if (job != NULL) {
		zxdh_gdma_control_cal(&val, 1);
		next_bd_addr   = IDX_TO_ADDR(queue->ring.ring_mem,
				(avail_idx + 1) % ZXDH_GDMA_RING_SIZE, uint64_t);
		bd->SrcAddr_L  = job->src & LOW32_MASK;
		bd->DstAddr_L  = job->dest & LOW32_MASK;
		bd->SrcAddr_H  = (job->src >> 32) & LOW32_MASK;
		bd->DstAddr_H  = (job->dest >> 32) & LOW32_MASK;
		bd->Xpara      = job->len;
		bd->ExtAddr    = zxdh_gdma_user_get(queue, job);
		bd->LLI_Addr_L = (next_bd_addr >> 6) & LOW32_MASK;
		bd->LLI_Addr_H = next_bd_addr >> 38;
		bd->LLI_User   = ZXDH_GDMA_ZF_USER;
		bd->Control    = val;
	} else {
		zxdh_gdma_control_cal(&val, 0);
		next_bd_addr   = IDX_TO_ADDR(queue->ring.ring_mem, avail_idx, uint64_t);
		bd->ExtAddr    = queue->user;
		bd->LLI_User   = ZXDH_GDMA_ZF_USER;
		bd->Control    = val;
		bd->LLI_Addr_L = (next_bd_addr >> 6) & LOW32_MASK;
		bd->LLI_Addr_H = (next_bd_addr >> 38) | ZXDH_GDMA_LINK_END_NODE;
		if (queue->flag != 0) {
			bd = IDX_TO_ADDR(queue->ring.desc,
					queue->ring.last_avail_idx,
					struct zxdh_gdma_buff_desc*);
			next_bd_addr = IDX_TO_ADDR(queue->ring.ring_mem,
					(queue->ring.last_avail_idx + 1) % ZXDH_GDMA_RING_SIZE,
					uint64_t);
			bd->LLI_Addr_L  = (next_bd_addr >> 6) & LOW32_MASK;
			bd->LLI_Addr_H  = next_bd_addr >> 38;
			rte_wmb();
			bd->LLI_Addr_H &= ~ZXDH_GDMA_LINK_END_NODE;
		}
		/* Record the index of empty bd for dynamic chaining */
		queue->ring.last_avail_idx = avail_idx;
	}

	if (++avail_idx >= ZXDH_GDMA_RING_SIZE)
		avail_idx -= ZXDH_GDMA_RING_SIZE;

	queue->ring.avail_idx = avail_idx;
}

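/*
 * Enqueue up to 'count' jobs on the queue selected by the context
 * (struct zxdh_gdma_enqdeq). Jobs are translated into a descriptor list
 * terminated by an empty descriptor; on first use the channel is started
 * from the ring base address, afterwards it is kicked through the
 * channel-continue register. Returns the number of jobs accepted.
 */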
static int
zxdh_gdma_rawdev_enqueue_bufs(struct rte_rawdev *dev,
				__rte_unused struct rte_rawdev_buf **buffers,
				uint32_t count,
				rte_rawdev_obj_t context)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_queue *queue = NULL;
	struct zxdh_gdma_enqdeq *e_context = NULL;
	struct zxdh_gdma_job *job = NULL;
	uint16_t queue_id = 0;
	uint32_t val = 0;
	uint16_t i = 0;
	uint16_t free_cnt = 0;

	if (dev == NULL)
		return -EINVAL;

	if (unlikely((count < 1) || (context == NULL)))
		return -EINVAL;

	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	if (gdmadev->device_state == ZXDH_GDMA_DEV_STOPPED) {
		ZXDH_PMD_LOG(ERR, "gdma dev is stopped");
		return 0;
	}

	e_context = (struct zxdh_gdma_enqdeq *)context;
	queue_id = e_context->vq_id;
	queue = zxdh_gdma_get_queue(dev, queue_id);
	if ((queue == NULL) || (queue->enable == 0))
		return -EINVAL;

	free_cnt = queue->sw_ring.free_cnt;
	if (free_cnt == 0) {
		ZXDH_PMD_LOG(ERR, "queue %u is full, enq_idx:%u deq_idx:%u used_idx:%u",
				queue_id, queue->sw_ring.enq_idx,
				queue->sw_ring.deq_idx, queue->sw_ring.used_idx);
		return 0;
	} else if (free_cnt < count) {
		ZXDH_PMD_LOG(DEBUG, "job num %u exceeds free_cnt, clamping to %u", count, free_cnt);
		count = free_cnt;
	}

	rte_spinlock_lock(&queue->enqueue_lock);

	/* Build bd list, the last bd is an empty bd */
	for (i = 0; i < count; i++) {
		job = e_context->job[i];
		zxdh_gdma_fill_bd(queue, job);
	}
	zxdh_gdma_fill_bd(queue, NULL);

	if (unlikely(queue->flag == 0)) {
		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_L_OFFSET,
				(queue->ring.ring_mem >> 6) & LOW32_MASK);
		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_H_OFFSET,
				queue->ring.ring_mem >> 38);
		/* Start hardware handling */
		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_XFERSIZE_OFFSET, 0);
		zxdh_gdma_control_cal(&val, 0);
		zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);
		queue->flag = 1;
	} else {
		val = ZXDH_GDMA_CHAN_CONTINUE;
		zxdh_gdma_write_reg(dev, queue->vq_id, ZXDH_GDMA_CHAN_CONTINUE_OFFSET, val);
	}

	/* job enqueue */
	for (i = 0; i < count; i++) {
		queue->sw_ring.job[queue->sw_ring.enq_idx] = e_context->job[i];
		if (++queue->sw_ring.enq_idx >= queue->queue_size)
			queue->sw_ring.enq_idx -= queue->queue_size;

		free_cnt--;
	}
	queue->sw_ring.free_cnt = free_cnt;
	queue->sw_ring.pend_cnt += count;
	rte_spinlock_unlock(&queue->enqueue_lock);

	return count;
}

static const struct rte_rawdev_ops zxdh_gdma_rawdev_ops = {
	.dev_info_get = zxdh_gdma_rawdev_info_get,
	.dev_configure = zxdh_gdma_rawdev_configure,
	.dev_start = zxdh_gdma_rawdev_start,
	.dev_stop = zxdh_gdma_rawdev_stop,
	.dev_close = zxdh_gdma_rawdev_close,
	.dev_reset = zxdh_gdma_rawdev_reset,

	.queue_setup = zxdh_gdma_rawdev_queue_setup,
	.queue_release = zxdh_gdma_rawdev_queue_release,

	.attr_get = zxdh_gdma_rawdev_get_attr,

	.enqueue_bufs = zxdh_gdma_rawdev_enqueue_bufs,
};

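/*
 * Illustrative sketch only, not part of the driver: how an application
 * might exercise these ops through the generic rawdev API. Field names
 * match their use in this file; the exact struct layouts live in
 * gdtc_rawdev.h, and dev_id, src_iova, dst_iova and len are assumed to
 * be provided by the application.
 *
 *	struct zxdh_gdma_config cfg = { .max_vqs = 1 };
 *	struct zxdh_gdma_rbp rbp = { .drbp = 1, .dportid = 0, .dpfid = 0 };
 *	struct zxdh_gdma_queue_config qcfg = { .rbp = &rbp };
 *	struct zxdh_gdma_job job = { .src = src_iova, .dest = dst_iova, .len = len };
 *	struct zxdh_gdma_job *jobs[1] = { &job };
 *	struct zxdh_gdma_enqdeq ctx = { .job = jobs };
 *	int vq;
 *
 *	rte_rawdev_configure(dev_id, &cfg, sizeof(cfg));
 *	vq = rte_rawdev_queue_setup(dev_id, 0, &qcfg, sizeof(qcfg));
 *	ctx.vq_id = vq;
 *	rte_rawdev_start(dev_id);
 *	rte_rawdev_enqueue_buffers(dev_id, NULL, 1, &ctx);
 */
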
static int
zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id)
{
	char name[RTE_MEMZONE_NAMESIZE];
	struct zxdh_gdma_queue *queue = NULL;
	const struct rte_memzone *mz = NULL;
	uint32_t size = 0;
	int ret = 0;

	queue = zxdh_gdma_get_queue(dev, queue_id);
	if (queue == NULL)
		return -EINVAL;

	queue->enable = 1;
	queue->vq_id  = queue_id;
	queue->flag   = 0;
	queue->tc_cnt = 0;

	/* Init sw_ring */
	queue->sw_ring.job = rte_calloc(NULL, queue->queue_size, sizeof(struct zxdh_gdma_job *), 0);
	if (queue->sw_ring.job == NULL) {
		ZXDH_PMD_LOG(ERR, "cannot allocate sw_ring");
		ret = -ENOMEM;
		goto free_queue;
	}

	/* Cache at most queue_size - 1 jobs in the ring to avoid overwriting
	 * entries the hardware may still be prefetching.
	 */
	queue->sw_ring.free_cnt = queue->queue_size - 1;
	queue->sw_ring.deq_cnt  = 0;
	queue->sw_ring.pend_cnt = 0;
	queue->sw_ring.enq_idx  = 0;
	queue->sw_ring.deq_idx  = 0;
	queue->sw_ring.used_idx = 0;

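	/*
	 * The hardware consumes list addresses in 64-byte units (LLI_Addr_L/H
	 * are programmed as addr >> 6), so the descriptor ring is reserved as
	 * IOVA-contiguous memory aligned to its own size.
	 */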
	/* Init ring */
	snprintf(name, RTE_MEMZONE_NAMESIZE, "gdma_vq%d_ring", queue_id);
	size = ZXDH_GDMA_RING_SIZE * sizeof(struct zxdh_gdma_buff_desc);
	mz = rte_memzone_reserve_aligned(name, size, rte_socket_id(),
			RTE_MEMZONE_IOVA_CONTIG, size);
	if (mz == NULL) {
		if (rte_errno == EEXIST)
			mz = rte_memzone_lookup(name);
		if (mz == NULL) {
			ZXDH_PMD_LOG(ERR, "cannot allocate ring %s", name);
			ret = -ENOMEM;
			goto free_queue;
		}
	}
	memset(mz->addr, 0, size);
	queue->ring.ring_mz   = mz;
	queue->ring.desc      = (struct zxdh_gdma_buff_desc *)(mz->addr);
	queue->ring.ring_mem  = mz->iova;
	queue->ring.avail_idx = 0;
	ZXDH_PMD_LOG(INFO, "queue%u ring phy addr:0x%"PRIx64" virt addr:%p",
			queue_id, mz->iova, mz->addr);

	/* Configure the hardware channel to the initial state */
	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET,
			ZXDH_GDMA_CHAN_FORCE_CLOSE);
	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET,
			ZXDH_GDMA_ERR_INTR_ENABLE | ZXDH_GDMA_ERR_STATUS | ZXDH_GDMA_TC_CNT_CLEAN);
	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_USER_OFFSET,
			ZXDH_GDMA_ZF_USER);

	return 0;

free_queue:
	zxdh_gdma_queue_free(dev, queue_id);
	return ret;
}

static int
zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_queue *queue = NULL;
	uint32_t val = 0;

	queue = zxdh_gdma_get_queue(dev, queue_id);
	if (queue == NULL)
		return -EINVAL;

	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->used_num--;

	/* disable gdma channel */
	val = ZXDH_GDMA_CHAN_FORCE_CLOSE;
	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);

	queue->enable           = 0;
	queue->is_txq           = 0;
	queue->flag             = 0;
	queue->user             = 0;
	queue->tc_cnt           = 0;
	queue->ring.avail_idx   = 0;
	queue->sw_ring.free_cnt = 0;
	queue->sw_ring.deq_cnt  = 0;
	queue->sw_ring.pend_cnt = 0;
	queue->sw_ring.enq_idx  = 0;
	queue->sw_ring.deq_idx  = 0;
	queue->sw_ring.used_idx = 0;
	rte_free(queue->sw_ring.job);
	rte_memzone_free(queue->ring.ring_mz);

	return 0;
}

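/*
 * Map BAR0 of the device by opening its sysfs resource0 file directly;
 * the mapping is released again with zxdh_gdma_unmap_resource().
 */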
static int
zxdh_gdma_map_resource(struct rte_pci_device *dev)
{
	int fd = -1;
	char devname[PATH_MAX];
	void *mapaddr = NULL;
	struct rte_pci_addr *loc;

	loc = &dev->addr;
	snprintf(devname, sizeof(devname), "%s/" PCI_PRI_FMT "/resource0",
		rte_pci_get_sysfs_path(),
		loc->domain, loc->bus, loc->devid,
		loc->function);

	fd = open(devname, O_RDWR);
	if (fd < 0) {
		ZXDH_PMD_LOG(ERR, "Cannot open %s: %s", devname, strerror(errno));
		return -1;
	}

	/* Map the PCI memory resource of device */
	mapaddr = rte_mem_map(NULL, (size_t)dev->mem_resource[0].len,
			RTE_PROT_READ | RTE_PROT_WRITE,
			RTE_MAP_SHARED, fd, 0);
	if (mapaddr == NULL) {
		ZXDH_PMD_LOG(ERR, "cannot map resource(%d, 0x%zx): %s (%p)",
				fd, (size_t)dev->mem_resource[0].len,
				rte_strerror(rte_errno), mapaddr);
		close(fd);
		return -1;
	}

	close(fd);
	dev->mem_resource[0].addr = mapaddr;

	return 0;
}

static void
zxdh_gdma_unmap_resource(void *requested_addr, size_t size)
{
	if (requested_addr == NULL)
		return;

	/* Unmap the PCI memory resource of device */
	if (rte_mem_unmap(requested_addr, size))
		ZXDH_PMD_LOG(ERR, "cannot mem unmap(%p, %#zx): %s",
				requested_addr, size, rte_strerror(rte_errno));
	else
		ZXDH_PMD_LOG(DEBUG, "PCI memory unmapped at %p", requested_addr);
}

static int
zxdh_gdma_rawdev_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	struct rte_rawdev *dev = NULL;
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_queue *queue = NULL;
	uint8_t i = 0;
	int ret;

	if (pci_dev->mem_resource[0].phys_addr == 0) {
		ZXDH_PMD_LOG(ERR, "PCI bar0 resource is invalid");
		return -1;
	}

	ret = zxdh_gdma_map_resource(pci_dev);
	if (ret != 0) {
		ZXDH_PMD_LOG(ERR, "Failed to mmap pci device(%s)", pci_dev->name);
		return -1;
	}
	ZXDH_PMD_LOG(INFO, "%s bar0 0x%"PRIx64" mapped at %p",
			pci_dev->name, pci_dev->mem_resource[0].phys_addr,
			pci_dev->mem_resource[0].addr);

	dev = rte_rawdev_pmd_allocate(dev_name, sizeof(struct zxdh_gdma_rawdev), rte_socket_id());
	if (dev == NULL) {
		ZXDH_PMD_LOG(ERR, "Unable to allocate gdma rawdev");
		goto err_out;
	}
	ZXDH_PMD_LOG(INFO, "Init %s on NUMA node %d, dev_id is %d",
			dev_name, rte_socket_id(), dev->dev_id);

	dev->dev_ops = &zxdh_gdma_rawdev_ops;
	dev->device = &pci_dev->device;
	dev->driver_name = zxdh_gdma_driver_name;
	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;
	gdmadev->rawdev = dev;
	gdmadev->queue_num = ZXDH_GDMA_TOTAL_CHAN_NUM;
	gdmadev->used_num = 0;
	gdmadev->base_addr = (uintptr_t)pci_dev->mem_resource[0].addr + ZXDH_GDMA_BASE_OFFSET;

	for (i = 0; i < ZXDH_GDMA_TOTAL_CHAN_NUM; i++) {
		queue = &(gdmadev->vqs[i]);
		queue->enable = 0;
		queue->queue_size = ZXDH_GDMA_QUEUE_SIZE;
		rte_spinlock_init(&(queue->enqueue_lock));
	}

	return 0;

err_out:
	zxdh_gdma_unmap_resource(pci_dev->mem_resource[0].addr,
			(size_t)pci_dev->mem_resource[0].len);
	return -1;
}

static int
zxdh_gdma_rawdev_remove(struct rte_pci_device *pci_dev)
{
	struct rte_rawdev *dev = NULL;
	int ret = 0;

	dev = rte_rawdev_pmd_get_named_dev(dev_name);
	if (dev == NULL)
		return -EINVAL;

	/* rte_rawdev_close is called by pmd_release */
	ret = rte_rawdev_pmd_release(dev);
	if (ret != 0) {
		ZXDH_PMD_LOG(ERR, "Device cleanup failed");
		return -1;
	}

	zxdh_gdma_unmap_resource(pci_dev->mem_resource[0].addr,
			(size_t)pci_dev->mem_resource[0].len);

	ZXDH_PMD_LOG(DEBUG, "rawdev %s remove done!", dev_name);

	return ret;
}

static const struct rte_pci_id zxdh_gdma_rawdev_map[] = {
	{ RTE_PCI_DEVICE(ZXDH_GDMA_VENDORID, ZXDH_GDMA_DEVICEID) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver zxdh_gdma_rawdev_pmd = {
	.id_table = zxdh_gdma_rawdev_map,
	.drv_flags = 0,
	.probe = zxdh_gdma_rawdev_probe,
	.remove = zxdh_gdma_rawdev_remove,
};

RTE_PMD_REGISTER_PCI(zxdh_gdma_rawdev_pci_driver, zxdh_gdma_rawdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(zxdh_gdma_rawdev_pci_driver, zxdh_gdma_rawdev_map);
RTE_LOG_REGISTER_DEFAULT(zxdh_gdma_rawdev_logtype, NOTICE);