/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2024 ZTE Corporation
 */

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <bus_pci_driver.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_eal_paging.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_pci.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_spinlock.h>
#include <rte_branch_prediction.h>

#include "gdtc_rawdev.h"

/*
 * "User" field layout:
 * ep_id: bits[15:12], vfunc_num: bits[11:4], func_num: bits[3:1], vfunc_active: bit[0]
 * host ep_id: 5~8, zf ep_id: 9
 */
#define ZXDH_GDMA_ZF_USER                       0x9000      /* ep4 pf0 */
#define ZXDH_GDMA_PF_NUM_SHIFT                  1
#define ZXDH_GDMA_VF_NUM_SHIFT                  4
#define ZXDH_GDMA_EP_ID_SHIFT                   12
#define ZXDH_GDMA_VF_EN                         1
#define ZXDH_GDMA_EPID_OFFSET                   5
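
/*
 * Illustrative example (not used by the code): for port id 0, PF 1, VF 2 the
 * composed user word is ((0 + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT) |
 * (1 << ZXDH_GDMA_PF_NUM_SHIFT) | ((2 - 1) << ZXDH_GDMA_VF_NUM_SHIFT) |
 * ZXDH_GDMA_VF_EN = 0x5013.
 */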

/* Register offset */
#define ZXDH_GDMA_BASE_OFFSET                   0x100000
#define ZXDH_GDMA_EXT_ADDR_OFFSET               0x218
#define ZXDH_GDMA_CONTROL_OFFSET                0x230
#define ZXDH_GDMA_TC_CNT_OFFSET                 0x23c
#define ZXDH_GDMA_LLI_USER_OFFSET               0x228
#define ZXDH_GDMA_CHAN_FORCE_CLOSE              (1u << 31)

/* TC count & Error interrupt status register */
#define ZXDH_GDMA_SRC_LLI_ERR                   (1 << 16)
#define ZXDH_GDMA_SRC_DATA_ERR                  (1 << 17)
#define ZXDH_GDMA_DST_ADDR_ERR                  (1 << 18)
#define ZXDH_GDMA_ERR_STATUS                    (1 << 19)
#define ZXDH_GDMA_ERR_INTR_ENABLE               (1 << 20)
#define ZXDH_GDMA_TC_CNT_CLEAN                  (1)

#define ZXDH_GDMA_CHAN_SHIFT                    0x80
#define LOW32_MASK                              0xffffffff
#define LOW16_MASK                              0xffff

static int zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id);
static int zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id);

char zxdh_gdma_driver_name[] = "rawdev_zxdh_gdma";
char dev_name[] = "zxdh_gdma";

static inline struct zxdh_gdma_rawdev *
zxdh_gdma_rawdev_get_priv(const struct rte_rawdev *rawdev)
{
	return rawdev->dev_private;
}

static inline struct zxdh_gdma_queue *
zxdh_gdma_get_queue(struct rte_rawdev *dev, uint16_t queue_id)
{
	struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);

	if (queue_id >= ZXDH_GDMA_TOTAL_CHAN_NUM) {
		ZXDH_PMD_LOG(ERR, "queue id %d is invalid", queue_id);
		return NULL;
	}

	return &(gdmadev->vqs[queue_id]);
}

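/*
 * Each channel owns a ZXDH_GDMA_CHAN_SHIFT (0x80) byte register window inside
 * BAR0 (after ZXDH_GDMA_BASE_OFFSET), so a per-queue register is addressed as
 * offset + queue_id * ZXDH_GDMA_CHAN_SHIFT.
 */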
static void
zxdh_gdma_write_reg(struct rte_rawdev *dev, uint16_t queue_id, uint32_t offset, uint32_t val)
{
	struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	uint32_t addr = 0;

	addr = offset + queue_id * ZXDH_GDMA_CHAN_SHIFT;
	*(uint32_t *)(gdmadev->base_addr + addr) = val;
}

static int
zxdh_gdma_rawdev_info_get(struct rte_rawdev *dev,
		__rte_unused rte_rawdev_obj_t dev_info,
		__rte_unused size_t dev_info_size)
{
	if (dev == NULL)
		return -EINVAL;

	return 0;
}

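/*
 * dev_configure only validates the requested queue count against
 * ZXDH_GDMA_TOTAL_CHAN_NUM; the channels themselves are initialised lazily
 * in queue_setup().
 */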
static int
zxdh_gdma_rawdev_configure(const struct rte_rawdev *dev,
		rte_rawdev_obj_t config,
		size_t config_size)
{
	struct zxdh_gdma_config *gdma_config = NULL;

	if ((dev == NULL) ||
		(config == NULL) ||
		(config_size != sizeof(struct zxdh_gdma_config)))
		return -EINVAL;

	gdma_config = (struct zxdh_gdma_config *)config;
	if (gdma_config->max_vqs > ZXDH_GDMA_TOTAL_CHAN_NUM) {
		ZXDH_PMD_LOG(ERR, "gdma supports up to %d queues", ZXDH_GDMA_TOTAL_CHAN_NUM);
		return -EINVAL;
	}

	return 0;
}

static int
zxdh_gdma_rawdev_start(struct rte_rawdev *dev)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;

	if (dev == NULL)
		return -EINVAL;

	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->device_state = ZXDH_GDMA_DEV_RUNNING;

	return 0;
}

static void
zxdh_gdma_rawdev_stop(struct rte_rawdev *dev)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;

	if (dev == NULL)
		return;

	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;
}

static int
zxdh_gdma_rawdev_reset(struct rte_rawdev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	return 0;
}

static int
zxdh_gdma_rawdev_close(struct rte_rawdev *dev)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_queue *queue = NULL;
	uint16_t queue_id = 0;

	if (dev == NULL)
		return -EINVAL;

	for (queue_id = 0; queue_id < ZXDH_GDMA_TOTAL_CHAN_NUM; queue_id++) {
		queue = zxdh_gdma_get_queue(dev, queue_id);
		if ((queue == NULL) || (queue->enable == 0))
			continue;

		zxdh_gdma_queue_free(dev, queue_id);
	}
	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;

	return 0;
}

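/*
 * The caller-supplied queue_id is ignored: the first disabled channel is
 * claimed and its index is returned.  Exactly one of rbp->srbp / rbp->drbp
 * must be set: srbp builds the source user word from sportid/spfid/svfid
 * (the destination defaults to ZXDH_GDMA_ZF_USER) and marks the queue as RX,
 * drbp does the same on the destination side and marks it as TX.  The
 * combined word is programmed into the channel's EXT_ADDR register.
 */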
static int
zxdh_gdma_rawdev_queue_setup(struct rte_rawdev *dev,
		uint16_t queue_id,
		rte_rawdev_obj_t queue_conf,
		size_t conf_size)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_queue *queue = NULL;
	struct zxdh_gdma_queue_config *qconfig = NULL;
	struct zxdh_gdma_rbp *rbp = NULL;
	uint16_t i = 0;
	uint8_t is_txq = 0;
	uint32_t src_user = 0;
	uint32_t dst_user = 0;

	if (dev == NULL)
		return -EINVAL;

	if ((queue_conf == NULL) || (conf_size != sizeof(struct zxdh_gdma_queue_config)))
		return -EINVAL;

	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	qconfig = (struct zxdh_gdma_queue_config *)queue_conf;

	for (i = 0; i < ZXDH_GDMA_TOTAL_CHAN_NUM; i++) {
		if (gdmadev->vqs[i].enable == 0)
			break;
	}
	if (i >= ZXDH_GDMA_TOTAL_CHAN_NUM) {
		ZXDH_PMD_LOG(ERR, "Failed to setup queue, no avail queues");
		return -1;
	}
	queue_id = i;
	if (zxdh_gdma_queue_init(dev, queue_id) != 0) {
		ZXDH_PMD_LOG(ERR, "Failed to init queue");
		return -1;
	}
	queue = &(gdmadev->vqs[queue_id]);

	rbp = qconfig->rbp;
	if ((rbp->srbp != 0) && (rbp->drbp == 0)) {
		is_txq = 0;
		dst_user = ZXDH_GDMA_ZF_USER;
		src_user = ((rbp->spfid << ZXDH_GDMA_PF_NUM_SHIFT) |
				((rbp->sportid + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));

		if (rbp->svfid != 0)
			src_user |= (ZXDH_GDMA_VF_EN |
					((rbp->svfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));

		ZXDH_PMD_LOG(DEBUG, "rxq->qidx:%d setup src_user(ep:%d pf:%d vf:%d) success",
				queue_id, (uint8_t)rbp->sportid, (uint8_t)rbp->spfid,
				(uint8_t)rbp->svfid);
	} else if ((rbp->srbp == 0) && (rbp->drbp != 0)) {
		is_txq = 1;
		src_user = ZXDH_GDMA_ZF_USER;
		dst_user = ((rbp->dpfid << ZXDH_GDMA_PF_NUM_SHIFT) |
				((rbp->dportid + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));

		if (rbp->dvfid != 0)
			dst_user |= (ZXDH_GDMA_VF_EN |
					((rbp->dvfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));

		ZXDH_PMD_LOG(DEBUG, "txq->qidx:%d setup dst_user(ep:%d pf:%d vf:%d) success",
				queue_id, (uint8_t)rbp->dportid, (uint8_t)rbp->dpfid,
				(uint8_t)rbp->dvfid);
	} else {
		ZXDH_PMD_LOG(ERR, "Failed to setup queue, srbp/drbp is invalid");
		return -EINVAL;
	}
	queue->is_txq = is_txq;

	/* setup queue user info */
	queue->user = (src_user & LOW16_MASK) | (dst_user << 16);

	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_EXT_ADDR_OFFSET, queue->user);
	gdmadev->used_num++;

	return queue_id;
}

static int
zxdh_gdma_rawdev_queue_release(struct rte_rawdev *dev, uint16_t queue_id)
{
	struct zxdh_gdma_queue *queue = NULL;

	if (dev == NULL)
		return -EINVAL;

	queue = zxdh_gdma_get_queue(dev, queue_id);
	if ((queue == NULL) || (queue->enable == 0))
		return -EINVAL;

	zxdh_gdma_queue_free(dev, queue_id);

	return 0;
}

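/*
 * attr_name is ignored; attr_value is expected to point to a
 * struct zxdh_gdma_attr, which is filled with the number of channels
 * currently in use.
 */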
static int
zxdh_gdma_rawdev_get_attr(struct rte_rawdev *dev,
				__rte_unused const char *attr_name,
				uint64_t *attr_value)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_attr *gdma_attr = NULL;

	if ((dev == NULL) || (attr_value == NULL))
		return -EINVAL;

	gdmadev   = zxdh_gdma_rawdev_get_priv(dev);
	gdma_attr = (struct zxdh_gdma_attr *)attr_value;
	gdma_attr->num_hw_queues = gdmadev->used_num;

	return 0;
}

static const struct rte_rawdev_ops zxdh_gdma_rawdev_ops = {
	.dev_info_get = zxdh_gdma_rawdev_info_get,
	.dev_configure = zxdh_gdma_rawdev_configure,
	.dev_start = zxdh_gdma_rawdev_start,
	.dev_stop = zxdh_gdma_rawdev_stop,
	.dev_close = zxdh_gdma_rawdev_close,
	.dev_reset = zxdh_gdma_rawdev_reset,

	.queue_setup = zxdh_gdma_rawdev_queue_setup,
	.queue_release = zxdh_gdma_rawdev_queue_release,

	.attr_get = zxdh_gdma_rawdev_get_attr,
};
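
/*
 * Minimal usage sketch (illustrative only, assuming the structures declared in
 * gdtc_rawdev.h and an already probed raw device id "dev_id"):
 *
 *	struct zxdh_gdma_config cfg = { .max_vqs = 1 };
 *	struct zxdh_gdma_rbp rbp = { .drbp = 1 };
 *	struct zxdh_gdma_queue_config qcfg = { .rbp = &rbp };
 *	int vq;
 *
 *	rte_rawdev_configure(dev_id, &cfg, sizeof(cfg));
 *	vq = rte_rawdev_queue_setup(dev_id, 0, &qcfg, sizeof(qcfg));
 *	rte_rawdev_start(dev_id);
 */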

static int
zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id)
{
	char name[RTE_MEMZONE_NAMESIZE];
	struct zxdh_gdma_queue *queue = NULL;
	const struct rte_memzone *mz = NULL;
	uint32_t size = 0;
	int ret = 0;

	queue = zxdh_gdma_get_queue(dev, queue_id);
	if (queue == NULL)
		return -EINVAL;

	queue->enable = 1;
	queue->vq_id  = queue_id;
	queue->flag   = 0;
	queue->tc_cnt = 0;

	/* Init sw_ring */
	queue->sw_ring.job = rte_calloc(NULL, queue->queue_size, sizeof(struct zxdh_gdma_job *), 0);
	if (queue->sw_ring.job == NULL) {
		ZXDH_PMD_LOG(ERR, "can not allocate sw_ring");
		ret = -ENOMEM;
		goto free_queue;
	}

	/* Cache up to size-1 job in the ring to prevent overwriting hardware prefetching */
	queue->sw_ring.free_cnt = queue->queue_size - 1;
	queue->sw_ring.deq_cnt  = 0;
	queue->sw_ring.pend_cnt = 0;
	queue->sw_ring.enq_idx  = 0;
	queue->sw_ring.deq_idx  = 0;
	queue->sw_ring.used_idx = 0;

	/* Init ring */
	snprintf(name, RTE_MEMZONE_NAMESIZE, "gdma_vq%d_ring", queue_id);
	size = ZXDH_GDMA_RING_SIZE * sizeof(struct zxdh_gdma_buff_desc);
	mz = rte_memzone_reserve_aligned(name, size, rte_socket_id(),
			RTE_MEMZONE_IOVA_CONTIG, size);
	if (mz == NULL) {
		if (rte_errno == EEXIST)
			mz = rte_memzone_lookup(name);
		if (mz == NULL) {
			ZXDH_PMD_LOG(ERR, "can not allocate ring %s", name);
			ret = -ENOMEM;
			goto free_queue;
		}
	}
	memset(mz->addr, 0, size);
	queue->ring.ring_mz   = mz;
	queue->ring.desc      = (struct zxdh_gdma_buff_desc *)(mz->addr);
	queue->ring.ring_mem  = mz->iova;
	queue->ring.avail_idx = 0;
	ZXDH_PMD_LOG(INFO, "queue%u ring phy addr:0x%"PRIx64" virt addr:%p",
			queue_id, mz->iova, mz->addr);

	/* Configure the hardware channel to the initial state */
	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET,
			ZXDH_GDMA_CHAN_FORCE_CLOSE);
	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET,
			ZXDH_GDMA_ERR_INTR_ENABLE | ZXDH_GDMA_ERR_STATUS | ZXDH_GDMA_TC_CNT_CLEAN);
	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_USER_OFFSET,
			ZXDH_GDMA_ZF_USER);

	return 0;

free_queue:
	zxdh_gdma_queue_free(dev, queue_id);
	return ret;
}

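/*
 * Force-close the channel, reset the queue's software state and release the
 * job array and the descriptor-ring memzone.
 */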
static int
zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id)
{
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_queue *queue = NULL;
	uint32_t val = 0;

	queue = zxdh_gdma_get_queue(dev, queue_id);
	if (queue == NULL)
		return -EINVAL;

	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->used_num--;

	/* disable gdma channel */
	val = ZXDH_GDMA_CHAN_FORCE_CLOSE;
	zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);

	queue->enable           = 0;
	queue->is_txq           = 0;
	queue->flag             = 0;
	queue->user             = 0;
	queue->tc_cnt           = 0;
	queue->ring.avail_idx   = 0;
	queue->sw_ring.free_cnt = 0;
	queue->sw_ring.deq_cnt  = 0;
	queue->sw_ring.pend_cnt = 0;
	queue->sw_ring.enq_idx  = 0;
	queue->sw_ring.deq_idx  = 0;
	queue->sw_ring.used_idx = 0;
	rte_free(queue->sw_ring.job);
	rte_memzone_free(queue->ring.ring_mz);

	return 0;
}

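/*
 * Map BAR0 of the device read/write through its sysfs "resource0" file; the
 * mapping stays in place for the lifetime of the raw device.
 */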
static int
zxdh_gdma_map_resource(struct rte_pci_device *dev)
{
	int fd = -1;
	char devname[PATH_MAX];
	void *mapaddr = NULL;
	struct rte_pci_addr *loc;

	loc = &dev->addr;
	snprintf(devname, sizeof(devname), "%s/" PCI_PRI_FMT "/resource0",
		rte_pci_get_sysfs_path(),
		loc->domain, loc->bus, loc->devid,
		loc->function);

	fd = open(devname, O_RDWR);
	if (fd < 0) {
		ZXDH_PMD_LOG(ERR, "Cannot open %s: %s", devname, strerror(errno));
		return -1;
	}

	/* Map the PCI memory resource of device */
	mapaddr = rte_mem_map(NULL, (size_t)dev->mem_resource[0].len,
			RTE_PROT_READ | RTE_PROT_WRITE,
			RTE_MAP_SHARED, fd, 0);
	if (mapaddr == NULL) {
		ZXDH_PMD_LOG(ERR, "cannot map resource(%d, 0x%zx): %s (%p)",
				fd, (size_t)dev->mem_resource[0].len,
				rte_strerror(rte_errno), mapaddr);
		close(fd);
		return -1;
	}

	close(fd);
	dev->mem_resource[0].addr = mapaddr;

	return 0;
}

static void
zxdh_gdma_unmap_resource(void *requested_addr, size_t size)
{
	if (requested_addr == NULL)
		return;

	/* Unmap the PCI memory resource of device */
	if (rte_mem_unmap(requested_addr, size))
		ZXDH_PMD_LOG(ERR, "cannot mem unmap(%p, %#zx): %s",
				requested_addr, size, rte_strerror(rte_errno));
	else
		ZXDH_PMD_LOG(DEBUG, "PCI memory unmapped at %p", requested_addr);
}

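/*
 * Probe: map BAR0, allocate the rawdev together with its private
 * zxdh_gdma_rawdev area, and leave every channel disabled with the default
 * queue size; channels are initialised on demand in queue_setup.
 */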
static int
zxdh_gdma_rawdev_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	struct rte_rawdev *dev = NULL;
	struct zxdh_gdma_rawdev *gdmadev = NULL;
	struct zxdh_gdma_queue *queue = NULL;
	uint8_t i = 0;
	int ret;

	if (pci_dev->mem_resource[0].phys_addr == 0) {
		ZXDH_PMD_LOG(ERR, "PCI bar0 resource is invalid");
		return -1;
	}

	ret = zxdh_gdma_map_resource(pci_dev);
	if (ret != 0) {
		ZXDH_PMD_LOG(ERR, "Failed to mmap pci device(%s)", pci_dev->name);
		return -1;
	}
	ZXDH_PMD_LOG(INFO, "%s bar0 0x%"PRIx64" mapped at %p",
			pci_dev->name, pci_dev->mem_resource[0].phys_addr,
			pci_dev->mem_resource[0].addr);

	dev = rte_rawdev_pmd_allocate(dev_name, sizeof(struct zxdh_gdma_rawdev), rte_socket_id());
	if (dev == NULL) {
		ZXDH_PMD_LOG(ERR, "Unable to allocate gdma rawdev");
		goto err_out;
	}
	ZXDH_PMD_LOG(INFO, "Init %s on NUMA node %d, dev_id is %d",
			dev_name, rte_socket_id(), dev->dev_id);

	dev->dev_ops = &zxdh_gdma_rawdev_ops;
	dev->device = &pci_dev->device;
	dev->driver_name = zxdh_gdma_driver_name;
	gdmadev = zxdh_gdma_rawdev_get_priv(dev);
	gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;
	gdmadev->rawdev = dev;
	gdmadev->queue_num = ZXDH_GDMA_TOTAL_CHAN_NUM;
	gdmadev->used_num = 0;
	gdmadev->base_addr = (uintptr_t)pci_dev->mem_resource[0].addr + ZXDH_GDMA_BASE_OFFSET;

	for (i = 0; i < ZXDH_GDMA_TOTAL_CHAN_NUM; i++) {
		queue = &(gdmadev->vqs[i]);
		queue->enable = 0;
		queue->queue_size = ZXDH_GDMA_QUEUE_SIZE;
		rte_spinlock_init(&(queue->enqueue_lock));
	}

	return 0;

err_out:
	zxdh_gdma_unmap_resource(pci_dev->mem_resource[0].addr,
			(size_t)pci_dev->mem_resource[0].len);
	return -1;
}

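/*
 * Remove: release the rawdev (rte_rawdev_pmd_release() closes the device,
 * which frees any still-enabled channels) and then unmap BAR0.
 */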
static int
zxdh_gdma_rawdev_remove(struct rte_pci_device *pci_dev)
{
	struct rte_rawdev *dev = NULL;
	int ret = 0;

	dev = rte_rawdev_pmd_get_named_dev(dev_name);
	if (dev == NULL)
		return -EINVAL;

	/* rte_rawdev_close is called by pmd_release */
	ret = rte_rawdev_pmd_release(dev);
	if (ret != 0) {
		ZXDH_PMD_LOG(ERR, "Device cleanup failed");
		return -1;
	}

	zxdh_gdma_unmap_resource(pci_dev->mem_resource[0].addr,
			(size_t)pci_dev->mem_resource[0].len);

	ZXDH_PMD_LOG(DEBUG, "rawdev %s remove done!", dev_name);

	return ret;
}

static const struct rte_pci_id zxdh_gdma_rawdev_map[] = {
	{ RTE_PCI_DEVICE(ZXDH_GDMA_VENDORID, ZXDH_GDMA_DEVICEID) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver zxdh_gdma_rawdev_pmd = {
	.id_table = zxdh_gdma_rawdev_map,
	.drv_flags = 0,
	.probe = zxdh_gdma_rawdev_probe,
	.remove = zxdh_gdma_rawdev_remove,
};

RTE_PMD_REGISTER_PCI(zxdh_gdma_rawdev_pci_driver, zxdh_gdma_rawdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(zxdh_gdma_rawdev_pci_driver, zxdh_gdma_rawdev_map);
RTE_LOG_REGISTER_DEFAULT(zxdh_gdma_rawdev_logtype, NOTICE);