/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2019 Intel Corporation
 */

#include <rte_malloc.h>

#include "qat_comp.h"
#include "qat_comp_pmd.h"

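/*
 * Number of flat-buffer entries initially allocated for each op cookie's
 * source and destination SGL descriptors in qat_comp_qp_setup().
 */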
#define QAT_PMD_COMP_SGL_DEF_SEGMENTS 16

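/*
 * Single DEFLATE capability entry advertised for all supported device
 * generations: fixed and dynamic Huffman, CRC32/Adler-32 checksums,
 * shareable private xforms and SGL support in all out-of-place
 * combinations, with a fixed 32 KB (2^15) window.
 */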
static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
	{/* COMPRESSION - deflate */
	 .algo = RTE_COMP_ALGO_DEFLATE,
	 .comp_feature_flags = RTE_COMP_FF_MULTI_PKT_CHECKSUM |
				RTE_COMP_FF_CRC32_CHECKSUM |
				RTE_COMP_FF_ADLER32_CHECKSUM |
				RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
				RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
				RTE_COMP_FF_HUFFMAN_FIXED |
				RTE_COMP_FF_HUFFMAN_DYNAMIC |
				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
				RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
				RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
	 .window_size = {.min = 15, .max = 15, .increment = 0} },
	{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };

static void
qat_comp_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	struct qat_common_stats qat_stats = {0};
	struct qat_comp_dev_private *qat_priv;

	if (stats == NULL || dev == NULL) {
		QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
		return;
	}
	qat_priv = dev->data->dev_private;

	qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION);
	stats->enqueued_count = qat_stats.enqueued_count;
	stats->dequeued_count = qat_stats.dequeued_count;
	stats->enqueue_err_count = qat_stats.enqueue_err_count;
	stats->dequeue_err_count = qat_stats.dequeue_err_count;
}

static void
qat_comp_stats_reset(struct rte_compressdev *dev)
{
	struct qat_comp_dev_private *qat_priv;

	if (dev == NULL) {
		QAT_LOG(ERR, "invalid compressdev ptr %p", dev);
		return;
	}
	qat_priv = dev->data->dev_private;

	qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION);
}

static int
qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
{
	struct qat_comp_dev_private *qat_private = dev->data->dev_private;
	struct qat_qp **qp_addr =
		(struct qat_qp **)&(dev->data->queue_pairs[queue_pair_id]);
	struct qat_qp *qp = (struct qat_qp *)*qp_addr;
	uint32_t i;

	QAT_LOG(DEBUG, "Release comp qp %u on device %d",
				queue_pair_id, dev->data->dev_id);

	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
						= NULL;

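	/* Free the per-descriptor SGL buffers allocated in qat_comp_qp_setup() */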
	for (i = 0; i < qp->nb_descriptors; i++) {
		struct qat_comp_op_cookie *cookie = qp->op_cookies[i];

		rte_free(cookie->qat_sgl_src_d);
		rte_free(cookie->qat_sgl_dst_d);
	}

	return qat_qp_release((struct qat_qp **)
			&(dev->data->queue_pairs[queue_pair_id]));
}

static int
qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		  uint32_t max_inflight_ops, int socket_id)
{
	struct qat_qp *qp;
	int ret = 0;
	uint32_t i;
	struct qat_qp_config qat_qp_conf;

	struct qat_qp **qp_addr =
			(struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
	struct qat_comp_dev_private *qat_private = dev->data->dev_private;
	const struct qat_qp_hw_data *comp_hw_qps =
			qat_gen_config[qat_private->qat_dev->qat_dev_gen]
				      .qp_hw_data[QAT_SERVICE_COMPRESSION];
	const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id;

	/* If qp is already in use, free ring memory and qp metadata. */
	if (*qp_addr != NULL) {
		ret = qat_comp_qp_release(dev, qp_id);
		if (ret < 0)
			return ret;
	}
	if (qp_id >= qat_qps_per_service(comp_hw_qps,
					 QAT_SERVICE_COMPRESSION)) {
		QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
		return -EINVAL;
	}

	qat_qp_conf.hw = qp_hw_data;
	qat_qp_conf.build_request = qat_comp_build_request;
	qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
	qat_qp_conf.nb_descriptors = max_inflight_ops;
	qat_qp_conf.socket_id = socket_id;
	qat_qp_conf.service_str = "comp";

	ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
	if (ret != 0)
		return ret;

	/* store a link to the qp in the qat_pci_device */
	qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
								= *qp_addr;

	qp = (struct qat_qp *)*qp_addr;

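	/*
	 * Pre-allocate a source and a destination SGL descriptor, each sized
	 * for QAT_PMD_COMP_SGL_DEF_SEGMENTS flat buffers, for every op cookie
	 * and cache their IOVA addresses for use when building requests.
	 */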
	for (i = 0; i < qp->nb_descriptors; i++) {
		struct qat_comp_op_cookie *cookie = qp->op_cookies[i];

		cookie->qat_sgl_src_d = rte_zmalloc_socket(NULL,
					sizeof(struct qat_sgl) +
					sizeof(struct qat_flat_buf) *
					QAT_PMD_COMP_SGL_DEF_SEGMENTS,
					64, dev->data->socket_id);

		cookie->qat_sgl_dst_d = rte_zmalloc_socket(NULL,
					sizeof(struct qat_sgl) +
					sizeof(struct qat_flat_buf) *
					QAT_PMD_COMP_SGL_DEF_SEGMENTS,
					64, dev->data->socket_id);

		if (cookie->qat_sgl_src_d == NULL ||
				cookie->qat_sgl_dst_d == NULL) {
			QAT_LOG(ERR, "Can't allocate SGL for device %s",
				     qat_private->qat_dev->name);
			return -ENOMEM;
		}

		cookie->qat_sgl_src_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_src_d);

		cookie->qat_sgl_dst_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_dst_d);

		cookie->dst_nb_elems = cookie->src_nb_elems =
				QAT_PMD_COMP_SGL_DEF_SEGMENTS;

		cookie->socket_id = dev->data->socket_id;
	}

	return ret;
}

#define QAT_IM_BUFFER_DEBUG 0

static const struct rte_memzone *
qat_comp_setup_inter_buffers(struct qat_comp_dev_private *comp_dev,
			      uint32_t buff_size)
{
	char inter_buff_mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *memzone;
	uint8_t *mz_start = NULL;
	rte_iova_t mz_start_phys = 0;
	struct array_of_ptrs *array_of_pointers;
	int size_of_ptr_array;
	uint32_t full_size;
	uint32_t offset_of_sgls, offset_of_flat_buffs = 0;
	int i;
	int num_im_sgls = qat_gen_config[
		comp_dev->qat_dev->qat_dev_gen].comp_num_im_bufs_required;

	QAT_LOG(DEBUG, "QAT COMP device %s needs %d sgls",
				comp_dev->qat_dev->name, num_im_sgls);
	snprintf(inter_buff_mz_name, RTE_MEMZONE_NAMESIZE,
				"%s_inter_buff", comp_dev->qat_dev->name);
	memzone = rte_memzone_lookup(inter_buff_mz_name);
	if (memzone != NULL) {
		QAT_LOG(DEBUG, "QAT COMP im buffer memzone created already");
		return memzone;
	}

	/* Create a memzone to hold intermediate buffers and the associated
	 * metadata needed by the firmware. The memzone contains 3 parts:
	 *  - a list of num_im_sgls physical pointers to the sgls
	 *  - the num_im_sgls sgl structures, each pointing to
	 *    QAT_NUM_BUFS_IN_IM_SGL flat buffers
	 *  - the flat buffers: num_im_sgls * QAT_NUM_BUFS_IN_IM_SGL
	 *    buffers, each of buff_size bytes
	 * num_im_sgls depends on the hardware generation of the device;
	 * buff_size comes from the user via the config file.
	 */
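	/*
	 * Resulting memzone layout (illustrative):
	 *
	 *   base (64-byte aligned)
	 *   +-> array_of_ptrs: num_im_sgls IOVA pointers
	 *   offset_of_sgls (rounded up to 64 bytes)
	 *   +-> qat_inter_sgl[num_im_sgls]
	 *   offset_of_flat_buffs
	 *   +-> num_im_sgls * QAT_NUM_BUFS_IN_IM_SGL flat buffers,
	 *       buff_size bytes each
	 */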

	size_of_ptr_array = num_im_sgls * sizeof(phys_addr_t);
	offset_of_sgls = (size_of_ptr_array + (~QAT_64_BYTE_ALIGN_MASK))
			& QAT_64_BYTE_ALIGN_MASK;
	offset_of_flat_buffs =
	    offset_of_sgls + num_im_sgls * sizeof(struct qat_inter_sgl);
	full_size = offset_of_flat_buffs +
			num_im_sgls * buff_size * QAT_NUM_BUFS_IN_IM_SGL;

	memzone = rte_memzone_reserve_aligned(inter_buff_mz_name, full_size,
			comp_dev->compressdev->data->socket_id,
			RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
	if (memzone == NULL) {
		QAT_LOG(ERR, "Can't allocate intermediate buffers"
				" for device %s", comp_dev->qat_dev->name);
		return NULL;
	}

	mz_start = (uint8_t *)memzone->addr;
	mz_start_phys = memzone->phys_addr;
	QAT_LOG(DEBUG, "Memzone %s: addr = %p, phys = 0x%"PRIx64
			", size required %d, size created %zu",
			inter_buff_mz_name, mz_start, mz_start_phys,
			full_size, memzone->len);

	array_of_pointers = (struct array_of_ptrs *)mz_start;
	for (i = 0; i < num_im_sgls; i++) {
		uint32_t curr_sgl_offset =
		    offset_of_sgls + i * sizeof(struct qat_inter_sgl);
		struct qat_inter_sgl *sgl =
		    (struct qat_inter_sgl *)(mz_start + curr_sgl_offset);
		int lb;

		array_of_pointers->pointer[i] = mz_start_phys + curr_sgl_offset;

		sgl->num_bufs = QAT_NUM_BUFS_IN_IM_SGL;
		sgl->num_mapped_bufs = 0;
		sgl->resrvd = 0;

#if QAT_IM_BUFFER_DEBUG
		QAT_LOG(DEBUG, "  : phys addr of sgl[%i] in array_of_pointers"
			" = 0x%"PRIx64, i, array_of_pointers->pointer[i]);
		QAT_LOG(DEBUG, "  : virt address of sgl[%i] = %p", i, sgl);
#endif
		for (lb = 0; lb < QAT_NUM_BUFS_IN_IM_SGL; lb++) {
			sgl->buffers[lb].addr =
			  mz_start_phys + offset_of_flat_buffs +
			  (((i * QAT_NUM_BUFS_IN_IM_SGL) + lb) * buff_size);
			sgl->buffers[lb].len = buff_size;
			sgl->buffers[lb].resrvd = 0;
#if QAT_IM_BUFFER_DEBUG
			QAT_LOG(DEBUG,
			  "  : sgl->buffers[%d].addr = 0x%"PRIx64", len=%d",
			  lb, sgl->buffers[lb].addr, sgl->buffers[lb].len);
#endif
		}
	}
#if QAT_IM_BUFFER_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG,  "IM buffer memzone start:",
			mz_start, offset_of_flat_buffs + 32);
#endif
	return memzone;
}

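/*
 * Look up or create the mempool used to hold private xforms. If a pool left
 * over from a previous configuration exists with a different element count,
 * it is freed and re-created with the requested size.
 */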
static struct rte_mempool *
qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
			   struct rte_compressdev_config *config,
			   uint32_t num_elements)
{
	char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;

	snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE,
			"%s_xforms", comp_dev->qat_dev->name);

	QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name);
	mp = rte_mempool_lookup(xform_pool_name);

	if (mp != NULL) {
		QAT_LOG(DEBUG, "xformpool already created");
		if (mp->size != num_elements) {
			QAT_LOG(DEBUG, "xformpool wrong size - delete it");
			rte_mempool_free(mp);
			mp = NULL;
			comp_dev->xformpool = NULL;
		}
	}

	if (mp == NULL)
		mp = rte_mempool_create(xform_pool_name,
				num_elements,
				qat_comp_xform_size(), 0, 0,
				NULL, NULL, NULL, NULL, config->socket_id,
				0);
	if (mp == NULL) {
		QAT_LOG(ERR,
			"Err creating mempool %s with %d elements of size %d",
			xform_pool_name, num_elements, qat_comp_xform_size());
		return NULL;
	}

	return mp;
}

static void
_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
{
	/* Free intermediate buffers */
	if (comp_dev->interm_buff_mz) {
		rte_memzone_free(comp_dev->interm_buff_mz);
		comp_dev->interm_buff_mz = NULL;
	}

	/* Free internal mempool for private xforms */
	if (comp_dev->xformpool) {
		rte_mempool_free(comp_dev->xformpool);
		comp_dev->xformpool = NULL;
	}
}

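/*
 * dev_configure callback: stateful compression (streams) is not supported,
 * intermediate buffers are set up for dynamic Huffman deflate (unless the
 * configured buffer size is zero) and the private-xform pool is created.
 */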
static int
qat_comp_dev_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
	int ret = 0;

	if (config->max_nb_streams != 0) {
		QAT_LOG(ERR,
	"QAT device does not support STATEFUL so max_nb_streams must be 0");
		return -EINVAL;
	}

	if (RTE_PMD_QAT_COMP_IM_BUFFER_SIZE == 0) {
		QAT_LOG(WARNING,
			"RTE_PMD_QAT_COMP_IM_BUFFER_SIZE = 0 in config file, so"
			" QAT device can't be used for Dynamic Deflate. "
			"Did you really intend to do this?");
	} else {
		comp_dev->interm_buff_mz =
				qat_comp_setup_inter_buffers(comp_dev,
					RTE_PMD_QAT_COMP_IM_BUFFER_SIZE);
		if (comp_dev->interm_buff_mz == NULL) {
			ret = -ENOMEM;
			goto error_out;
		}
	}

	comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev, config,
					config->max_nb_priv_xforms);
	if (comp_dev->xformpool == NULL) {
		ret = -ENOMEM;
		goto error_out;
	}
	return 0;

error_out:
	_qat_comp_dev_config_clear(comp_dev);
	return ret;
}

static int
qat_comp_dev_start(struct rte_compressdev *dev __rte_unused)
{
	return 0;
}

static void
qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused)
{
}

static int
qat_comp_dev_close(struct rte_compressdev *dev)
{
	int i;
	int ret = 0;
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = qat_comp_qp_release(dev, i);
		if (ret < 0)
			return ret;
	}

	_qat_comp_dev_config_clear(comp_dev);

	return ret;
}

static void
qat_comp_dev_info_get(struct rte_compressdev *dev,
			struct rte_compressdev_info *info)
{
	struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
	const struct qat_qp_hw_data *comp_hw_qps =
		qat_gen_config[comp_dev->qat_dev->qat_dev_gen]
			      .qp_hw_data[QAT_SERVICE_COMPRESSION];

	if (info != NULL) {
		info->max_nb_queue_pairs =
			qat_qps_per_service(comp_hw_qps,
					    QAT_SERVICE_COMPRESSION);
		info->feature_flags = dev->feature_flags;
		info->capabilities = comp_dev->qat_dev_capabilities;
	}
}

static uint16_t
qat_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
		uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
}

static uint16_t
qat_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
			      uint16_t nb_ops)
{
	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
}

static uint16_t
qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused,
				    struct rte_comp_op **ops __rte_unused,
				    uint16_t nb_ops __rte_unused)
{
	QAT_DP_LOG(ERR, "QAT PMD detected wrong FW version!");
	return 0;
}

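/*
 * Reduced ops table installed by qat_comp_pmd_dequeue_frst_op_burst() when an
 * incompatible firmware version is detected, leaving only the callbacks
 * needed to reset stats, release resources and close the device.
 */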
static struct rte_compressdev_ops compress_qat_dummy_ops = {

	/* Device related operations */
	.dev_configure		= NULL,
	.dev_start		= NULL,
	.dev_stop		= qat_comp_dev_stop,
	.dev_close		= qat_comp_dev_close,
	.dev_infos_get		= NULL,

	.stats_get		= NULL,
	.stats_reset		= qat_comp_stats_reset,
	.queue_pair_setup	= NULL,
	.queue_pair_release	= qat_comp_qp_release,

	/* Compression related operations */
	.private_xform_create	= NULL,
	.private_xform_free	= qat_comp_private_xform_free
};

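/*
 * Initial dequeue handler, installed at device creation. On the first
 * successful dequeue it checks whether the firmware reported
 * ERR_CODE_QAT_COMP_WRONG_FW: if so, the dummy burst handlers and ops table
 * above are installed; otherwise the regular dequeue handler takes over for
 * subsequent calls.
 */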
static uint16_t
qat_comp_pmd_dequeue_frst_op_burst(void *qp, struct rte_comp_op **ops,
				   uint16_t nb_ops)
{
	uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;

	if (ret) {
		if ((*ops)->debug_status ==
				(uint64_t)ERR_CODE_QAT_COMP_WRONG_FW) {
			tmp_qp->qat_dev->comp_dev->compressdev->enqueue_burst =
					qat_comp_pmd_enq_deq_dummy_op_burst;
			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
					qat_comp_pmd_enq_deq_dummy_op_burst;

			tmp_qp->qat_dev->comp_dev->compressdev->dev_ops =
					&compress_qat_dummy_ops;
			QAT_LOG(ERR, "QAT PMD detected wrong FW version!");
		} else {
			tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
					qat_comp_pmd_dequeue_op_burst;
		}
	}
	return ret;
}

static struct rte_compressdev_ops compress_qat_ops = {

	/* Device related operations */
	.dev_configure		= qat_comp_dev_config,
	.dev_start		= qat_comp_dev_start,
	.dev_stop		= qat_comp_dev_stop,
	.dev_close		= qat_comp_dev_close,
	.dev_infos_get		= qat_comp_dev_info_get,

	.stats_get		= qat_comp_stats_get,
	.stats_reset		= qat_comp_stats_reset,
	.queue_pair_setup	= qat_comp_qp_setup,
	.queue_pair_release	= qat_comp_qp_release,

	/* Compression related operations */
	.private_xform_create	= qat_comp_private_xform_create,
	.private_xform_free	= qat_comp_private_xform_free
};

/* An rte_driver is needed in the registration of the device with compressdev.
 * The QAT PCI device's own rte_driver can't be used, as its name represents
 * the whole PCI device with all of its services. Think of this as a holder
 * for a name for the compression part of the PCI device.
 */
static const char qat_comp_drv_name[] = RTE_STR(COMPRESSDEV_NAME_QAT_PMD);
static const struct rte_driver compdev_qat_driver = {
	.name = qat_comp_drv_name,
	.alias = qat_comp_drv_name
};

int
qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
{
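	/*
	 * GEN3 (c4xxx) devices are not supported by this PMD; skip creation
	 * of the compressdev instance without failing the overall probe.
	 */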
	if (qat_pci_dev->qat_dev_gen == QAT_GEN3) {
		QAT_LOG(ERR, "Compression PMD not supported on QAT c4xxx");
		return 0;
	}

	struct rte_compressdev_pmd_init_params init_params = {
		.name = "",
		.socket_id = qat_pci_dev->pci_dev->device.numa_node,
	};
	char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct qat_comp_dev_private *comp_dev;

	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
			qat_pci_dev->name, "comp");
	QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);

	/* Populate subset device to use in compressdev device creation */
	qat_pci_dev->comp_rte_dev.driver = &compdev_qat_driver;
	qat_pci_dev->comp_rte_dev.numa_node =
					qat_pci_dev->pci_dev->device.numa_node;
	qat_pci_dev->comp_rte_dev.devargs = NULL;

	compressdev = rte_compressdev_pmd_create(name,
			&(qat_pci_dev->comp_rte_dev),
			sizeof(struct qat_comp_dev_private),
			&init_params);

	if (compressdev == NULL)
		return -ENODEV;

	compressdev->dev_ops = &compress_qat_ops;

	compressdev->enqueue_burst = qat_comp_pmd_enqueue_op_burst;
	compressdev->dequeue_burst = qat_comp_pmd_dequeue_frst_op_burst;

	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;

	comp_dev = compressdev->data->dev_private;
	comp_dev->qat_dev = qat_pci_dev;
	comp_dev->compressdev = compressdev;
	qat_pci_dev->comp_dev = comp_dev;

	switch (qat_pci_dev->qat_dev_gen) {
	case QAT_GEN1:
	case QAT_GEN2:
	case QAT_GEN3:
		comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
		break;
	default:
		comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
		QAT_LOG(DEBUG,
			"QAT gen %d capabilities unknown, default to GEN1",
					qat_pci_dev->qat_dev_gen);
		break;
	}

	QAT_LOG(DEBUG,
		    "Created QAT COMP device %s as compressdev instance %d",
			name, compressdev->data->dev_id);
	return 0;
}

int
qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev)
{
	struct qat_comp_dev_private *comp_dev;

	if (qat_pci_dev == NULL)
		return -ENODEV;

	comp_dev = qat_pci_dev->comp_dev;
	if (comp_dev == NULL)
		return 0;

	/* clean up any resources used by the device */
	qat_comp_dev_close(comp_dev->compressdev);

	rte_compressdev_pmd_destroy(comp_dev->compressdev);
	qat_pci_dev->comp_dev = NULL;

	return 0;
}