/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2022 Intel Corporation
 */

#include <rte_malloc.h>

#include "qat_comp.h"
#include "qat_comp_pmd.h"

#define QAT_PMD_COMP_SGL_DEF_SEGMENTS 16

#define COMP_ENQ_THRESHOLD_NAME "qat_comp_enq_threshold"

static const char *const arguments[] = {
        COMP_ENQ_THRESHOLD_NAME,
        NULL
};

struct qat_comp_gen_dev_ops qat_comp_gen_dev_ops[QAT_N_GENS];

struct stream_create_info {
        struct qat_comp_dev_private *comp_dev;
        int socket_id;
        int error;
};

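/* Return the capability table for the given device generation, or an empty
 * info structure if the generation is out of range or does not provide a
 * compression capability callback.
 */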
static struct
qat_comp_capabilities_info qat_comp_get_capa_info(
                enum qat_device_gen qat_dev_gen, struct qat_pci_device *qat_dev)
{
        struct qat_comp_capabilities_info ret = { .data = NULL, .size = 0 };

        if (qat_dev_gen >= QAT_N_GENS)
                return ret;
        if (qat_comp_gen_dev_ops[qat_dev_gen].qat_comp_get_capabilities == NULL)
                return ret;
        return qat_comp_gen_dev_ops[qat_dev_gen]
                        .qat_comp_get_capabilities(qat_dev);
}

void
qat_comp_stats_get(struct rte_compressdev *dev,
                struct rte_compressdev_stats *stats)
{
        struct qat_common_stats qat_stats = {0};
        struct qat_comp_dev_private *qat_priv;

        if (stats == NULL || dev == NULL) {
                QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
                return;
        }
        qat_priv = dev->data->dev_private;

        qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION);
        stats->enqueued_count = qat_stats.enqueued_count;
        stats->dequeued_count = qat_stats.dequeued_count;
        stats->enqueue_err_count = qat_stats.enqueue_err_count;
        stats->dequeue_err_count = qat_stats.dequeue_err_count;
}

void
qat_comp_stats_reset(struct rte_compressdev *dev)
{
        struct qat_comp_dev_private *qat_priv;

        if (dev == NULL) {
                QAT_LOG(ERR, "invalid compressdev ptr %p", dev);
                return;
        }
        qat_priv = dev->data->dev_private;

        qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION);
}

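/* Release a compression queue pair: drop the device's reference to it, free
 * the per-cookie source/destination SGL descriptors and release the
 * underlying QAT queue pair.
 */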
int
qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
{
        struct qat_comp_dev_private *qat_private = dev->data->dev_private;
        struct qat_qp **qp_addr =
                (struct qat_qp **)&(dev->data->queue_pairs[queue_pair_id]);
        struct qat_qp *qp = (struct qat_qp *)*qp_addr;
        enum qat_device_gen qat_dev_gen = qat_private->qat_dev->qat_dev_gen;
        uint32_t i;

        QAT_LOG(DEBUG, "Release comp qp %u on device %d",
                        queue_pair_id, dev->data->dev_id);

        qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
                        = NULL;

        if (qp != NULL)
                for (i = 0; i < qp->nb_descriptors; i++) {
                        struct qat_comp_op_cookie *cookie = qp->op_cookies[i];

                        rte_free(cookie->qat_sgl_src_d);
                        rte_free(cookie->qat_sgl_dst_d);
                }

        return qat_qp_release(qat_dev_gen, (struct qat_qp **)
                        &(dev->data->queue_pairs[queue_pair_id]));
}

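/* Set up a compression queue pair: create the QAT ring pair and allocate one
 * cookie per descriptor, each holding source and destination SGL descriptors
 * with QAT_PMD_COMP_SGL_DEF_SEGMENTS flat buffers.
 */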
int
qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
                uint32_t max_inflight_ops, int socket_id)
{
        struct qat_qp_config qat_qp_conf = {0};
        struct qat_qp **qp_addr =
                        (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
        struct qat_comp_dev_private *qat_private = dev->data->dev_private;
        struct qat_pci_device *qat_dev = qat_private->qat_dev;
        struct qat_qp *qp;
        uint32_t i;
        int ret;

        /* If qp is already in use free ring memory and qp metadata. */
        if (*qp_addr != NULL) {
                ret = qat_comp_qp_release(dev, qp_id);
                if (ret < 0)
                        return ret;
        }
        if (qp_id >= qat_qps_per_service(qat_dev, QAT_SERVICE_COMPRESSION)) {
                QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
                return -EINVAL;
        }

        qat_qp_conf.hw = qat_qp_get_hw_data(qat_dev, QAT_SERVICE_COMPRESSION,
                        qp_id);
        if (qat_qp_conf.hw == NULL) {
                QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
                return -EINVAL;
        }
        qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
        qat_qp_conf.nb_descriptors = max_inflight_ops;
        qat_qp_conf.socket_id = socket_id;
        qat_qp_conf.service_str = "comp";

        ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
        if (ret != 0)
                return ret;
        /* store a link to the qp in the qat_pci_device */
        qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
                        = *qp_addr;

        qp = (struct qat_qp *)*qp_addr;
        qp->min_enq_burst_threshold = qat_private->min_enq_burst_threshold;

        for (i = 0; i < qp->nb_descriptors; i++) {
                struct qat_comp_op_cookie *cookie = qp->op_cookies[i];

                cookie->qp = qp;
                cookie->cookie_index = i;

                cookie->qat_sgl_src_d = rte_zmalloc_socket(NULL,
                                sizeof(struct qat_sgl) +
                                sizeof(struct qat_flat_buf) *
                                        QAT_PMD_COMP_SGL_DEF_SEGMENTS,
                                64, dev->data->socket_id);

                cookie->qat_sgl_dst_d = rte_zmalloc_socket(NULL,
                                sizeof(struct qat_sgl) +
                                sizeof(struct qat_flat_buf) *
                                        QAT_PMD_COMP_SGL_DEF_SEGMENTS,
                                64, dev->data->socket_id);

                if (cookie->qat_sgl_src_d == NULL ||
                                cookie->qat_sgl_dst_d == NULL) {
                        QAT_LOG(ERR, "Can't allocate SGL for device %s",
                                        qat_private->qat_dev->name);
                        return -ENOMEM;
                }

                cookie->qat_sgl_src_phys_addr =
                                rte_malloc_virt2iova(cookie->qat_sgl_src_d);

                cookie->qat_sgl_dst_phys_addr =
                                rte_malloc_virt2iova(cookie->qat_sgl_dst_d);

                cookie->dst_nb_elems = cookie->src_nb_elems =
                                QAT_PMD_COMP_SGL_DEF_SEGMENTS;

                cookie->socket_id = dev->data->socket_id;

                cookie->error = 0;
        }

        return ret;
}

#define QAT_IM_BUFFER_DEBUG 0
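/* Create (or look up) the memzones holding the intermediate SGLs and flat
 * buffers the firmware needs for dynamic compression. The first memzone
 * holds the array of physical pointers to the per-SGL memzones.
 */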
const struct rte_memzone *
qat_comp_setup_inter_buffers(struct qat_comp_dev_private *comp_dev,
                uint32_t buff_size)
{
        char inter_buff_mz_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *memzone;
        uint8_t *mz_start = NULL;
        rte_iova_t mz_start_phys = 0;
        struct array_of_ptrs *array_of_pointers;
        int size_of_ptr_array;
        uint32_t full_size;
        uint32_t offset_of_flat_buffs;
        int i;
        int num_im_sgls = qat_comp_get_num_im_bufs_required(
                        comp_dev->qat_dev->qat_dev_gen);

        QAT_LOG(DEBUG, "QAT COMP device %s needs %d sgls",
                        comp_dev->qat_dev->name, num_im_sgls);
        snprintf(inter_buff_mz_name, RTE_MEMZONE_NAMESIZE,
                        "%s_inter_buff", comp_dev->qat_dev->name);
        memzone = rte_memzone_lookup(inter_buff_mz_name);
        if (memzone != NULL) {
                QAT_LOG(DEBUG, "QAT COMP im buffer memzone created already");
                return memzone;
        }

        /* Create multiple memzones to hold intermediate buffers and associated
         * meta-data needed by the firmware.
         * The first memzone contains:
         * - a list of num_im_sgls physical pointers to sgls
         * All other memzones contain:
         * - the sgl structure, pointing to QAT_NUM_BUFS_IN_IM_SGL flat buffers
         * - the flat buffers: QAT_NUM_BUFS_IN_IM_SGL buffers,
         *   each of buff_size
         * num_im_sgls depends on the hardware generation of the device
         * buff_size comes from the user via the config file
         */

        size_of_ptr_array = num_im_sgls * sizeof(phys_addr_t);
        offset_of_flat_buffs = sizeof(struct qat_inter_sgl);
        full_size = offset_of_flat_buffs +
                        buff_size * QAT_NUM_BUFS_IN_IM_SGL;

        memzone = rte_memzone_reserve_aligned(inter_buff_mz_name,
                        size_of_ptr_array,
                        comp_dev->compressdev->data->socket_id,
                        RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
        if (memzone == NULL) {
                QAT_LOG(ERR,
                        "Can't allocate intermediate buffers for device %s",
                        comp_dev->qat_dev->name);
                return NULL;
        }

        mz_start = (uint8_t *)memzone->addr;
        mz_start_phys = memzone->iova;
        QAT_LOG(DEBUG, "Memzone %s: addr = %p, phys = 0x%"PRIx64
                        ", size required %d, size created %zu",
                        inter_buff_mz_name, mz_start, mz_start_phys,
                        size_of_ptr_array, memzone->len);

        array_of_pointers = (struct array_of_ptrs *)mz_start;
        for (i = 0; i < num_im_sgls; i++) {
                const struct rte_memzone *mz;
                struct qat_inter_sgl *sgl;
                int lb;

                snprintf(inter_buff_mz_name, RTE_MEMZONE_NAMESIZE,
                                "%s_inter_buff_%d", comp_dev->qat_dev->name, i);
                mz = rte_memzone_lookup(inter_buff_mz_name);
                if (mz == NULL) {
                        mz = rte_memzone_reserve_aligned(inter_buff_mz_name,
                                        full_size,
                                        comp_dev->compressdev->data->socket_id,
                                        RTE_MEMZONE_IOVA_CONTIG,
                                        QAT_64_BYTE_ALIGN);
                        if (mz == NULL) {
                                QAT_LOG(ERR,
                                        "Can't allocate intermediate buffers for device %s",
                                        comp_dev->qat_dev->name);
                                while (--i >= 0) {
                                        snprintf(inter_buff_mz_name,
                                                        RTE_MEMZONE_NAMESIZE,
                                                        "%s_inter_buff_%d",
                                                        comp_dev->qat_dev->name,
                                                        i);
                                        rte_memzone_free(
                                                        rte_memzone_lookup(
                                                        inter_buff_mz_name));
                                }
                                rte_memzone_free(memzone);
                                return NULL;
                        }
                }

                QAT_LOG(DEBUG, "Memzone %s: addr = %p, phys = 0x%"PRIx64
                                ", size required %d, size created %zu",
                                inter_buff_mz_name, mz->addr, mz->iova,
                                full_size, mz->len);

                array_of_pointers->pointer[i] = mz->iova;

                sgl = (struct qat_inter_sgl *)mz->addr;
                sgl->num_bufs = QAT_NUM_BUFS_IN_IM_SGL;
                sgl->num_mapped_bufs = 0;
                sgl->resrvd = 0;

#if QAT_IM_BUFFER_DEBUG
                QAT_LOG(DEBUG, " : phys addr of sgl[%i] in array_of_pointers"
                                " = 0x%"PRIx64, i, array_of_pointers->pointer[i]);
                QAT_LOG(DEBUG, " : virt address of sgl[%i] = %p", i, sgl);
#endif
                for (lb = 0; lb < QAT_NUM_BUFS_IN_IM_SGL; lb++) {
                        sgl->buffers[lb].addr =
                                        mz->iova + offset_of_flat_buffs +
                                        lb * buff_size;
                        sgl->buffers[lb].len = buff_size;
                        sgl->buffers[lb].resrvd = 0;
#if QAT_IM_BUFFER_DEBUG
                        QAT_LOG(DEBUG,
                                " : sgl->buffers[%d].addr = 0x%"PRIx64", len=%d",
                                lb, sgl->buffers[lb].addr, sgl->buffers[lb].len);
#endif
                }
        }
#if QAT_IM_BUFFER_DEBUG
        QAT_DP_HEXDUMP_LOG(DEBUG, "IM buffer memzone start:",
                        memzone->addr, size_of_ptr_array);
#endif
        return memzone;
}

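/* Create (or reuse) the mempool holding private_xform objects. If a pool of
 * the same name already exists with a different size, it is freed and
 * re-created with the requested number of elements.
 */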
static struct rte_mempool *
qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
                struct rte_compressdev_config *config,
                uint32_t num_elements)
{
        char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
        struct rte_mempool *mp;

        snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE,
                        "%s_xforms", comp_dev->qat_dev->name);

        QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name);
        mp = rte_mempool_lookup(xform_pool_name);

        if (mp != NULL) {
                QAT_LOG(DEBUG, "xformpool already created");
                if (mp->size != num_elements) {
                        QAT_LOG(DEBUG, "xformpool wrong size - delete it");
                        rte_mempool_free(mp);
                        mp = NULL;
                        comp_dev->xformpool = NULL;
                }
        }

        if (mp == NULL)
                mp = rte_mempool_create(xform_pool_name,
                                num_elements,
                                qat_comp_xform_size(), 0, 0,
                                NULL, NULL, NULL, NULL, config->socket_id,
                                0);
        if (mp == NULL) {
                QAT_LOG(ERR, "Error creating mempool %s with %d elements of size %d",
                                xform_pool_name, num_elements,
                                qat_comp_xform_size());
                return NULL;
        }

        return mp;
}

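/* Mempool object constructor: back each stream with a memzone holding the
 * decompression state registers, the RAM-bank buffer list descriptor and the
 * inflate context, and store the corresponding pointers and IOVAs in the
 * stream object.
 */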
static void
qat_comp_stream_init(struct rte_mempool *mp __rte_unused, void *opaque,
                void *obj, unsigned int obj_idx)
{
        struct stream_create_info *info = opaque;
        struct qat_comp_stream *stream = obj;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *memzone;
        struct qat_inter_sgl *ram_banks_desc;

        /* find a memzone for RAM banks */
        snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u_rambanks",
                        info->comp_dev->qat_dev->name, obj_idx);
        memzone = rte_memzone_lookup(mz_name);
        if (memzone == NULL) {
                /* allocate a memzone for compression state and RAM banks */
                memzone = rte_memzone_reserve_aligned(mz_name,
                                QAT_STATE_REGISTERS_MAX_SIZE
                                        + sizeof(struct qat_inter_sgl)
                                        + QAT_INFLATE_CONTEXT_SIZE,
                                info->socket_id,
                                RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
                if (memzone == NULL) {
                        QAT_LOG(ERR,
                                "Can't allocate RAM banks for device %s, object %u",
                                info->comp_dev->qat_dev->name, obj_idx);
                        info->error = -ENOMEM;
                        return;
                }
        }

        /* prepare the buffer list descriptor for RAM banks */
        ram_banks_desc = (struct qat_inter_sgl *)
                (((uint8_t *)memzone->addr) + QAT_STATE_REGISTERS_MAX_SIZE);
        ram_banks_desc->num_bufs = 1;
        ram_banks_desc->buffers[0].len = QAT_INFLATE_CONTEXT_SIZE;
        ram_banks_desc->buffers[0].addr = memzone->iova
                        + QAT_STATE_REGISTERS_MAX_SIZE
                        + sizeof(struct qat_inter_sgl);

        memset(stream, 0, qat_comp_stream_size());
        stream->memzone = memzone;
        stream->state_registers_decomp = memzone->addr;
        stream->state_registers_decomp_phys = memzone->iova;
        stream->inflate_context = ((uint8_t *)memzone->addr)
                        + QAT_STATE_REGISTERS_MAX_SIZE;
        stream->inflate_context_phys = memzone->iova
                        + QAT_STATE_REGISTERS_MAX_SIZE;
}

static void
qat_comp_stream_destroy(struct rte_mempool *mp __rte_unused,
                void *opaque __rte_unused, void *obj,
                unsigned obj_idx __rte_unused)
{
        struct qat_comp_stream *stream = obj;

        rte_memzone_free(stream->memzone);
}

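/* Create (or reuse) the mempool holding stream objects. Each element is
 * initialised by qat_comp_stream_init(); if any element fails to initialise,
 * the whole pool is torn down again.
 */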
static struct rte_mempool *
qat_comp_create_stream_pool(struct qat_comp_dev_private *comp_dev,
                int socket_id,
                uint32_t num_elements)
{
        char stream_pool_name[RTE_MEMPOOL_NAMESIZE];
        struct rte_mempool *mp;

        snprintf(stream_pool_name, RTE_MEMPOOL_NAMESIZE,
                        "%s_streams", comp_dev->qat_dev->name);

        QAT_LOG(DEBUG, "streampool: %s", stream_pool_name);
        mp = rte_mempool_lookup(stream_pool_name);

        if (mp != NULL) {
                QAT_LOG(DEBUG, "streampool already created");
                if (mp->size != num_elements) {
                        QAT_LOG(DEBUG, "streampool wrong size - delete it");
                        rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
                        rte_mempool_free(mp);
                        mp = NULL;
                        comp_dev->streampool = NULL;
                }
        }

        if (mp == NULL) {
                struct stream_create_info info = {
                        .comp_dev = comp_dev,
                        .socket_id = socket_id,
                        .error = 0
                };
                mp = rte_mempool_create(stream_pool_name,
                                num_elements,
                                qat_comp_stream_size(), 0, 0,
                                NULL, NULL, qat_comp_stream_init, &info,
                                socket_id, 0);
                if (mp == NULL) {
                        QAT_LOG(ERR,
                                "Error creating mempool %s with %d elements of size %d",
                                stream_pool_name, num_elements,
                                qat_comp_stream_size());
                } else if (info.error) {
                        rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
                        QAT_LOG(ERR,
                                "Destroying mempool %s as at least one element failed initialisation",
                                stream_pool_name);
                        rte_mempool_free(mp);
                        mp = NULL;
                }
        }

        return mp;
}

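/* Free all resources created by qat_comp_dev_config(): the intermediate
 * buffer memzones, the private_xform pool and the stream pool.
 */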
static void
_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
{
        /* Free intermediate buffers */
        if (comp_dev->interm_buff_mz) {
                char mz_name[RTE_MEMZONE_NAMESIZE];
                int i = qat_comp_get_num_im_bufs_required(
                                comp_dev->qat_dev->qat_dev_gen);

                while (--i >= 0) {
                        snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
                                        "%s_inter_buff_%d",
                                        comp_dev->qat_dev->name, i);
                        rte_memzone_free(rte_memzone_lookup(mz_name));
                }
                rte_memzone_free(comp_dev->interm_buff_mz);
                comp_dev->interm_buff_mz = NULL;
        }

        /* Free private_xform pool */
        if (comp_dev->xformpool) {
                /* Free internal mempool for private xforms */
                rte_mempool_free(comp_dev->xformpool);
                comp_dev->xformpool = NULL;
        }

        /* Free stream pool */
        if (comp_dev->streampool) {
                rte_mempool_obj_iter(comp_dev->streampool,
                                qat_comp_stream_destroy, NULL);
                rte_mempool_free(comp_dev->streampool);
                comp_dev->streampool = NULL;
        }
}

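/* Device configure callback: create the private_xform and stream pools
 * according to the requested maximum counts; on failure everything allocated
 * so far is released again.
 */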
int
qat_comp_dev_config(struct rte_compressdev *dev,
                struct rte_compressdev_config *config)
{
        struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
        int ret = 0;

        if (config->max_nb_priv_xforms) {
                comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
                                config, config->max_nb_priv_xforms);
                if (comp_dev->xformpool == NULL) {
                        ret = -ENOMEM;
                        goto error_out;
                }
        } else
                comp_dev->xformpool = NULL;

        if (config->max_nb_streams) {
                comp_dev->streampool = qat_comp_create_stream_pool(comp_dev,
                                config->socket_id, config->max_nb_streams);
                if (comp_dev->streampool == NULL) {
                        ret = -ENOMEM;
                        goto error_out;
                }
        } else
                comp_dev->streampool = NULL;

        return 0;

error_out:
        _qat_comp_dev_config_clear(comp_dev);
        return ret;
}

int
qat_comp_dev_start(struct rte_compressdev *dev __rte_unused)
{
        return 0;
}

void
qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused)
{
}

int
qat_comp_dev_close(struct rte_compressdev *dev)
{
        int i;
        int ret = 0;
        struct qat_comp_dev_private *comp_dev = dev->data->dev_private;

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                ret = qat_comp_qp_release(dev, i);
                if (ret < 0)
                        return ret;
        }

        _qat_comp_dev_config_clear(comp_dev);

        return ret;
}

void
qat_comp_dev_info_get(struct rte_compressdev *dev,
                struct rte_compressdev_info *info)
{
        struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
        struct qat_pci_device *qat_dev = comp_dev->qat_dev;

        if (info != NULL) {
                info->max_nb_queue_pairs =
                                qat_qps_per_service(qat_dev,
                                                QAT_SERVICE_COMPRESSION);
                info->feature_flags = dev->feature_flags;
                info->capabilities = comp_dev->qat_dev_capabilities;
        }
}

static uint16_t
qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused,
                struct rte_comp_op **ops __rte_unused,
                uint16_t nb_ops __rte_unused)
{
        QAT_DP_LOG(ERR, "QAT PMD detected wrong FW version!");
        return 0;
}

static struct rte_compressdev_ops compress_qat_dummy_ops = {

        /* Device related operations */
        .dev_configure = NULL,
        .dev_start = NULL,
        .dev_stop = qat_comp_dev_stop,
        .dev_close = qat_comp_dev_close,
        .dev_infos_get = NULL,

        .stats_get = NULL,
        .stats_reset = qat_comp_stats_reset,
        .queue_pair_setup = NULL,
        .queue_pair_release = qat_comp_qp_release,

        /* Compression related operations */
        .private_xform_create = NULL,
        .private_xform_free = qat_comp_private_xform_free
};

static uint16_t
qat_comp_dequeue_burst(void *qp, struct rte_comp_op **ops, uint16_t nb_ops)
{
        return qat_dequeue_op_burst(qp, (void **)ops, qat_comp_process_response,
                        nb_ops);
}

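/* Dequeue handler installed for the first burst only: if the first completed
 * op reports ERR_CODE_QAT_COMP_WRONG_FW, the dummy enqueue/dequeue handlers
 * and dummy ops are installed; otherwise the regular dequeue handler takes
 * over for subsequent bursts.
 */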
static uint16_t
qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
                uint16_t nb_ops)
{
        uint16_t ret = qat_comp_dequeue_burst(qp, ops, nb_ops);
        struct qat_qp *tmp_qp = (struct qat_qp *)qp;
        struct qat_comp_dev_private *dev =
                        tmp_qp->qat_dev->pmd[QAT_SERVICE_COMPRESSION];

        if (ret) {
                if ((*ops)->debug_status ==
                                (uint64_t)ERR_CODE_QAT_COMP_WRONG_FW) {
                        dev->compressdev->enqueue_burst =
                                        qat_comp_pmd_enq_deq_dummy_op_burst;
                        dev->compressdev->dequeue_burst =
                                        qat_comp_pmd_enq_deq_dummy_op_burst;

                        dev->compressdev->dev_ops =
                                        &compress_qat_dummy_ops;
                        QAT_LOG(ERR,
                                "This QAT hardware doesn't support compression operation");

                } else {
                        dev->compressdev->dequeue_burst =
                                        qat_comp_dequeue_burst;
                }
        }
        return ret;
}

/* An rte_driver is needed in the registration of the device with compressdev.
 * The QAT PCI device's own rte_driver can't be used because its name covers
 * the whole PCI device with all of its services; this one simply holds a name
 * for the compression part of the PCI device.
 */
static const char qat_comp_drv_name[] = RTE_STR(COMPRESSDEV_NAME_QAT_PMD);
static const struct rte_driver compdev_qat_driver = {
        .name = qat_comp_drv_name,
        .alias = qat_comp_drv_name
};

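/* Create a compressdev instance for one QAT PCI device: register the device,
 * install the generation-specific ops and burst functions, publish the
 * capability table in a shared memzone and apply the qat_comp_enq_threshold
 * device argument if given.
 */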
static int
qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
{
        struct qat_device_info *qat_dev_instance =
                        &qat_pci_devs[qat_pci_dev->qat_dev_id];
        struct rte_compressdev_pmd_init_params init_params = {
                .name = "",
                .socket_id = qat_dev_instance->pci_dev->device.numa_node,
        };
        char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
        char capa_memz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
        struct rte_compressdev *compressdev;
        struct qat_comp_dev_private *comp_dev;
        struct qat_comp_capabilities_info capabilities_info;
        const struct rte_compressdev_capabilities *capabilities;
        const struct qat_comp_gen_dev_ops *qat_comp_gen_ops =
                        &qat_comp_gen_dev_ops[qat_pci_dev->qat_dev_gen];
        uint64_t capa_size;
        uint16_t sub_id = qat_dev_instance->pci_dev->id.subsystem_device_id;
        char *cmdline = NULL;

        snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
                        qat_pci_dev->name, "comp");
        QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);

        if (qat_pci_dev->qat_dev_gen == QAT_VQAT &&
                        sub_id != ADF_VQAT_DC_PCI_SUBSYSTEM_ID) {
                QAT_LOG(ERR, "Device (vqat instance) %s does not support compression",
                                name);
                return -EFAULT;
        }
        if (qat_comp_gen_ops->compressdev_ops == NULL) {
                QAT_LOG(DEBUG, "Device %s does not support compression", name);
                return -ENOTSUP;
        }

        /* Populate subset device to use in compressdev device creation */
        qat_dev_instance->comp_rte_dev.driver = &compdev_qat_driver;
        qat_dev_instance->comp_rte_dev.numa_node =
                        qat_dev_instance->pci_dev->device.numa_node;
        qat_dev_instance->comp_rte_dev.devargs = NULL;

        compressdev = rte_compressdev_pmd_create(name,
                        &(qat_dev_instance->comp_rte_dev),
                        sizeof(struct qat_comp_dev_private),
                        &init_params);

        if (compressdev == NULL)
                return -ENODEV;

        compressdev->dev_ops = qat_comp_gen_ops->compressdev_ops;

        compressdev->enqueue_burst = (compressdev_enqueue_pkt_burst_t)
                        qat_enqueue_comp_op_burst;
        compressdev->dequeue_burst = qat_comp_pmd_dequeue_first_op_burst;
        compressdev->feature_flags =
                        qat_comp_gen_ops->qat_comp_get_feature_flags();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        snprintf(capa_memz_name, RTE_COMPRESSDEV_NAME_MAX_LEN,
                        "QAT_COMP_CAPA_GEN_%d",
                        qat_pci_dev->qat_dev_gen);

        comp_dev = compressdev->data->dev_private;
        comp_dev->qat_dev = qat_pci_dev;
        comp_dev->compressdev = compressdev;

        capabilities_info = qat_comp_get_capa_info(qat_pci_dev->qat_dev_gen,
                        qat_pci_dev);

        if (capabilities_info.data == NULL) {
                QAT_LOG(DEBUG,
                        "QAT gen %d capabilities unknown, default to GEN1",
                        qat_pci_dev->qat_dev_gen);
                capabilities_info = qat_comp_get_capa_info(QAT_GEN1,
                                qat_pci_dev);
        }

        capabilities = capabilities_info.data;
        capa_size = capabilities_info.size;

        comp_dev->capa_mz = rte_memzone_lookup(capa_memz_name);
        if (comp_dev->capa_mz == NULL) {
                comp_dev->capa_mz = rte_memzone_reserve(capa_memz_name,
                                capa_size,
                                rte_socket_id(), 0);
        }
        if (comp_dev->capa_mz == NULL) {
                QAT_LOG(DEBUG,
                        "Error allocating memzone for capabilities, destroying PMD for %s",
                        name);
                memset(&qat_dev_instance->comp_rte_dev, 0,
                                sizeof(qat_dev_instance->comp_rte_dev));
                rte_compressdev_pmd_destroy(compressdev);
                return -EFAULT;
        }

        memcpy(comp_dev->capa_mz->addr, capabilities, capa_size);
        comp_dev->qat_dev_capabilities = comp_dev->capa_mz->addr;

        cmdline = qat_dev_cmdline_get_val(qat_pci_dev,
                        COMP_ENQ_THRESHOLD_NAME);
        if (cmdline) {
                comp_dev->min_enq_burst_threshold =
                        atoi(cmdline) > MAX_QP_THRESHOLD_SIZE ?
                        MAX_QP_THRESHOLD_SIZE :
                        atoi(cmdline);
        }
        qat_pci_dev->pmd[QAT_SERVICE_COMPRESSION] = comp_dev;

        QAT_LOG(DEBUG,
                "Created QAT COMP device %s as compressdev instance %d",
                        name, compressdev->data->dev_id);
        return 0;
}

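/* Tear down the compressdev instance of a QAT PCI device: free the
 * capability memzone (primary process only), close the device and destroy
 * the compressdev.
 */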
static int
qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev)
{
        struct qat_comp_dev_private *dev;

        if (qat_pci_dev == NULL)
                return -ENODEV;

        dev = qat_pci_dev->pmd[QAT_SERVICE_COMPRESSION];
        if (dev == NULL)
                return 0;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_memzone_free(dev->capa_mz);

        /* clean up any resources used by the device */
        qat_comp_dev_close(dev->compressdev);

        rte_compressdev_pmd_destroy(dev->compressdev);
        qat_pci_dev->pmd[QAT_SERVICE_COMPRESSION] = NULL;

        return 0;
}

RTE_INIT(qat_comp_init)
{
        qat_cmdline_defines[QAT_SERVICE_COMPRESSION] = arguments;
        qat_service[QAT_SERVICE_COMPRESSION].name = "compression";
        qat_service[QAT_SERVICE_COMPRESSION].dev_create = qat_comp_dev_create;
        qat_service[QAT_SERVICE_COMPRESSION].dev_destroy = qat_comp_dev_destroy;
}