/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2022 Intel Corporation
 */

#include <rte_common.h>
#include <rte_cycles.h>
#include <dev_driver.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
#ifdef BUILD_QAT_SYM
#include <rte_ether.h>
#endif

#include "qat_logs.h"
#include "qat_device.h"
#include "qat_qp.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_comp.h"

#define QAT_CQ_MAX_DEQ_RETRIES 10

#define ADF_MAX_DESC				4096
#define ADF_MIN_DESC				128

#ifdef BUILD_QAT_SYM
/* Cipher-CRC capability check test parameters */
static const uint8_t cipher_crc_cap_check_iv[] = {
	0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
	0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
};

static const uint8_t cipher_crc_cap_check_key[] = {
	0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
	0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
};

static const uint8_t cipher_crc_cap_check_plaintext[] = {
	/* Outer protocol header */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* Ethernet frame */
	0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
	0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
	/* CRC */
	0xFF, 0xFF, 0xFF, 0xFF
};

static const uint8_t cipher_crc_cap_check_ciphertext[] = {
	/* Outer protocol header */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* Ethernet frame */
	0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
	0x04, 0x03, 0x02, 0x01, 0xD6, 0xE2, 0x70, 0x5C,
	0xE6, 0x4D, 0xCC, 0x8C, 0x47, 0xB7, 0x09, 0xD6,
	/* CRC */
	0x54, 0x85, 0xF8, 0x32
};

static const uint8_t cipher_crc_cap_check_cipher_offset = 18;
static const uint8_t cipher_crc_cap_check_crc_offset = 6;
#endif

struct qat_qp_hw_spec_funcs*
	qat_qp_hw_spec[QAT_N_GENS];

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
	uint32_t queue_size_bytes);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct qat_pci_device *qat_dev,
	struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *queue_size_for_csr);
static int adf_configure_queues(struct qat_qp *queue,
	enum qat_device_gen qat_dev_gen);
static int adf_queue_arb_enable(struct qat_pci_device *qat_dev,
	struct qat_queue *txq, void *base_addr, rte_spinlock_t *lock);
static int adf_queue_arb_disable(enum qat_device_gen qat_dev_gen,
	struct qat_queue *txq, void *base_addr, rte_spinlock_t *lock);
static int qat_qp_build_ring_base(struct qat_pci_device *qat_dev,
	void *io_addr, struct qat_queue *queue);
static const struct rte_memzone *queue_dma_zone_reserve(const char *queue_name,
	uint32_t queue_size, int socket_id);
static int qat_qp_csr_setup(struct qat_pci_device *qat_dev, void *io_addr,
	struct qat_qp *qp);

int
qat_qp_setup(struct qat_pci_device *qat_dev,
		struct qat_qp **qp_addr,
		uint16_t queue_pair_id,
		struct qat_qp_config *qat_qp_conf)
{
	struct qat_qp *qp = NULL;
	struct rte_pci_device *pci_dev =
			qat_pci_devs[qat_dev->qat_dev_id].pci_dev;
	char op_cookie_pool_name[RTE_RING_NAMESIZE];
	struct qat_dev_hw_spec_funcs *ops_hw =
		qat_dev_hw_spec[qat_dev->qat_dev_gen];
	void *io_addr;
	uint32_t i;

	QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
		queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen);

	if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) ||
		(qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) {
		QAT_LOG(ERR, "Can't create qp for %u descriptors",
				qat_qp_conf->nb_descriptors);
		return -EINVAL;
	}

	if (ops_hw->qat_dev_get_transport_bar == NULL) {
		QAT_LOG(ERR,
			"QAT Internal Error: qat_dev_get_transport_bar not set for gen %d",
			qat_dev->qat_dev_gen);
		goto create_err;
	}

	io_addr = ops_hw->qat_dev_get_transport_bar(pci_dev)->addr;
	if (io_addr == NULL) {
		QAT_LOG(ERR, "Could not find VF config space "
				"(UIO driver attached?).");
		return -EINVAL;
	}

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("qat PMD qp metadata",
				sizeof(*qp), RTE_CACHE_LINE_SIZE,
				qat_qp_conf->socket_id);
	if (qp == NULL) {
		QAT_LOG(ERR, "Failed to alloc mem for qp struct");
		return -ENOMEM;
	}
	qp->nb_descriptors = qat_qp_conf->nb_descriptors;
	qp->op_cookies = rte_zmalloc_socket("qat PMD op cookie pointer",
			qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
			RTE_CACHE_LINE_SIZE, qat_qp_conf->socket_id);
	if (qp->op_cookies == NULL) {
		QAT_LOG(ERR, "Failed to alloc mem for cookie");
		rte_free(qp);
		return -ENOMEM;
	}

	qp->mmap_bar_addr = io_addr;
	qp->enqueued = qp->dequeued = 0;

	if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf,
					ADF_RING_DIR_TX) != 0) {
		QAT_LOG(ERR, "Tx queue create failed "
				"queue_pair_id=%u", queue_pair_id);
		goto create_err;
	}

	qp->max_inflights = ADF_MAX_INFLIGHTS(qp->tx_q.queue_size,
				ADF_BYTES_TO_MSG_SIZE(qp->tx_q.msg_size));

	if (qp->max_inflights < 2) {
		QAT_LOG(ERR, "Invalid num inflights");
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf,
					ADF_RING_DIR_RX) != 0) {
		QAT_LOG(ERR, "Rx queue create failed "
				"queue_pair_id=%hu", queue_pair_id);
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
					"%s%d_cookies_%s_qp%hu",
		pci_dev->driver->driver.name, qat_dev->qat_dev_id,
		qat_qp_conf->service_str, queue_pair_id);

	QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name);
	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
	if (qp->op_cookie_pool == NULL)
		qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
				qp->nb_descriptors,
				qat_qp_conf->cookie_size, 64, 0,
				NULL, NULL, NULL, NULL,
				pci_dev->device.numa_node,
				0);
	if (!qp->op_cookie_pool) {
		QAT_LOG(ERR, "QAT PMD Cannot create op mempool");
		qat_queue_delete(&(qp->tx_q));
		qat_queue_delete(&(qp->rx_q));
		goto create_err;
	}

	for (i = 0; i < qp->nb_descriptors; i++) {
		if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
			QAT_LOG(ERR, "QAT PMD Cannot get op_cookie");
			goto create_err;
		}
		memset(qp->op_cookies[i], 0, qat_qp_conf->cookie_size);
	}

	qp->qat_dev_gen = qat_dev->qat_dev_gen;
	qp->service_type = qat_qp_conf->hw->service_type;
	qp->qat_dev = qat_dev;

	QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s",
			queue_pair_id, op_cookie_pool_name);

	qat_qp_csr_setup(qat_dev, io_addr, qp);

	*qp_addr = qp;
	return 0;

create_err:
	if (qp) {
		rte_mempool_free(qp->op_cookie_pool);

		rte_free(qp->op_cookies);

		rte_free(qp);
	}

	return -EFAULT;
}

static int
qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
		struct qat_qp_config *qp_conf, uint8_t dir)
{
	const struct rte_memzone *qp_mz;
	struct rte_pci_device *pci_dev =
			qat_pci_devs[qat_dev->qat_dev_id].pci_dev;
	int ret = 0;
	uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
			qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
	uint32_t queue_size_bytes = qp_conf->nb_descriptors * desc_size;

	queue->hw_bundle_number = qp_conf->hw->hw_bundle_num;
	queue->hw_queue_number = (dir == ADF_RING_DIR_TX ?
			qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num);

	if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		QAT_LOG(ERR, "Invalid descriptor size %d", desc_size);
		return -EINVAL;
	}

	/*
	 * Allocate a memzone for the queue - create a unique name.
	 */
	snprintf(queue->memz_name, sizeof(queue->memz_name),
			"%s_%d_%s_%s_%d_%d",
		pci_dev->driver->driver.name, qat_dev->qat_dev_id,
		qp_conf->service_str, "qp_mem",
		queue->hw_bundle_number, queue->hw_queue_number);
	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
			pci_dev->device.numa_node);
	if (qp_mz == NULL) {
		QAT_LOG(ERR, "Failed to allocate ring memzone");
		return -ENOMEM;
	}

	queue->base_addr = (char *)qp_mz->addr;
	queue->base_phys_addr = qp_mz->iova;
	if (qat_qp_check_queue_alignment(queue->base_phys_addr,
			queue_size_bytes)) {
		QAT_LOG(ERR, "Invalid alignment on queue create"
					" 0x%"PRIx64,
					queue->base_phys_addr);
		ret = -EFAULT;
		goto queue_create_err;
	}

	if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors,
			&(queue->queue_size)) != 0) {
		QAT_LOG(ERR, "Invalid num inflights");
		ret = -EINVAL;
		goto queue_create_err;
	}

	queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1;
	queue->head = 0;
	queue->tail = 0;
	queue->msg_size = desc_size;

	/* For fast calculation of cookie index, relies on msg_size being 2^n */
	queue->trailz = rte_ctz32(desc_size);
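
	/*
	 * Worked example (illustrative values): with desc_size = 64,
	 * rte_ctz32(64) = 6, so a ring byte offset tail = 192 maps to
	 * cookie index 192 >> 6 = 3, i.e. the fourth descriptor slot.
	 * The shift is only equivalent to division because msg_size is a
	 * power of two, as the comment above notes.
	 */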

	/*
	 * Write an unused pattern to the queue memory.
	 */
	memset(queue->base_addr, 0x7F, queue_size_bytes);

	QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
		" nb msgs %u, msg_size %u, modulo mask %u",
			queue->memz_name,
			queue->queue_size, queue_size_bytes,
			qp_conf->nb_descriptors, desc_size,
			queue->modulo_mask);

	return 0;

queue_create_err:
	rte_memzone_free(qp_mz);
	return ret;
}

static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
		int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(queue_name);
	if (mz != NULL) {
		if (((size_t)queue_size <= mz->len) &&
				((socket_id == SOCKET_ID_ANY) ||
					(socket_id == mz->socket_id))) {
			QAT_LOG(DEBUG, "re-use memzone already "
					"allocated for %s", queue_name);
			return mz;
		}

		QAT_LOG(ERR, "Incompatible memzone already "
				"allocated %s, size %u, socket %d. "
				"Requested size %u, socket %d",
				queue_name, (uint32_t)mz->len,
				mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %d",
					queue_name, queue_size, socket_id);
	return rte_memzone_reserve_aligned(queue_name, queue_size,
		socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}
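
/*
 * Why the reservation above uses queue_size as its alignment: the ring base
 * IOVA must be naturally aligned to the ring size (this is what
 * qat_qp_check_queue_alignment() tests), since the per-generation
 * qat_qp_build_ring_base() combines base address and size when programming
 * the ring CSR. E.g. a 16 KiB ring must start on a 16 KiB boundary.
 */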

int
qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr)
{
	int ret;
	struct qat_qp *qp = *qp_addr;
	uint32_t i;

	if (qp == NULL) {
		QAT_LOG(DEBUG, "qp already freed");
		return 0;
	}

	QAT_LOG(DEBUG, "Free qp on qat_pci device %d",
				qp->qat_dev->qat_dev_id);

	/* Don't free memory if there are still responses to be processed */
	if ((qp->enqueued - qp->dequeued) == 0) {
		qat_queue_delete(&(qp->tx_q));
		qat_queue_delete(&(qp->rx_q));
	} else {
		return -EAGAIN;
	}

	ret = adf_queue_arb_disable(qat_dev_gen, &(qp->tx_q),
			qp->mmap_bar_addr, &qp->qat_dev->arb_csr_lock);
	if (ret)
		return ret;

	for (i = 0; i < qp->nb_descriptors; i++)
		rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);

	rte_mempool_free(qp->op_cookie_pool);

	rte_free(qp->op_cookies);
	rte_free(qp);
	*qp_addr = NULL;
	return 0;
}
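
/*
 * Illustrative caller-side sketch (not driver code): since qat_qp_release()
 * returns -EAGAIN while responses are still inflight, a teardown path would
 * typically keep draining the qp and retrying, e.g.:
 *
 *	while (qat_qp_release(gen, &qp) == -EAGAIN)
 *		(void)qat_dequeue_op_burst(qp, ops, resp_fn, burst);
 *
 * where ops, resp_fn and burst stand in for whatever the service layer
 * actually uses.
 */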

static void
qat_queue_delete(struct qat_queue *queue)
{
	const struct rte_memzone *mz;
	int status = 0;

	if (queue == NULL) {
		QAT_LOG(DEBUG, "Invalid queue");
		return;
	}
	QAT_LOG(DEBUG, "Free ring %d, memzone: %s",
			queue->hw_queue_number, queue->memz_name);

	mz = rte_memzone_lookup(queue->memz_name);
	if (mz != NULL) {
		/* Write an unused pattern to the queue memory. */
		memset(queue->base_addr, 0x7F, queue->queue_size);
		status = rte_memzone_free(mz);
		if (status != 0)
			QAT_LOG(ERR, "Error %d on freeing queue %s",
					status, queue->memz_name);
	} else {
		QAT_LOG(DEBUG, "queue %s doesn't exist",
				queue->memz_name);
	}
}

static int __rte_unused
adf_queue_arb_enable(struct qat_pci_device *qat_dev, struct qat_queue *txq,
		void *base_addr, rte_spinlock_t *lock)
{
	struct qat_qp_hw_spec_funcs *ops =
		qat_qp_hw_spec[qat_dev->qat_dev_gen];

	if (ops->qat_qp_adf_arb_enable == NULL)
		return -ENOTSUP;
	ops->qat_qp_adf_arb_enable(txq, base_addr, lock);
	return 0;
}

static int
adf_queue_arb_disable(enum qat_device_gen qat_dev_gen, struct qat_queue *txq,
		void *base_addr, rte_spinlock_t *lock)
{
	struct qat_qp_hw_spec_funcs *ops =
		qat_qp_hw_spec[qat_dev_gen];

	if (ops->qat_qp_adf_arb_disable == NULL)
		return -ENOTSUP;
	ops->qat_qp_adf_arb_disable(txq, base_addr, lock);
	return 0;
}

static int __rte_unused
qat_qp_build_ring_base(struct qat_pci_device *qat_dev, void *io_addr,
		struct qat_queue *queue)
{
	struct qat_qp_hw_spec_funcs *ops =
		qat_qp_hw_spec[qat_dev->qat_dev_gen];

	if (ops->qat_qp_build_ring_base == NULL)
		return -ENOTSUP;
	ops->qat_qp_build_ring_base(io_addr, queue);
	return 0;
}

int
qat_qps_per_service(struct qat_pci_device *qat_dev,
		enum qat_service_type service)
{
	struct qat_qp_hw_spec_funcs *ops =
		qat_qp_hw_spec[qat_dev->qat_dev_gen];

	if (ops->qat_qp_rings_per_service == NULL)
		return -ENOTSUP;
	return ops->qat_qp_rings_per_service(qat_dev, service);
}

const struct qat_qp_hw_data *
qat_qp_get_hw_data(struct qat_pci_device *qat_dev,
		enum qat_service_type service, uint16_t qp_id)
{
	struct qat_qp_hw_spec_funcs *ops =
		qat_qp_hw_spec[qat_dev->qat_dev_gen];

	if (ops->qat_qp_get_hw_data == NULL)
		return NULL;
	return ops->qat_qp_get_hw_data(qat_dev, service, qp_id);
}

int
qat_read_qp_config(struct qat_pci_device *qat_dev)
{
	struct qat_dev_hw_spec_funcs *ops_hw =
		qat_dev_hw_spec[qat_dev->qat_dev_gen];

	if (ops_hw->qat_dev_read_config == NULL)
		return -ENOTSUP;
	return ops_hw->qat_dev_read_config(qat_dev);
}

static int __rte_unused
adf_configure_queues(struct qat_qp *qp, enum qat_device_gen qat_dev_gen)
{
	struct qat_qp_hw_spec_funcs *ops =
		qat_qp_hw_spec[qat_dev_gen];

	if (ops->qat_qp_adf_configure_queues == NULL)
		return -ENOTSUP;
	ops->qat_qp_adf_configure_queues(qp);
	return 0;
}

static inline void
qat_qp_csr_write_head(enum qat_device_gen qat_dev_gen, struct qat_qp *qp,
			struct qat_queue *q, uint32_t new_head)
{
	struct qat_qp_hw_spec_funcs *ops =
		qat_qp_hw_spec[qat_dev_gen];

	/*
	 * Pointer check should be done during
	 * initialization
	 */
	ops->qat_qp_csr_write_head(qp, q, new_head);
}

static int
qat_qp_csr_setup(struct qat_pci_device *qat_dev,
		void *io_addr, struct qat_qp *qp)
{
	struct qat_qp_hw_spec_funcs *ops =
		qat_qp_hw_spec[qat_dev->qat_dev_gen];

	if (ops->qat_qp_csr_setup == NULL)
		return -ENOTSUP;
	ops->qat_qp_csr_setup(qat_dev, io_addr, qp);
	return 0;
}

static inline
void rxq_free_desc(enum qat_device_gen qat_dev_gen, struct qat_qp *qp,
				struct qat_queue *q)
{
	uint32_t old_head, new_head;
	uint32_t max_head;

	old_head = q->csr_head;
	new_head = q->head;
	max_head = qp->nb_descriptors * q->msg_size;

	/* write out free descriptors */
	void *cur_desc = (uint8_t *)q->base_addr + old_head;

	if (new_head < old_head) {
		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
		memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
	} else {
		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
	}
	q->nb_processed_responses = 0;
	q->csr_head = new_head;

	qat_qp_csr_write_head(qat_dev_gen, qp, q, new_head);
}
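
/*
 * The caller-side threshold (see qat_dequeue_op_burst) means head updates
 * are coalesced. Illustrative effect: with a threshold of 32, processing
 * 33 responses triggers a single CSR head write and one memset over
 * 33 * msg_size bytes here, rather than 33 separate MMIO doorbell writes.
 */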

static int
qat_qp_check_queue_alignment(uint64_t phys_addr, uint32_t queue_size_bytes)
{
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}
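
/*
 * Worked example (illustrative): for a 16 KiB ring, queue_size_bytes - 1 is
 * 0x3FFF, so a base IOVA of 0x1234c000 passes (0x1234c000 & 0x3FFF == 0)
 * while 0x1234c100 fails. The test presumes queue_size_bytes is a power of
 * two, which holds for the ring sizes accepted by adf_verify_queue_size().
 */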

static int
adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
		uint32_t *p_queue_size_for_csr)
{
	uint8_t i = ADF_MIN_RING_SIZE;

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) ==
				(uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
			*p_queue_size_for_csr = i;
			return 0;
		}
	QAT_LOG(ERR, "Invalid ring size %u", msg_size * msg_num);
	return -EINVAL;
}
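
/*
 * Example (assuming, as the ADF macros imply, that each CSR size code maps
 * to a power-of-two byte size): 1024 descriptors of 64 bytes each give a
 * 64 KiB ring, which is accepted only if some code i in
 * [ADF_MIN_RING_SIZE, ADF_MAX_RING_SIZE] satisfies
 * ADF_SIZE_TO_RING_SIZE_IN_BYTES(i) == 64 KiB; an in-between product such
 * as 24 KiB is rejected with -EINVAL.
 */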

static inline uint32_t
adf_modulo(uint32_t data, uint32_t modulo_mask)
{
	return data & modulo_mask;
}
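
/*
 * Example: with a 16 KiB ring, modulo_mask is 0x3FFF; advancing a tail of
 * 0x3FC0 by a 128-byte message gives (0x3FC0 + 0x80) & 0x3FFF = 0x40, so
 * the offset wraps to the start of the ring without any division.
 */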

uint16_t
qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
		void **ops, uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register int ret = -1;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	{
		/* dequeued can only be written by one thread, but it may not
		 * be this thread. As it's 4-byte aligned it will be read
		 * atomically here by any Intel CPU.
		 * enqueued can wrap before dequeued, but cannot
		 * lap it as var size of enq/deq (uint32_t) > var size of
		 * max_inflights (uint16_t). In reality inflights is never
		 * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
		 * On wrapping, the calculation still returns the correct
		 * positive value as all three vars are unsigned.
		 */
		uint32_t inflights =
			tmp_qp->enqueued - tmp_qp->dequeued;
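
		/*
		 * Numeric illustration of the wrap-safety described above:
		 * if enqueued has wrapped around to 5 while dequeued is
		 * still 0xFFFFFFFEu, then 5 - 0xFFFFFFFEu == 7 in uint32_t
		 * arithmetic, the true number of inflight requests.
		 */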

		if ((inflights + nb_ops) > tmp_qp->max_inflights) {
			nb_ops_possible = tmp_qp->max_inflights - inflights;
			if (nb_ops_possible == 0)
				return 0;
		}
		/* QAT has plenty of work queued already, so don't waste cycles
		 * enqueueing, wait until the application has gathered a bigger
		 * burst or some completed ops have been dequeued
		 */
		if (tmp_qp->min_enq_burst_threshold && inflights >
				QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
				tmp_qp->min_enq_burst_threshold) {
			tmp_qp->stats.threshold_hit_count++;
			return 0;
		}
	}

	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
		qat_sym_preprocess_requests(ops, nb_ops_possible);

	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));

	while (nb_ops_sent != nb_ops_possible) {
		ret = op_build_request(*ops, base_addr + tail,
				tmp_qp->op_cookies[tail >> queue->trailz],
				tmp_qp);

		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			/* This message cannot be enqueued */
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		}

		tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
		ops++;
		nb_ops_sent++;
	}
kick_tail:
	queue->tail = tail;
	tmp_qp->enqueued += nb_ops_sent;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	txq_write_tail(tmp_qp->qat_dev_gen, tmp_qp, queue);
	return nb_ops_sent;
}
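
/*
 * Illustrative service-layer usage (a sketch, not driver code): each service
 * supplies its own request builder, e.g.
 *
 *	nb = qat_enqueue_op_burst(qp, my_build_request, (void **)ops, n);
 *
 * where my_build_request is a hypothetical qat_op_build_request_t that
 * serializes one op into the descriptor slot it is handed and returns
 * non-zero if that op cannot be enqueued.
 */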

uint16_t
qat_dequeue_op_burst(void *qp, void **ops,
		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
{
	struct qat_queue *rx_queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t head;
	uint32_t op_resp_counter = 0, fw_resp_counter = 0;
	uint8_t *resp_msg;
	int nb_fw_responses;

	rx_queue = &(tmp_qp->rx_q);
	head = rx_queue->head;
	resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;

	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			op_resp_counter != nb_ops) {

		nb_fw_responses = qat_dequeue_process_response(
				ops, resp_msg,
				tmp_qp->op_cookies[head >> rx_queue->trailz],
				&tmp_qp->stats.dequeue_err_count);

		head = adf_modulo(head + rx_queue->msg_size,
				  rx_queue->modulo_mask);

		resp_msg = (uint8_t *)rx_queue->base_addr + head;

		if (nb_fw_responses) {
			/* only move on to next op if one was ready to return
			 * to API
			 */
			ops++;
			op_resp_counter++;
		}

		/* A compression op may be broken up into multiple fw requests.
		 * Only count fw responses as complete once ALL the responses
		 * associated with an op have been processed, as the cookie
		 * data from the first response must be available until
		 * finished with all firmware responses.
		 */
		fw_resp_counter += nb_fw_responses;

		rx_queue->nb_processed_responses++;
	}

	tmp_qp->dequeued += fw_resp_counter;
	tmp_qp->stats.dequeued_count += op_resp_counter;

	rx_queue->head = head;
	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
		rxq_free_desc(tmp_qp->qat_dev_gen, tmp_qp, rx_queue);

	QAT_DP_LOG(DEBUG, "Dequeue burst return: %u, QAT responses: %u",
			op_resp_counter, fw_resp_counter);

	return op_resp_counter;
}

/* This is almost the same as dequeue_op_burst, without the atomic, without
 * stats and without the op. Dequeues one response.
 */
static uint8_t
qat_cq_dequeue_response(struct qat_qp *qp, void *out_data)
{
	uint8_t result = 0;
	uint8_t retries = 0;
	struct qat_queue *queue = &(qp->rx_q);
	struct icp_qat_fw_comn_resp *resp_msg = (struct icp_qat_fw_comn_resp *)
			((uint8_t *)queue->base_addr + queue->head);

	while (retries++ < QAT_CQ_MAX_DEQ_RETRIES &&
			*(uint32_t *)resp_msg == ADF_RING_EMPTY_SIG) {
		/* loop waiting for response until we reach the timeout */
		rte_delay_ms(20);
	}

	if (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG) {
		/* response received */
		result = 1;

		/* check status flag */
		if (ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
				resp_msg->comn_hdr.comn_status) ==
				ICP_QAT_FW_COMN_STATUS_FLAG_OK) {
			/* success */
			memcpy(out_data, resp_msg, queue->msg_size);
		} else {
			memset(out_data, 0, queue->msg_size);
		}

		queue->head = adf_modulo(queue->head + queue->msg_size,
				queue->modulo_mask);
		rxq_free_desc(qp->qat_dev_gen, qp, queue);
	}

	return result;
}

/* Sends a NULL message and extracts the QAT fw version from the response.
 * Used to determine detailed capabilities based on the fw version number.
 * This assumes that there are no inflight messages, i.e. assumes there's space
 * on the qp, one message is sent and only one response collected.
 * Returns the fw version number, 0 for an unknown version, or a negative
 * error code.
 */
int
qat_cq_get_fw_version(struct qat_qp *qp)
{
	struct qat_queue *queue = &(qp->tx_q);
	uint8_t *base_addr = (uint8_t *)queue->base_addr;
	struct icp_qat_fw_comn_req null_msg;
	struct icp_qat_fw_comn_resp response;

	/* prepare the NULL request */
	memset(&null_msg, 0, sizeof(null_msg));
	null_msg.comn_hdr.hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	null_msg.comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
	null_msg.comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "NULL request", &null_msg, sizeof(null_msg));
#endif

	/* send the NULL request */
	memcpy(base_addr + queue->tail, &null_msg, sizeof(null_msg));
	queue->tail = adf_modulo(queue->tail + queue->msg_size,
			queue->modulo_mask);
	txq_write_tail(qp->qat_dev_gen, qp, queue);

	/* receive a response */
	if (qat_cq_dequeue_response(qp, &response)) {

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "NULL response:", &response,
				sizeof(response));
#endif
		/* if LW0 bit 24 is set - then the fw version was returned */
		if (QAT_FIELD_GET(response.comn_hdr.hdr_flags,
				ICP_QAT_FW_COMN_NULL_VERSION_FLAG_BITPOS,
				ICP_QAT_FW_COMN_NULL_VERSION_FLAG_MASK))
			return response.resrvd[0]; /* return LW4 */
		else
			return 0; /* not set - we don't know fw version */
	}

	QAT_LOG(ERR, "No response received");
	return -EINVAL;
}
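
/*
 * Caller-side sketch (illustrative, with hypothetical helpers): the
 * tri-state return is typically consumed as
 *
 *	int ver = qat_cq_get_fw_version(qp);
 *
 *	if (ver < 0)
 *		return ver;           // no response received
 *	else if (ver == 0)
 *		use_baseline_caps();  // fw predates version reporting
 *	else
 *		refine_caps(ver);     // per-version capability tuning
 *
 * use_baseline_caps() and refine_caps() are stand-ins, not driver APIs.
 */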

#ifdef BUILD_QAT_SYM
/* Sends an LA bulk req message to determine if a QAT device supports Cipher-CRC
 * offload. This assumes that there are no inflight messages, i.e. assumes
 * there's space on the qp, one message is sent and only one response
 * collected. The status bit of the response and returned data are checked.
 * Returns:
 *     1 if status bit indicates success and returned data matches expected
 *     data (i.e. Cipher-CRC supported)
 *     0 if status bit indicates error or returned data does not match expected
 *     data (i.e. Cipher-CRC not supported)
 *     Negative error code in case of error
 */
int
qat_cq_get_fw_cipher_crc_cap(struct qat_qp *qp)
{
	struct qat_queue *queue = &(qp->tx_q);
	uint8_t *base_addr = (uint8_t *)queue->base_addr;
	struct icp_qat_fw_la_bulk_req cipher_crc_cap_msg = {{0}};
	struct icp_qat_fw_comn_resp response = {{0}};
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct qat_sym_session *session;
	phys_addr_t phy_src_addr;
	uint64_t *src_data_addr;
	int ret;

	session = rte_zmalloc(NULL, sizeof(struct qat_sym_session), 0);
	if (session == NULL)
		return -EINVAL;

	/* Verify the session physical address is known */
	rte_iova_t session_paddr = rte_mem_virt2iova(session);
	if (session_paddr == 0 || session_paddr == RTE_BAD_IOVA) {
		QAT_LOG(ERR, "Session physical address unknown.");
		rte_free(session);
		return -EINVAL;
	}

	/* Prepare the LA bulk request */
	ret = qat_cipher_crc_cap_msg_sess_prepare(session,
					session_paddr,
					cipher_crc_cap_check_key,
					sizeof(cipher_crc_cap_check_key),
					qp->qat_dev_gen);
	if (ret < 0) {
		rte_free(session);
		/* Returning 0 here to allow qp setup to continue, but
		 * indicate that Cipher-CRC offload is not supported on the
		 * device
		 */
		return 0;
	}

	cipher_crc_cap_msg = session->fw_req;

	src_data_addr = rte_zmalloc(NULL,
					sizeof(cipher_crc_cap_check_plaintext),
					0);
	if (src_data_addr == NULL) {
		rte_free(session);
		return -EINVAL;
	}

	rte_memcpy(src_data_addr,
			cipher_crc_cap_check_plaintext,
			sizeof(cipher_crc_cap_check_plaintext));

	phy_src_addr = rte_mem_virt2iova(src_data_addr);
	if (phy_src_addr == 0 || phy_src_addr == RTE_BAD_IOVA) {
		QAT_LOG(ERR, "Source physical address unknown.");
		rte_free(src_data_addr);
		rte_free(session);
		return -EINVAL;
	}

	cipher_crc_cap_msg.comn_mid.src_data_addr = phy_src_addr;
	cipher_crc_cap_msg.comn_mid.src_length =
					sizeof(cipher_crc_cap_check_plaintext);
	cipher_crc_cap_msg.comn_mid.dest_data_addr = phy_src_addr;
	cipher_crc_cap_msg.comn_mid.dst_length =
					sizeof(cipher_crc_cap_check_plaintext);

	cipher_param = (void *)&cipher_crc_cap_msg.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param +
			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);

	rte_memcpy(cipher_param->u.cipher_IV_array,
			cipher_crc_cap_check_iv,
			sizeof(cipher_crc_cap_check_iv));

	cipher_param->cipher_offset = cipher_crc_cap_check_cipher_offset;
	cipher_param->cipher_length =
			sizeof(cipher_crc_cap_check_plaintext) -
			cipher_crc_cap_check_cipher_offset;
	auth_param->auth_off = cipher_crc_cap_check_crc_offset;
	auth_param->auth_len = sizeof(cipher_crc_cap_check_plaintext) -
				cipher_crc_cap_check_crc_offset -
				RTE_ETHER_CRC_LEN;

	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
			cipher_crc_cap_msg.comn_hdr.serv_specif_flags,
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "LA Bulk request", &cipher_crc_cap_msg,
			sizeof(cipher_crc_cap_msg));
#endif

	/* Send the cipher_crc_cap_msg request */
	memcpy(base_addr + queue->tail,
	       &cipher_crc_cap_msg,
	       sizeof(cipher_crc_cap_msg));
	queue->tail = adf_modulo(queue->tail + queue->msg_size,
			queue->modulo_mask);
	txq_write_tail(qp->qat_dev_gen, qp, queue);

	/* Check for response and verify data is same as ciphertext */
	if (qat_cq_dequeue_response(qp, &response)) {
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "LA response:", &response,
				sizeof(response));
#endif

		if (memcmp(src_data_addr,
				cipher_crc_cap_check_ciphertext,
				sizeof(cipher_crc_cap_check_ciphertext)) != 0)
			ret = 0; /* Cipher-CRC offload not supported */
		else
			ret = 1;
	} else {
		ret = -EINVAL;
	}

	rte_free(src_data_addr);
	rte_free(session);
	return ret;
}
#endif

__rte_weak int
qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
			  void *op_cookie __rte_unused,
			  uint64_t *dequeue_err_count __rte_unused)
{
	return 0;
}