/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#include "qat_device.h"
#include "qat_qp.h"
#include "qat_crypto.h"
#include "qat_sym.h"
#include "qat_asym.h"

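/* Device configure op: no device-level configuration is needed, the real
 * setup happens per queue pair in qat_cryptodev_qp_setup().
 */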
int
qat_cryptodev_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

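/* Device start op: nothing to do, only report success. */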
int
qat_cryptodev_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

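/* Device stop op: nothing to do. */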
void
qat_cryptodev_stop(__rte_unused struct rte_cryptodev *dev)
{
}

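/* Device close op: release every queue pair owned by the device. */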
int
qat_cryptodev_close(struct rte_cryptodev *dev)
{
	int i, ret;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = dev->dev_ops->queue_pair_release(dev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

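/*
 * Device info op: report queue pair limits, feature flags, capabilities and
 * the driver id matching the configured service (symmetric or asymmetric).
 */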
void
qat_cryptodev_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *info)
{
	struct qat_cryptodev_private *qat_private = dev->data->dev_private;
	struct qat_pci_device *qat_dev = qat_private->qat_dev;
	enum qat_service_type service_type = qat_private->service_type;

	if (info != NULL) {
		info->max_nb_queue_pairs =
			qat_qps_per_service(qat_dev, service_type);
		info->feature_flags = dev->feature_flags;
		info->capabilities = qat_private->qat_dev_capabilities;
		if (service_type == QAT_SERVICE_ASYMMETRIC)
			info->driver_id = qat_asym_driver_id;

		if (service_type == QAT_SERVICE_SYMMETRIC)
			info->driver_id = qat_sym_driver_id;
		/* No limit on the number of sessions */
		info->sym.max_nb_sessions = 0;
	}
}

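/* Stats op: translate the per-service QAT counters into cryptodev stats. */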
void
qat_cryptodev_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	struct qat_common_stats qat_stats = {0};
	struct qat_cryptodev_private *qat_priv;

	if (stats == NULL || dev == NULL) {
		QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
		return;
	}
	qat_priv = dev->data->dev_private;

	qat_stats_get(qat_priv->qat_dev, &qat_stats, qat_priv->service_type);
	stats->enqueued_count = qat_stats.enqueued_count;
	stats->dequeued_count = qat_stats.dequeued_count;
	stats->enqueue_err_count = qat_stats.enqueue_err_count;
	stats->dequeue_err_count = qat_stats.dequeue_err_count;
}

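/* Stats reset op: clear the QAT counters for this device's service type. */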
void
qat_cryptodev_stats_reset(struct rte_cryptodev *dev)
{
	struct qat_cryptodev_private *qat_priv;

	if (dev == NULL) {
		QAT_LOG(ERR, "invalid cryptodev ptr %p", dev);
		return;
	}
	qat_priv = dev->data->dev_private;

	qat_stats_reset(qat_priv->qat_dev, qat_priv->service_type);
}

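/*
 * Queue pair release op: drop the qp from the device's in-use table and free
 * its ring memory and metadata.
 */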
int
qat_cryptodev_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct qat_cryptodev_private *qat_private = dev->data->dev_private;
	struct qat_pci_device *qat_dev = qat_private->qat_dev;
	enum qat_device_gen qat_dev_gen = qat_dev->qat_dev_gen;
	enum qat_service_type service_type = qat_private->service_type;

	QAT_LOG(DEBUG, "Release %s qp %u on device %d",
			qat_service_get_str(service_type),
			queue_pair_id, dev->data->dev_id);

	qat_private->qat_dev->qps_in_use[service_type][queue_pair_id] = NULL;

	return qat_qp_release(qat_dev_gen, (struct qat_qp **)
			&(dev->data->queue_pairs[queue_pair_id]));
}

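/*
 * Queue pair setup op: validate the qp id, build the qp configuration for the
 * configured service, create the rings and initialise the per-descriptor op
 * cookies.
 */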
int
qat_cryptodev_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct qat_qp **qp_addr =
			(struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
	struct qat_cryptodev_private *qat_private = dev->data->dev_private;
	struct qat_pci_device *qat_dev = qat_private->qat_dev;
	enum qat_service_type service_type = qat_private->service_type;
	struct qat_qp_config qat_qp_conf = {0};
	struct qat_qp *qp;
	int ret = 0;
	uint32_t i;

	/* If the qp is already in use, free its ring memory and metadata. */
	if (*qp_addr != NULL) {
		ret = dev->dev_ops->queue_pair_release(dev, qp_id);
		if (ret < 0)
			return -EBUSY;
	}
	if (qp_id >= qat_qps_per_service(qat_dev, service_type)) {
		QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
		return -EINVAL;
	}

	qat_qp_conf.hw = qat_qp_get_hw_data(qat_dev, service_type,
			qp_id);
	if (qat_qp_conf.hw == NULL) {
		QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
		return -EINVAL;
	}

	qat_qp_conf.cookie_size = service_type == QAT_SERVICE_SYMMETRIC ?
			sizeof(struct qat_sym_op_cookie) :
			sizeof(struct qat_asym_op_cookie);
	qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
	qat_qp_conf.socket_id = socket_id;
	qat_qp_conf.service_str = qat_service_get_str(service_type);

	ret = qat_qp_setup(qat_dev, qp_addr, qp_id, &qat_qp_conf);
	if (ret != 0)
		return ret;

	/* Store a link to the qp in the qat_pci_device. */
	qat_dev->qps_in_use[service_type][qp_id] = *qp_addr;

	qp = (struct qat_qp *)*qp_addr;
	qp->min_enq_burst_threshold = qat_private->min_enq_burst_threshold;

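	/* Initialise the per-descriptor op cookies for the configured service. */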
	for (i = 0; i < qp->nb_descriptors; i++) {
		if (service_type == QAT_SERVICE_SYMMETRIC)
			qat_sym_init_op_cookie(qp->op_cookies[i]);
		else
			qat_asym_init_op_cookie(qp->op_cookies[i]);
	}

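	/*
	 * If Cipher-CRC offload was requested, query the firmware once for the
	 * capability and record the result in the internal capability flags.
	 */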
	if (qat_private->cipher_crc_offload_enable) {
		ret = qat_cq_get_fw_cipher_crc_cap(qp);
		if (ret < 0) {
			qat_cryptodev_qp_release(dev, qp_id);
			return ret;
		}

		if (ret != 0)
			QAT_LOG(DEBUG, "Cipher CRC supported on QAT device");
		else
			QAT_LOG(DEBUG, "Cipher CRC not supported on QAT device");

		/* Only send the cipher crc offload capability message once */
		qat_private->cipher_crc_offload_enable = 0;
		/* Set cipher crc offload indicator */
		if (ret)
			qat_private->internal_capabilities |=
						QAT_SYM_CAP_CIPHER_CRC;
	}

	return 0;
}