1f0f369a6SFan Zhang /* SPDX-License-Identifier: BSD-3-Clause
2f0f369a6SFan Zhang * Copyright(c) 2021 Intel Corporation
3f0f369a6SFan Zhang */
4f0f369a6SFan Zhang
5f0f369a6SFan Zhang #include "qat_device.h"
6f0f369a6SFan Zhang #include "qat_qp.h"
7f0f369a6SFan Zhang #include "qat_crypto.h"
8f0f369a6SFan Zhang #include "qat_sym.h"
9f0f369a6SFan Zhang #include "qat_asym.h"
10f0f369a6SFan Zhang
11f0f369a6SFan Zhang int
qat_cryptodev_config(__rte_unused struct rte_cryptodev * dev,__rte_unused struct rte_cryptodev_config * config)12f0f369a6SFan Zhang qat_cryptodev_config(__rte_unused struct rte_cryptodev *dev,
13f0f369a6SFan Zhang __rte_unused struct rte_cryptodev_config *config)
14f0f369a6SFan Zhang {
15f0f369a6SFan Zhang return 0;
16f0f369a6SFan Zhang }
17f0f369a6SFan Zhang
18f0f369a6SFan Zhang int
qat_cryptodev_start(__rte_unused struct rte_cryptodev * dev)19f0f369a6SFan Zhang qat_cryptodev_start(__rte_unused struct rte_cryptodev *dev)
20f0f369a6SFan Zhang {
21f0f369a6SFan Zhang return 0;
22f0f369a6SFan Zhang }
23f0f369a6SFan Zhang
24f0f369a6SFan Zhang void
qat_cryptodev_stop(__rte_unused struct rte_cryptodev * dev)25f0f369a6SFan Zhang qat_cryptodev_stop(__rte_unused struct rte_cryptodev *dev)
26f0f369a6SFan Zhang {
27f0f369a6SFan Zhang }
28f0f369a6SFan Zhang
29f0f369a6SFan Zhang int
qat_cryptodev_close(struct rte_cryptodev * dev)30f0f369a6SFan Zhang qat_cryptodev_close(struct rte_cryptodev *dev)
31f0f369a6SFan Zhang {
32f0f369a6SFan Zhang int i, ret;
33f0f369a6SFan Zhang
34f0f369a6SFan Zhang for (i = 0; i < dev->data->nb_queue_pairs; i++) {
35f0f369a6SFan Zhang ret = dev->dev_ops->queue_pair_release(dev, i);
36f0f369a6SFan Zhang if (ret < 0)
37f0f369a6SFan Zhang return ret;
38f0f369a6SFan Zhang }
39f0f369a6SFan Zhang
40f0f369a6SFan Zhang return 0;
41f0f369a6SFan Zhang }
42f0f369a6SFan Zhang
43f0f369a6SFan Zhang void
qat_cryptodev_info_get(struct rte_cryptodev * dev,struct rte_cryptodev_info * info)44f0f369a6SFan Zhang qat_cryptodev_info_get(struct rte_cryptodev *dev,
45f0f369a6SFan Zhang struct rte_cryptodev_info *info)
46f0f369a6SFan Zhang {
47f0f369a6SFan Zhang struct qat_cryptodev_private *qat_private = dev->data->dev_private;
48f0f369a6SFan Zhang struct qat_pci_device *qat_dev = qat_private->qat_dev;
49f0f369a6SFan Zhang enum qat_service_type service_type = qat_private->service_type;
50f0f369a6SFan Zhang
51f0f369a6SFan Zhang if (info != NULL) {
52f0f369a6SFan Zhang info->max_nb_queue_pairs =
53f0f369a6SFan Zhang qat_qps_per_service(qat_dev, service_type);
54f0f369a6SFan Zhang info->feature_flags = dev->feature_flags;
55f0f369a6SFan Zhang info->capabilities = qat_private->qat_dev_capabilities;
56f0f369a6SFan Zhang if (service_type == QAT_SERVICE_ASYMMETRIC)
57f0f369a6SFan Zhang info->driver_id = qat_asym_driver_id;
58f0f369a6SFan Zhang
59f0f369a6SFan Zhang if (service_type == QAT_SERVICE_SYMMETRIC)
60f0f369a6SFan Zhang info->driver_id = qat_sym_driver_id;
61f0f369a6SFan Zhang /* No limit of number of sessions */
62f0f369a6SFan Zhang info->sym.max_nb_sessions = 0;
63f0f369a6SFan Zhang }
64f0f369a6SFan Zhang }
65f0f369a6SFan Zhang
66f0f369a6SFan Zhang void
qat_cryptodev_stats_get(struct rte_cryptodev * dev,struct rte_cryptodev_stats * stats)67f0f369a6SFan Zhang qat_cryptodev_stats_get(struct rte_cryptodev *dev,
68f0f369a6SFan Zhang struct rte_cryptodev_stats *stats)
69f0f369a6SFan Zhang {
70f0f369a6SFan Zhang struct qat_common_stats qat_stats = {0};
71f0f369a6SFan Zhang struct qat_cryptodev_private *qat_priv;
72f0f369a6SFan Zhang
73f0f369a6SFan Zhang if (stats == NULL || dev == NULL) {
74f0f369a6SFan Zhang QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
75f0f369a6SFan Zhang return;
76f0f369a6SFan Zhang }
77f0f369a6SFan Zhang qat_priv = dev->data->dev_private;
78f0f369a6SFan Zhang
79f0f369a6SFan Zhang qat_stats_get(qat_priv->qat_dev, &qat_stats, qat_priv->service_type);
80f0f369a6SFan Zhang stats->enqueued_count = qat_stats.enqueued_count;
81f0f369a6SFan Zhang stats->dequeued_count = qat_stats.dequeued_count;
82f0f369a6SFan Zhang stats->enqueue_err_count = qat_stats.enqueue_err_count;
83f0f369a6SFan Zhang stats->dequeue_err_count = qat_stats.dequeue_err_count;
84f0f369a6SFan Zhang }
85f0f369a6SFan Zhang
86f0f369a6SFan Zhang void
qat_cryptodev_stats_reset(struct rte_cryptodev * dev)87f0f369a6SFan Zhang qat_cryptodev_stats_reset(struct rte_cryptodev *dev)
88f0f369a6SFan Zhang {
89f0f369a6SFan Zhang struct qat_cryptodev_private *qat_priv;
90f0f369a6SFan Zhang
91f0f369a6SFan Zhang if (dev == NULL) {
92f0f369a6SFan Zhang QAT_LOG(ERR, "invalid cryptodev ptr %p", dev);
93f0f369a6SFan Zhang return;
94f0f369a6SFan Zhang }
95f0f369a6SFan Zhang qat_priv = dev->data->dev_private;
96f0f369a6SFan Zhang
97f0f369a6SFan Zhang qat_stats_reset(qat_priv->qat_dev, qat_priv->service_type);
98f0f369a6SFan Zhang
99f0f369a6SFan Zhang }
100f0f369a6SFan Zhang
101f0f369a6SFan Zhang int
qat_cryptodev_qp_release(struct rte_cryptodev * dev,uint16_t queue_pair_id)102f0f369a6SFan Zhang qat_cryptodev_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
103f0f369a6SFan Zhang {
104f0f369a6SFan Zhang struct qat_cryptodev_private *qat_private = dev->data->dev_private;
105f0f369a6SFan Zhang struct qat_pci_device *qat_dev = qat_private->qat_dev;
106f0f369a6SFan Zhang enum qat_device_gen qat_dev_gen = qat_dev->qat_dev_gen;
107f0f369a6SFan Zhang enum qat_service_type service_type = qat_private->service_type;
108f0f369a6SFan Zhang
109f0f369a6SFan Zhang QAT_LOG(DEBUG, "Release %s qp %u on device %d",
110f0f369a6SFan Zhang qat_service_get_str(service_type),
111f0f369a6SFan Zhang queue_pair_id, dev->data->dev_id);
112f0f369a6SFan Zhang
113f0f369a6SFan Zhang qat_private->qat_dev->qps_in_use[service_type][queue_pair_id] = NULL;
114f0f369a6SFan Zhang
115f0f369a6SFan Zhang return qat_qp_release(qat_dev_gen, (struct qat_qp **)
116f0f369a6SFan Zhang &(dev->data->queue_pairs[queue_pair_id]));
117f0f369a6SFan Zhang }
118f0f369a6SFan Zhang
/*
 * Queue-pair setup op: create queue pair qp_id for this device's
 * configured service (symmetric or asymmetric).
 *
 * Releases any qp already present at qp_id, validates the id against
 * the number of qps the hardware provides for the service, builds the
 * ring pair with per-descriptor op cookies, and (once per device, if
 * enabled) probes firmware for the cipher-CRC offload capability.
 *
 * Returns 0 on success, -EBUSY if an existing qp could not be
 * released, -EINVAL for a bad qp_id, or the error from qp/probe setup.
 */
int
qat_cryptodev_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
	const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct qat_qp **qp_addr =
		(struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
	struct qat_cryptodev_private *qat_private = dev->data->dev_private;
	struct qat_pci_device *qat_dev = qat_private->qat_dev;
	enum qat_service_type service_type = qat_private->service_type;
	struct qat_qp_config qat_qp_conf = {0};
	struct qat_qp *qp;
	int ret = 0;
	uint32_t i;

	/* If qp is already in use free ring memory and qp metadata. */
	if (*qp_addr != NULL) {
		ret = dev->dev_ops->queue_pair_release(dev, qp_id);
		if (ret < 0)
			return -EBUSY;
	}
	if (qp_id >= qat_qps_per_service(qat_dev, service_type)) {
		QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
		return -EINVAL;
	}

	/* Map qp_id to the hardware ring-pair data for this service. */
	qat_qp_conf.hw = qat_qp_get_hw_data(qat_dev, service_type,
			qp_id);
	if (qat_qp_conf.hw == NULL) {
		QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
		return -EINVAL;
	}

	/* Cookie size depends on which service owns this qp. */
	qat_qp_conf.cookie_size = service_type == QAT_SERVICE_SYMMETRIC ?
			sizeof(struct qat_sym_op_cookie) :
			sizeof(struct qat_asym_op_cookie);
	qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
	qat_qp_conf.socket_id = socket_id;
	qat_qp_conf.service_str = qat_service_get_str(service_type);

	ret = qat_qp_setup(qat_dev, qp_addr, qp_id, &qat_qp_conf);
	if (ret != 0)
		return ret;

	/* store a link to the qp in the qat_pci_device */
	qat_dev->qps_in_use[service_type][qp_id] = *qp_addr;

	qp = (struct qat_qp *)*qp_addr;
	qp->min_enq_burst_threshold = qat_private->min_enq_burst_threshold;

	/* Pre-initialise every descriptor's op cookie for the service. */
	for (i = 0; i < qp->nb_descriptors; i++) {
		if (service_type == QAT_SERVICE_SYMMETRIC)
			qat_sym_init_op_cookie(qp->op_cookies[i]);
		else
			qat_asym_init_op_cookie(qp->op_cookies[i]);
	}

	if (qat_private->cipher_crc_offload_enable) {
		/*
		 * Query firmware for cipher-CRC support; a negative
		 * return is a probe failure, non-zero means supported.
		 */
		ret = qat_cq_get_fw_cipher_crc_cap(qp);
		if (ret < 0) {
			/* Probe failed: tear down the qp we just built. */
			qat_cryptodev_qp_release(dev, qp_id);
			return ret;
		}

		if (ret != 0)
			QAT_LOG(DEBUG, "Cipher CRC supported on QAT device");
		else
			QAT_LOG(DEBUG, "Cipher CRC not supported on QAT device");

		/* Only send the cipher crc offload capability message once */
		qat_private->cipher_crc_offload_enable = 0;
		/* Set cipher crc offload indicator */
		if (ret)
			qat_private->internal_capabilities |=
					QAT_SYM_CAP_CIPHER_CRC;
	}

	return 0;
}
197