xref: /dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.c (revision b53d106d34b5c638f5a2cbdfee0da5bd42d4383f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2021 Intel Corporation
3  */
4 
5 #include <rte_bus_vdev.h>
6 #include <rte_common.h>
7 #include <rte_cryptodev.h>
8 
9 #include "ipsec_mb_private.h"
10 
/* Per-lcore IPsec multi-buffer manager handle (freed in ipsec_mb_remove). */
RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);

/* Per-PMD-type data (ops, feature flags, dequeue fn), indexed by
 * enum ipsec_mb_pmd_types; presumably filled in by each PMD's init —
 * only read here.
 */
struct ipsec_mb_internals ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
/* Log type id used by the IPSEC_MB_LOG macro. */
int ipsec_mb_logtype_driver;
/* CPU vector instruction mode shared by all ipsec_mb devices;
 * detected once on first ipsec_mb_create().
 */
enum ipsec_mb_vector_mode vector_mode;
16 
17 /**
18  * Generic burst enqueue, place crypto operations on ingress queue for
19  * processing.
20  *
21  * @param __qp         Queue Pair to process
22  * @param ops          Crypto operations for processing
23  * @param nb_ops       Number of crypto operations for processing
24  *
25  * @return
26  * - Number of crypto operations enqueued
27  */
28 static uint16_t
29 ipsec_mb_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
30 		uint16_t nb_ops)
31 {
32 	struct ipsec_mb_qp *qp = __qp;
33 
34 	unsigned int nb_enqueued;
35 
36 	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
37 			(void **)ops, nb_ops, NULL);
38 
39 	qp->stats.enqueued_count += nb_enqueued;
40 	qp->stats.enqueue_err_count += nb_ops - nb_enqueued;
41 
42 	return nb_enqueued;
43 }
44 
45 int
46 ipsec_mb_create(struct rte_vdev_device *vdev,
47 	enum ipsec_mb_pmd_types pmd_type)
48 {
49 	struct rte_cryptodev *dev;
50 	struct ipsec_mb_dev_private *internals;
51 	struct ipsec_mb_internals *pmd_data = &ipsec_mb_pmds[pmd_type];
52 	struct rte_cryptodev_pmd_init_params init_params = {};
53 	const char *name, *args;
54 	int retval;
55 
56 	if (vector_mode == IPSEC_MB_NOT_SUPPORTED) {
57 		/* Check CPU for supported vector instruction set */
58 		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
59 			vector_mode = IPSEC_MB_AVX512;
60 		else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
61 			vector_mode = IPSEC_MB_AVX2;
62 		else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
63 			vector_mode = IPSEC_MB_AVX;
64 		else
65 			vector_mode = IPSEC_MB_SSE;
66 	}
67 
68 	init_params.private_data_size = sizeof(struct ipsec_mb_dev_private) +
69 		pmd_data->internals_priv_size;
70 	init_params.max_nb_queue_pairs =
71 		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS;
72 	init_params.socket_id = rte_socket_id();
73 
74 	name = rte_vdev_device_name(vdev);
75 	if (name == NULL)
76 		return -EINVAL;
77 
78 	args = rte_vdev_device_args(vdev);
79 
80 	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
81 	if (retval) {
82 		IPSEC_MB_LOG(
83 		    ERR, "Failed to parse initialisation arguments[%s]", args);
84 		return -EINVAL;
85 	}
86 
87 	dev = rte_cryptodev_pmd_create(name, &vdev->device, &init_params);
88 	if (dev == NULL) {
89 		IPSEC_MB_LOG(ERR, "driver %s: create failed",
90 			     init_params.name);
91 		return -ENODEV;
92 	}
93 
94 	/* Set vector instructions mode supported */
95 	internals = dev->data->dev_private;
96 	internals->pmd_type = pmd_type;
97 	internals->max_nb_queue_pairs = init_params.max_nb_queue_pairs;
98 
99 	dev->driver_id = ipsec_mb_get_driver_id(pmd_type);
100 	if (dev->driver_id == UINT8_MAX) {
101 		IPSEC_MB_LOG(ERR, "driver %s: create failed",
102 			     init_params.name);
103 		return -ENODEV;
104 	}
105 	dev->dev_ops = ipsec_mb_pmds[pmd_type].ops;
106 	dev->enqueue_burst = ipsec_mb_enqueue_burst;
107 	dev->dequeue_burst = ipsec_mb_pmds[pmd_type].dequeue_burst;
108 	dev->feature_flags = pmd_data->feature_flags;
109 
110 	if (pmd_data->dev_config) {
111 		retval = (*pmd_data->dev_config)(dev);
112 		if (retval < 0) {
113 			IPSEC_MB_LOG(ERR,
114 				"Failed to configure device %s", name);
115 			rte_cryptodev_pmd_destroy(dev);
116 			return retval;
117 		}
118 	}
119 
120 	switch (vector_mode) {
121 	case IPSEC_MB_AVX512:
122 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
123 		break;
124 	case IPSEC_MB_AVX2:
125 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
126 		break;
127 	case IPSEC_MB_AVX:
128 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
129 		break;
130 	case IPSEC_MB_SSE:
131 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
132 		break;
133 	default:
134 		break;
135 	}
136 
137 	rte_cryptodev_pmd_probing_finish(dev);
138 
139 	IPSEC_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
140 		     imb_get_version_str());
141 
142 	return 0;
143 }
144 
145 int
146 ipsec_mb_remove(struct rte_vdev_device *vdev)
147 {
148 	struct rte_cryptodev *cryptodev;
149 	const char *name;
150 	int qp_id;
151 
152 	name = rte_vdev_device_name(vdev);
153 	if (name == NULL)
154 		return -EINVAL;
155 
156 	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
157 	if (cryptodev == NULL)
158 		return -ENODEV;
159 
160 	if (RTE_PER_LCORE(mb_mgr)) {
161 		free_mb_mgr(RTE_PER_LCORE(mb_mgr));
162 		RTE_PER_LCORE(mb_mgr) = NULL;
163 	}
164 
165 	if (cryptodev->security_ctx) {
166 		rte_free(cryptodev->security_ctx);
167 		cryptodev->security_ctx = NULL;
168 	}
169 #ifdef AESNI_MB_DOCSIS_SEC_ENABLED
170 	rte_free(cryptodev->security_ctx);
171 	cryptodev->security_ctx = NULL;
172 #endif
173 
174 	for (qp_id = 0; qp_id < cryptodev->data->nb_queue_pairs; qp_id++)
175 		ipsec_mb_qp_release(cryptodev, qp_id);
176 
177 	return rte_cryptodev_pmd_destroy(cryptodev);
178 }
179