/* xref: /dpdk/drivers/crypto/cnxk/cn10k_ipsec.c (revision f852c95807f37f390e21874fbfc681442ad865f6) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <cryptodev_pmd.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_udp.h>

#include "cn10k_cryptodev_ops.h"
#include "cn10k_cryptodev_sec.h"
#include "cn10k_ipsec.h"
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#include "cnxk_ipsec.h"
#include "cnxk_security.h"

#include "roc_api.h"

static int
cn10k_ipsec_outb_sa_create(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf,
			   struct rte_security_ipsec_xform *ipsec_xfrm,
			   struct rte_crypto_sym_xform *crypto_xfrm,
			   struct cn10k_sec_session *sec_sess)
{
	union roc_ot_ipsec_outb_param1 param1;
	struct roc_ot_ipsec_outb_sa *sa_dptr;
	struct cnxk_ipsec_outb_rlens rlens;
	struct cn10k_ipsec_sa *sa;
	union cpt_inst_w4 inst_w4;
	void *out_sa;
	int ret = 0;

	sa = &sec_sess->sa;
	out_sa = &sa->out_sa;

	/* Allocate memory to be used as dptr for CPT ucode WRITE_SA op */
	sa_dptr = plt_zmalloc(sizeof(struct roc_ot_ipsec_outb_sa), 8);
	if (sa_dptr == NULL) {
		plt_err("Couldn't allocate memory for SA dptr");
		return -ENOMEM;
	}

	/* Translate security parameters to SA */
	ret = cnxk_ot_ipsec_outb_sa_fill(sa_dptr, ipsec_xfrm, crypto_xfrm);
	if (ret) {
		plt_err("Could not fill outbound session parameters");
		goto sa_dptr_free;
	}

	sec_sess->inst.w7 = cpt_inst_w7_get(roc_cpt, out_sa);

#ifdef LA_IPSEC_DEBUG
	/* Use IV from application in debug mode */
	if (ipsec_xfrm->options.iv_gen_disable == 1) {
		sa_dptr->w2.s.iv_src = ROC_IE_OT_SA_IV_SRC_FROM_SA;
		if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			sec_sess->iv_offset = crypto_xfrm->aead.iv.offset;
			sec_sess->iv_length = crypto_xfrm->aead.iv.length;
		} else if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
			sec_sess->iv_offset = crypto_xfrm->cipher.iv.offset;
			sec_sess->iv_length = crypto_xfrm->cipher.iv.length;
		} else {
			sec_sess->iv_offset = crypto_xfrm->auth.iv.offset;
			sec_sess->iv_length = crypto_xfrm->auth.iv.length;
		}
	}
#else
	if (ipsec_xfrm->options.iv_gen_disable != 0) {
		plt_err("Application provided IV not supported");
		ret = -ENOTSUP;
		goto sa_dptr_free;
	}
#endif

	sec_sess->ipsec.is_outbound = 1;

	/* Get Rlen calculation data */
	ret = cnxk_ipsec_outb_rlens_get(&rlens, ipsec_xfrm, crypto_xfrm);
	if (ret)
		goto sa_dptr_free;

	sec_sess->max_extended_len = rlens.max_extended_len;

	/* pre-populate CPT INST word 4 */
	inst_w4.u64 = 0;
	inst_w4.s.opcode_major = ROC_IE_OT_MAJOR_OP_PROCESS_OUTBOUND_IPSEC | ROC_IE_OT_INPLACE_BIT;

	param1.u16 = 0;

	param1.s.ttl_or_hop_limit = ipsec_xfrm->options.dec_ttl;

	/* Disable IP checksum computation by default */
	param1.s.ip_csum_disable = ROC_IE_OT_SA_INNER_PKT_IP_CSUM_DISABLE;

	if (ipsec_xfrm->options.ip_csum_enable) {
		param1.s.ip_csum_disable =
			ROC_IE_OT_SA_INNER_PKT_IP_CSUM_ENABLE;
	}

	/* Disable L4 checksum computation by default */
	param1.s.l4_csum_disable = ROC_IE_OT_SA_INNER_PKT_L4_CSUM_DISABLE;

	if (ipsec_xfrm->options.l4_csum_enable) {
		param1.s.l4_csum_disable =
			ROC_IE_OT_SA_INNER_PKT_L4_CSUM_ENABLE;
	}

	inst_w4.s.param1 = param1.u16;

	sec_sess->inst.w4 = inst_w4.u64;

	if (ipsec_xfrm->options.stats == 1) {
		/* Enable mib counters */
		sa_dptr->w0.s.count_mib_bytes = 1;
		sa_dptr->w0.s.count_mib_pkts = 1;
	}

	memset(out_sa, 0, sizeof(struct roc_ot_ipsec_outb_sa));

	/* Copy word0 from sa_dptr to populate the ctx_push_sz and ctx_size fields */
	memcpy(out_sa, sa_dptr, 8);

	plt_atomic_thread_fence(__ATOMIC_SEQ_CST);

	/* Write session using microcode opcode */
	ret = roc_cpt_ctx_write(lf, sa_dptr, out_sa,
				sizeof(struct roc_ot_ipsec_outb_sa));
	if (ret) {
		plt_err("Could not write outbound session to hardware");
		goto sa_dptr_free;
	}

	/* Trigger CTX flush so that data is written back to DRAM */
	ret = roc_cpt_lf_ctx_flush(lf, out_sa, false);
	if (ret == -EFAULT) {
		plt_err("Could not flush outbound session");
		goto sa_dptr_free;
	}

	sec_sess->proto = RTE_SECURITY_PROTOCOL_IPSEC;
	plt_atomic_thread_fence(__ATOMIC_SEQ_CST);

sa_dptr_free:
	plt_free(sa_dptr);

	return ret;
}

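/*
 * Illustrative sketch (not part of the driver): an egress
 * rte_security_ipsec_xform whose options map onto the SA and CPT INST word 4
 * fields programmed above. ip_csum_enable/l4_csum_enable flip the inner
 * checksum bits in param1, dec_ttl feeds ttl_or_hop_limit, and stats enables
 * the per-SA MIB counters read back by cn10k_ipsec_stats_get(). The SPI and
 * tunnel parameters below are placeholders, not recommendations.
 *
 *	struct rte_security_ipsec_xform ipsec_xform = {
 *		.spi = 0x1000,
 *		.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *		.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *		.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *		.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
 *		.options = {
 *			.ip_csum_enable = 1,
 *			.l4_csum_enable = 1,
 *			.dec_ttl = 1,
 *			.stats = 1,
 *		},
 *	};
 */
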
static int
cn10k_ipsec_inb_sa_create(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf,
			  struct rte_security_ipsec_xform *ipsec_xfrm,
			  struct rte_crypto_sym_xform *crypto_xfrm,
			  struct cn10k_sec_session *sec_sess)
{
	union roc_ot_ipsec_inb_param1 param1;
	struct roc_ot_ipsec_inb_sa *sa_dptr;
	struct cn10k_ipsec_sa *sa;
	union cpt_inst_w4 inst_w4;
	void *in_sa;
	int ret = 0;

	sa = &sec_sess->sa;
	in_sa = &sa->in_sa;

	/* Allocate memory to be used as dptr for CPT ucode WRITE_SA op */
	sa_dptr = plt_zmalloc(sizeof(struct roc_ot_ipsec_inb_sa), 8);
	if (sa_dptr == NULL) {
		plt_err("Couldn't allocate memory for SA dptr");
		return -ENOMEM;
	}

	/* Translate security parameters to SA */
	ret = cnxk_ot_ipsec_inb_sa_fill(sa_dptr, ipsec_xfrm, crypto_xfrm,
					false);
	if (ret) {
		plt_err("Could not fill inbound session parameters");
		goto sa_dptr_free;
	}

	sec_sess->ipsec.is_outbound = 0;
	sec_sess->inst.w7 = cpt_inst_w7_get(roc_cpt, in_sa);

	/* Save index/SPI in cookie, specifically required for Rx Inject */
	sa_dptr->w1.s.cookie = 0xFFFFFFFF;

	/* pre-populate CPT INST word 4 */
	inst_w4.u64 = 0;
	inst_w4.s.opcode_major = ROC_IE_OT_MAJOR_OP_PROCESS_INBOUND_IPSEC | ROC_IE_OT_INPLACE_BIT;

	param1.u16 = 0;

	/* Disable IP checksum verification by default */
	param1.s.ip_csum_disable = ROC_IE_OT_SA_INNER_PKT_IP_CSUM_DISABLE;

	/* Set the IP checksum flag in the mbuf before enqueue.
	 * Reset the flag during post-processing in case of errors.
	 */
	if (ipsec_xfrm->options.ip_csum_enable) {
		param1.s.ip_csum_disable = ROC_IE_OT_SA_INNER_PKT_IP_CSUM_ENABLE;
		sec_sess->ipsec.ip_csum = RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	}

	/* Disable L4 checksum verification by default */
	param1.s.l4_csum_disable = ROC_IE_OT_SA_INNER_PKT_L4_CSUM_DISABLE;

	if (ipsec_xfrm->options.l4_csum_enable) {
		param1.s.l4_csum_disable =
			ROC_IE_OT_SA_INNER_PKT_L4_CSUM_ENABLE;
	}

	param1.s.esp_trailer_disable = 1;

	inst_w4.s.param1 = param1.u16;

	sec_sess->inst.w4 = inst_w4.u64;

	if (ipsec_xfrm->options.stats == 1) {
		/* Enable mib counters */
		sa_dptr->w0.s.count_mib_bytes = 1;
		sa_dptr->w0.s.count_mib_pkts = 1;
	}

	memset(in_sa, 0, sizeof(struct roc_ot_ipsec_inb_sa));

	/* Copy word0 from sa_dptr to populate the ctx_push_sz and ctx_size fields */
	memcpy(in_sa, sa_dptr, 8);

	plt_atomic_thread_fence(__ATOMIC_SEQ_CST);

	/* Write session using microcode opcode */
	ret = roc_cpt_ctx_write(lf, sa_dptr, in_sa,
				sizeof(struct roc_ot_ipsec_inb_sa));
	if (ret) {
		plt_err("Could not write inbound session to hardware");
		goto sa_dptr_free;
	}

	/* Trigger CTX flush so that data is written back to DRAM */
	ret = roc_cpt_lf_ctx_flush(lf, in_sa, true);
	if (ret == -EFAULT) {
		plt_err("Could not flush inbound session");
		goto sa_dptr_free;
	}

	sec_sess->proto = RTE_SECURITY_PROTOCOL_IPSEC;
	plt_atomic_thread_fence(__ATOMIC_SEQ_CST);

sa_dptr_free:
	plt_free(sa_dptr);

	return ret;
}

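/*
 * Illustrative sketch (not part of the driver): when an inbound session is
 * created with options.ip_csum_enable, the datapath pre-sets
 * RTE_MBUF_F_RX_IP_CKSUM_GOOD and resets it in post-processing on errors, so
 * an application can check the dequeued mbuf instead of re-validating the
 * inner IP header. "op" is assumed to be a successfully dequeued
 * rte_crypto_op tied to such a session.
 *
 *	static inline int
 *	inner_ip_csum_ok(const struct rte_crypto_op *op)
 *	{
 *		const struct rte_mbuf *m = op->sym->m_src;
 *
 *		return (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
 *		       RTE_MBUF_F_RX_IP_CKSUM_GOOD;
 *	}
 */
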
int
cn10k_ipsec_session_create(struct cnxk_cpt_vf *vf, struct cnxk_cpt_qp *qp,
			   struct rte_security_ipsec_xform *ipsec_xfrm,
			   struct rte_crypto_sym_xform *crypto_xfrm,
			   struct rte_security_session *sess)
{
	struct roc_cpt *roc_cpt;
	int ret;

	ret = cnxk_ipsec_xform_verify(ipsec_xfrm, crypto_xfrm);
	if (ret)
		return ret;

	roc_cpt = &vf->cpt;

	if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		return cn10k_ipsec_inb_sa_create(roc_cpt, &qp->lf, ipsec_xfrm, crypto_xfrm,
						 (struct cn10k_sec_session *)sess);
	else
		return cn10k_ipsec_outb_sa_create(roc_cpt, &qp->lf, ipsec_xfrm, crypto_xfrm,
						  (struct cn10k_sec_session *)sess);
}

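/*
 * Illustrative sketch (not part of the driver): applications reach
 * cn10k_ipsec_session_create() through the rte_security lookaside protocol
 * API rather than calling it directly. Assuming a crypto device "dev_id"
 * bound to this PMD, a session mempool "sess_mp", and ipsec_xform/
 * crypto_xform already filled in, the call chain looks roughly like:
 *
 *	void *sec_ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = ipsec_xform,
 *		.crypto_xform = &crypto_xform,
 *	};
 *	void *sess = rte_security_session_create(sec_ctx, &conf, sess_mp);
 *
 * The PMD's security session_create op eventually lands here, and
 * cn10k_ipsec_session_create() dispatches to the inbound or outbound SA
 * create routine based on ipsec_xfrm->direction, as shown above.
 */
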
int
cn10k_sec_ipsec_session_destroy(struct cnxk_cpt_qp *qp, struct cn10k_sec_session *sess)
{
	union roc_ot_ipsec_sa_word2 *w2;
	struct cn10k_ipsec_sa *sa;
	struct roc_cpt_lf *lf;
	void *sa_dptr = NULL;
	int ret;

	lf = &qp->lf;

	sa = &sess->sa;

	/* Trigger CTX flush to write dirty data back to DRAM */
	roc_cpt_lf_ctx_flush(lf, &sa->in_sa, false);

	ret = -1;

	if (sess->ipsec.is_outbound) {
		sa_dptr = plt_zmalloc(sizeof(struct roc_ot_ipsec_outb_sa), 8);
		if (sa_dptr != NULL) {
			roc_ot_ipsec_outb_sa_init(sa_dptr);

			ret = roc_cpt_ctx_write(
				lf, sa_dptr, &sa->out_sa,
				sizeof(struct roc_ot_ipsec_outb_sa));
		}
	} else {
		sa_dptr = plt_zmalloc(sizeof(struct roc_ot_ipsec_inb_sa), 8);
		if (sa_dptr != NULL) {
			roc_ot_ipsec_inb_sa_init(sa_dptr, false);

			ret = roc_cpt_ctx_write(
				lf, sa_dptr, &sa->in_sa,
				sizeof(struct roc_ot_ipsec_inb_sa));
		}
	}

	plt_free(sa_dptr);

	if (ret) {
		/* MC write_ctx failed. Attempt reload of CTX */

		/* Wait for 1 ms so that flush is complete */
		rte_delay_ms(1);

		w2 = (union roc_ot_ipsec_sa_word2 *)&sa->in_sa.w2;
		w2->s.valid = 0;

		plt_atomic_thread_fence(__ATOMIC_SEQ_CST);

		/* Trigger CTX reload to fetch new data from DRAM */
		roc_cpt_lf_ctx_reload(lf, &sa->in_sa);
	}

	return 0;
}

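/*
 * Illustrative sketch (not part of the driver): application-side teardown.
 * rte_security_session_destroy() reaches the PMD's session_destroy op, which
 * invalidates the SA as above before the session object is released.
 * "sec_ctx" and "sess" are the values used at session creation time.
 *
 *	int rc = rte_security_session_destroy(sec_ctx, sess);
 *	if (rc < 0)
 *		printf("session destroy failed: %d\n", rc);
 */
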
int
cn10k_ipsec_stats_get(struct cnxk_cpt_qp *qp, struct cn10k_sec_session *sess,
		      struct rte_security_stats *stats)
{
	struct roc_ot_ipsec_outb_sa *out_sa;
	struct roc_ot_ipsec_inb_sa *in_sa;
	struct cn10k_ipsec_sa *sa;

	stats->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	sa = &sess->sa;

	if (sess->ipsec.is_outbound) {
		out_sa = &sa->out_sa;
		roc_cpt_lf_ctx_flush(&qp->lf, out_sa, false);
		stats->ipsec.opackets = out_sa->ctx.mib_pkts;
		stats->ipsec.obytes = out_sa->ctx.mib_octs;
	} else {
		in_sa = &sa->in_sa;
		roc_cpt_lf_ctx_flush(&qp->lf, in_sa, false);
		stats->ipsec.ipackets = in_sa->ctx.mib_pkts;
		stats->ipsec.ibytes = in_sa->ctx.mib_octs;
	}

	return 0;
}

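/*
 * Illustrative sketch (not part of the driver): reading the per-SA MIB
 * counters exposed above. The counters only advance if the session was
 * created with the ipsec options.stats flag set. "sec_ctx" and "sess" are
 * the values used at session creation time.
 *
 *	struct rte_security_stats stats = { 0 };
 *
 *	if (rte_security_session_stats_get(sec_ctx, sess, &stats) == 0 &&
 *	    stats.protocol == RTE_SECURITY_PROTOCOL_IPSEC)
 *		printf("opackets %" PRIu64 " obytes %" PRIu64 "\n",
 *		       stats.ipsec.opackets, stats.ipsec.obytes);
 */
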
int
cn10k_ipsec_session_update(struct cnxk_cpt_vf *vf, struct cnxk_cpt_qp *qp,
			   struct cn10k_sec_session *sess, struct rte_security_session_conf *conf)
{
	struct roc_cpt *roc_cpt;
	int ret;

	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		return -ENOTSUP;

	ret = cnxk_ipsec_xform_verify(&conf->ipsec, conf->crypto_xform);
	if (ret)
		return ret;

	roc_cpt = &vf->cpt;

	return cn10k_ipsec_outb_sa_create(roc_cpt, &qp->lf, &conf->ipsec, conf->crypto_xform,
					  (struct cn10k_sec_session *)sess);
}
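
/*
 * Illustrative sketch (not part of the driver): rewriting an existing egress
 * session in place, e.g. after a rekey. Ingress updates return -ENOTSUP, as
 * above. "sec_ctx" and "sess" are the values used at session creation time
 * and "conf" is a freshly populated rte_security_session_conf carrying the
 * new parameters.
 *
 *	int rc = rte_security_session_update(sec_ctx, sess, &conf);
 *	if (rc != 0)
 *		printf("session update failed: %d\n", rc);
 */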