/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2023 Marvell.
 */

#include <rte_crypto.h>
#include <rte_crypto_sym.h>
#include <rte_cryptodev.h>
#include <rte_memcpy.h>
#include <rte_mbuf_dyn.h>
#include <rte_pdcp.h>
#include <rte_pdcp_hdr.h>

#include "pdcp_cnt.h"
#include "pdcp_crypto.h"
#include "pdcp_entity.h"
#include "pdcp_process.h"

/* Enum of supported algorithms for ciphering */
enum pdcp_cipher_algo {
	PDCP_CIPHER_ALGO_NULL,
	PDCP_CIPHER_ALGO_AES,
	PDCP_CIPHER_ALGO_ZUC,
	PDCP_CIPHER_ALGO_SNOW3G,
	PDCP_CIPHER_ALGO_MAX
};

/* Enum of supported algorithms for integrity */
enum pdcp_auth_algo {
	PDCP_AUTH_ALGO_NULL,
	PDCP_AUTH_ALGO_AES,
	PDCP_AUTH_ALGO_ZUC,
	PDCP_AUTH_ALGO_SNOW3G,
	PDCP_AUTH_ALGO_MAX
};

/* IV generation functions based on the (cipher, auth) algorithm combination */

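/*
 * A note on IV construction, summarizing what the helpers below rely on: per
 * the 3GPP PDCP security algorithms, each IV is built from COUNT (32 bits,
 * big endian) combined with BEARER and DIRECTION. The static BEARER/DIRECTION
 * bits are pre-computed once per entity into en_priv->cipher_iv_part and
 * en_priv->auth_iv_part (see pdcp_iv_gen_func_set()), so the per-packet work
 * reduces to OR-ing in the big-endian COUNT.
 */
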
static void
pdcp_iv_gen_null_null(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
{
	/* No IV required for NULL cipher + NULL auth */
	RTE_SET_USED(cop);
	RTE_SET_USED(en_priv);
	RTE_SET_USED(count);
}

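/*
 * For AES-CMAC based integrity (EIA2/NIA2 style), the standard defines no
 * separate IV; instead, COUNT || BEARER || DIRECTION (with zero padding) is
 * prepended to the message before MAC computation. The 8-byte block built
 * here carries that prefix; it is trimmed again after crypto processing
 * (see the aad_sz handling).
 */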
47 static void
pdcp_iv_gen_null_aes_cmac(struct rte_crypto_op * cop,const struct entity_priv * en_priv,uint32_t count)48 pdcp_iv_gen_null_aes_cmac(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
49 uint32_t count)
50 {
51 struct rte_crypto_sym_op *op = cop->sym;
52 struct rte_mbuf *mb = op->m_src;
53 uint8_t *m_ptr;
54 uint64_t m;
55
56 /* AES-CMAC requires message to be prepended with info on count etc */
57
58 /* Prepend by 8 bytes to add custom message */
59 m_ptr = (uint8_t *)rte_pktmbuf_prepend(mb, 8);
60
61 m = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
62
63 rte_memcpy(m_ptr, &m, 8);
64 }
65
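/*
 * SNOW3G (UIA2) and ZUC (EIA3) take a 16-byte auth IV. The second 8-byte half
 * is derived from the first by XOR-ing in the pre-computed bits that differ
 * in the upper half (auth_iv_part.u64[1]).
 */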
static void
pdcp_iv_gen_null_zs(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
{
	uint64_t iv_u64[2];
	uint8_t *iv;

	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);

	iv_u64[0] = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
	rte_memcpy(iv, &iv_u64[0], 8);

	iv_u64[1] = iv_u64[0] ^ en_priv->auth_iv_part.u64[1];
	rte_memcpy(iv + 8, &iv_u64[1], 8);
}

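/*
 * AES-CTR (EEA2) uses a 16-byte counter block: the upper 64 bits carry
 * COUNT || BEARER || DIRECTION with zero padding, and the lower 64 bits start
 * at zero, acting as the block counter during encryption.
 */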
static void
pdcp_iv_gen_aes_ctr_null(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
			 uint32_t count)
{
	uint64_t iv_u64[2];
	uint8_t *iv;

	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);

	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
	iv_u64[1] = 0;
	rte_memcpy(iv, iv_u64, 16);
}

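/*
 * SNOW3G (UEA2) and ZUC (EEA3) ciphers use the same 64-bit
 * COUNT || BEARER || DIRECTION value, repeated in both halves of the 16-byte IV.
 */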
static void
pdcp_iv_gen_zs_null(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
{
	uint64_t iv_u64;
	uint8_t *iv;

	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);

	iv_u64 = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
	rte_memcpy(iv, &iv_u64, 8);
	rte_memcpy(iv + 8, &iv_u64, 8);
}

static void
pdcp_iv_gen_zs_zs(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
{
	uint64_t iv_u64[2];
	uint8_t *iv;

	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);

	/* Generating cipher IV */
	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
	rte_memcpy(iv, &iv_u64[0], 8);
	rte_memcpy(iv + 8, &iv_u64[0], 8);

	iv += PDCP_IV_LEN;

	/* Generating auth IV */
	iv_u64[0] = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
	rte_memcpy(iv, &iv_u64[0], 8);

	iv_u64[1] = iv_u64[0] ^ en_priv->auth_iv_part.u64[1];
	rte_memcpy(iv + 8, &iv_u64[1], 8);
}

static void
pdcp_iv_gen_zs_aes_cmac(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
			uint32_t count)
{
	struct rte_crypto_sym_op *op = cop->sym;
	struct rte_mbuf *mb = op->m_src;
	uint8_t *m_ptr, *iv;
	uint64_t iv_u64[2];
	uint64_t m;

	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);
	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
	rte_memcpy(iv, &iv_u64[0], 8);
	rte_memcpy(iv + 8, &iv_u64[0], 8);

	m_ptr = (uint8_t *)rte_pktmbuf_prepend(mb, 8);
	m = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
	rte_memcpy(m_ptr, &m, 8);
}

static void
pdcp_iv_gen_aes_ctr_aes_cmac(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
			     uint32_t count)
{
	struct rte_crypto_sym_op *op = cop->sym;
	struct rte_mbuf *mb = op->m_src;
	uint8_t *m_ptr, *iv;
	uint64_t iv_u64[2];
	uint64_t m;

	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);

	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
	iv_u64[1] = 0;
	rte_memcpy(iv, iv_u64, PDCP_IV_LEN);

	m_ptr = (uint8_t *)rte_pktmbuf_prepend(mb, 8);
	m = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
	rte_memcpy(m_ptr, &m, 8);
}

static void
pdcp_iv_gen_aes_ctr_zs(struct rte_crypto_op *cop, const struct entity_priv *en_priv, uint32_t count)
{
	uint64_t iv_u64[2];
	uint8_t *iv;

	iv = rte_crypto_op_ctod_offset(cop, uint8_t *, PDCP_IV_OFFSET);

	iv_u64[0] = en_priv->cipher_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
	iv_u64[1] = 0;
	rte_memcpy(iv, iv_u64, PDCP_IV_LEN);

	iv += PDCP_IV_LEN;

	iv_u64[0] = en_priv->auth_iv_part.u64[0] | ((uint64_t)(rte_cpu_to_be_32(count)));
	rte_memcpy(iv, &iv_u64[0], 8);

	iv_u64[1] = iv_u64[0] ^ en_priv->auth_iv_part.u64[1];
	rte_memcpy(iv + 8, &iv_u64[1], 8);
}

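/*
 * Extract the cipher and auth transforms from the configured chain. The chain
 * must start with either a cipher or an auth transform; the other transform,
 * if present, is taken from ->next.
 */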
static int
pdcp_crypto_xfrm_get(const struct rte_pdcp_entity_conf *conf, struct rte_crypto_sym_xform **c_xfrm,
		     struct rte_crypto_sym_xform **a_xfrm)
{
	*c_xfrm = NULL;
	*a_xfrm = NULL;

	if (conf->crypto_xfrm == NULL)
		return -EINVAL;

	if (conf->crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		*c_xfrm = conf->crypto_xfrm;
		*a_xfrm = conf->crypto_xfrm->next;
	} else if (conf->crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		*a_xfrm = conf->crypto_xfrm;
		*c_xfrm = conf->crypto_xfrm->next;
	} else {
		return -EINVAL;
	}

	return 0;
}

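/*
 * Pre-compute the static BEARER/DIRECTION parts of the IVs and pick the IV
 * generation function matching the (cipher, auth) combination. When
 * conf->reverse_iv_direction is set, the direction bit is flipped, so the
 * entity generates IVs as its peer (opposite direction) would.
 */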
static int
pdcp_iv_gen_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
{
	struct rte_crypto_sym_xform *c_xfrm, *a_xfrm;
	enum rte_security_pdcp_direction direction;
	enum pdcp_cipher_algo cipher_algo;
	enum pdcp_auth_algo auth_algo;
	struct entity_priv *en_priv;
	int ret;

	en_priv = entity_priv_get(entity);

	direction = conf->pdcp_xfrm.pkt_dir;
	if (conf->reverse_iv_direction)
		direction = !direction;

	ret = pdcp_crypto_xfrm_get(conf, &c_xfrm, &a_xfrm);
	if (ret)
		return ret;

	if (c_xfrm == NULL)
		return -EINVAL;

	memset(&en_priv->auth_iv_part, 0, sizeof(en_priv->auth_iv_part));
	memset(&en_priv->cipher_iv_part, 0, sizeof(en_priv->cipher_iv_part));

	switch (c_xfrm->cipher.algo) {
	case RTE_CRYPTO_CIPHER_NULL:
		cipher_algo = PDCP_CIPHER_ALGO_NULL;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipher_algo = PDCP_CIPHER_ALGO_AES;
		en_priv->cipher_iv_part.aes_ctr.bearer = conf->pdcp_xfrm.bearer;
		en_priv->cipher_iv_part.aes_ctr.direction = direction;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipher_algo = PDCP_CIPHER_ALGO_SNOW3G;
		en_priv->cipher_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
		en_priv->cipher_iv_part.zs.direction = direction;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipher_algo = PDCP_CIPHER_ALGO_ZUC;
		en_priv->cipher_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
		en_priv->cipher_iv_part.zs.direction = direction;
		break;
	default:
		return -ENOTSUP;
	}

	if (a_xfrm != NULL) {
		switch (a_xfrm->auth.algo) {
		case RTE_CRYPTO_AUTH_NULL:
			auth_algo = PDCP_AUTH_ALGO_NULL;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			auth_algo = PDCP_AUTH_ALGO_AES;
			en_priv->auth_iv_part.aes_cmac.bearer = conf->pdcp_xfrm.bearer;
			en_priv->auth_iv_part.aes_cmac.direction = direction;
			break;
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			auth_algo = PDCP_AUTH_ALGO_SNOW3G;
			en_priv->auth_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
			en_priv->auth_iv_part.zs.direction_64 = direction;
			en_priv->auth_iv_part.zs.direction_112 = direction;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			auth_algo = PDCP_AUTH_ALGO_ZUC;
			en_priv->auth_iv_part.zs.bearer = conf->pdcp_xfrm.bearer;
			en_priv->auth_iv_part.zs.direction_64 = direction;
			en_priv->auth_iv_part.zs.direction_112 = direction;
			break;
		default:
			return -ENOTSUP;
		}
	} else {
		auth_algo = PDCP_AUTH_ALGO_NULL;
	}

	static const iv_gen_t iv_gen_map[PDCP_CIPHER_ALGO_MAX][PDCP_AUTH_ALGO_MAX] = {
		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_null_null,
		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_null_aes_cmac,
		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_null_zs,
		[PDCP_CIPHER_ALGO_NULL][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_null_zs,

		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_aes_ctr_null,
		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_aes_ctr_aes_cmac,
		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_aes_ctr_zs,
		[PDCP_CIPHER_ALGO_AES][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_aes_ctr_zs,

		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_zs_null,
		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_zs_aes_cmac,
		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_zs_zs,
		[PDCP_CIPHER_ALGO_SNOW3G][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_zs_zs,

		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_NULL] = pdcp_iv_gen_zs_null,
		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_AES] = pdcp_iv_gen_zs_aes_cmac,
		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_SNOW3G] = pdcp_iv_gen_zs_zs,
		[PDCP_CIPHER_ALGO_ZUC][PDCP_AUTH_ALGO_ZUC] = pdcp_iv_gen_zs_zs,
	};

	en_priv->iv_gen = iv_gen_map[cipher_algo][auth_algo];

	return 0;
}

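/*
 * Populate a symmetric crypto op for one PDU. For SNOW3G/ZUC, cipher/auth
 * data offsets and lengths must be expressed in bits, so byte values are
 * shifted left by 3 when the corresponding is_*_in_bits flag is set.
 */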
static inline void
cop_prepare(const struct entity_priv *en_priv, struct rte_mbuf *mb, struct rte_crypto_op *cop,
	    uint8_t data_offset, uint32_t count, const bool is_auth)
{
	const struct rte_crypto_op cop_init = {
		.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
		.sess_type = RTE_CRYPTO_OP_WITH_SESSION,
	};
	struct rte_crypto_sym_op *op;
	uint32_t pkt_len;

	const uint8_t cipher_shift = 3 * en_priv->flags.is_cipher_in_bits;
	const uint8_t auth_shift = 3 * en_priv->flags.is_auth_in_bits;

	op = cop->sym;
	cop->raw = cop_init.raw;
	op->m_src = mb;
	op->m_dst = mb;

	/* Set IV */
	en_priv->iv_gen(cop, en_priv, count);

	/* Prepare op */
	pkt_len = rte_pktmbuf_pkt_len(mb);
	op->cipher.data.offset = data_offset << cipher_shift;
	op->cipher.data.length = (pkt_len - data_offset) << cipher_shift;

	if (is_auth) {
		op->auth.data.offset = 0;
		op->auth.data.length = (pkt_len - RTE_PDCP_MAC_I_LEN) << auth_shift;
		op->auth.digest.data = rte_pktmbuf_mtod_offset(mb, uint8_t *,
							       (pkt_len - RTE_PDCP_MAC_I_LEN));
	}

	__rte_crypto_sym_op_attach_sym_session(op, en_priv->crypto_sess);
}

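/*
 * UL (transmit), 12-bit SN: prepend the PDU header, consume the next TX COUNT
 * and pack its SN into the header bit-fields.
 */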
static inline bool
pdcp_pre_process_uplane_sn_12_ul_set_sn(struct entity_priv *en_priv, struct rte_mbuf *mb,
					uint32_t *count)
{
	struct rte_pdcp_up_data_pdu_sn_12_hdr *pdu_hdr;
	const uint8_t hdr_sz = en_priv->hdr_sz;
	uint32_t sn;

	/* Prepend PDU header */
	pdu_hdr = (struct rte_pdcp_up_data_pdu_sn_12_hdr *)rte_pktmbuf_prepend(mb, hdr_sz);
	if (unlikely(pdu_hdr == NULL))
		return false;

	/* Update sequence number in the PDU header */
	*count = en_priv->state.tx_next++;
	sn = pdcp_sn_from_count_get(*count, RTE_SECURITY_PDCP_SN_SIZE_12);

	pdu_hdr->d_c = RTE_PDCP_PDU_TYPE_DATA;
	pdu_hdr->sn_11_8 = ((sn & 0xf00) >> 8);
	pdu_hdr->sn_7_0 = (sn & 0xff);
	pdu_hdr->r = 0;

	return true;
}

static uint16_t
pdcp_pre_process_uplane_sn_12_ul(const struct rte_pdcp_entity *entity, struct rte_mbuf *in_mb[],
				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err_ret)
{
	struct entity_priv *en_priv = entity_priv_get(entity);
	uint16_t nb_cop, nb_prep = 0, nb_err = 0;
	struct rte_mbuf *mb;
	uint32_t count;
	uint8_t *mac_i;
	int i;

	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz + en_priv->cipher_skip_sz;
	const int is_null_auth = en_priv->flags.is_null_auth;

	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
					  num);

	if (en_priv->flags.is_authenticated) {
		for (i = 0; i < nb_cop; i++) {
			mb = in_mb[i];
			mac_i = (uint8_t *)rte_pktmbuf_append(mb, RTE_PDCP_MAC_I_LEN);
			if (unlikely(mac_i == NULL)) {
				in_mb[nb_err++] = mb;
				continue;
			}

			/* Clear MAC-I field for NULL auth */
			if (is_null_auth)
				memset(mac_i, 0, RTE_PDCP_MAC_I_LEN);

			if (unlikely(!pdcp_pre_process_uplane_sn_12_ul_set_sn(en_priv, mb,
									      &count))) {
				in_mb[nb_err++] = mb;
				continue;
			}

			cop_prepare(en_priv, mb, cop[nb_prep++], data_offset, count, true);
		}
	} else {
		for (i = 0; i < nb_cop; i++) {
			mb = in_mb[i];
			if (unlikely(!pdcp_pre_process_uplane_sn_12_ul_set_sn(en_priv, mb,
									      &count))) {
				in_mb[nb_err++] = mb;
				continue;
			}

			cop_prepare(en_priv, mb, cop[nb_prep++], data_offset, count, false);
		}
	}

	if (unlikely(nb_err))
		/* Using mempool API since crypto API does not provide bulk free */
		rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[nb_prep], nb_cop - nb_prep);

	*nb_err_ret = num - nb_prep;

	return nb_prep;
}

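/*
 * The 18-bit SN UL variants below mirror the 12-bit flow; only the header
 * layout (rte_pdcp_up_data_pdu_sn_18_hdr) and the SN masking differ.
 */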
static inline bool
pdcp_pre_process_uplane_sn_18_ul_set_sn(struct entity_priv *en_priv, struct rte_mbuf *mb,
					uint32_t *count)
{
	struct rte_pdcp_up_data_pdu_sn_18_hdr *pdu_hdr;
	const uint8_t hdr_sz = en_priv->hdr_sz;
	uint32_t sn;

	/* Prepend PDU header */
	pdu_hdr = (struct rte_pdcp_up_data_pdu_sn_18_hdr *)rte_pktmbuf_prepend(mb, hdr_sz);
	if (unlikely(pdu_hdr == NULL))
		return false;

	/* Update sequence number in the PDU header */
	*count = en_priv->state.tx_next++;
	sn = pdcp_sn_from_count_get(*count, RTE_SECURITY_PDCP_SN_SIZE_18);

	pdu_hdr->d_c = RTE_PDCP_PDU_TYPE_DATA;
	pdu_hdr->sn_17_16 = ((sn & 0x30000) >> 16);
	pdu_hdr->sn_15_8 = ((sn & 0xff00) >> 8);
	pdu_hdr->sn_7_0 = (sn & 0xff);
	pdu_hdr->r = 0;

	return true;
}

static inline uint16_t
pdcp_pre_process_uplane_sn_18_ul(const struct rte_pdcp_entity *entity, struct rte_mbuf *in_mb[],
				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err_ret)
{
	struct entity_priv *en_priv = entity_priv_get(entity);
	uint16_t nb_cop, nb_prep = 0, nb_err = 0;
	struct rte_mbuf *mb;
	uint32_t count;
	uint8_t *mac_i;
	int i;

	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz + en_priv->cipher_skip_sz;
	const int is_null_auth = en_priv->flags.is_null_auth;

	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
					  num);

	if (en_priv->flags.is_authenticated) {
		for (i = 0; i < nb_cop; i++) {
			mb = in_mb[i];
			mac_i = (uint8_t *)rte_pktmbuf_append(mb, RTE_PDCP_MAC_I_LEN);
			if (unlikely(mac_i == NULL)) {
				in_mb[nb_err++] = mb;
				continue;
			}

			/* Clear MAC-I field for NULL auth */
			if (is_null_auth)
				memset(mac_i, 0, RTE_PDCP_MAC_I_LEN);

			if (unlikely(!pdcp_pre_process_uplane_sn_18_ul_set_sn(en_priv, mb,
									      &count))) {
				in_mb[nb_err++] = mb;
				continue;
			}

			cop_prepare(en_priv, mb, cop[nb_prep++], data_offset, count, true);
		}
	} else {
		for (i = 0; i < nb_cop; i++) {
			mb = in_mb[i];
			if (unlikely(!pdcp_pre_process_uplane_sn_18_ul_set_sn(en_priv, mb,
									      &count))) {
				in_mb[nb_err++] = mb;
				continue;
			}

			cop_prepare(en_priv, mb, cop[nb_prep++], data_offset, count, false);
		}
	}

	if (unlikely(nb_err))
		/* Using mempool API since crypto API does not provide bulk free */
		rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[nb_prep], nb_cop - nb_prep);

	*nb_err_ret = num - nb_prep;

	return nb_prep;
}

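/*
 * Control plane UL: control plane PDUs are always integrity protected and the
 * 12-bit SN header carries no D/C bit, so the header is filled inline and
 * cop_prepare() is always called with is_auth set.
 */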
static uint16_t
pdcp_pre_process_cplane_sn_12_ul(const struct rte_pdcp_entity *entity, struct rte_mbuf *in_mb[],
				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err_ret)
{
	struct entity_priv *en_priv = entity_priv_get(entity);
	struct rte_pdcp_cp_data_pdu_sn_12_hdr *pdu_hdr;
	uint16_t nb_cop, nb_prep = 0, nb_err = 0;
	struct rte_mbuf *mb;
	uint32_t count, sn;
	uint8_t *mac_i;
	int i;

	const uint8_t hdr_sz = en_priv->hdr_sz;
	const uint8_t data_offset = hdr_sz + en_priv->aad_sz + en_priv->cipher_skip_sz;
	const int is_null_auth = en_priv->flags.is_null_auth;

	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
					  num);

	for (i = 0; i < nb_cop; i++) {
		mb = in_mb[i];
		/* Prepend PDU header */
		pdu_hdr = (struct rte_pdcp_cp_data_pdu_sn_12_hdr *)rte_pktmbuf_prepend(mb, hdr_sz);
		if (unlikely(pdu_hdr == NULL)) {
			in_mb[nb_err++] = mb;
			continue;
		}

		mac_i = (uint8_t *)rte_pktmbuf_append(mb, RTE_PDCP_MAC_I_LEN);
		if (unlikely(mac_i == NULL)) {
			in_mb[nb_err++] = mb;
			continue;
		}

		/* Clear MAC-I field for NULL auth */
		if (is_null_auth)
			memset(mac_i, 0, RTE_PDCP_MAC_I_LEN);

		/* Update sequence number in the PDU header */
		count = en_priv->state.tx_next++;
		sn = pdcp_sn_from_count_get(count, RTE_SECURITY_PDCP_SN_SIZE_12);

		pdu_hdr->sn_11_8 = ((sn & 0xf00) >> 8);
		pdu_hdr->sn_7_0 = (sn & 0xff);
		pdu_hdr->r = 0;

		cop_prepare(en_priv, mb, cop[nb_prep++], data_offset, count, true);
	}

	if (unlikely(nb_err))
		/* Using mempool API since crypto API does not provide bulk free */
		rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[nb_prep], nb_cop - nb_prep);

	*nb_err_ret = num - nb_prep;

	return nb_prep;
}

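/*
 * UL (transmit) post-processing: separate out packets that failed crypto
 * processing and trim the AES-CMAC message prefix (aad_sz) that was prepended
 * before processing.
 */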
static uint16_t
pdcp_post_process_ul(const struct rte_pdcp_entity *entity,
		     struct rte_mbuf *in_mb[], struct rte_mbuf *out_mb[],
		     uint16_t num, uint16_t *nb_err_ret)
{
	struct entity_priv *en_priv = entity_priv_get(entity);
	const uint32_t hdr_trim_sz = en_priv->aad_sz;
	int i, nb_success = 0, nb_err = 0;
	struct rte_mbuf *mb, *err_mb[num];

#ifdef RTE_ARCH_PPC_64
	err_mb[0] = NULL; /* workaround PPC-GCC bug */
#endif
	for (i = 0; i < num; i++) {
		mb = in_mb[i];
		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
			err_mb[nb_err++] = mb;
			continue;
		}

		if (hdr_trim_sz)
			rte_pktmbuf_adj(mb, hdr_trim_sz);

		out_mb[nb_success++] = mb;
	}

	if (unlikely(nb_err != 0))
		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));

	*nb_err_ret = nb_err;
	return nb_success;
}

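/*
 * Determine the full COUNT for a received SN, following the PDCP receive
 * window logic (TS 38.323): if RCVD_SN lags SN(RX_DELIV) by more than the
 * window, the HFN wrapped forward (HFN(RX_DELIV) + 1); if it leads by a full
 * window or more, the PDU belongs to the previous hyper frame (HFN - 1);
 * otherwise HFN(RX_DELIV) is used as is. -ERANGE is returned when the HFN
 * would overflow or underflow.
 */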
static inline int
pdcp_sn_count_get(const uint32_t rx_deliv, int32_t rsn, uint32_t *count,
		  const enum rte_security_pdcp_sn_size sn_size)
{
	const uint32_t rx_deliv_sn = pdcp_sn_from_count_get(rx_deliv, sn_size);
	const uint32_t window_sz = pdcp_window_size_get(sn_size);
	uint32_t rhfn;

	rhfn = pdcp_hfn_from_count_get(rx_deliv, sn_size);

	if (rsn < (int32_t)(rx_deliv_sn - window_sz)) {
		if (unlikely(rhfn == pdcp_hfn_max(sn_size)))
			return -ERANGE;
		rhfn += 1;
	} else if ((uint32_t)rsn >= (rx_deliv_sn + window_sz)) {
		if (unlikely(rhfn == PDCP_HFN_MIN))
			return -ERANGE;
		rhfn -= 1;
	}

	*count = pdcp_count_from_hfn_sn_get(rhfn, rsn, sn_size);

	return 0;
}

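/*
 * DL (receive) pre-processing: parse the SN from the PDU header, reconstruct
 * the full COUNT, prepare the crypto op and stash COUNT in the mbuf dynamic
 * field so that post-processing can update entity state once crypto completes.
 */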
static inline uint16_t
pdcp_pre_process_uplane_sn_12_dl_flags(const struct rte_pdcp_entity *entity,
				       struct rte_mbuf *in_mb[], struct rte_crypto_op *cop[],
				       uint16_t num, uint16_t *nb_err_ret,
				       const bool is_integ_protected)
{
	struct entity_priv *en_priv = entity_priv_get(entity);
	struct rte_pdcp_up_data_pdu_sn_12_hdr *pdu_hdr;
	uint16_t nb_cop, nb_prep = 0, nb_err = 0;
	rte_pdcp_dynfield_t *mb_dynfield;
	struct rte_mbuf *mb;
	int32_t rsn = 0;
	uint32_t count;
	int i;

	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz + en_priv->cipher_skip_sz;

	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
					  num);

	const uint32_t rx_deliv = en_priv->state.rx_deliv;

	for (i = 0; i < nb_cop; i++) {
		mb = in_mb[i];
		pdu_hdr = rte_pktmbuf_mtod(mb, struct rte_pdcp_up_data_pdu_sn_12_hdr *);

		/* Check for PDU type */
		if (likely(pdu_hdr->d_c == RTE_PDCP_PDU_TYPE_DATA)) {
			rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
		} else {
			/* NOTE: Control PDUs are not handled. */
			in_mb[nb_err++] = mb;
			continue;
		}

		if (unlikely(pdcp_sn_count_get(rx_deliv, rsn, &count,
					       RTE_SECURITY_PDCP_SN_SIZE_12))) {
			in_mb[nb_err++] = mb;
			continue;
		}

		cop_prepare(en_priv, mb, cop[nb_prep++], data_offset, count, is_integ_protected);

		mb_dynfield = pdcp_dynfield(mb);
		*mb_dynfield = count;
	}

	if (unlikely(nb_err))
		rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[nb_prep], nb_cop - nb_prep);

	*nb_err_ret = num - nb_prep;

	return nb_prep;
}

static uint16_t
pdcp_pre_process_uplane_sn_12_dl_ip(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
				    struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
{
	return pdcp_pre_process_uplane_sn_12_dl_flags(entity, mb, cop, num, nb_err, true);
}

static uint16_t
pdcp_pre_process_uplane_sn_12_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
{
	return pdcp_pre_process_uplane_sn_12_dl_flags(entity, mb, cop, num, nb_err, false);
}

static inline uint16_t
pdcp_pre_process_uplane_sn_18_dl_flags(const struct rte_pdcp_entity *entity,
				       struct rte_mbuf *in_mb[], struct rte_crypto_op *cop[],
				       uint16_t num, uint16_t *nb_err_ret,
				       const bool is_integ_protected)
{
	struct entity_priv *en_priv = entity_priv_get(entity);
	struct rte_pdcp_up_data_pdu_sn_18_hdr *pdu_hdr;
	uint16_t nb_cop, nb_prep = 0, nb_err = 0;
	rte_pdcp_dynfield_t *mb_dynfield;
	struct rte_mbuf *mb;
	int32_t rsn = 0;
	uint32_t count;
	int i;

	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz + en_priv->cipher_skip_sz;
	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
					  num);

	const uint32_t rx_deliv = en_priv->state.rx_deliv;

	for (i = 0; i < nb_cop; i++) {
		mb = in_mb[i];
		pdu_hdr = rte_pktmbuf_mtod(mb, struct rte_pdcp_up_data_pdu_sn_18_hdr *);

		/* Check for PDU type */
		if (likely(pdu_hdr->d_c == RTE_PDCP_PDU_TYPE_DATA)) {
			rsn = ((pdu_hdr->sn_17_16 << 16) | (pdu_hdr->sn_15_8 << 8) |
			       (pdu_hdr->sn_7_0));
		} else {
			/* NOTE: Control PDUs are not handled. */
			in_mb[nb_err++] = mb;
			continue;
		}

		if (unlikely(pdcp_sn_count_get(rx_deliv, rsn, &count,
					       RTE_SECURITY_PDCP_SN_SIZE_18))) {
			in_mb[nb_err++] = mb;
			continue;
		}

		cop_prepare(en_priv, mb, cop[nb_prep++], data_offset, count, is_integ_protected);

		mb_dynfield = pdcp_dynfield(mb);
		*mb_dynfield = count;
	}

	if (unlikely(nb_err))
		/* Using mempool API since crypto API does not provide bulk free */
		rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[nb_prep], nb_cop - nb_prep);

	*nb_err_ret = num - nb_prep;

	return nb_prep;
}

static uint16_t
pdcp_pre_process_uplane_sn_18_dl_ip(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
				    struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
{
	return pdcp_pre_process_uplane_sn_18_dl_flags(entity, mb, cop, num, nb_err, true);
}

static uint16_t
pdcp_pre_process_uplane_sn_18_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
{
	return pdcp_pre_process_uplane_sn_18_dl_flags(entity, mb, cop, num, nb_err, false);
}

static uint16_t
pdcp_pre_process_cplane_sn_12_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *in_mb[],
				 struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err_ret)
{
	struct entity_priv *en_priv = entity_priv_get(entity);
	struct rte_pdcp_cp_data_pdu_sn_12_hdr *pdu_hdr;
	uint16_t nb_cop, nb_prep = 0, nb_err = 0;
	rte_pdcp_dynfield_t *mb_dynfield;
	struct rte_mbuf *mb;
	uint32_t count;
	int32_t rsn;
	int i;

	const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz + en_priv->cipher_skip_sz;

	nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
					  num);

	const uint32_t rx_deliv = en_priv->state.rx_deliv;

	for (i = 0; i < nb_cop; i++) {
		mb = in_mb[i];
		pdu_hdr = rte_pktmbuf_mtod(mb, struct rte_pdcp_cp_data_pdu_sn_12_hdr *);
		rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
		if (unlikely(pdcp_sn_count_get(rx_deliv, rsn, &count,
					       RTE_SECURITY_PDCP_SN_SIZE_12))) {
			in_mb[nb_err++] = mb;
			continue;
		}

		cop_prepare(en_priv, mb, cop[nb_prep++], data_offset, count, true);

		mb_dynfield = pdcp_dynfield(mb);
		*mb_dynfield = count;
	}

	if (unlikely(nb_err))
		/* Using mempool API since crypto API does not provide bulk free */
		rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[nb_prep], nb_cop - nb_prep);

	*nb_err_ret = num - nb_prep;

	return nb_prep;
}

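/* Strip the PDCP header (and the MAC-I, when requested) from a processed packet. */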
static inline void
pdcp_packet_strip(struct rte_mbuf *mb, const uint32_t hdr_trim_sz, const bool trim_mac)
{
	char *p = rte_pktmbuf_adj(mb, hdr_trim_sz);
	RTE_ASSERT(p != NULL);
	RTE_SET_USED(p);

	if (trim_mac) {
		int ret = rte_pktmbuf_trim(mb, RTE_PDCP_MAC_I_LEN);
		RTE_ASSERT(ret == 0);
		RTE_SET_USED(ret);
	}
}

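/*
 * DL (receive) state update for one in-window packet, roughly following the
 * t-Reordering based receive procedure of TS 38.323: reject stale and
 * duplicate COUNTs, mark the COUNT as received, then either deliver
 * immediately (out-of-order delivery), flush buffered in-sequence packets
 * when COUNT == RX_DELIV, or buffer the packet in the reorder window.
 * Returns the number of packets written to out_mb, or a negative errno.
 */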
static inline int
pdcp_post_process_update_entity_state(const struct rte_pdcp_entity *entity,
				      const uint32_t count, struct rte_mbuf *mb,
				      struct rte_mbuf *out_mb[],
				      const bool trim_mac)
{
	struct entity_priv *en_priv = entity_priv_get(entity);
	struct pdcp_t_reordering *t_reorder;
	struct pdcp_reorder *reorder;
	uint16_t processed = 0;

	struct entity_priv_dl_part *dl = entity_dl_part_get(entity);
	const uint32_t hdr_trim_sz = en_priv->hdr_sz + en_priv->aad_sz;

	if (count < en_priv->state.rx_deliv)
		return -EINVAL;

	if (count >= en_priv->state.rx_next)
		en_priv->state.rx_next = count + 1;

	if (unlikely(pdcp_cnt_bitmap_is_set(dl->bitmap, count)))
		return -EEXIST;

	pdcp_cnt_bitmap_set(dl->bitmap, count);
	pdcp_packet_strip(mb, hdr_trim_sz, trim_mac);

	if (en_priv->flags.is_out_of_order_delivery) {
		out_mb[0] = mb;
		pdcp_rx_deliv_set(entity, count + 1);

		return 1;
	}

	reorder = &dl->reorder;
	t_reorder = &dl->t_reorder;

	if (count == en_priv->state.rx_deliv) {
		if (reorder->is_active) {
			/*
			 * This insert is used only to increment reorder->min_seqn.
			 * To remove it, min_seqn_set() would have to work with a
			 * non-empty buffer.
			 */
			pdcp_reorder_insert(reorder, mb, count);

			/* Get buffered packets */
			struct rte_mbuf **cached_mbufs = &out_mb[processed];
			uint32_t nb_cached = pdcp_reorder_get_sequential(reorder,
					cached_mbufs, entity->max_pkt_cache - processed);

			processed += nb_cached;
		} else {
			out_mb[processed++] = mb;
		}

		/* The number of packets processed should never exceed the window size */
		pdcp_rx_deliv_set(entity, count + processed);

	} else {
		if (!reorder->is_active)
			/* Initialize reordering buffer with RX_DELIV */
			pdcp_reorder_start(reorder, en_priv->state.rx_deliv);
		/* Buffer the packet */
		pdcp_reorder_insert(reorder, mb, count);
	}

	/* Stop and reset the current timer once RX_DELIV has reached RX_REORD */
	if (t_reorder->state == TIMER_RUNNING &&
	    en_priv->state.rx_deliv >= en_priv->state.rx_reord) {
		t_reorder->state = TIMER_STOP;
		t_reorder->handle.stop(t_reorder->handle.timer, t_reorder->handle.args);
		/* Stop the reorder buffer only if it is empty */
		if (en_priv->state.rx_deliv == en_priv->state.rx_next)
			pdcp_reorder_stop(reorder);
	}

	/*
	 * If t-Reordering is not running (including the case when it was just
	 * stopped by the actions above) and packets are still pending delivery,
	 * restart it.
	 */
	if (t_reorder->state == TIMER_STOP && en_priv->state.rx_deliv < en_priv->state.rx_next) {
		/* Update RX_REORD to RX_NEXT */
		en_priv->state.rx_reord = en_priv->state.rx_next;
		/* Start t-Reordering */
		t_reorder->state = TIMER_RUNNING;
		t_reorder->handle.start(t_reorder->handle.timer, t_reorder->handle.args);
	}

	return processed;
}

static inline uint16_t
pdcp_post_process_uplane_dl_flags(const struct rte_pdcp_entity *entity, struct rte_mbuf *in_mb[],
				  struct rte_mbuf *out_mb[], uint16_t num, uint16_t *nb_err_ret,
				  const bool is_integ_protected)
{
	int i, nb_processed, nb_success = 0, nb_err = 0;
	rte_pdcp_dynfield_t *mb_dynfield;
	struct rte_mbuf *err_mb[num];
	struct rte_mbuf *mb;
	uint32_t count;

#ifdef RTE_ARCH_PPC_64
	err_mb[0] = NULL; /* workaround PPC-GCC bug */
#endif
	for (i = 0; i < num; i++) {
		mb = in_mb[i];
		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED))
			goto error;

		mb_dynfield = pdcp_dynfield(mb);
		count = *mb_dynfield;

		nb_processed = pdcp_post_process_update_entity_state(
				entity, count, mb, &out_mb[nb_success], is_integ_protected);
		if (nb_processed < 0)
			goto error;

		nb_success += nb_processed;
		continue;

error:
		err_mb[nb_err++] = mb;
	}

	if (unlikely(nb_err != 0))
		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));

	*nb_err_ret = nb_err;
	return nb_success;
}

static uint16_t
pdcp_post_process_uplane_dl_ip(const struct rte_pdcp_entity *entity, struct rte_mbuf *in_mb[],
			       struct rte_mbuf *out_mb[], uint16_t num, uint16_t *nb_err)
{
	return pdcp_post_process_uplane_dl_flags(entity, in_mb, out_mb, num, nb_err, true);
}

static uint16_t
pdcp_post_process_uplane_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *in_mb[],
			    struct rte_mbuf *out_mb[], uint16_t num, uint16_t *nb_err)
{
	return pdcp_post_process_uplane_dl_flags(entity, in_mb, out_mb, num, nb_err, false);
}

static uint16_t
pdcp_post_process_cplane_sn_12_dl(const struct rte_pdcp_entity *entity,
				  struct rte_mbuf *in_mb[],
				  struct rte_mbuf *out_mb[],
				  uint16_t num, uint16_t *nb_err_ret)
{
	int i, nb_processed, nb_success = 0, nb_err = 0;
	rte_pdcp_dynfield_t *mb_dynfield;
	struct rte_mbuf *err_mb[num];
	struct rte_mbuf *mb;
	uint32_t count;

#ifdef RTE_ARCH_PPC_64
	err_mb[0] = NULL; /* workaround PPC-GCC bug */
#endif
	for (i = 0; i < num; i++) {
		mb = in_mb[i];
		if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED))
			goto error;

		mb_dynfield = pdcp_dynfield(mb);
		count = *mb_dynfield;

		nb_processed = pdcp_post_process_update_entity_state(
				entity, count, mb, &out_mb[nb_success], true);
		if (nb_processed < 0)
			goto error;

		nb_success += nb_processed;
		continue;

error:
		err_mb[nb_err++] = mb;
	}

	if (unlikely(nb_err != 0))
		rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));

	*nb_err_ret = nb_err;
	return nb_success;
}

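/*
 * Select the pre/post processing callbacks based on domain (control/data
 * plane), SN size, direction and, for data plane DL, whether integrity
 * protection is enabled. Combinations not covered below (e.g. control plane
 * with 18-bit SN) are rejected with -ENOTSUP.
 */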
static int
pdcp_pre_post_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
{
	struct entity_priv *en_priv = entity_priv_get(entity);

	entity->pre_process = NULL;
	entity->post_process = NULL;

	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_CONTROL) &&
	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)) {
		entity->pre_process = pdcp_pre_process_cplane_sn_12_ul;
		entity->post_process = pdcp_post_process_ul;
	}

	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_CONTROL) &&
	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK)) {
		entity->pre_process = pdcp_pre_process_cplane_sn_12_dl;
		entity->post_process = pdcp_post_process_cplane_sn_12_dl;
	}

	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)) {
		entity->pre_process = pdcp_pre_process_uplane_sn_12_ul;
		entity->post_process = pdcp_post_process_ul;
	}

	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_18) &&
	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)) {
		entity->pre_process = pdcp_pre_process_uplane_sn_18_ul;
		entity->post_process = pdcp_post_process_ul;
	}

	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
	    (en_priv->flags.is_authenticated)) {
		entity->pre_process = pdcp_pre_process_uplane_sn_12_dl_ip;
		entity->post_process = pdcp_post_process_uplane_dl_ip;
	}

	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
	    (!en_priv->flags.is_authenticated)) {
		entity->pre_process = pdcp_pre_process_uplane_sn_12_dl;
		entity->post_process = pdcp_post_process_uplane_dl;
	}

	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_18) &&
	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
	    (en_priv->flags.is_authenticated)) {
		entity->pre_process = pdcp_pre_process_uplane_sn_18_dl_ip;
		entity->post_process = pdcp_post_process_uplane_dl_ip;
	}

	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
	    (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_18) &&
	    (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
	    (!en_priv->flags.is_authenticated)) {
		entity->pre_process = pdcp_pre_process_uplane_sn_18_dl;
		entity->post_process = pdcp_post_process_uplane_dl;
	}

	if (entity->pre_process == NULL || entity->post_process == NULL)
		return -ENOTSUP;

	return 0;
}

static int
pdcp_entity_priv_populate(struct entity_priv *en_priv, const struct rte_pdcp_entity_conf *conf)
{
	struct rte_crypto_sym_xform *c_xfrm, *a_xfrm;
	int ret;

	ret = pdcp_crypto_xfrm_get(conf, &c_xfrm, &a_xfrm);
	if (ret)
		return ret;

	/**
	 * flags.is_authenticated
	 *
	 * MAC-I is added for control plane packets and whenever the
	 * authentication transform is not NULL.
	 */

	if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_CONTROL) && (a_xfrm == NULL))
		return -EINVAL;

	if (a_xfrm != NULL)
		en_priv->flags.is_authenticated = 1;

	/**
	 * flags.is_cipher_in_bits
	 *
	 * For ZUC & SNOW3G cipher algos, offset & length need to be provided in bits.
	 */

	if ((c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2) ||
	    (c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_ZUC_EEA3))
		en_priv->flags.is_cipher_in_bits = 1;

	/**
	 * flags.is_auth_in_bits
	 *
	 * For ZUC & SNOW3G authentication algos, offset & length need to be provided in bits.
	 */

	if (a_xfrm != NULL) {
		if ((a_xfrm->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) ||
		    (a_xfrm->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3))
			en_priv->flags.is_auth_in_bits = 1;
	}

	/**
	 * flags.is_ul_entity
	 *
	 * Indicates whether the entity is a UL/transmitting PDCP entity.
	 */
	if (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)
		en_priv->flags.is_ul_entity = 1;

	/**
	 * flags.is_null_auth
	 *
	 * For NULL auth, 4 bytes of zeros (the MAC-I field) need to be added by
	 * lib PDCP. Flag the entity so that this is done.
	 */
	if (a_xfrm != NULL && a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL)
		en_priv->flags.is_null_auth = 1;

	/**
	 * flags.is_status_report_required
	 *
	 * Indicates whether a status report is required.
	 */
	if (conf->status_report_required) {
		/** Status report is required only for DL entities. */
		if (conf->pdcp_xfrm.pkt_dir != RTE_SECURITY_PDCP_DOWNLINK)
			return -EINVAL;

		en_priv->flags.is_status_report_required = 1;
	}

	/**
	 * flags.is_out_of_order_delivery
	 *
	 * Indicates whether out-of-order delivery is enabled for the PDCP entity.
	 */
	en_priv->flags.is_out_of_order_delivery = conf->out_of_order_delivery;

	/**
	 * hdr_sz
	 *
	 * PDCP header size of the entity.
	 */
	en_priv->hdr_sz = pdcp_hdr_size_get(conf->pdcp_xfrm.sn_size);

	/**
	 * aad_sz
	 *
	 * For AES-CMAC, an additional message is prepended for processing; it
	 * needs to be trimmed after crypto processing is done.
	 */
	if (a_xfrm != NULL && a_xfrm->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC)
		en_priv->aad_sz = 8;
	else
		en_priv->aad_sz = 0;

	/**
	 * cipher_skip_sz
	 *
	 * When the SDAP protocol is enabled for the PDCP entity, skip the SDAP
	 * header from ciphering.
	 */
	if (conf->pdcp_xfrm.sdap_enabled)
		en_priv->cipher_skip_sz = 1;
	else
		en_priv->cipher_skip_sz = 0;

	return 0;
}

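/*
 * Wire up an entity's IV generation, private flags and pre/post processing
 * callbacks. A minimal usage sketch (hypothetical variable names; an entity
 * would normally be set up via the rte_pdcp entity establish API, which
 * invokes this internally):
 *
 *	nb_pre = entity->pre_process(entity, mbufs, cops, nb_mbufs, &nb_err);
 *	nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, cops, nb_pre);
 *	... poll for completions, recover mbufs from the dequeued ops ...
 *	nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, cops, nb_pre);
 *	nb_out = entity->post_process(entity, mbufs, out_mb, nb_deq, &nb_err);
 */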
int
pdcp_process_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
{
	struct entity_priv *en_priv;
	int ret;

	if (entity == NULL || conf == NULL)
		return -EINVAL;

	en_priv = entity_priv_get(entity);

	ret = pdcp_iv_gen_func_set(entity, conf);
	if (ret)
		return ret;

	ret = pdcp_entity_priv_populate(en_priv, conf);
	if (ret)
		return ret;

	ret = pdcp_pre_post_func_set(entity, conf);
	if (ret)
		return ret;

	return 0;
}