/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>

#include "rte_vhost_crypto.h"
#include "vhost.h"
#include "vhost_user.h"
#include "virtio_crypto.h"

#define INHDR_LEN (sizeof(struct virtio_crypto_inhdr))
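
/*
 * Each crypto op carries its IV in the op's private data area, placed
 * immediately after the rte_crypto_op and rte_crypto_sym_op structures.
 */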
#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op))

RTE_LOG_REGISTER_SUFFIX(vhost_crypto_logtype, crypto, INFO);
#define RTE_LOGTYPE_VHOST_CRYPTO vhost_crypto_logtype

#define VC_LOG_ERR(...) \
	RTE_LOG_LINE_PREFIX(ERR, VHOST_CRYPTO, "%s() line %u: ", \
		__func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)

#define VC_LOG_INFO(...) \
	RTE_LOG_LINE_PREFIX(INFO, VHOST_CRYPTO, "%s() line %u: ", \
		__func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VC_LOG_DBG(...) \
	RTE_LOG_LINE_PREFIX(DEBUG, VHOST_CRYPTO, "%s() line %u: ", \
		__func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)
#else
#define VC_LOG_DBG(...)
#endif

#define VIRTIO_CRYPTO_FEATURES ((1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \
		(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
		(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
		(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
		(1ULL << VIRTIO_F_VERSION_1) | \
		(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))

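/*
 * Translate a guest IOVA within request r into a host virtual address.
 * The length referenced by l is updated to the length actually mapped,
 * and p gives the required access permissions.
 */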
#define IOVA_TO_VVA(t, r, a, l, p) \
	((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))

/*
 * vhost_crypto_desc is used to copy the original vring_desc entries to a
 * local buffer before processing (except the next index). The resulting
 * array of vhost_crypto_desc elements follows the order in which the
 * original vring_desc.next chain is arranged.
 */
#define vhost_crypto_desc vring_desc

static int
cipher_algo_transform(uint32_t virtio_cipher_algo,
		enum rte_crypto_cipher_algorithm *algo)
{
	switch (virtio_cipher_algo) {
	case VIRTIO_CRYPTO_CIPHER_AES_CBC:
		*algo = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_CTR:
		*algo = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_DES_ECB:
		return -VIRTIO_CRYPTO_NOTSUPP;
	case VIRTIO_CRYPTO_CIPHER_DES_CBC:
		*algo = RTE_CRYPTO_CIPHER_DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
		*algo = RTE_CRYPTO_CIPHER_3DES_ECB;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
		*algo = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
		*algo = RTE_CRYPTO_CIPHER_3DES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
		*algo = RTE_CRYPTO_CIPHER_KASUMI_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
		*algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_F8:
		*algo = RTE_CRYPTO_CIPHER_AES_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_XTS:
		*algo = RTE_CRYPTO_CIPHER_AES_XTS;
		break;
	case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
		*algo = RTE_CRYPTO_CIPHER_ZUC_EEA3;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
}

static int
auth_algo_transform(uint32_t virtio_auth_algo,
		enum rte_crypto_auth_algorithm *algo)
{
	switch (virtio_auth_algo) {
	case VIRTIO_CRYPTO_NO_MAC:
		*algo = RTE_CRYPTO_AUTH_NULL;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_MD5:
		*algo = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
		*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
		*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
		*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
		*algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
		*algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case VIRTIO_CRYPTO_MAC_KASUMI_F9:
		*algo = RTE_CRYPTO_AUTH_KASUMI_F9;
		break;
	case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
		*algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
		break;
	case VIRTIO_CRYPTO_MAC_GMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_GMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_XCBC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_3DES:
	case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
	case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
		return -VIRTIO_CRYPTO_NOTSUPP;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
}

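/* Return the IV length in bytes of the cipher algorithm, or -1 if unsupported. */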
static int get_iv_len(enum rte_crypto_cipher_algorithm algo)
{
	int len;

	switch (algo) {
	case RTE_CRYPTO_CIPHER_3DES_CBC:
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		len = 16;
		break;

	/* TODO: add common algos */

	default:
		len = -1;
		break;
	}

	return len;
}

/**
 * The vhost_crypto struct maintains a number of virtio_crypto devices and
 * the one DPDK cryptodev that handles all crypto workloads.
 */
struct __rte_cache_aligned vhost_crypto {
	/** Used to look up a DPDK Cryptodev session based on the VIRTIO
	 * crypto session ID.
	 */
	struct rte_hash *session_map;
	struct rte_mempool *mbuf_pool;
	struct rte_mempool *sess_pool;
	struct rte_mempool *wb_pool;

	/** DPDK cryptodev ID */
	uint8_t cid;
	uint16_t nb_qps;

	uint64_t last_session_id;

	uint64_t cache_session_id;
	struct rte_cryptodev_sym_session *cache_session;
	/** socket id for the device */
	int socket_id;

	struct virtio_net *dev;

	uint8_t option;
};

struct vhost_crypto_writeback_data {
	uint8_t *src;
	uint8_t *dst;
	uint64_t len;
	struct vhost_crypto_writeback_data *next;
};

struct vhost_crypto_data_req {
	struct vring_desc *head;
	struct virtio_net *dev;
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_virtqueue *vq;
	struct vhost_crypto_writeback_data *wb;
	struct rte_mempool *wb_pool;
	uint16_t desc_idx;
	uint16_t len;
	uint16_t zero_copy;
};

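/*
 * Fill a DPDK symmetric cipher transform from the session parameters
 * carried in a VHOST_USER_CRYPTO_CREATE_SESS message.
 */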
static int
transform_cipher_param(struct rte_crypto_sym_xform *xform,
		VhostUserCryptoSessionParam *param)
{
	int ret;

	ret = cipher_algo_transform(param->cipher_algo, &xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
		VC_LOG_DBG("Invalid cipher key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->cipher.key.length = param->cipher_key_len;
	if (xform->cipher.key.length > 0)
		xform->cipher.key.data = param->cipher_key_buf;
	if (param->dir == VIRTIO_CRYPTO_OP_ENCRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	else if (param->dir == VIRTIO_CRYPTO_OP_DECRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
	else {
		VC_LOG_DBG("Bad operation type");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	ret = get_iv_len(xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform->cipher.iv.length = (uint16_t)ret;
	xform->cipher.iv.offset = IV_OFFSET;
	return 0;
}

static int
transform_chain_param(struct rte_crypto_sym_xform *xforms,
		VhostUserCryptoSessionParam *param)
{
	struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
	int ret;

	switch (param->chaining_dir) {
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
		xform_auth = xforms;
		xform_cipher = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
		xform_cipher = xforms;
		xform_auth = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	/* cipher */
	ret = cipher_algo_transform(param->cipher_algo,
			&xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
		VC_LOG_DBG("Invalid cipher key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform_cipher->cipher.key.length = param->cipher_key_len;
	xform_cipher->cipher.key.data = param->cipher_key_buf;
	ret = get_iv_len(xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform_cipher->cipher.iv.length = (uint16_t)ret;
	xform_cipher->cipher.iv.offset = IV_OFFSET;

	/* auth */
	xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	ret = auth_algo_transform(param->hash_algo, &xform_auth->auth.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->auth_key_len > VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH) {
		VC_LOG_DBG("Invalid auth key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform_auth->auth.digest_length = param->digest_len;
	xform_auth->auth.key.length = param->auth_key_len;
	xform_auth->auth.key.data = param->auth_key_buf;

	return 0;
}

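/*
 * Create a cryptodev session for the frontend. On failure, the negated
 * virtio-crypto error status is reported back through session_id.
 */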
static void
vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
		VhostUserCryptoSessionParam *sess_param)
{
	struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
	struct rte_cryptodev_sym_session *session;
	int ret;

	switch (sess_param->op_type) {
	case VIRTIO_CRYPTO_SYM_OP_NONE:
	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
		ret = transform_cipher_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transform session msg (%i)", ret);
			sess_param->session_id = ret;
			return;
		}
		break;
	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
		if (unlikely(sess_param->hash_mode !=
				VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
			sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
			VC_LOG_ERR("Error transform session message (%i)",
					-VIRTIO_CRYPTO_NOTSUPP);
			return;
		}

		xform1.next = &xform2;

		ret = transform_chain_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transform session message (%i)", ret);
			sess_param->session_id = ret;
			return;
		}

		break;
	default:
		VC_LOG_ERR("Algorithm not yet supported");
		sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
		return;
	}

	session = rte_cryptodev_sym_session_create(vcrypto->cid, &xform1,
			vcrypto->sess_pool);
	if (!session) {
		VC_LOG_ERR("Failed to create session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	/* insert the session into the hash map */
	if (rte_hash_add_key_data(vcrypto->session_map,
			&vcrypto->last_session_id, session) < 0) {
		VC_LOG_ERR("Failed to insert session to hash table");

		if (rte_cryptodev_sym_session_free(vcrypto->cid, session) < 0)
			VC_LOG_ERR("Failed to free session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
			vcrypto->last_session_id, vcrypto->dev->vid);

	sess_param->session_id = vcrypto->last_session_id;
	vcrypto->last_session_id++;
}

static int
vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
{
	struct rte_cryptodev_sym_session *session;
	uint64_t sess_id = session_id;
	int ret;

	ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
			(void **)&session);

	if (unlikely(ret < 0)) {
		VC_LOG_ERR("Failed to delete session %"PRIu64".", session_id);
		return -VIRTIO_CRYPTO_INVSESS;
	}

	if (rte_cryptodev_sym_session_free(vcrypto->cid, session) < 0) {
		VC_LOG_DBG("Failed to free session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
		VC_LOG_DBG("Failed to delete session from hash table.");
		return -VIRTIO_CRYPTO_ERR;
	}

	VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
			vcrypto->dev->vid);

	return 0;
}

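/*
 * vhost-user post-message handler: intercepts the crypto session
 * create/close requests after generic vhost-user message handling.
 */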
static enum rte_vhost_msg_result
vhost_crypto_msg_post_handler(int vid, void *msg)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	struct vhu_msg_context *ctx = msg;
	enum rte_vhost_msg_result ret = RTE_VHOST_MSG_RESULT_OK;

	if (dev == NULL) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	vcrypto = dev->extern_data;
	if (vcrypto == NULL) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	switch (ctx->msg.request.frontend) {
	case VHOST_USER_CRYPTO_CREATE_SESS:
		vhost_crypto_create_sess(vcrypto,
				&ctx->msg.payload.crypto_session);
		ctx->fd_num = 0;
		ret = RTE_VHOST_MSG_RESULT_REPLY;
		break;
	case VHOST_USER_CRYPTO_CLOSE_SESS:
		if (vhost_crypto_close_sess(vcrypto, ctx->msg.payload.u64))
			ret = RTE_VHOST_MSG_RESULT_ERR;
		break;
	default:
		ret = RTE_VHOST_MSG_RESULT_NOT_HANDLED;
		break;
	}

	return ret;
}

static __rte_always_inline struct vhost_crypto_desc *
find_write_desc(struct vhost_crypto_desc *head, struct vhost_crypto_desc *desc,
		uint32_t max_n_descs)
{
	if (desc < head)
		return NULL;

	while (desc - head < (int)max_n_descs) {
		if (desc->flags & VRING_DESC_F_WRITE)
			return desc;
		desc++;
	}

	return NULL;
}
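
/*
 * The inhdr (the status byte written back to the guest) always lives in
 * the last descriptor of the chain.
 */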
static __rte_always_inline struct virtio_crypto_inhdr *
reach_inhdr(struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *head,
		uint32_t max_n_descs)
	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
{
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_crypto_desc *last = head + (max_n_descs - 1);
	uint64_t dlen = last->len;

	if (unlikely(dlen != sizeof(*inhdr)))
		return NULL;

	inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, last->addr,
			&dlen, VHOST_ACCESS_WO);
	if (unlikely(!inhdr || dlen != last->len))
		return NULL;

	return inhdr;
}

static __rte_always_inline int
move_desc(struct vhost_crypto_desc *head,
		struct vhost_crypto_desc **cur_desc,
		uint32_t size, uint32_t max_n_descs)
{
	struct vhost_crypto_desc *desc = *cur_desc;
	int left = size - desc->len;

	while (desc->flags & VRING_DESC_F_NEXT && left > 0 &&
			desc >= head &&
			desc - head < (int)max_n_descs) {
		desc++;
		left -= desc->len;
	}

	if (unlikely(left > 0))
		return -1;

	if (unlikely(desc - head == (int)max_n_descs))
		*cur_desc = NULL;
	else
		*cur_desc = desc + 1;

	return 0;
}

static __rte_always_inline void *
get_data_ptr(struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *cur_desc,
		uint8_t perm)
	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
{
	void *data;
	uint64_t dlen = cur_desc->len;

	data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
	if (unlikely(!data || dlen != cur_desc->len)) {
		VC_LOG_ERR("Failed to map object");
		return NULL;
	}

	return data;
}

static __rte_always_inline uint32_t
copy_data_from_desc(void *dst, struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *desc, uint32_t size)
	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
{
	uint64_t remain;
	uint64_t addr;

	remain = RTE_MIN(desc->len, size);
	addr = desc->addr;
	do {
		uint64_t len;
		void *src;

		len = remain;
		src = IOVA_TO_VVA(void *, vc_req, addr, &len, VHOST_ACCESS_RO);
		if (unlikely(src == NULL || len == 0))
			return 0;

		rte_memcpy(dst, src, len);
		remain -= len;
		/* cast is needed for 32-bit architecture */
		dst = RTE_PTR_ADD(dst, (size_t)len);
		addr += len;
	} while (unlikely(remain != 0));

	return RTE_MIN(desc->len, size);
}

static __rte_always_inline int
copy_data(void *data, struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *head, struct vhost_crypto_desc **cur_desc,
		uint32_t size, uint32_t max_n_descs)
	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
{
	struct vhost_crypto_desc *desc = *cur_desc;
	uint32_t left = size;

	do {
		uint32_t copied;

		copied = copy_data_from_desc(data, vc_req, desc, left);
		if (copied == 0)
			return -1;
		left -= copied;
		data = RTE_PTR_ADD(data, copied);
	} while (left != 0 && ++desc < head + max_n_descs);

	if (unlikely(left != 0))
		return -1;

	if (unlikely(desc == head + max_n_descs))
		*cur_desc = NULL;
	else
		*cur_desc = desc + 1;

	return 0;
}
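
/*
 * Copy the processed data back to the guest buffers and return the
 * writeback elements to their mempool. Each source area is zeroed
 * after it has been copied out.
 */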
static void
write_back_data(struct vhost_crypto_data_req *vc_req)
{
	struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;

	while (wb_data) {
		rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
		memset(wb_data->src, 0, wb_data->len);
		wb_last = wb_data;
		wb_data = wb_data->next;
		rte_mempool_put(vc_req->wb_pool, wb_last);
	}
}

static void
free_wb_data(struct vhost_crypto_writeback_data *wb_data,
		struct rte_mempool *mp)
{
	if (wb_data->next != NULL)
		free_wb_data(wb_data->next, mp);

	rte_mempool_put(mp, wb_data);
}

/**
 * The function allocates a vhost_crypto_writeback_data linked list
 * containing the source and destination data pointers for the write back
 * operation after the request is dequeued from the Cryptodev PMD queues.
 *
 * @param vc_req
 *   The vhost crypto data request pointer.
 * @param cur_desc
 *   The pointer to the current in-use descriptor pointer. The content of
 *   cur_desc is expected to be updated after the function execution.
 * @param end_wb_data
 *   The last write back data element to be returned. It is used only in
 *   cipher and hash chain operations.
 * @param src
 *   The source data pointer.
 * @param offset
 *   The offset to both source and destination data. For source data the
 *   offset is the number of bytes between src and the start point of the
 *   cipher operation. For destination data the offset is the number of
 *   bytes from *cur_desc->addr to the point where the src will be
 *   written to.
 * @param write_back_len
 *   The length of the data to write back.
 * @return
 *   The pointer to the start of the write back data linked list.
 */
static __rte_always_inline struct vhost_crypto_writeback_data *
prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *head_desc,
		struct vhost_crypto_desc **cur_desc,
		struct vhost_crypto_writeback_data **end_wb_data,
		uint8_t *src,
		uint32_t offset,
		uint64_t write_back_len,
		uint32_t max_n_descs)
	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
{
	struct vhost_crypto_writeback_data *wb_data, *head = NULL;
	struct vhost_crypto_desc *desc = *cur_desc;
	uint64_t dlen;
	uint8_t *dst;
	int ret;

	ret = rte_mempool_get(vc_req->wb_pool, (void **)&head);
	if (unlikely(ret < 0)) {
		VC_LOG_ERR("no memory");
		goto error_exit;
	}

	wb_data = head;

	if (likely(desc->len > offset)) {
		wb_data->src = src + offset;
		dlen = desc->len;
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
				&dlen, VHOST_ACCESS_RW);
		if (unlikely(!dst || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}

		wb_data->dst = dst + offset;
		wb_data->len = RTE_MIN(dlen - offset, write_back_len);
		write_back_len -= wb_data->len;
		src += offset + wb_data->len;
		offset = 0;

		if (unlikely(write_back_len)) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}

			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	} else
		offset -= desc->len;

	while (write_back_len &&
			desc >= head_desc &&
			desc - head_desc < (int)max_n_descs) {
		desc++;
		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
			VC_LOG_ERR("incorrect descriptor");
			goto error_exit;
		}

		if (desc->len <= offset) {
			offset -= desc->len;
			continue;
		}

		dlen = desc->len;
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
				VHOST_ACCESS_RW) + offset;
		if (unlikely(dst == NULL || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}

		wb_data->src = src + offset;
		wb_data->dst = dst;
		wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
		write_back_len -= wb_data->len;
		src += wb_data->len;
		offset = 0;

		if (write_back_len) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}

			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	}

	if (unlikely(desc - head_desc == (int)max_n_descs))
		*cur_desc = NULL;
	else
		*cur_desc = desc + 1;

	*end_wb_data = wb_data;

	return head;

error_exit:
	if (head)
		free_wb_data(head, vc_req->wb_pool);

	return NULL;
}

static __rte_always_inline uint8_t
vhost_crypto_check_cipher_request(struct virtio_crypto_cipher_data_req *req)
{
	if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
		(req->para.src_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE) &&
		(req->para.dst_data_len >= req->para.src_data_len) &&
		(req->para.dst_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE)))
		return VIRTIO_CRYPTO_OK;
	return VIRTIO_CRYPTO_BADMSG;
}
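
/*
 * Translate one virtio-crypto cipher request into a DPDK crypto op.
 * Returns VIRTIO_CRYPTO_OK (0) on success, or a virtio-crypto error
 * status otherwise.
 */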
static __rte_always_inline uint8_t
prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_cipher_data_req *cipher,
		struct vhost_crypto_desc *head,
		uint32_t max_n_descs)
	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
{
	struct vhost_crypto_desc *desc = head;
	struct vhost_crypto_writeback_data *ewb = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint8_t ret = vhost_crypto_check_cipher_request(cipher);

	if (unlikely(ret != VIRTIO_CRYPTO_OK))
		goto error_exit;

	/* prepare */
	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
			cipher->para.iv_len, max_n_descs))) {
		VC_LOG_ERR("Incorrect virtio descriptor");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_src->data_len = cipher->para.src_data_len;
		rte_mbuf_iova_set(m_src,
				gpa_to_hpa(vcrypto->dev, desc->addr, cipher->para.src_data_len));
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(rte_mbuf_iova_get(m_src) == 0 || m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc, cipher->para.src_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;
		m_src->data_len = cipher->para.src_data_len;
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, head, &desc, cipher->para.src_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect virtio descriptor");
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(head, desc, max_n_descs);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		rte_mbuf_iova_set(m_dst,
				gpa_to_hpa(vcrypto->dev, desc->addr, cipher->para.dst_data_len));
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(rte_mbuf_iova_get(m_dst) == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc, cipher->para.dst_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		m_dst->data_len = cipher->para.dst_data_len;
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
				cipher->para.dst_data_len, max_n_descs);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* record op params */
	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = cipher->para.src_data_len;

	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
	vc_req->len = cipher->para.dst_data_len + INHDR_LEN;

	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);

	vc_req->len = INHDR_LEN;
	return ret;
}

static __rte_always_inline uint8_t
vhost_crypto_check_chain_request(struct virtio_crypto_alg_chain_data_req *req)
{
	if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
		(req->para.src_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.dst_data_len >= req->para.src_data_len) &&
		(req->para.dst_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.cipher_start_src_offset <
			VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.len_to_cipher <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.hash_start_src_offset <
			VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.len_to_hash <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.cipher_start_src_offset + req->para.len_to_cipher <=
			req->para.src_data_len) &&
		(req->para.hash_start_src_offset + req->para.len_to_hash <=
			req->para.src_data_len) &&
		(req->para.dst_data_len + req->para.hash_result_len <=
			VHOST_CRYPTO_MAX_DATA_SIZE)))
		return VIRTIO_CRYPTO_OK;
	return VIRTIO_CRYPTO_BADMSG;
}

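/*
 * Translate one virtio-crypto cipher+hash chaining request into a DPDK
 * crypto op. Returns VIRTIO_CRYPTO_OK (0) on success, or a virtio-crypto
 * error status otherwise.
 */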
static __rte_always_inline uint8_t
prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_alg_chain_data_req *chain,
		struct vhost_crypto_desc *head,
		uint32_t max_n_descs)
	__rte_shared_locks_required(&vc_req->vq->iotlb_lock)
{
	struct vhost_crypto_desc *desc = head, *digest_desc;
	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint32_t digest_offset;
	void *digest_addr;
	uint8_t ret = vhost_crypto_check_chain_request(chain);

	if (unlikely(ret != VIRTIO_CRYPTO_OK))
		goto error_exit;

	/* prepare */
	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
			chain->para.iv_len, max_n_descs) < 0)) {
		VC_LOG_ERR("Incorrect virtio descriptor");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_src->data_len = chain->para.src_data_len;
		m_dst->data_len = chain->para.dst_data_len;

		rte_mbuf_iova_set(m_src,
				gpa_to_hpa(vcrypto->dev, desc->addr, chain->para.src_data_len));
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(rte_mbuf_iova_get(m_src) == 0 || m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc, chain->para.src_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;
		m_src->data_len = chain->para.src_data_len;
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, head, &desc, chain->para.src_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect virtio descriptor");
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(head, desc, max_n_descs);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		rte_mbuf_iova_set(m_dst,
				gpa_to_hpa(vcrypto->dev, desc->addr, chain->para.dst_data_len));
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(rte_mbuf_iova_get(m_dst) == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc,
				chain->para.dst_data_len, max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.hash_result_len);
		op->sym->auth.digest.data = get_data_ptr(vc_req, desc,
				VHOST_ACCESS_RW);
		if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc,
				chain->para.hash_result_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *),
				chain->para.cipher_start_src_offset,
				chain->para.dst_data_len -
					chain->para.cipher_start_src_offset,
				max_n_descs);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		digest_desc = desc;
		digest_offset = m_src->data_len;
		digest_addr = rte_pktmbuf_mtod_offset(m_src, void *,
				digest_offset);

		/* create a wb_data for the digest */
		ewb->next = prepare_write_back_data(vc_req, head, &desc,
				&ewb2, digest_addr, 0,
				chain->para.hash_result_len, max_n_descs);
		if (unlikely(ewb->next == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(copy_data(digest_addr, vc_req, head, &digest_desc,
				chain->para.hash_result_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect virtio descriptor");
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		op->sym->auth.digest.data = digest_addr;
		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,
				digest_offset);
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* record inhdr */
	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;

	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
	op->sym->cipher.data.length = chain->para.src_data_len -
			chain->para.cipher_start_src_offset;

	op->sym->auth.data.offset = chain->para.hash_start_src_offset;
	op->sym->auth.data.length = chain->para.len_to_hash;

	vc_req->len = chain->para.dst_data_len + chain->para.hash_result_len +
			INHDR_LEN;
	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);
	vc_req->len = INHDR_LEN;
	return ret;
}

/**
 * Process one virtio-crypto request from a descriptor chain.
 */
static __rte_always_inline int
vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
		struct vhost_virtqueue *vq, struct rte_crypto_op *op,
		struct vring_desc *head, struct vhost_crypto_desc *descs,
		uint16_t desc_idx)
	__rte_no_thread_safety_analysis /* FIXME: requires iotlb_lock? */
{
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
	struct rte_cryptodev_sym_session *session;
	struct virtio_crypto_op_data_req req;
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_crypto_desc *desc = descs;
	struct vring_desc *src_desc;
	uint64_t session_id;
	uint64_t dlen;
	uint32_t nb_descs = 0, max_n_descs, i;
	int err;

	vc_req->desc_idx = desc_idx;
	vc_req->dev = vcrypto->dev;
	vc_req->vq = vq;

	if (unlikely((head->flags & VRING_DESC_F_INDIRECT) == 0)) {
		VC_LOG_ERR("Invalid descriptor");
		return -1;
	}

	dlen = head->len;
	src_desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
			&dlen, VHOST_ACCESS_RO);
	if (unlikely(!src_desc || dlen != head->len)) {
		VC_LOG_ERR("Invalid descriptor");
		return -1;
	}
	head = src_desc;

	nb_descs = max_n_descs = dlen / sizeof(struct vring_desc);
	if (unlikely(nb_descs > VHOST_CRYPTO_MAX_N_DESC || nb_descs == 0)) {
		err = VIRTIO_CRYPTO_ERR;
		VC_LOG_ERR("Cannot process num of descriptors %u", nb_descs);
		if (nb_descs > 0) {
			struct vring_desc *inhdr_desc = head;
			while (inhdr_desc->flags & VRING_DESC_F_NEXT) {
				if (inhdr_desc->next >= max_n_descs)
					return -1;
				inhdr_desc = &head[inhdr_desc->next];
			}
			if (inhdr_desc->len != sizeof(*inhdr))
				return -1;
			inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *,
					vc_req, inhdr_desc->addr, &dlen,
					VHOST_ACCESS_WO);
			if (unlikely(!inhdr || dlen != inhdr_desc->len))
				return -1;
			inhdr->status = VIRTIO_CRYPTO_ERR;
			return -1;
		}
	}

	/* copy descriptors to local variable */
	for (i = 0; i < max_n_descs; i++) {
		desc->addr = src_desc->addr;
		desc->len = src_desc->len;
		desc->flags = src_desc->flags;
		desc++;
		if (unlikely((src_desc->flags & VRING_DESC_F_NEXT) == 0))
			break;
		if (unlikely(src_desc->next >= max_n_descs)) {
			err = VIRTIO_CRYPTO_BADMSG;
			VC_LOG_ERR("Invalid descriptor");
			goto error_exit;
		}
		src_desc = &head[src_desc->next];
	}

	vc_req->head = head;
	vc_req->zero_copy = vcrypto->option;

	nb_descs = desc - descs;
	desc = descs;

	if (unlikely(desc->len < sizeof(req))) {
		err = VIRTIO_CRYPTO_BADMSG;
		VC_LOG_ERR("Invalid descriptor");
		goto error_exit;
	}

	if (unlikely(copy_data(&req, vc_req, descs, &desc, sizeof(req),
			max_n_descs) < 0)) {
		err = VIRTIO_CRYPTO_BADMSG;
		VC_LOG_ERR("Invalid descriptor");
		goto error_exit;
	}

	/* desc is advanced by 1 now */
	max_n_descs -= 1;

	switch (req.header.opcode) {
	case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
	case VIRTIO_CRYPTO_CIPHER_DECRYPT:
		session_id = req.header.session_id;

		/* one branch to avoid unnecessary table lookup */
		if (vcrypto->cache_session_id != session_id) {
			err = rte_hash_lookup_data(vcrypto->session_map,
					&session_id, (void **)&session);
			if (unlikely(err < 0)) {
				err = VIRTIO_CRYPTO_ERR;
				VC_LOG_ERR("Failed to find session %"PRIu64,
						session_id);
				goto error_exit;
			}

			vcrypto->cache_session = session;
			vcrypto->cache_session_id = session_id;
		}

		session = vcrypto->cache_session;

		err = rte_crypto_op_attach_sym_session(op, session);
		if (unlikely(err < 0)) {
			err = VIRTIO_CRYPTO_ERR;
			VC_LOG_ERR("Failed to attach session to op");
			goto error_exit;
		}

		switch (req.u.sym_req.op_type) {
		case VIRTIO_CRYPTO_SYM_OP_NONE:
			err = VIRTIO_CRYPTO_NOTSUPP;
			break;
		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
					&req.u.sym_req.u.cipher, desc,
					max_n_descs);
			break;
		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
			err = prepare_sym_chain_op(vcrypto, op, vc_req,
					&req.u.sym_req.u.chain, desc,
					max_n_descs);
			break;
		}
		if (unlikely(err != 0)) {
			VC_LOG_ERR("Failed to process sym request");
			goto error_exit;
		}
		break;
	default:
		err = VIRTIO_CRYPTO_ERR;
		VC_LOG_ERR("Unsupported symmetric crypto request type %u",
				req.header.opcode);
		goto error_exit;
	}

	return 0;

error_exit:

	inhdr = reach_inhdr(vc_req, descs, max_n_descs);
	if (likely(inhdr != NULL))
		inhdr->status = (uint8_t)err;

	return -1;
}
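
/*
 * Write the result of a completed crypto op back to the guest and fill
 * the used ring entry. Returns the virtqueue the op belongs to; when
 * old_vq is given and differs, the op is left untouched so the caller
 * can restart from it.
 */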
static __rte_always_inline struct vhost_virtqueue *
vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
		struct vhost_virtqueue *old_vq)
{
	struct rte_mbuf *m_src = op->sym->m_src;
	struct rte_mbuf *m_dst = op->sym->m_dst;
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
	struct vhost_virtqueue *vq;
	uint16_t used_idx, desc_idx;

	if (unlikely(!vc_req)) {
		VC_LOG_ERR("Failed to retrieve vc_req");
		return NULL;
	}
	vq = vc_req->vq;
	used_idx = vc_req->desc_idx;

	if (old_vq && (vq != old_vq))
		return vq;

	if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
		vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
	else {
		if (vc_req->zero_copy == 0)
			write_back_data(vc_req);
	}

	desc_idx = vq->avail->ring[used_idx];
	vq->used->ring[desc_idx].id = vq->avail->ring[desc_idx];
	vq->used->ring[desc_idx].len = vc_req->len;

	rte_mempool_put(m_src->pool, (void *)m_src);

	if (m_dst)
		rte_mempool_put(m_dst->pool, (void *)m_dst);

	return vc_req->vq;
}

static __rte_always_inline uint16_t
vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfd)
{
	uint16_t processed = 1;
	struct vhost_virtqueue *vq, *tmp_vq;

	if (unlikely(nb_ops == 0))
		return 0;

	vq = vhost_crypto_finalize_one_request(ops[0], NULL);
	if (unlikely(vq == NULL))
		return 0;
	tmp_vq = vq;

	while (processed < nb_ops) {
		tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
				tmp_vq);

		if (unlikely(vq != tmp_vq))
			break;

		processed++;
	}

	*callfd = vq->callfd;

	*(volatile uint16_t *)&vq->used->idx += processed;

	return processed;
}

int
rte_vhost_crypto_driver_start(const char *path)
{
	uint64_t protocol_features;
	int ret;

	ret = rte_vhost_driver_set_features(path, VIRTIO_CRYPTO_FEATURES);
	if (ret)
		return -1;

	ret = rte_vhost_driver_get_protocol_features(path, &protocol_features);
	if (ret)
		return -1;
	protocol_features |= (1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
	ret = rte_vhost_driver_set_protocol_features(path, protocol_features);
	if (ret)
		return -1;

	return rte_vhost_driver_start(path);
}

int
rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
		struct rte_mempool *sess_pool,
		int socket_id)
{
	struct virtio_net *dev = get_device(vid);
	struct rte_hash_parameters params = {0};
	struct vhost_crypto *vcrypto;
	char name[128];
	int ret;

	if (!dev) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (!vcrypto) {
		VC_LOG_ERR("Insufficient memory");
		return -ENOMEM;
	}

	vcrypto->sess_pool = sess_pool;
	vcrypto->cid = cryptodev_id;
	vcrypto->cache_session_id = UINT64_MAX;
	vcrypto->last_session_id = 1;
	vcrypto->dev = dev;
	vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;

	snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
	params.name = name;
	params.entries = VHOST_CRYPTO_SESSION_MAP_ENTRIES;
	params.hash_func = rte_jhash;
	params.key_len = sizeof(uint64_t);
	params.socket_id = socket_id;
	vcrypto->session_map = rte_hash_create(&params);
	if (!vcrypto->session_map) {
		VC_LOG_ERR("Failed to create session map");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
	vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
			sizeof(struct vhost_crypto_data_req),
			VHOST_CRYPTO_MAX_DATA_SIZE + RTE_PKTMBUF_HEADROOM,
			rte_socket_id());
	if (!vcrypto->mbuf_pool) {
		VC_LOG_ERR("Failed to create mbuf pool");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
	vcrypto->wb_pool = rte_mempool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE,
			sizeof(struct vhost_crypto_writeback_data),
			128, 0, NULL, NULL, NULL, NULL,
			rte_socket_id(), 0);
	if (!vcrypto->wb_pool) {
		VC_LOG_ERR("Failed to create mempool");
		ret = -ENOMEM;
		goto error_exit;
	}

	dev->extern_data = vcrypto;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;

	return 0;

error_exit:
	rte_hash_free(vcrypto->session_map);
	rte_mempool_free(vcrypto->mbuf_pool);

	rte_free(vcrypto);

	return ret;
}

int
rte_vhost_crypto_free(int vid)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	vcrypto = dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return -ENOENT;
	}

	rte_hash_free(vcrypto->session_map);
	rte_mempool_free(vcrypto->mbuf_pool);
	rte_mempool_free(vcrypto->wb_pool);
	rte_free(vcrypto);

	dev->extern_data = NULL;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = NULL;

	return 0;
}

int
rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	if (unlikely((uint32_t)option >=
			RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
		VC_LOG_ERR("Invalid option %i", option);
		return -EINVAL;
	}

	vcrypto = (struct vhost_crypto *)dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return -ENOENT;
	}

	if (vcrypto->option == (uint8_t)option)
		return 0;

	if (!(rte_mempool_full(vcrypto->mbuf_pool)) ||
			!(rte_mempool_full(vcrypto->wb_pool))) {
		VC_LOG_ERR("Cannot update zero copy as mempool is not full");
		return -EINVAL;
	}

	if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) {
		char name[128];

		snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
		vcrypto->wb_pool = rte_mempool_create(name,
				VHOST_CRYPTO_MBUF_POOL_SIZE,
				sizeof(struct vhost_crypto_writeback_data),
				128, 0, NULL, NULL, NULL, NULL,
				rte_socket_id(), 0);
		if (!vcrypto->wb_pool) {
			VC_LOG_ERR("Failed to create mbuf pool");
			return -ENOMEM;
		}
	} else {
		rte_mempool_free(vcrypto->wb_pool);
		vcrypto->wb_pool = NULL;
	}

	vcrypto->option = (uint8_t)option;

	return 0;
}

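/*
 * A minimal sketch of the datapath loop expected to drive this API,
 * assuming an already-configured cryptodev; the burst size, pool and
 * queue ids below are illustrative, not part of this file:
 *
 *	struct rte_crypto_op *burst_ops[BURST];
 *	int callfds[VIRTIO_CRYPTO_MAX_NUM_BURST_VQS];
 *	uint16_t n, n_fds, j;
 *
 *	rte_crypto_op_bulk_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			burst_ops, BURST);
 *	n = rte_vhost_crypto_fetch_requests(vid, qid, burst_ops, BURST);
 *	n = rte_cryptodev_enqueue_burst(cid, cdev_qid, burst_ops, n);
 *	...
 *	n = rte_cryptodev_dequeue_burst(cid, cdev_qid, burst_ops, BURST);
 *	n = rte_vhost_crypto_finalize_requests(burst_ops, n, callfds, &n_fds);
 *	for (j = 0; j < n_fds; j++)
 *		eventfd_write(callfds[j], (eventfd_t)1);
 */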
uint16_t
rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
	struct vhost_crypto_desc descs[VHOST_CRYPTO_MAX_N_DESC];
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	struct vhost_virtqueue *vq;
	uint16_t avail_idx;
	uint16_t start_idx;
	uint16_t count;
	uint16_t i = 0;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return 0;
	}

	if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
		VC_LOG_ERR("Invalid qid %u", qid);
		return 0;
	}

	vcrypto = (struct vhost_crypto *)dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return 0;
	}

	vq = dev->virtqueue[qid];

	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
	start_idx = vq->last_used_idx;
	count = avail_idx - start_idx;
	count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
	count = RTE_MIN(count, nb_ops);

	if (unlikely(count == 0))
		return 0;

	/* for zero copy, we need 2 empty mbufs for src and dst, otherwise
	 * we need only 1 mbuf as src (dst is written back separately)
	 */
	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
				(void **)mbufs, count * 2) < 0)) {
			VC_LOG_ERR("Insufficient memory");
			return 0;
		}

		for (i = 0; i < count; i++) {
			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
			uint16_t desc_idx = vq->avail->ring[used_idx];
			struct vring_desc *head = &vq->desc[desc_idx];
			struct rte_crypto_op *op = ops[i];

			op->sym->m_src = mbufs[i * 2];
			op->sym->m_dst = mbufs[i * 2 + 1];
			op->sym->m_src->data_off = 0;
			op->sym->m_dst->data_off = 0;

			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
					op, head, descs, used_idx) < 0))
				break;
		}

		if (unlikely(i < count))
			rte_mempool_put_bulk(vcrypto->mbuf_pool,
					(void **)&mbufs[i * 2],
					(count - i) * 2);

		break;

	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
				(void **)mbufs, count) < 0)) {
			VC_LOG_ERR("Insufficient memory");
			return 0;
		}

		for (i = 0; i < count; i++) {
			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
			uint16_t desc_idx = vq->avail->ring[used_idx];
			struct vring_desc *head = &vq->desc[desc_idx];
			struct rte_crypto_op *op = ops[i];

			op->sym->m_src = mbufs[i];
			op->sym->m_dst = NULL;
			op->sym->m_src->data_off = 0;

			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
					op, head, descs, used_idx) < 0))
				break;
		}

		if (unlikely(i < count))
			rte_mempool_put_bulk(vcrypto->mbuf_pool,
					(void **)&mbufs[i],
					count - i);

		break;

	}

	vq->last_used_idx += i;

	return i;
}

uint16_t
rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
{
	struct rte_crypto_op **tmp_ops = ops;
	uint16_t count = 0, left = nb_ops;
	int callfd;
	uint16_t idx = 0;

	while (left) {
		count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
				&callfd);
		if (unlikely(count == 0))
			break;

		tmp_ops = &tmp_ops[count];
		left -= count;

		callfds[idx++] = callfd;

		if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
			VC_LOG_ERR("Too many vqs");
			break;
		}
	}

	*nb_callfds = idx;

	return nb_ops - left;
}