/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021-2024 Advanced Micro Devices, Inc.
 */

#include <inttypes.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_bitops.h>

#include "ionic_crypto.h"

static int
iocpt_cq_init(struct iocpt_cq *cq, uint16_t num_descs)
{
	if (!rte_is_power_of_2(num_descs) ||
	    num_descs < IOCPT_MIN_RING_DESC ||
	    num_descs > IOCPT_MAX_RING_DESC) {
		IOCPT_PRINT(ERR, "%u descriptors (min: %u max: %u)",
			num_descs, IOCPT_MIN_RING_DESC, IOCPT_MAX_RING_DESC);
		return -EINVAL;
	}

	cq->num_descs = num_descs;
	cq->size_mask = num_descs - 1;
	cq->tail_idx = 0;
	cq->done_color = 1;

	return 0;
}

static void
iocpt_cq_reset(struct iocpt_cq *cq)
{
	cq->tail_idx = 0;
	cq->done_color = 1;

	memset(cq->base, 0, sizeof(struct iocpt_nop_comp) * cq->num_descs);
}

static void
iocpt_cq_map(struct iocpt_cq *cq, void *base, rte_iova_t base_pa)
{
	cq->base = base;
	cq->base_pa = base_pa;
}

uint32_t
iocpt_cq_service(struct iocpt_cq *cq, uint32_t work_to_do,
		iocpt_cq_cb cb, void *cb_arg)
{
	uint32_t work_done = 0;

	if (work_to_do == 0)
		return 0;

	while (cb(cq, cq->tail_idx, cb_arg)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		if (++work_done == work_to_do)
			break;
	}

	return work_done;
}

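/*
 * Usage sketch for iocpt_cq_service() above (illustrative only, not part of
 * the driver). The callback type is inferred from the call site: it receives
 * the cq, the index of the entry to examine, and cb_arg, and returns true
 * while that entry has been written by the device. The color convention is
 * assumed here: the device toggles a color bit in each completion it posts,
 * and an entry is new when that bit matches cq->done_color. The completion
 * layout below is a placeholder, not the real iocpt descriptor.
 *
 *	struct example_comp {
 *		uint8_t data[15];
 *		uint8_t color;
 *	};
 *
 *	static bool
 *	example_cq_cb(struct iocpt_cq *cq, uint16_t cq_desc_index, void *cb_arg)
 *	{
 *		struct example_comp *comp = &((struct example_comp *)
 *						cq->base)[cq_desc_index];
 *
 *		if ((comp->color & 0x1) != cq->done_color)
 *			return false;	// not yet produced by the device
 *
 *		// ... process the completion, using cb_arg as context ...
 *		return true;
 *	}
 *
 *	// In a poll loop, drain at most 32 completions per call:
 *	// uint32_t n = iocpt_cq_service(cq, 32, example_cq_cb, NULL);
 */
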
static int
iocpt_q_init(struct iocpt_queue *q, uint8_t type, uint32_t index,
	uint16_t num_descs, uint16_t num_segs, uint32_t socket_id)
{
	uint32_t ring_size;

	if (!rte_is_power_of_2(num_descs))
		return -EINVAL;

	ring_size = rte_log2_u32(num_descs);
	if (ring_size < 2 || ring_size > 16)
		return -EINVAL;

	q->type = type;
	q->index = index;
	q->num_descs = num_descs;
	q->num_segs = num_segs;
	q->size_mask = num_descs - 1;
	q->head_idx = 0;
	q->tail_idx = 0;

	q->info = rte_calloc_socket("iocpt",
				num_descs * num_segs, sizeof(void *),
				rte_mem_page_size(), socket_id);
	if (q->info == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate queue info");
		return -ENOMEM;
	}

	return 0;
}

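/*
 * Why iocpt_q_init() above insists on a power-of-two ring: with
 * size_mask = num_descs - 1, producer/consumer indices wrap with a single
 * AND instead of a modulo, which is what the Q_NEXT_TO_* helpers are assumed
 * to rely on. Worked example (illustrative):
 *
 *	num_descs = 256  ->  size_mask = 0xff
 *	next = (q->head_idx + 1) & q->size_mask;	// 255 wraps to 0
 *
 * The accepted range (2^2 .. 2^16 descriptors) matches the log2-encoded
 * ring_size that is later programmed into the device in iocpt_cryptoq_init().
 */
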
static void
iocpt_q_reset(struct iocpt_queue *q)
{
	q->head_idx = 0;
	q->tail_idx = 0;
}

static void
iocpt_q_map(struct iocpt_queue *q, void *base, rte_iova_t base_pa)
{
	q->base = base;
	q->base_pa = base_pa;
}

static void
iocpt_q_sg_map(struct iocpt_queue *q, void *base, rte_iova_t base_pa)
{
	q->sg_base = base;
	q->sg_base_pa = base_pa;
}

static void
iocpt_q_free(struct iocpt_queue *q)
{
	if (q->info != NULL) {
		rte_free(q->info);
		q->info = NULL;
	}
}

static int
iocpt_session_write(struct iocpt_session_priv *priv,
		enum iocpt_sess_control_oper oper)
{
	struct iocpt_dev *dev = priv->dev;
	struct iocpt_admin_ctx ctx = {
		.pending_work = true,
		.cmd.sess_control = {
			.opcode = IOCPT_CMD_SESS_CONTROL,
			.type = priv->type,
			.oper = oper,
			.index = rte_cpu_to_le_32(priv->index),
			.key_len = rte_cpu_to_le_16(priv->key_len),
			.key_seg_len = (uint8_t)RTE_MIN(priv->key_len,
						IOCPT_SESS_KEY_SEG_LEN),
		},
	};
	struct iocpt_sess_control_cmd *cmd = &ctx.cmd.sess_control;
	uint16_t key_offset;
	uint8_t key_segs, seg;
	int err;

	key_segs = ((priv->key_len - 1) >> IOCPT_SESS_KEY_SEG_SHFT) + 1;

	for (seg = 0; seg < key_segs; seg++) {
		ctx.pending_work = true;

		key_offset = seg * cmd->key_seg_len;
		memcpy(cmd->key, &priv->key[key_offset],
			IOCPT_SESS_KEY_SEG_LEN);
		cmd->key_seg_idx = seg;

		/* Mark final segment */
		if (seg + 1 == key_segs)
			cmd->flags |= rte_cpu_to_le_16(IOCPT_SCTL_F_END);

		err = iocpt_adminq_post_wait(dev, &ctx);
		if (err != 0)
			return err;
	}

	return 0;
}

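/*
 * Key segmentation in iocpt_session_write() above, worked example. The
 * segment length used here (32 bytes, shift 5) is only an assumption for
 * illustration; the real values come from IOCPT_SESS_KEY_SEG_LEN and
 * IOCPT_SESS_KEY_SEG_SHFT.
 *
 *	key_len = 48
 *	key_segs = ((48 - 1) >> 5) + 1 = 2
 *	seg 0: key bytes [0..31],  key_seg_idx = 0
 *	seg 1: key bytes [32..63], key_seg_idx = 1, IOCPT_SCTL_F_END set
 *
 * Each pass copies a full segment from priv->key, so the backing key buffer
 * is expected to be sized in whole segments even when key_len is not a
 * multiple of the segment length.
 */
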
int
iocpt_session_init(struct iocpt_session_priv *priv)
{
	struct iocpt_dev *dev = priv->dev;
	uint64_t bm_slab = 0;
	uint32_t bm_pos = 0;
	int err = 0;

	rte_spinlock_lock(&dev->adminq_lock);

	if (rte_bitmap_scan(dev->sess_bm, &bm_pos, &bm_slab) > 0) {
		priv->index = bm_pos + rte_ctz64(bm_slab);
		rte_bitmap_clear(dev->sess_bm, priv->index);
	} else
		err = -ENOSPC;

	rte_spinlock_unlock(&dev->adminq_lock);

	if (err != 0) {
		IOCPT_PRINT(ERR, "session index space exhausted");
		return err;
	}

	err = iocpt_session_write(priv, IOCPT_SESS_INIT);
	if (err != 0) {
		rte_spinlock_lock(&dev->adminq_lock);
		rte_bitmap_set(dev->sess_bm, priv->index);
		rte_spinlock_unlock(&dev->adminq_lock);
		return err;
	}

	priv->flags |= IOCPT_S_F_INITED;

	return 0;
}

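/*
 * Session index allocation in iocpt_session_init() above: dev->sess_bm is a
 * bitmap in which a set bit means "index free" (iocpt_alloc_objs() sets every
 * bit up front). rte_bitmap_scan() returns a bit position plus the 64-bit
 * slab starting there; adding rte_ctz64(slab) selects the first free index,
 * which is then cleared to mark it in use. The shape of the pattern:
 *
 *	uint64_t slab = 0;
 *	uint32_t pos = 0, idx;
 *
 *	if (rte_bitmap_scan(bm, &pos, &slab) > 0) {
 *		idx = pos + rte_ctz64(slab);	// first free index
 *		rte_bitmap_clear(bm, idx);	// mark it allocated
 *	}
 */
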
int
iocpt_session_update(struct iocpt_session_priv *priv)
{
	return iocpt_session_write(priv, IOCPT_SESS_UPDATE_KEY);
}

void
iocpt_session_deinit(struct iocpt_session_priv *priv)
{
	struct iocpt_dev *dev = priv->dev;
	struct iocpt_admin_ctx ctx = {
		.pending_work = true,
		.cmd.sess_control = {
			.opcode = IOCPT_CMD_SESS_CONTROL,
			.type = priv->type,
			.oper = IOCPT_SESS_DISABLE,
			.index = rte_cpu_to_le_32(priv->index),
			.key_len = rte_cpu_to_le_16(priv->key_len),
		},
	};

	(void)iocpt_adminq_post_wait(dev, &ctx);

	rte_spinlock_lock(&dev->adminq_lock);
	rte_bitmap_set(dev->sess_bm, priv->index);
	rte_spinlock_unlock(&dev->adminq_lock);

	priv->flags &= ~IOCPT_S_F_INITED;
}

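/*
 * Session lifecycle, as implemented by the three functions above
 * (illustrative call order; error handling omitted):
 *
 *	err = iocpt_session_init(priv);		// allocate index, write key
 *	...
 *	err = iocpt_session_update(priv);	// optional: rewrite the key
 *	...
 *	iocpt_session_deinit(priv);		// disable, release the index
 */
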
static const struct rte_memzone *
iocpt_dma_zone_reserve(const char *type_name, uint16_t qid, size_t size,
			unsigned int align, int socket_id)
{
	char zone_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int err;

	err = snprintf(zone_name, sizeof(zone_name),
			"iocpt_%s_%u", type_name, qid);
	if (err >= RTE_MEMZONE_NAMESIZE) {
		IOCPT_PRINT(ERR, "Name %s too long", type_name);
		return NULL;
	}

	mz = rte_memzone_lookup(zone_name);
	if (mz != NULL)
		return mz;

	return rte_memzone_reserve_aligned(zone_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}

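/*
 * iocpt_dma_zone_reserve() above is deliberately idempotent: memzone names
 * are process-wide, so a zone that already exists under the same name (for
 * example after a queue is torn down and set up again without freeing it) is
 * returned by rte_memzone_lookup() instead of failing the reserve.
 * Illustrative call with hypothetical sizes:
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = iocpt_dma_zone_reserve("crypto", 0, 2 * 4096, IONIC_ALIGN,
 *			rte_socket_id());
 *	// On success, mz->addr is the virtual base and mz->iova the bus
 *	// address of an IOVA-contiguous region named "iocpt_crypto_0".
 */
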
static int
iocpt_commonq_alloc(struct iocpt_dev *dev,
		uint8_t type,
		size_t struct_size,
		uint32_t socket_id,
		uint32_t index,
		const char *type_name,
		uint16_t flags,
		uint16_t num_descs,
		uint16_t num_segs,
		uint16_t desc_size,
		uint16_t cq_desc_size,
		uint16_t sg_desc_size,
		struct iocpt_common_q **comq)
{
	struct iocpt_common_q *new;
	uint32_t q_size, cq_size, sg_size, total_size;
	void *q_base, *cq_base, *sg_base;
	rte_iova_t q_base_pa = 0;
	rte_iova_t cq_base_pa = 0;
	rte_iova_t sg_base_pa = 0;
	size_t page_size = rte_mem_page_size();
	int err;

	*comq = NULL;

	q_size	= num_descs * desc_size;
	cq_size = num_descs * cq_desc_size;
	sg_size = num_descs * sg_desc_size;

	/*
	 * Note: aligning q_size/cq_size alone is not enough, because q_base
	 * may not start on a page boundary, so rounding cq_base up to the
	 * next page can consume up to one extra page. Add page_size to
	 * cover it.
	 */
	total_size = RTE_ALIGN(q_size, page_size) +
		RTE_ALIGN(cq_size, page_size) + page_size;
	if (flags & IOCPT_Q_F_SG)
		total_size += RTE_ALIGN(sg_size, page_size) + page_size;

	new = rte_zmalloc_socket("iocpt", struct_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (new == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate queue structure");
		return -ENOMEM;
	}

	new->dev = dev;

	err = iocpt_q_init(&new->q, type, index, num_descs, num_segs,
			socket_id);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Queue initialization failed");
		goto err_free_q;
	}

	err = iocpt_cq_init(&new->cq, num_descs);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Completion queue initialization failed");
		goto err_deinit_q;
	}

	new->base_z = iocpt_dma_zone_reserve(type_name, index, total_size,
					IONIC_ALIGN, socket_id);
	if (new->base_z == NULL) {
		IOCPT_PRINT(ERR, "Cannot reserve queue DMA memory");
		err = -ENOMEM;
		goto err_deinit_cq;
	}

	new->base = new->base_z->addr;
	new->base_pa = new->base_z->iova;

	q_base = new->base;
	q_base_pa = new->base_pa;
	iocpt_q_map(&new->q, q_base, q_base_pa);

	cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, page_size);
	cq_base_pa = RTE_ALIGN(q_base_pa + q_size, page_size);
	iocpt_cq_map(&new->cq, cq_base, cq_base_pa);

	if (flags & IOCPT_Q_F_SG) {
		sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size,
			page_size);
		sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, page_size);
		iocpt_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	IOCPT_PRINT(DEBUG, "q_base_pa %#jx cq_base_pa %#jx sg_base_pa %#jx",
		q_base_pa, cq_base_pa, sg_base_pa);

	*comq = new;

	return 0;

err_deinit_cq:
err_deinit_q:
	iocpt_q_free(&new->q);
err_free_q:
	rte_free(new);
	return err;
}

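/*
 * Memory layout produced by iocpt_commonq_alloc() above within one
 * IOVA-contiguous memzone (the sg region exists only with IOCPT_Q_F_SG):
 *
 *	base                     -> descriptor ring    (q_size bytes)
 *	round up to next page    -> completion ring    (cq_size bytes)
 *	round up to next page    -> sg descriptor ring (sg_size bytes)
 *
 * Each sub-ring is recorded with both its virtual address and its IOVA so it
 * can later be programmed into the device (see the q_init command built in
 * iocpt_cryptoq_init() below).
 */
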
int
iocpt_cryptoq_alloc(struct iocpt_dev *dev, uint32_t socket_id, uint32_t index,
		uint16_t num_descs)
{
	struct iocpt_crypto_q *cptq;
	uint16_t flags = 0;
	int err;

	/* CryptoQ always supports scatter-gather */
	flags |= IOCPT_Q_F_SG;

	IOCPT_PRINT(DEBUG, "cptq %u num_descs %u num_segs %u",
		index, num_descs, 1);

	err = iocpt_commonq_alloc(dev,
		IOCPT_QTYPE_CRYPTOQ,
		sizeof(struct iocpt_crypto_q),
		socket_id,
		index,
		"crypto",
		flags,
		num_descs,
		1,
		sizeof(struct iocpt_crypto_desc),
		sizeof(struct iocpt_crypto_comp),
		sizeof(struct iocpt_crypto_sg_desc),
		(struct iocpt_common_q **)&cptq);
	if (err != 0)
		return err;

	cptq->flags = flags;

	dev->cryptoqs[index] = cptq;

	return 0;
}

struct ionic_doorbell *
iocpt_db_map(struct iocpt_dev *dev, struct iocpt_queue *q)
{
	return dev->db_pages + q->hw_type;
}

static int
iocpt_cryptoq_init(struct iocpt_crypto_q *cptq)
{
	struct iocpt_queue *q = &cptq->q;
	struct iocpt_dev *dev = cptq->dev;
	struct iocpt_cq *cq = &cptq->cq;
	struct iocpt_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IOCPT_CMD_Q_INIT,
			.type = IOCPT_QTYPE_CRYPTOQ,
			.ver = dev->qtype_info[IOCPT_QTYPE_CRYPTOQ].version,
			.index = rte_cpu_to_le_32(q->index),
			.flags = rte_cpu_to_le_16(IOCPT_QINIT_F_ENA |
						IOCPT_QINIT_F_SG),
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.ring_base = rte_cpu_to_le_64(q->base_pa),
			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
		},
	};
	int err;

	IOCPT_PRINT(DEBUG, "cptq_init.index %d", q->index);
	IOCPT_PRINT(DEBUG, "cptq_init.ring_base %#jx", q->base_pa);
	IOCPT_PRINT(DEBUG, "cptq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IOCPT_PRINT(DEBUG, "cptq_init.ver %u", ctx.cmd.q_init.ver);

	iocpt_q_reset(q);
	iocpt_cq_reset(cq);

	err = iocpt_adminq_post_wait(dev, &ctx);
	if (err != 0)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	q->db = iocpt_db_map(dev, q);

	IOCPT_PRINT(DEBUG, "cptq->hw_type %d", q->hw_type);
	IOCPT_PRINT(DEBUG, "cptq->hw_index %d", q->hw_index);
	IOCPT_PRINT(DEBUG, "cptq->db %p", q->db);

	cptq->flags |= IOCPT_Q_F_INITED;

	return 0;
}

static void
iocpt_cryptoq_deinit(struct iocpt_crypto_q *cptq)
{
	struct iocpt_dev *dev = cptq->dev;
	struct iocpt_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_control = {
			.opcode = IOCPT_CMD_Q_CONTROL,
			.type = IOCPT_QTYPE_CRYPTOQ,
			.index = rte_cpu_to_le_32(cptq->q.index),
			.oper = IOCPT_Q_DISABLE,
		},
	};
	unsigned long sleep_usec = 100UL * 1000;
	uint32_t sleep_cnt, sleep_max = IOCPT_CRYPTOQ_WAIT;
	int err;

	for (sleep_cnt = 0; sleep_cnt < sleep_max; sleep_cnt++) {
		ctx.pending_work = true;

		err = iocpt_adminq_post_wait(dev, &ctx);
		if (err != -EAGAIN)
			break;

		rte_delay_us_block(sleep_usec);
	}

	if (err != 0)
		IOCPT_PRINT(ERR, "Deinit queue %u returned %d after %u ms",
			cptq->q.index, err, sleep_cnt * 100);
	else
		IOCPT_PRINT(DEBUG, "Deinit queue %u returned %d after %u ms",
			cptq->q.index, err, sleep_cnt * 100);

	cptq->flags &= ~IOCPT_Q_F_INITED;
}

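/*
 * The loop in iocpt_cryptoq_deinit() above is a bounded retry: the disable
 * command is reposted while the firmware answers -EAGAIN, sleeping 100 ms
 * between attempts, for at most IOCPT_CRYPTOQ_WAIT tries. The same shape can
 * be reused for other "device busy" admin commands (sketch; 'tries' and
 * 'max_tries' are placeholders):
 *
 *	for (tries = 0; tries < max_tries; tries++) {
 *		ctx.pending_work = true;	// re-arm the context
 *		err = iocpt_adminq_post_wait(dev, &ctx);
 *		if (err != -EAGAIN)
 *			break;
 *		rte_delay_us_block(100 * 1000);
 *	}
 */
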
void
iocpt_cryptoq_free(struct iocpt_crypto_q *cptq)
{
	if (cptq == NULL)
		return;

	if (cptq->base_z != NULL) {
		rte_memzone_free(cptq->base_z);
		cptq->base_z = NULL;
		cptq->base = NULL;
		cptq->base_pa = 0;
	}

	iocpt_q_free(&cptq->q);

	rte_free(cptq);
}

static int
iocpt_adminq_alloc(struct iocpt_dev *dev)
{
	struct iocpt_admin_q *aq;
	uint16_t num_descs = IOCPT_ADMINQ_LENGTH;
	uint16_t flags = 0;
	int err;

	err = iocpt_commonq_alloc(dev,
		IOCPT_QTYPE_ADMINQ,
		sizeof(struct iocpt_admin_q),
		rte_socket_id(),
		0,
		"admin",
		flags,
		num_descs,
		1,
		sizeof(struct iocpt_admin_cmd),
		sizeof(struct iocpt_admin_comp),
		0,
		(struct iocpt_common_q **)&aq);
	if (err != 0)
		return err;

	aq->flags = flags;

	dev->adminq = aq;

	return 0;
}

static int
iocpt_adminq_init(struct iocpt_dev *dev)
{
	return iocpt_dev_adminq_init(dev);
}

static void
iocpt_adminq_deinit(struct iocpt_dev *dev)
{
	dev->adminq->flags &= ~IOCPT_Q_F_INITED;
}

static void
iocpt_adminq_free(struct iocpt_admin_q *aq)
{
	if (aq->base_z != NULL) {
		rte_memzone_free(aq->base_z);
		aq->base_z = NULL;
		aq->base = NULL;
		aq->base_pa = 0;
	}

	iocpt_q_free(&aq->q);

	rte_free(aq);
}

static int
iocpt_alloc_objs(struct iocpt_dev *dev)
{
	uint32_t bmsize, i;
	uint8_t *bm;
	int err;

	IOCPT_PRINT(DEBUG, "Crypto: %s", dev->name);

	dev->cryptoqs = rte_calloc_socket("iocpt",
				dev->max_qps, sizeof(*dev->cryptoqs),
				RTE_CACHE_LINE_SIZE, dev->socket_id);
	if (dev->cryptoqs == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate crypto queues array");
		return -ENOMEM;
	}

	rte_spinlock_init(&dev->adminq_lock);
	rte_spinlock_init(&dev->adminq_service_lock);

	err = iocpt_adminq_alloc(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot allocate admin queue");
		err = -ENOMEM;
		goto err_free_cryptoqs;
	}

	dev->info_sz = RTE_ALIGN(sizeof(*dev->info), rte_mem_page_size());
	dev->info_z = iocpt_dma_zone_reserve("info", 0, dev->info_sz,
					IONIC_ALIGN, dev->socket_id);
	if (dev->info_z == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate dev info memory");
		err = -ENOMEM;
		goto err_free_adminq;
	}

	dev->info = dev->info_z->addr;
	dev->info_pa = dev->info_z->iova;

	bmsize = rte_bitmap_get_memory_footprint(dev->max_sessions);
	bm = rte_malloc_socket("iocpt", bmsize,
			RTE_CACHE_LINE_SIZE, dev->socket_id);
	if (bm == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate %uB bitmap memory", bmsize);
		err = -ENOMEM;
		goto err_free_dmazone;
	}

	dev->sess_bm = rte_bitmap_init(dev->max_sessions, bm, bmsize);
	if (dev->sess_bm == NULL) {
		IOCPT_PRINT(ERR, "Cannot initialize bitmap");
		err = -EFAULT;
		goto err_free_bm;
	}
	for (i = 0; i < dev->max_sessions; i++)
		rte_bitmap_set(dev->sess_bm, i);

	return 0;

err_free_bm:
	rte_free(bm);
err_free_dmazone:
	rte_memzone_free(dev->info_z);
	dev->info_z = NULL;
	dev->info = NULL;
	dev->info_pa = 0;
err_free_adminq:
	iocpt_adminq_free(dev->adminq);
	dev->adminq = NULL;
err_free_cryptoqs:
	rte_free(dev->cryptoqs);
	dev->cryptoqs = NULL;
	return err;
}

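/*
 * Resources created by iocpt_alloc_objs() above and where they are released
 * (teardown happens in iocpt_free_objs() below, in roughly reverse order):
 *
 *	dev->cryptoqs  crypto queue pointer array  -> rte_free()
 *	dev->adminq    admin queue and DMA rings   -> iocpt_adminq_free()
 *	dev->info      device info DMA region      -> rte_memzone_free()
 *	dev->sess_bm   session-index bitmap        -> rte_bitmap_free() + rte_free()
 */
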
static int
iocpt_init(struct iocpt_dev *dev)
{
	int err;

	/* Uses dev_cmds */
	err = iocpt_dev_init(dev, dev->info_pa);
	if (err != 0)
		return err;

	err = iocpt_adminq_init(dev);
	if (err != 0)
		return err;

	dev->state |= IOCPT_DEV_F_INITED;

	return 0;
}

void
iocpt_configure(struct iocpt_dev *dev)
{
	RTE_SET_USED(dev);
}

int
iocpt_start(struct iocpt_dev *dev)
{
	uint32_t i;
	int err;

	IOCPT_PRINT(DEBUG, "Starting %u queues",
		dev->crypto_dev->data->nb_queue_pairs);

	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
		err = iocpt_cryptoq_init(dev->cryptoqs[i]);
		if (err != 0)
			return err;
	}

	dev->state |= IOCPT_DEV_F_UP;

	return 0;
}

void
iocpt_stop(struct iocpt_dev *dev)
{
	uint32_t i;

	IOCPT_PRINT_CALL();

	dev->state &= ~IOCPT_DEV_F_UP;

	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
		struct iocpt_crypto_q *cptq = dev->cryptoqs[i];

		if (cptq->flags & IOCPT_Q_F_INITED)
			(void)iocpt_cryptoq_deinit(cptq);
	}
}

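/*
 * Device bring-up and tear-down order implemented in this file (summary; the
 * crypto PMD ops are assumed to invoke start/stop between probe and remove):
 *
 *	iocpt_probe()   -> iocpt_alloc_objs() + iocpt_init()
 *	iocpt_start()   -> iocpt_cryptoq_init() on each configured queue pair
 *	iocpt_stop()    -> iocpt_cryptoq_deinit() on each initialized queue pair
 *	iocpt_remove()  -> iocpt_deinit() + iocpt_dev_reset() + iocpt_free_objs()
 */
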
void
iocpt_deinit(struct iocpt_dev *dev)
{
	IOCPT_PRINT_CALL();

	if (!(dev->state & IOCPT_DEV_F_INITED))
		return;

	iocpt_adminq_deinit(dev);

	dev->state &= ~IOCPT_DEV_F_INITED;
}

static void
iocpt_free_objs(struct iocpt_dev *dev)
{
	void **queue_pairs = dev->crypto_dev->data->queue_pairs;
	uint32_t i;

	IOCPT_PRINT_CALL();

	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
		iocpt_cryptoq_free(queue_pairs[i]);
		queue_pairs[i] = NULL;
	}

	if (dev->sess_bm != NULL) {
		rte_bitmap_free(dev->sess_bm);
		rte_free(dev->sess_bm);
		dev->sess_bm = NULL;
	}

	if (dev->adminq != NULL) {
		iocpt_adminq_free(dev->adminq);
		dev->adminq = NULL;
	}

	if (dev->cryptoqs != NULL) {
		rte_free(dev->cryptoqs);
		dev->cryptoqs = NULL;
	}

	if (dev->info != NULL) {
		rte_memzone_free(dev->info_z);
		dev->info_z = NULL;
		dev->info = NULL;
		dev->info_pa = 0;
	}
}

static int
iocpt_devargs(struct rte_devargs *devargs, struct iocpt_dev *dev)
{
	RTE_SET_USED(devargs);
	RTE_SET_USED(dev);

	return 0;
}

int
iocpt_probe(void *bus_dev, struct rte_device *rte_dev,
	struct iocpt_dev_bars *bars, const struct iocpt_dev_intf *intf,
	uint8_t driver_id, uint8_t socket_id)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"iocpt",
		sizeof(struct iocpt_dev),
		socket_id,
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	};
	struct rte_cryptodev *cdev;
	struct iocpt_dev *dev;
	uint32_t i, sig;
	int err;

	/* Check structs (trigger error at compilation time) */
	iocpt_struct_size_checks();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		IOCPT_PRINT(ERR, "Multi-process not supported");
		err = -EPERM;
		goto err;
	}

	cdev = rte_cryptodev_pmd_create(rte_dev->name, rte_dev, &init_params);
	if (cdev == NULL) {
		IOCPT_PRINT(ERR, "Out of memory");
		err = -ENOMEM;
		goto err;
	}

	dev = cdev->data->dev_private;
	dev->crypto_dev = cdev;
	dev->bus_dev = bus_dev;
	dev->intf = intf;
	dev->driver_id = driver_id;
	dev->socket_id = socket_id;

	for (i = 0; i < bars->num_bars; i++) {
		struct ionic_dev_bar *bar = &bars->bar[i];

		IOCPT_PRINT(DEBUG,
			"bar[%u] = { .va = %p, .pa = %#jx, .len = %lu }",
			i, bar->vaddr, bar->bus_addr, bar->len);
		if (bar->vaddr == NULL) {
			IOCPT_PRINT(ERR, "Null bar found, aborting");
			err = -EFAULT;
			goto err_destroy_crypto_dev;
		}

		dev->bars.bar[i].vaddr = bar->vaddr;
		dev->bars.bar[i].bus_addr = bar->bus_addr;
		dev->bars.bar[i].len = bar->len;
	}
	dev->bars.num_bars = bars->num_bars;

	err = iocpt_devargs(rte_dev->devargs, dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot parse device arguments");
		goto err_destroy_crypto_dev;
	}

	err = iocpt_setup_bars(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot setup BARs: %d, aborting", err);
		goto err_destroy_crypto_dev;
	}

	sig = ioread32(&dev->dev_info->signature);
	if (sig != IOCPT_DEV_INFO_SIGNATURE) {
		IOCPT_PRINT(ERR, "Incompatible firmware signature %#x", sig);
		err = -EFAULT;
		goto err_destroy_crypto_dev;
	}

	for (i = 0; i < IOCPT_FWVERS_BUFLEN; i++)
		dev->fw_version[i] = ioread8(&dev->dev_info->fw_version[i]);
	dev->fw_version[IOCPT_FWVERS_BUFLEN - 1] = '\0';
	IOCPT_PRINT(DEBUG, "%s firmware: %s", dev->name, dev->fw_version);

	err = iocpt_dev_identify(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot identify device: %d, aborting",
			err);
		goto err_destroy_crypto_dev;
	}

	err = iocpt_alloc_objs(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot alloc device objects: %d", err);
		goto err_destroy_crypto_dev;
	}

	err = iocpt_init(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot init device: %d, aborting", err);
		goto err_free_objs;
	}

	err = iocpt_assign_ops(cdev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Failed to configure ops");
		goto err_deinit_dev;
	}

	return 0;

err_deinit_dev:
	iocpt_deinit(dev);
err_free_objs:
	iocpt_free_objs(dev);
err_destroy_crypto_dev:
	rte_cryptodev_pmd_destroy(cdev);
err:
	return err;
}

int
iocpt_remove(struct rte_device *rte_dev)
{
	struct rte_cryptodev *cdev;
	struct iocpt_dev *dev;

	cdev = rte_cryptodev_pmd_get_named_dev(rte_dev->name);
	if (cdev == NULL) {
		IOCPT_PRINT(DEBUG, "Cannot find device %s", rte_dev->name);
		return -ENODEV;
	}

	dev = cdev->data->dev_private;

	iocpt_deinit(dev);

	iocpt_dev_reset(dev);

	iocpt_free_objs(dev);

	rte_cryptodev_pmd_destroy(cdev);

	return 0;
}

RTE_LOG_REGISTER_DEFAULT(iocpt_logtype, NOTICE);