xref: /dpdk/drivers/crypto/ionic/ionic_crypto_main.c (revision 0750c8b115c5f46c4adde4539172cf4ca922089d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2021-2024 Advanced Micro Devices, Inc.
3  */
4 
5 #include <inttypes.h>
6 
7 #include <rte_common.h>
8 #include <rte_malloc.h>
9 #include <rte_bitops.h>
10 
11 #include "ionic_crypto.h"
12 
13 static int
14 iocpt_cq_init(struct iocpt_cq *cq, uint16_t num_descs)
15 {
16 	if (!rte_is_power_of_2(num_descs) ||
17 	    num_descs < IOCPT_MIN_RING_DESC ||
18 	    num_descs > IOCPT_MAX_RING_DESC) {
19 		IOCPT_PRINT(ERR, "%u descriptors (min: %u max: %u)",
20 			num_descs, IOCPT_MIN_RING_DESC, IOCPT_MAX_RING_DESC);
21 		return -EINVAL;
22 	}
23 
24 	cq->num_descs = num_descs;
25 	cq->size_mask = num_descs - 1;
26 	cq->tail_idx = 0;
27 	cq->done_color = 1;
28 
29 	return 0;
30 }
31 
32 static void
33 iocpt_cq_reset(struct iocpt_cq *cq)
34 {
35 	cq->tail_idx = 0;
36 	cq->done_color = 1;
37 
38 	memset(cq->base, 0, sizeof(struct iocpt_nop_comp) * cq->num_descs);
39 }
40 
41 static void
42 iocpt_cq_map(struct iocpt_cq *cq, void *base, rte_iova_t base_pa)
43 {
44 	cq->base = base;
45 	cq->base_pa = base_pa;
46 }
47 
48 uint32_t
49 iocpt_cq_service(struct iocpt_cq *cq, uint32_t work_to_do,
50 		iocpt_cq_cb cb, void *cb_arg)
51 {
52 	uint32_t work_done = 0;
53 
54 	if (work_to_do == 0)
55 		return 0;
56 
57 	while (cb(cq, cq->tail_idx, cb_arg)) {
58 		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
59 		if (cq->tail_idx == 0)
60 			cq->done_color = !cq->done_color;
61 
62 		if (++work_done == work_to_do)
63 			break;
64 	}
65 
66 	return work_done;
67 }
68 
69 static int
70 iocpt_q_init(struct iocpt_queue *q, uint8_t type, uint32_t index,
71 	uint16_t num_descs, uint16_t num_segs, uint32_t socket_id)
72 {
73 	uint32_t ring_size;
74 
75 	if (!rte_is_power_of_2(num_descs))
76 		return -EINVAL;
77 
78 	ring_size = rte_log2_u32(num_descs);
79 	if (ring_size < 2 || ring_size > 16)
80 		return -EINVAL;
81 
82 	q->type = type;
83 	q->index = index;
84 	q->num_descs = num_descs;
85 	q->num_segs = num_segs;
86 	q->size_mask = num_descs - 1;
87 	q->head_idx = 0;
88 	q->tail_idx = 0;
89 
90 	q->info = rte_calloc_socket("iocpt",
91 				num_descs * num_segs, sizeof(void *),
92 				rte_mem_page_size(), socket_id);
93 	if (q->info == NULL) {
94 		IOCPT_PRINT(ERR, "Cannot allocate queue info");
95 		return -ENOMEM;
96 	}
97 
98 	return 0;
99 }
100 
101 static void
102 iocpt_q_reset(struct iocpt_queue *q)
103 {
104 	q->head_idx = 0;
105 	q->tail_idx = 0;
106 }
107 
108 static void
109 iocpt_q_map(struct iocpt_queue *q, void *base, rte_iova_t base_pa)
110 {
111 	q->base = base;
112 	q->base_pa = base_pa;
113 }
114 
115 static void
116 iocpt_q_sg_map(struct iocpt_queue *q, void *base, rte_iova_t base_pa)
117 {
118 	q->sg_base = base;
119 	q->sg_base_pa = base_pa;
120 }
121 
122 static void
123 iocpt_q_free(struct iocpt_queue *q)
124 {
125 	if (q->info != NULL) {
126 		rte_free(q->info);
127 		q->info = NULL;
128 	}
129 }
130 
131 static void
132 iocpt_get_abs_stats(const struct iocpt_dev *dev,
133 		struct rte_cryptodev_stats *stats)
134 {
135 	uint32_t i;
136 
137 	memset(stats, 0, sizeof(*stats));
138 
139 	/* Sum up the per-queue stats counters */
140 	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
141 		struct rte_cryptodev_stats *q_stats = &dev->cryptoqs[i]->stats;
142 
143 		stats->enqueued_count    += q_stats->enqueued_count;
144 		stats->dequeued_count    += q_stats->dequeued_count;
145 		stats->enqueue_err_count += q_stats->enqueue_err_count;
146 		stats->dequeue_err_count += q_stats->dequeue_err_count;
147 	}
148 }
149 
150 void
151 iocpt_get_stats(const struct iocpt_dev *dev, struct rte_cryptodev_stats *stats)
152 {
153 	/* Retrieve the new absolute stats values */
154 	iocpt_get_abs_stats(dev, stats);
155 
156 	/* Subtract the base stats values to get relative values */
157 	stats->enqueued_count    -= dev->stats_base.enqueued_count;
158 	stats->dequeued_count    -= dev->stats_base.dequeued_count;
159 	stats->enqueue_err_count -= dev->stats_base.enqueue_err_count;
160 	stats->dequeue_err_count -= dev->stats_base.dequeue_err_count;
161 }
162 
163 void
164 iocpt_reset_stats(struct iocpt_dev *dev)
165 {
166 	uint32_t i;
167 
168 	/* Erase the per-queue stats counters */
169 	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++)
170 		memset(&dev->cryptoqs[i]->stats, 0,
171 			sizeof(dev->cryptoqs[i]->stats));
172 
173 	/* Update the base stats values */
174 	iocpt_get_abs_stats(dev, &dev->stats_base);
175 }
176 
/*
 * Push a session's key material to the device via the AdminQ, one
 * IOCPT_SESS_KEY_SEG_LEN-sized segment per command, performing the
 * requested session-control operation (init or key update).
 *
 * Returns 0 on success or the first AdminQ error encountered.
 */
static int
iocpt_session_write(struct iocpt_session_priv *priv,
		enum iocpt_sess_control_oper oper)
{
	struct iocpt_dev *dev = priv->dev;
	struct iocpt_admin_ctx ctx = {
		.pending_work = true,
		.cmd.sess_control = {
			.opcode = IOCPT_CMD_SESS_CONTROL,
			.type = priv->type,
			.oper = oper,
			.index = rte_cpu_to_le_32(priv->index),
			.key_len = rte_cpu_to_le_16(priv->key_len),
			/* Segment length is the full key if it fits in one */
			.key_seg_len = (uint8_t)RTE_MIN(priv->key_len,
						IOCPT_SESS_KEY_SEG_LEN),
		},
	};
	struct iocpt_sess_control_cmd *cmd = &ctx.cmd.sess_control;
	uint16_t key_offset;
	uint8_t key_segs, seg;
	int err;

	/* Ceiling division: number of segments needed to cover key_len */
	key_segs = ((priv->key_len - 1) >> IOCPT_SESS_KEY_SEG_SHFT) + 1;

	for (seg = 0; seg < key_segs; seg++) {
		/* Re-arm the context; post_wait clears it on completion */
		ctx.pending_work = true;

		/* NOTE(review): always copies a full segment, even for the
		 * final partial one — assumes priv->key is sized to a whole
		 * number of segments; confirm against the struct definition.
		 */
		key_offset = seg * cmd->key_seg_len;
		memcpy(cmd->key, &priv->key[key_offset],
			IOCPT_SESS_KEY_SEG_LEN);
		cmd->key_seg_idx = seg;

		/* Mark final segment */
		if (seg + 1 == key_segs)
			cmd->flags |= rte_cpu_to_le_16(IOCPT_SCTL_F_END);

		err = iocpt_adminq_post_wait(dev, &ctx);
		if (err != 0)
			return err;
	}

	return 0;
}
220 
221 static int
222 iocpt_session_wdog(struct iocpt_dev *dev)
223 {
224 	struct iocpt_session_priv priv = {
225 		.dev = dev,
226 		.index = IOCPT_Q_WDOG_SESS_IDX,
227 		.type = IOCPT_SESS_AEAD_AES_GCM,
228 		.key_len = IOCPT_Q_WDOG_KEY_LEN,
229 	};
230 
231 	/* Reserve session 0 for queue watchdog */
232 	rte_bitmap_clear(dev->sess_bm, IOCPT_Q_WDOG_SESS_IDX);
233 
234 	return iocpt_session_write(&priv, IOCPT_SESS_INIT);
235 }
236 
237 int
238 iocpt_session_init(struct iocpt_session_priv *priv)
239 {
240 	struct iocpt_dev *dev = priv->dev;
241 	uint64_t bm_slab = 0;
242 	uint32_t bm_pos = 0;
243 	int err = 0;
244 
245 	rte_spinlock_lock(&dev->adminq_lock);
246 
247 	if (rte_bitmap_scan(dev->sess_bm, &bm_pos, &bm_slab) > 0) {
248 		priv->index = bm_pos + rte_ctz64(bm_slab);
249 		rte_bitmap_clear(dev->sess_bm, priv->index);
250 	} else
251 		err = -ENOSPC;
252 
253 	rte_spinlock_unlock(&dev->adminq_lock);
254 
255 	if (err != 0) {
256 		IOCPT_PRINT(ERR, "session index space exhausted");
257 		return err;
258 	}
259 
260 	err = iocpt_session_write(priv, IOCPT_SESS_INIT);
261 	if (err != 0) {
262 		rte_spinlock_lock(&dev->adminq_lock);
263 		rte_bitmap_set(dev->sess_bm, priv->index);
264 		rte_spinlock_unlock(&dev->adminq_lock);
265 		return err;
266 	}
267 
268 	priv->flags |= IOCPT_S_F_INITED;
269 
270 	return 0;
271 }
272 
273 int
274 iocpt_session_update(struct iocpt_session_priv *priv)
275 {
276 	return iocpt_session_write(priv, IOCPT_SESS_UPDATE_KEY);
277 }
278 
279 void
280 iocpt_session_deinit(struct iocpt_session_priv *priv)
281 {
282 	struct iocpt_dev *dev = priv->dev;
283 	struct iocpt_admin_ctx ctx = {
284 		.pending_work = true,
285 		.cmd.sess_control = {
286 			.opcode = IOCPT_CMD_SESS_CONTROL,
287 			.type = priv->type,
288 			.oper = IOCPT_SESS_DISABLE,
289 			.index = rte_cpu_to_le_32(priv->index),
290 			.key_len = rte_cpu_to_le_16(priv->key_len),
291 		},
292 	};
293 
294 	(void)iocpt_adminq_post_wait(dev, &ctx);
295 
296 	rte_spinlock_lock(&dev->adminq_lock);
297 	rte_bitmap_set(dev->sess_bm, priv->index);
298 	rte_spinlock_unlock(&dev->adminq_lock);
299 
300 	priv->flags &= ~IOCPT_S_F_INITED;
301 }
302 
303 static const struct rte_memzone *
304 iocpt_dma_zone_reserve(const char *type_name, uint16_t qid, size_t size,
305 			unsigned int align, int socket_id)
306 {
307 	char zone_name[RTE_MEMZONE_NAMESIZE];
308 	const struct rte_memzone *mz;
309 	int err;
310 
311 	err = snprintf(zone_name, sizeof(zone_name),
312 			"iocpt_%s_%u", type_name, qid);
313 	if (err >= RTE_MEMZONE_NAMESIZE) {
314 		IOCPT_PRINT(ERR, "Name %s too long", type_name);
315 		return NULL;
316 	}
317 
318 	mz = rte_memzone_lookup(zone_name);
319 	if (mz != NULL)
320 		return mz;
321 
322 	return rte_memzone_reserve_aligned(zone_name, size, socket_id,
323 			RTE_MEMZONE_IOVA_CONTIG, align);
324 }
325 
/*
 * Allocate a queue/completion-queue pair (plus optional SG ring) in a
 * single IOVA-contiguous memzone, and initialize the software state of
 * both rings. On success *comq points at the newly allocated structure
 * (of size struct_size, so callers can embed iocpt_common_q).
 *
 * Returns 0 on success, negative errno on failure; *comq is NULL on
 * failure and all intermediate allocations are released.
 */
static int
iocpt_commonq_alloc(struct iocpt_dev *dev,
		uint8_t type,
		size_t struct_size,
		uint32_t socket_id,
		uint32_t index,
		const char *type_name,
		uint16_t flags,
		uint16_t num_descs,
		uint16_t num_segs,
		uint16_t desc_size,
		uint16_t cq_desc_size,
		uint16_t sg_desc_size,
		struct iocpt_common_q **comq)
{
	struct iocpt_common_q *new;
	uint32_t q_size, cq_size, sg_size, total_size;
	void *q_base, *cq_base, *sg_base;
	rte_iova_t q_base_pa = 0;
	rte_iova_t cq_base_pa = 0;
	rte_iova_t sg_base_pa = 0;
	size_t page_size = rte_mem_page_size();
	int err;

	*comq = NULL;

	/* Byte sizes of each ring (uint16 * uint16 cannot overflow u32) */
	q_size	= num_descs * desc_size;
	cq_size = num_descs * cq_desc_size;
	sg_size = num_descs * sg_desc_size;

	/*
	 * Note: aligning q_size/cq_size is not enough due to cq_base address
	 * aligning as q_base could be not aligned to the page.
	 * Adding page_size.
	 */
	total_size = RTE_ALIGN(q_size, page_size) +
		RTE_ALIGN(cq_size, page_size) + page_size;
	if (flags & IOCPT_Q_F_SG)
		total_size += RTE_ALIGN(sg_size, page_size) + page_size;

	new = rte_zmalloc_socket("iocpt", struct_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (new == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate queue structure");
		return -ENOMEM;
	}

	new->dev = dev;

	err = iocpt_q_init(&new->q, type, index, num_descs, num_segs,
			socket_id);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Queue initialization failed");
		goto err_free_q;
	}

	err = iocpt_cq_init(&new->cq, num_descs);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Completion queue initialization failed");
		goto err_deinit_q;
	}

	/* One zone for all three rings; carved up below */
	new->base_z = iocpt_dma_zone_reserve(type_name, index, total_size,
					IONIC_ALIGN, socket_id);
	if (new->base_z == NULL) {
		IOCPT_PRINT(ERR, "Cannot reserve queue DMA memory");
		err = -ENOMEM;
		goto err_deinit_cq;
	}

	new->base = new->base_z->addr;
	new->base_pa = new->base_z->iova;

	/* Descriptor ring at the start of the zone */
	q_base = new->base;
	q_base_pa = new->base_pa;
	iocpt_q_map(&new->q, q_base, q_base_pa);

	/* Completion ring on the next page boundary after the queue */
	cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, page_size);
	cq_base_pa = RTE_ALIGN(q_base_pa + q_size, page_size);
	iocpt_cq_map(&new->cq, cq_base, cq_base_pa);

	/* Optional SG ring on the next page boundary after the CQ */
	if (flags & IOCPT_Q_F_SG) {
		sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size,
			page_size);
		sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, page_size);
		iocpt_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	IOCPT_PRINT(DEBUG, "q_base_pa %#jx cq_base_pa %#jx sg_base_pa %#jx",
		q_base_pa, cq_base_pa, sg_base_pa);

	*comq = new;

	return 0;

err_deinit_cq:
	/* CQ init allocates nothing, so the CQ label falls through */
err_deinit_q:
	iocpt_q_free(&new->q);
err_free_q:
	rte_free(new);
	return err;
}
428 
429 int
430 iocpt_cryptoq_alloc(struct iocpt_dev *dev, uint32_t socket_id, uint32_t index,
431 		uint16_t num_descs)
432 {
433 	struct iocpt_crypto_q *cptq;
434 	uint16_t flags = 0;
435 	int err;
436 
437 	/* CryptoQ always supports scatter-gather */
438 	flags |= IOCPT_Q_F_SG;
439 
440 	IOCPT_PRINT(DEBUG, "cptq %u num_descs %u num_segs %u",
441 		index, num_descs, 1);
442 
443 	err = iocpt_commonq_alloc(dev,
444 		IOCPT_QTYPE_CRYPTOQ,
445 		sizeof(struct iocpt_crypto_q),
446 		socket_id,
447 		index,
448 		"crypto",
449 		flags,
450 		num_descs,
451 		1,
452 		sizeof(struct iocpt_crypto_desc),
453 		sizeof(struct iocpt_crypto_comp),
454 		sizeof(struct iocpt_crypto_sg_desc),
455 		(struct iocpt_common_q **)&cptq);
456 	if (err != 0)
457 		return err;
458 
459 	cptq->flags = flags;
460 
461 	dev->cryptoqs[index] = cptq;
462 
463 	return 0;
464 }
465 
/*
 * Return the doorbell register for a queue.
 *
 * NOTE(review): the doorbell page is indexed by the firmware-assigned
 * hw_type (captured from the q_init completion), not hw_index — this
 * mirrors the ionic net driver; confirm against the device spec.
 */
struct ionic_doorbell *
iocpt_db_map(struct iocpt_dev *dev, struct iocpt_queue *q)
{
	return dev->db_pages + q->hw_type;
}
471 
/*
 * Bring a crypto queue up on the device: reset the software rings, post
 * a Q_INIT admin command with the ring addresses, then capture the
 * hardware identifiers and doorbell from the completion.
 *
 * Returns 0 on success or the AdminQ error.
 */
static int
iocpt_cryptoq_init(struct iocpt_crypto_q *cptq)
{
	struct iocpt_queue *q = &cptq->q;
	struct iocpt_dev *dev = cptq->dev;
	struct iocpt_cq *cq = &cptq->cq;
	struct iocpt_admin_ctx ctx = {
		.pending_work = true,
		.cmd.q_init = {
			.opcode = IOCPT_CMD_Q_INIT,
			.type = IOCPT_QTYPE_CRYPTOQ,
			.ver = dev->qtype_info[IOCPT_QTYPE_CRYPTOQ].version,
			.index = rte_cpu_to_le_32(q->index),
			/* Enable immediately; SG is always on for cryptoqs */
			.flags = rte_cpu_to_le_16(IOCPT_QINIT_F_ENA |
						IOCPT_QINIT_F_SG),
			/* No interrupt: this PMD polls for completions */
			.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
			.ring_size = rte_log2_u32(q->num_descs),
			.ring_base = rte_cpu_to_le_64(q->base_pa),
			.cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
			.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
		},
	};
	int err;

	IOCPT_PRINT(DEBUG, "cptq_init.index %d", q->index);
	IOCPT_PRINT(DEBUG, "cptq_init.ring_base %#jx", q->base_pa);
	IOCPT_PRINT(DEBUG, "cptq_init.ring_size %d",
		ctx.cmd.q_init.ring_size);
	IOCPT_PRINT(DEBUG, "cptq_init.ver %u", ctx.cmd.q_init.ver);

	/* Software state must match the device's fresh view of the rings */
	iocpt_q_reset(q);
	iocpt_cq_reset(cq);

	err = iocpt_adminq_post_wait(dev, &ctx);
	if (err != 0)
		return err;

	/* Firmware assigns the hardware type/index used for doorbells */
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
	q->db = iocpt_db_map(dev, q);

	IOCPT_PRINT(DEBUG, "cptq->hw_type %d", q->hw_type);
	IOCPT_PRINT(DEBUG, "cptq->hw_index %d", q->hw_index);
	IOCPT_PRINT(DEBUG, "cptq->db %p", q->db);

	cptq->flags |= IOCPT_Q_F_INITED;

	return 0;
}
521 
522 static void
523 iocpt_cryptoq_deinit(struct iocpt_crypto_q *cptq)
524 {
525 	struct iocpt_dev *dev = cptq->dev;
526 	struct iocpt_admin_ctx ctx = {
527 		.pending_work = true,
528 		.cmd.q_control = {
529 			.opcode = IOCPT_CMD_Q_CONTROL,
530 			.type = IOCPT_QTYPE_CRYPTOQ,
531 			.index = rte_cpu_to_le_32(cptq->q.index),
532 			.oper = IOCPT_Q_DISABLE,
533 		},
534 	};
535 	unsigned long sleep_usec = 100UL * 1000;
536 	uint32_t sleep_cnt, sleep_max = IOCPT_CRYPTOQ_WAIT;
537 	int err;
538 
539 	for (sleep_cnt = 0; sleep_cnt < sleep_max; sleep_cnt++) {
540 		ctx.pending_work = true;
541 
542 		err = iocpt_adminq_post_wait(dev, &ctx);
543 		if (err != -EAGAIN)
544 			break;
545 
546 		rte_delay_us_block(sleep_usec);
547 	}
548 
549 	if (err != 0)
550 		IOCPT_PRINT(ERR, "Deinit queue %u returned %d after %u ms",
551 			cptq->q.index, err, sleep_cnt * 100);
552 	else
553 		IOCPT_PRINT(DEBUG, "Deinit queue %u returned %d after %u ms",
554 			cptq->q.index, err, sleep_cnt * 100);
555 
556 	IOCPT_PRINT(DEBUG, "Queue %u watchdog: enq %"PRIu64" deq %"PRIu64,
557 		cptq->q.index, cptq->enqueued_wdogs, cptq->dequeued_wdogs);
558 
559 	cptq->flags &= ~IOCPT_Q_F_INITED;
560 }
561 
562 void
563 iocpt_cryptoq_free(struct iocpt_crypto_q *cptq)
564 {
565 	if (cptq == NULL)
566 		return;
567 
568 	if (cptq->base_z != NULL) {
569 		rte_memzone_free(cptq->base_z);
570 		cptq->base_z = NULL;
571 		cptq->base = NULL;
572 		cptq->base_pa = 0;
573 	}
574 
575 	iocpt_q_free(&cptq->q);
576 
577 	rte_free(cptq);
578 }
579 
580 static int
581 iocpt_adminq_alloc(struct iocpt_dev *dev)
582 {
583 	struct iocpt_admin_q *aq;
584 	uint16_t num_descs = IOCPT_ADMINQ_LENGTH;
585 	uint16_t flags = 0;
586 	int err;
587 
588 	err = iocpt_commonq_alloc(dev,
589 		IOCPT_QTYPE_ADMINQ,
590 		sizeof(struct iocpt_admin_q),
591 		rte_socket_id(),
592 		0,
593 		"admin",
594 		flags,
595 		num_descs,
596 		1,
597 		sizeof(struct iocpt_admin_cmd),
598 		sizeof(struct iocpt_admin_comp),
599 		0,
600 		(struct iocpt_common_q **)&aq);
601 	if (err != 0)
602 		return err;
603 
604 	aq->flags = flags;
605 
606 	dev->adminq = aq;
607 
608 	return 0;
609 }
610 
/* Bring the admin queue up on the device (thin wrapper for symmetry). */
static int
iocpt_adminq_init(struct iocpt_dev *dev)
{
	return iocpt_dev_adminq_init(dev);
}
616 
617 static void
618 iocpt_adminq_deinit(struct iocpt_dev *dev)
619 {
620 	dev->adminq->flags &= ~IOCPT_Q_F_INITED;
621 }
622 
623 static void
624 iocpt_adminq_free(struct iocpt_admin_q *aq)
625 {
626 	if (aq->base_z != NULL) {
627 		rte_memzone_free(aq->base_z);
628 		aq->base_z = NULL;
629 		aq->base = NULL;
630 		aq->base_pa = 0;
631 	}
632 
633 	iocpt_q_free(&aq->q);
634 
635 	rte_free(aq);
636 }
637 
/*
 * Allocate all per-device software objects: the crypto-queue pointer
 * array, the admin queue, the device-info DMA region, and the session
 * index bitmap (all indices initially free).
 *
 * Returns 0 on success, negative errno on failure; on failure every
 * partially-acquired resource is released via the goto chain.
 */
static int
iocpt_alloc_objs(struct iocpt_dev *dev)
{
	uint32_t bmsize, i;
	uint8_t *bm;
	int err;

	IOCPT_PRINT(DEBUG, "Crypto: %s", dev->name);

	dev->cryptoqs = rte_calloc_socket("iocpt",
				dev->max_qps, sizeof(*dev->cryptoqs),
				RTE_CACHE_LINE_SIZE, dev->socket_id);
	if (dev->cryptoqs == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate tx queues array");
		return -ENOMEM;
	}

	rte_spinlock_init(&dev->adminq_lock);
	rte_spinlock_init(&dev->adminq_service_lock);

	err = iocpt_adminq_alloc(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot allocate admin queue");
		err = -ENOMEM;
		goto err_free_cryptoqs;
	}

	/* Page-aligned DMA region the device writes its info block into */
	dev->info_sz = RTE_ALIGN(sizeof(*dev->info), rte_mem_page_size());
	dev->info_z = iocpt_dma_zone_reserve("info", 0, dev->info_sz,
					IONIC_ALIGN, dev->socket_id);
	if (dev->info_z == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate dev info memory");
		err = -ENOMEM;
		goto err_free_adminq;
	}

	dev->info = dev->info_z->addr;
	dev->info_pa = dev->info_z->iova;

	/* Session index allocator: bitmap sized for max_sessions */
	bmsize = rte_bitmap_get_memory_footprint(dev->max_sessions);
	bm = rte_malloc_socket("iocpt", bmsize,
			RTE_CACHE_LINE_SIZE, dev->socket_id);
	if (bm == NULL) {
		IOCPT_PRINT(ERR, "Cannot allocate %uB bitmap memory", bmsize);
		err = -ENOMEM;
		goto err_free_dmazone;
	}

	dev->sess_bm = rte_bitmap_init(dev->max_sessions, bm, bmsize);
	if (dev->sess_bm == NULL) {
		IOCPT_PRINT(ERR, "Cannot initialize bitmap");
		err = -EFAULT;
		goto err_free_bm;
	}
	/* A set bit means the session index is free */
	for (i = 0; i < dev->max_sessions; i++)
		rte_bitmap_set(dev->sess_bm, i);

	return 0;

err_free_bm:
	rte_free(bm);
err_free_dmazone:
	rte_memzone_free(dev->info_z);
	dev->info_z = NULL;
	dev->info = NULL;
	dev->info_pa = 0;
err_free_adminq:
	iocpt_adminq_free(dev->adminq);
	dev->adminq = NULL;
err_free_cryptoqs:
	rte_free(dev->cryptoqs);
	dev->cryptoqs = NULL;
	return err;
}
712 
713 static int
714 iocpt_init(struct iocpt_dev *dev)
715 {
716 	int err;
717 
718 	memset(&dev->stats_base, 0, sizeof(dev->stats_base));
719 
720 	/* Uses dev_cmds */
721 	err = iocpt_dev_init(dev, dev->info_pa);
722 	if (err != 0)
723 		return err;
724 
725 	err = iocpt_adminq_init(dev);
726 	if (err != 0)
727 		return err;
728 
729 	/* Write the queue watchdog key */
730 	err = iocpt_session_wdog(dev);
731 	if (err != 0) {
732 		IOCPT_PRINT(ERR, "Cannot setup watchdog session");
733 		goto err_out_adminq_deinit;
734 	}
735 
736 	dev->state |= IOCPT_DEV_F_INITED;
737 
738 	return 0;
739 
740 err_out_adminq_deinit:
741 	iocpt_adminq_deinit(dev);
742 
743 	return err;
744 }
745 
/* No device-level configuration is needed; placeholder for the dev op. */
void
iocpt_configure(struct iocpt_dev *dev)
{
	RTE_SET_USED(dev);
}
751 
752 int
753 iocpt_start(struct iocpt_dev *dev)
754 {
755 	uint32_t i;
756 	int err;
757 
758 	IOCPT_PRINT(DEBUG, "Starting %u queues",
759 		dev->crypto_dev->data->nb_queue_pairs);
760 
761 	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
762 		err = iocpt_cryptoq_init(dev->cryptoqs[i]);
763 		if (err != 0)
764 			return err;
765 	}
766 
767 	dev->state |= IOCPT_DEV_F_UP;
768 
769 	return 0;
770 }
771 
772 void
773 iocpt_stop(struct iocpt_dev *dev)
774 {
775 	uint32_t i;
776 
777 	IOCPT_PRINT_CALL();
778 
779 	dev->state &= ~IOCPT_DEV_F_UP;
780 
781 	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
782 		struct iocpt_crypto_q *cptq = dev->cryptoqs[i];
783 
784 		if (cptq->flags & IOCPT_Q_F_INITED)
785 			(void)iocpt_cryptoq_deinit(cptq);
786 	}
787 }
788 
789 void
790 iocpt_deinit(struct iocpt_dev *dev)
791 {
792 	IOCPT_PRINT_CALL();
793 
794 	if (!(dev->state & IOCPT_DEV_F_INITED))
795 		return;
796 
797 	iocpt_adminq_deinit(dev);
798 
799 	dev->state &= ~IOCPT_DEV_F_INITED;
800 }
801 
802 static void
803 iocpt_free_objs(struct iocpt_dev *dev)
804 {
805 	void **queue_pairs = dev->crypto_dev->data->queue_pairs;
806 	uint32_t i;
807 
808 	IOCPT_PRINT_CALL();
809 
810 	for (i = 0; i < dev->crypto_dev->data->nb_queue_pairs; i++) {
811 		iocpt_cryptoq_free(queue_pairs[i]);
812 		queue_pairs[i] = NULL;
813 	}
814 
815 	if (dev->sess_bm != NULL) {
816 		rte_bitmap_free(dev->sess_bm);
817 		rte_free(dev->sess_bm);
818 		dev->sess_bm = NULL;
819 	}
820 
821 	if (dev->adminq != NULL) {
822 		iocpt_adminq_free(dev->adminq);
823 		dev->adminq = NULL;
824 	}
825 
826 	if (dev->cryptoqs != NULL) {
827 		rte_free(dev->cryptoqs);
828 		dev->cryptoqs = NULL;
829 	}
830 
831 	if (dev->info != NULL) {
832 		rte_memzone_free(dev->info_z);
833 		dev->info_z = NULL;
834 		dev->info = NULL;
835 		dev->info_pa = 0;
836 	}
837 }
838 
/* Parse device arguments; this PMD currently accepts none. */
static int
iocpt_devargs(struct rte_devargs *devargs, struct iocpt_dev *dev)
{
	RTE_SET_USED(devargs);
	RTE_SET_USED(dev);

	return 0;
}
847 
/*
 * Probe and bring up one ionic crypto device: create the cryptodev,
 * record the BARs, validate the firmware signature, identify the
 * device, allocate software objects, initialize the device, and attach
 * the PMD ops.
 *
 * Returns 0 on success, negative errno on any failure (all partial
 * state is unwound via the goto chain).
 */
int
iocpt_probe(void *bus_dev, struct rte_device *rte_dev,
	struct iocpt_dev_bars *bars, const struct iocpt_dev_intf *intf,
	uint8_t driver_id, uint8_t socket_id)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		"iocpt",
		sizeof(struct iocpt_dev),
		socket_id,
		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
	};
	struct rte_cryptodev *cdev;
	struct iocpt_dev *dev;
	uint32_t i, sig;
	int err;

	/* Check structs (trigger error at compilation time) */
	iocpt_struct_size_checks();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		IOCPT_PRINT(ERR, "Multi-process not supported");
		err = -EPERM;
		goto err;
	}

	cdev = rte_cryptodev_pmd_create(rte_dev->name, rte_dev, &init_params);
	if (cdev == NULL) {
		IOCPT_PRINT(ERR, "Out of memory");
		err = -ENOMEM;
		goto err;
	}

	/* Private data was allocated by the cryptodev framework above */
	dev = cdev->data->dev_private;
	dev->crypto_dev = cdev;
	dev->bus_dev = bus_dev;
	dev->intf = intf;
	dev->driver_id = driver_id;
	dev->socket_id = socket_id;

	/* Copy the bus-provided BAR table into the device */
	for (i = 0; i < bars->num_bars; i++) {
		struct ionic_dev_bar *bar = &bars->bar[i];

		/* NOTE(review): %lu assumes bar->len is unsigned long;
		 * PRIu64 would be more portable if len is uint64_t.
		 */
		IOCPT_PRINT(DEBUG,
			"bar[%u] = { .va = %p, .pa = %#jx, .len = %lu }",
			i, bar->vaddr, bar->bus_addr, bar->len);
		if (bar->vaddr == NULL) {
			IOCPT_PRINT(ERR, "Null bar found, aborting");
			err = -EFAULT;
			goto err_destroy_crypto_dev;
		}

		dev->bars.bar[i].vaddr = bar->vaddr;
		dev->bars.bar[i].bus_addr = bar->bus_addr;
		dev->bars.bar[i].len = bar->len;
	}
	dev->bars.num_bars = bars->num_bars;

	err = iocpt_devargs(rte_dev->devargs, dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot parse device arguments");
		goto err_destroy_crypto_dev;
	}

	err = iocpt_setup_bars(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot setup BARs: %d, aborting", err);
		goto err_destroy_crypto_dev;
	}

	/* Reject devices whose info block doesn't carry our signature */
	sig = ioread32(&dev->dev_info->signature);
	if (sig != IOCPT_DEV_INFO_SIGNATURE) {
		IOCPT_PRINT(ERR, "Incompatible firmware signature %#x", sig);
		err = -EFAULT;
		goto err_destroy_crypto_dev;
	}

	/* Copy the firmware version string out of device memory */
	for (i = 0; i < IOCPT_FWVERS_BUFLEN; i++)
		dev->fw_version[i] = ioread8(&dev->dev_info->fw_version[i]);
	dev->fw_version[IOCPT_FWVERS_BUFLEN - 1] = '\0';
	IOCPT_PRINT(DEBUG, "%s firmware: %s", dev->name, dev->fw_version);

	err = iocpt_dev_identify(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot identify device: %d, aborting",
			err);
		goto err_destroy_crypto_dev;
	}

	err = iocpt_alloc_objs(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot alloc device objects: %d", err);
		goto err_destroy_crypto_dev;
	}

	err = iocpt_init(dev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Cannot init device: %d, aborting", err);
		goto err_free_objs;
	}

	err = iocpt_assign_ops(cdev);
	if (err != 0) {
		IOCPT_PRINT(ERR, "Failed to configure opts");
		goto err_deinit_dev;
	}

	return 0;

err_deinit_dev:
	iocpt_deinit(dev);
err_free_objs:
	iocpt_free_objs(dev);
err_destroy_crypto_dev:
	rte_cryptodev_pmd_destroy(cdev);
err:
	return err;
}
965 
966 int
967 iocpt_remove(struct rte_device *rte_dev)
968 {
969 	struct rte_cryptodev *cdev;
970 	struct iocpt_dev *dev;
971 
972 	cdev = rte_cryptodev_pmd_get_named_dev(rte_dev->name);
973 	if (cdev == NULL) {
974 		IOCPT_PRINT(DEBUG, "Cannot find device %s", rte_dev->name);
975 		return -ENODEV;
976 	}
977 
978 	dev = cdev->data->dev_private;
979 
980 	iocpt_deinit(dev);
981 
982 	iocpt_dev_reset(dev);
983 
984 	iocpt_free_objs(dev);
985 
986 	rte_cryptodev_pmd_destroy(cdev);
987 
988 	return 0;
989 }
990 
/* Register the PMD's log type; default verbosity is NOTICE. */
RTE_LOG_REGISTER_DEFAULT(iocpt_logtype, NOTICE);
992