xref: /dpdk/drivers/crypto/ccp/ccp_dev.c (revision f665790a5dbad7b645ff46f31d65e977324e7bfc)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/file.h>
#include <unistd.h>

#include <rte_hexdump.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "ccp_dev.h"
#include "ccp_pmd_private.h"

static TAILQ_HEAD(, ccp_device) ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
static int ccp_dev_id;

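/*
 * Start a cryptodev instance: point the private round-robin cursor at the
 * first probed CCP device so ccp_allot_queue() can walk the list from there.
 */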
int
ccp_dev_start(struct rte_cryptodev *dev)
{
	struct ccp_private *priv = dev->data->dev_private;

	priv->last_dev = TAILQ_FIRST(&ccp_list);
	return 0;
}

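/*
 * Pick a command queue with at least slot_req free descriptor slots. Devices
 * are selected round-robin starting after the last device used, and the
 * selected device's queues are then scanned round-robin. Returns NULL when no
 * queue on the selected device has enough free slots.
 */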
struct ccp_queue *
ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
{
	int i, ret = 0;
	struct ccp_device *dev;
	struct ccp_private *priv = cdev->data->dev_private;

	dev = TAILQ_NEXT(priv->last_dev, next);
	if (unlikely(dev == NULL))
		dev = TAILQ_FIRST(&ccp_list);
	priv->last_dev = dev;
	if (dev->qidx >= dev->cmd_q_count)
		dev->qidx = 0;
	ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
	if (ret >= slot_req)
		return &dev->cmd_q[dev->qidx];
	for (i = 0; i < dev->cmd_q_count; i++) {
		dev->qidx++;
		if (dev->qidx >= dev->cmd_q_count)
			dev->qidx = 0;
		ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
		if (ret >= slot_req)
			return &dev->cmd_q[dev->qidx];
	}
	return NULL;
}

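/*
 * Read one 32-bit value from the hardware TRNG. Each device is polled up to
 * CCP_MAX_TRNG_RETRIES times; a read of zero is treated as "no data available
 * yet". Returns 0 on success, -1 if no device returned a value.
 */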
int
ccp_read_hwrng(uint32_t *value)
{
	struct ccp_device *dev;

	TAILQ_FOREACH(dev, &ccp_list, next) {
		void *vaddr = (void *)(dev->pci->mem_resource[2].addr);

		while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
			*value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
			if (*value) {
				dev->hwrng_retries = 0;
				return 0;
			}
		}
		dev->hwrng_retries = 0;
	}
	return -1;
}

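/*
 * Reserve an IOVA-contiguous memzone for a queue descriptor ring, or reuse an
 * existing memzone of the same name if it is large enough and on a compatible
 * socket.
 */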
static const struct rte_memzone *
ccp_queue_dma_zone_reserve(const char *queue_name,
			   uint32_t queue_size,
			   int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(queue_name);
	if (mz != NULL) {
		if (((size_t)queue_size <= mz->len) &&
		    ((socket_id == SOCKET_ID_ANY) ||
		     (socket_id == mz->socket_id))) {
			CCP_LOG_INFO("re-use memzone already "
				     "allocated for %s", queue_name);
			return mz;
		}
		CCP_LOG_ERR("Incompatible memzone already "
			    "allocated %s, size %u, socket %d. "
			    "Requested size %u, socket %d",
			    queue_name, (uint32_t)mz->len,
			    mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %d",
		     queue_name, queue_size, socket_id);

	return rte_memzone_reserve_aligned(queue_name, queue_size,
			socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}

/* Bitmap helpers used to track allocation of LSB (local storage block) slots */
static inline void
ccp_set_bit(unsigned long *bitmap, int n)
{
	rte_atomic_fetch_or_explicit((unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
		(1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}

static inline void
ccp_clear_bit(unsigned long *bitmap, int n)
{
	rte_atomic_fetch_and_explicit((unsigned long __rte_atomic *)&bitmap[WORD_OFFSET(n)],
		~(1UL << BIT_OFFSET(n)), rte_memory_order_seq_cst);
}

static inline uint32_t
ccp_get_bit(unsigned long *bitmap, int n)
{
	return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
}

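/* Return the index of the first zero bit in @word, or BITS_PER_WORD if none */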
static inline uint32_t
ccp_ffz(unsigned long word)
{
	unsigned long first_zero;

	first_zero = __builtin_ffsl(~word);
	return first_zero ? (first_zero - 1) :
		BITS_PER_WORD;
}

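/* Find the offset of the first zero bit in the bitmap at @addr (at most @limit bits) */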
static inline uint32_t
ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
{
	uint32_t i;
	uint32_t nwords = 0;

	nwords = (limit - 1) / BITS_PER_WORD + 1;
	for (i = 0; i < nwords; i++) {
		if (addr[i] == 0UL)
			return i * BITS_PER_WORD;
		if (addr[i] < ~(0UL))
			break;
	}
	return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
}

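/* Set @len consecutive bits in @map, starting at bit @start */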
static void
ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
{
	unsigned long *p = map + WORD_OFFSET(start);
	const unsigned int size = start + len;
	int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
	unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);

	while (len - bits_to_set >= 0) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_WORD;
		mask_to_set = ~0UL;
		p++;
	}
	if (len) {
		mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
		*p |= mask_to_set;
	}
}

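/* Clear @len consecutive bits in @map, starting at bit @start */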
static void
ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
{
	unsigned long *p = map + WORD_OFFSET(start);
	const unsigned int size = start + len;
	int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
	unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);

	while (len - bits_to_clear >= 0) {
		*p &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_WORD;
		mask_to_clear = ~0UL;
		p++;
	}
	if (len) {
		mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
		*p &= ~mask_to_clear;
	}
}

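/*
 * Core search helper: find the next set bit in the (possibly inverted) bitmap
 * at @addr, starting at bit @start. @invert is 0 to look for set bits and
 * ~0UL to look for zero bits. Returns @nbits if no matching bit is found.
 */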
static unsigned long
_ccp_find_next_bit(const unsigned long *addr,
		   unsigned long nbits,
		   unsigned long start,
		   unsigned long invert)
{
	unsigned long tmp;

	if (!nbits || start >= nbits)
		return nbits;

	tmp = addr[start / BITS_PER_WORD] ^ invert;

	/* Handle 1st word. */
	tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
	start = ccp_round_down(start, BITS_PER_WORD);

	while (!tmp) {
		start += BITS_PER_WORD;
		if (start >= nbits)
			return nbits;

		tmp = addr[start / BITS_PER_WORD] ^ invert;
	}

	return RTE_MIN(start + (__builtin_ffsl(tmp) - 1), nbits);
}

static unsigned long
ccp_find_next_bit(const unsigned long *addr,
		  unsigned long size,
		  unsigned long offset)
{
	return _ccp_find_next_bit(addr, size, offset, 0UL);
}

static unsigned long
ccp_find_next_zero_bit(const unsigned long *addr,
		       unsigned long size,
		       unsigned long offset)
{
	return _ccp_find_next_bit(addr, size, offset, ~0UL);
}

/**
 * ccp_bitmap_find_next_zero_area - find a contiguous zero area in a bitmap
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bit number to start searching at
 * @nr: The number of zeroed bits we're looking for
 *
 * Returns the index of the first bit of the area, or a value greater than
 * @size if no such area exists.
 */
static unsigned long
ccp_bitmap_find_next_zero_area(unsigned long *map,
			       unsigned long size,
			       unsigned long start,
			       unsigned int nr)
{
	unsigned long index, end, i;

again:
	index = ccp_find_next_zero_bit(map, size, start);

	end = index + nr;
	if (end > size)
		return end;
	i = ccp_find_next_bit(map, end, index);
	if (i < end) {
		start = i + 1;
		goto again;
	}
	return index;
}

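/*
 * Allocate @count contiguous LSB slots for a queue: the queue's private LSB
 * is tried first, then the device-wide shared LSB map (protected by
 * lsb_lock). Returns 0 when no run of @count free slots is found; 0 works as
 * a failure value because region 0 is never handed out.
 */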
static uint32_t
ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
{
	struct ccp_device *ccp;
	int start;

	/* First look at the map for the queue */
	if (cmd_q->lsb >= 0) {
		start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
								 LSB_SIZE, 0,
								 count);
		if (start < LSB_SIZE) {
			ccp_bitmap_set(cmd_q->lsbmap, start, count);
			return start + cmd_q->lsb * LSB_SIZE;
		}
	}

	/* try to get an entry from the shared blocks */
	ccp = cmd_q->dev;

	rte_spinlock_lock(&ccp->lsb_lock);

	start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
						    MAX_LSB_CNT * LSB_SIZE,
						    0, count);
	if (start <= MAX_LSB_CNT * LSB_SIZE) {
		ccp_bitmap_set(ccp->lsbmap, start, count);
		rte_spinlock_unlock(&ccp->lsb_lock);
		return start * LSB_ITEM_SIZE;
	}
	CCP_LOG_ERR("NO LSBs available");

	rte_spinlock_unlock(&ccp->lsb_lock);

	return 0;
}

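/* Release LSB slots previously handed out by ccp_lsb_alloc() */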
static void __rte_unused
ccp_lsb_free(struct ccp_queue *cmd_q,
	     unsigned int start,
	     unsigned int count)
{
	int lsbno = start / LSB_SIZE;

	if (!start)
		return;

	if (cmd_q->lsb == lsbno) {
		/* An entry from the private LSB */
		ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
	} else {
		/* From the shared LSBs */
		struct ccp_device *ccp = cmd_q->dev;

		rte_spinlock_lock(&ccp->lsb_lock);
		ccp_bitmap_clear(ccp->lsbmap, start, count);
		rte_spinlock_unlock(&ccp->lsb_lock);
	}
}

static int
ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
{
	int q_mask = 1 << cmd_q->id;
	int weight = 0;
	int j;

	/* Build a bit mask to know which LSBs this queue has access to.
	 * Don't bother with segment 0 as it has special privileges.
	 */
	cmd_q->lsbmask = 0;
	status >>= LSB_REGION_WIDTH;
	for (j = 1; j < MAX_LSB_CNT; j++) {
		if (status & q_mask)
			ccp_set_bit(&cmd_q->lsbmask, j);

		status >>= LSB_REGION_WIDTH;
	}

	for (j = 0; j < MAX_LSB_CNT; j++)
		if (ccp_get_bit(&cmd_q->lsbmask, j))
			weight++;

	CCP_LOG_DBG("Queue %d can access %d LSB regions of mask %lu",
	       (int)cmd_q->id, weight, cmd_q->lsbmask);

	return weight ? 0 : -EINVAL;
}

static int
ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
			     int lsb_cnt, int n_lsbs,
			     unsigned long *lsb_pub)
{
	unsigned long qlsb = 0;
	int bitno = 0;
	int qlsb_wgt = 0;
	int i, j;

	/* For each queue:
	 * If the count of potential LSBs available to a queue matches the
	 * ordinal given to us in lsb_cnt:
	 * Copy the mask of possible LSBs for this queue into "qlsb";
	 * For each bit in qlsb, see if the corresponding bit in the
	 * aggregation mask is set; if so, we have a match.
	 *     If we have a match, clear the bit in the aggregation to
	 *     mark it as no longer available.
	 *     If there is no match, clear the bit in qlsb and keep looking.
	 */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct ccp_queue *cmd_q = &ccp->cmd_q[i];

		qlsb_wgt = 0;
		for (j = 0; j < MAX_LSB_CNT; j++)
			if (ccp_get_bit(&cmd_q->lsbmask, j))
				qlsb_wgt++;

		if (qlsb_wgt == lsb_cnt) {
			qlsb = cmd_q->lsbmask;

			bitno = ffs(qlsb) - 1;
			while (bitno < MAX_LSB_CNT) {
				if (ccp_get_bit(lsb_pub, bitno)) {
					/* We found an available LSB
					 * that this queue can access
					 */
					cmd_q->lsb = bitno;
					ccp_clear_bit(lsb_pub, bitno);
					break;
				}
				ccp_clear_bit(&qlsb, bitno);
				bitno = ffs(qlsb) - 1;
			}
			if (bitno >= MAX_LSB_CNT)
				return -EINVAL;
			n_lsbs--;
		}
	}
	return n_lsbs;
}

/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared
 * resources.
 */
static int
ccp_assign_lsbs(struct ccp_device *ccp)
{
	unsigned long lsb_pub = 0, qlsb = 0;
	int n_lsbs = 0;
	int bitno;
	int i, lsb_cnt;
	int rc = 0;

	rte_spinlock_init(&ccp->lsb_lock);

	/* Create an aggregate bitmap to get a total count of available LSBs */
	for (i = 0; i < ccp->cmd_q_count; i++)
		lsb_pub |= ccp->cmd_q[i].lsbmask;

	for (i = 0; i < MAX_LSB_CNT; i++)
		if (ccp_get_bit(&lsb_pub, i))
			n_lsbs++;

	if (n_lsbs >= ccp->cmd_q_count) {
		/* We have enough LSBs to give every queue a private LSB.
		 * Brute force search to start with the queues that are more
		 * constrained in LSB choice. When an LSB is privately
		 * assigned, it is removed from the public mask.
		 * This is an ugly N squared algorithm with some optimization.
		 */
		for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
		     lsb_cnt++) {
			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
							  &lsb_pub);
			if (rc < 0)
				return -EINVAL;
			n_lsbs = rc;
		}
	}

	rc = 0;
	/* What's left of the LSBs, according to the public mask, now become
	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
	 * that can't be used as a shared resource, so mark the LSB slots for
	 * them as "in use".
	 */
	qlsb = lsb_pub;
	bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
	while (bitno < MAX_LSB_CNT) {
		ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
		ccp_set_bit(&qlsb, bitno);
		bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
	}

	return rc;
}

static int
ccp_add_device(struct ccp_device *dev)
{
	int i;
	uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
	uint64_t status;
	struct ccp_queue *cmd_q;
	const struct rte_memzone *q_mz;
	void *vaddr;

	if (dev == NULL)
		return -1;

	dev->id = ccp_dev_id++;
	dev->qidx = 0;
	vaddr = (void *)(dev->pci->mem_resource[2].addr);

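	/* Device-specific setup performed only for CCP 5.0B parts: program
	 * the TRNG control register, engine and queue masks, queue
	 * priorities, default private LSB masks and clock-gating control.
	 */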
	if (dev->pci->id.device_id == AMD_PCI_CCP_5B) {
		CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
		CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
		for (i = 0; i < 12; i++) {
			CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
				      CCP_READ_REG(vaddr, TRNG_OUT_REG));
		}
		CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
		CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
		CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);

		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);

		CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
	}
	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x0);

	/* Copy the private LSB mask to the public registers */
	status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
	status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
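	/* The LO register holds the low 30 bits of the LSB access mask and
	 * the HI register the remaining bits (cf. the 0x3FFFFFFF and
	 * 0x000003FF masks above), hence the 30-bit shift when combining.
	 */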
	status = ((uint64_t)status_hi << 30) | ((uint64_t)status_lo);

	dev->cmd_q_count = 0;
	/* Find available queues */
	qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if (!(qmr & (1 << i)))
			continue;
		cmd_q = &dev->cmd_q[dev->cmd_q_count++];
		cmd_q->dev = dev;
		cmd_q->id = i;
		cmd_q->qidx = 0;
		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);

		cmd_q->reg_base = (uint8_t *)vaddr +
			CMD_Q_STATUS_INCR * (i + 1);

		/* CCP queue memory */
		snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
			 "%s_%d_%s_%d_%s",
			 "ccp_dev",
			 (int)dev->id, "queue",
			 (int)cmd_q->id, "mem");
		q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
						  cmd_q->qsize, SOCKET_ID_ANY);
		if (q_mz == NULL) {
			CCP_LOG_ERR("Failed to reserve memzone %s",
				    cmd_q->memz_name);
			return -1;
		}
		cmd_q->qbase_addr = (void *)q_mz->addr;
		cmd_q->qbase_desc = (void *)q_mz->addr;
		cmd_q->qbase_phys_addr = q_mz->iova;

		cmd_q->qcontrol = 0;
		/* init control reg to zero */
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
			      cmd_q->qcontrol);

		/* Disable the interrupts */
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
		CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
		CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);

		/* Clear the interrupts */
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
			      ALL_INTERRUPTS);

		/* Configure size of each virtual queue accessible to host */
		cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;

		dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
			      (uint32_t)dma_addr_lo);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
			      (uint32_t)dma_addr_lo);

		dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
		cmd_q->qcontrol |= (dma_addr_hi << 16);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
			      cmd_q->qcontrol);

		/* create LSB Mask map */
		if (ccp_find_lsb_regions(cmd_q, status))
			CCP_LOG_ERR("queue doesn't have lsb regions");
		cmd_q->lsb = -1;

		rte_atomic64_init(&cmd_q->free_slots);
		rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
		/* one slot is kept unused as a barrier between head and tail */
	}

	if (ccp_assign_lsbs(dev))
		CCP_LOG_ERR("Unable to assign lsb region");

	/* pre-allocate LSB slots */
	for (i = 0; i < dev->cmd_q_count; i++) {
		dev->cmd_q[i].sb_key =
			ccp_lsb_alloc(&dev->cmd_q[i], 1);
		dev->cmd_q[i].sb_iv =
			ccp_lsb_alloc(&dev->cmd_q[i], 1);
		dev->cmd_q[i].sb_sha =
			ccp_lsb_alloc(&dev->cmd_q[i], 2);
		dev->cmd_q[i].sb_hmac =
			ccp_lsb_alloc(&dev->cmd_q[i], 2);
	}

	TAILQ_INSERT_TAIL(&ccp_list, dev, next);
	return 0;
}

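/* Remove a device from the global device list; the structure itself is freed by the caller */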
static void
ccp_remove_device(struct ccp_device *dev)
{
	if (dev == NULL)
		return;

	TAILQ_REMOVE(&ccp_list, dev, next);
}

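/*
 * Allocate and initialize a ccp_device for the given PCI device and add it to
 * the global device list. Returns 0 on success, -1 on failure.
 */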
int
ccp_probe_device(struct rte_pci_device *pci_dev)
{
	struct ccp_device *ccp_dev;

	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
			      RTE_CACHE_LINE_SIZE);
	if (ccp_dev == NULL)
		goto fail;

	ccp_dev->pci = pci_dev;

	/* device is valid, add in list */
	if (ccp_add_device(ccp_dev)) {
		ccp_remove_device(ccp_dev);
		goto fail;
	}

	return 0;
fail:
	CCP_LOG_ERR("CCP Device probe failed");
	rte_free(ccp_dev);
	return -1;
}