xref: /dpdk/drivers/common/qat/qat_qp.c (revision 9e991f217fc8719e38a812dc280dba5f84db9f59)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2018 Intel Corporation
 */

#include <rte_common.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_device.h"
#include "qat_qp.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_comp.h"
#include "adf_transport_access_macros.h"


#define ADF_MAX_DESC				4096
#define ADF_MIN_DESC				128

#define ADF_ARB_REG_SLOT			0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET		0x19C

#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
	(ADF_ARB_REG_SLOT * index), value)
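
/*
 * A note on the layout implied by this macro: each bundle's CSR block
 * appears to occupy an ADF_ARB_REG_SLOT (0x1000-byte) slot in BAR 0, so
 * the ring-arbiter enable register for bundle N sits at 0x19C + N * 0x1000.
 * The register holds one enable bit per ring; adf_queue_arb_enable() and
 * adf_queue_arb_disable() below compute the same offset and flip the bit
 * for a tx ring under a spinlock.
 */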

__extension__
const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
					 [ADF_MAX_QPS_ON_ANY_SERVICE] = {
	/* queue pairs which provide an asymmetric crypto service */
	[QAT_SERVICE_ASYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_ASYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 0,
			.rx_ring_num = 8,
			.tx_msg_size = 64,
			.rx_msg_size = 32,
		}, {
			.service_type = QAT_SERVICE_ASYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 1,
			.rx_ring_num = 9,
			.tx_msg_size = 64,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a symmetric crypto service */
	[QAT_SERVICE_SYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_SYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 2,
			.rx_ring_num = 10,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}, {
			.service_type = QAT_SERVICE_SYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 3,
			.rx_ring_num = 11,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a compression service */
	[QAT_SERVICE_COMPRESSION] = {
		{
			.service_type = QAT_SERVICE_COMPRESSION,
			.hw_bundle_num = 0,
			.tx_ring_num = 6,
			.rx_ring_num = 14,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}, {
			.service_type = QAT_SERVICE_COMPRESSION,
			.hw_bundle_num = 0,
			.tx_ring_num = 7,
			.rx_ring_num = 15,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	}
};
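
/*
 * The ring numbering above follows a fixed pairing: every rx ring is its
 * tx ring plus 8 (asym 0/1 -> 8/9, sym 2/3 -> 10/11, comp 6/7 -> 14/15),
 * which implies a 16-ring bundle on gen1 devices.
 */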

__extension__
const struct qat_qp_hw_data qat_gen3_qps[QAT_MAX_SERVICES]
					 [ADF_MAX_QPS_ON_ANY_SERVICE] = {
	/* queue pairs which provide an asymmetric crypto service */
	[QAT_SERVICE_ASYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_ASYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 0,
			.rx_ring_num = 4,
			.tx_msg_size = 64,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a symmetric crypto service */
	[QAT_SERVICE_SYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_SYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 1,
			.rx_ring_num = 5,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a compression service */
	[QAT_SERVICE_COMPRESSION] = {
		{
			.service_type = QAT_SERVICE_COMPRESSION,
			.hw_bundle_num = 0,
			.tx_ring_num = 3,
			.rx_ring_num = 7,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	}
};
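
/*
 * Gen3 keeps the same tx/rx pairing idea but with a stride of 4
 * (rx ring == tx ring + 4), suggesting an 8-ring bundle with a single
 * qp per service instead of gen1's two.
 */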

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
	uint32_t queue_size_bytes);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct qat_pci_device *qat_dev,
	struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *qp);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
	rte_spinlock_t *lock);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
	rte_spinlock_t *lock);


int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
		enum qat_service_type service)
{
	int i, count;

	for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++)
		if (qp_hw_data[i].service_type == service)
			count++;
	return count;
}
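
/*
 * Illustrative usage (not called from this file): a caller holding the
 * gen1 table can count the qps available for a service with, e.g.,
 *
 *	int n = qat_qps_per_service(qat_gen1_qps[QAT_SERVICE_SYMMETRIC],
 *			QAT_SERVICE_SYMMETRIC);
 *
 * which evaluates to 2 given the table above.
 */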

static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
			int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(queue_name);
	if (mz != NULL) {
		if (((size_t)queue_size <= mz->len) &&
				((socket_id == SOCKET_ID_ANY) ||
					(socket_id == mz->socket_id))) {
			QAT_LOG(DEBUG, "re-use memzone already "
					"allocated for %s", queue_name);
			return mz;
		}

		QAT_LOG(ERR, "Incompatible memzone already "
				"allocated %s, size %u, socket %d. "
				"Requested size %u, socket %d",
				queue_name, (uint32_t)mz->len,
				mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %d",
					queue_name, queue_size, socket_id);
	return rte_memzone_reserve_aligned(queue_name, queue_size,
		socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}
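
/*
 * Note that queue_size is passed both as the reservation size and as the
 * alignment: ring bases must be naturally aligned to the ring size in
 * bytes (this is what qat_qp_check_queue_alignment() later verifies),
 * and RTE_MEMZONE_IOVA_CONTIG guarantees the ring is physically
 * contiguous so the device can DMA to it as one block.
 */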

int qat_qp_setup(struct qat_pci_device *qat_dev,
		struct qat_qp **qp_addr,
		uint16_t queue_pair_id,
		struct qat_qp_config *qat_qp_conf)
{
	struct qat_qp *qp;
	struct rte_pci_device *pci_dev = qat_dev->pci_dev;
	char op_cookie_pool_name[RTE_RING_NAMESIZE];
	uint32_t i;

	QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
		queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen);

	if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) ||
		(qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) {
		QAT_LOG(ERR, "Can't create qp for %u descriptors",
				qat_qp_conf->nb_descriptors);
		return -EINVAL;
	}

	if (pci_dev->mem_resource[0].addr == NULL) {
		QAT_LOG(ERR, "Could not find VF config space "
				"(UIO driver attached?).");
		return -EINVAL;
	}

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("qat PMD qp metadata",
				sizeof(*qp), RTE_CACHE_LINE_SIZE,
				qat_qp_conf->socket_id);
	if (qp == NULL) {
		QAT_LOG(ERR, "Failed to alloc mem for qp struct");
		return -ENOMEM;
	}
	qp->nb_descriptors = qat_qp_conf->nb_descriptors;
	qp->op_cookies = rte_zmalloc_socket("qat PMD op cookie pointer",
			qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
			RTE_CACHE_LINE_SIZE, qat_qp_conf->socket_id);
	if (qp->op_cookies == NULL) {
		QAT_LOG(ERR, "Failed to alloc mem for cookie");
		rte_free(qp);
		return -ENOMEM;
	}

	qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
	qp->enqueued = qp->dequeued = 0;

	if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf,
					ADF_RING_DIR_TX) != 0) {
		QAT_LOG(ERR, "Tx queue create failed "
				"queue_pair_id=%u", queue_pair_id);
		goto create_err;
	}

	qp->max_inflights = ADF_MAX_INFLIGHTS(qp->tx_q.queue_size,
				ADF_BYTES_TO_MSG_SIZE(qp->tx_q.msg_size));

	if (qp->max_inflights < 2) {
		QAT_LOG(ERR, "Invalid num inflights");
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf,
					ADF_RING_DIR_RX) != 0) {
		QAT_LOG(ERR, "Rx queue create failed "
				"queue_pair_id=%u", queue_pair_id);
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	adf_configure_queues(qp);
	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr,
					&qat_dev->arb_csr_lock);

	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
					"%s%d_cookies_%s_qp%hu",
		pci_dev->driver->driver.name, qat_dev->qat_dev_id,
		qat_qp_conf->service_str, queue_pair_id);

	QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name);
	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
	if (qp->op_cookie_pool == NULL)
		qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
				qp->nb_descriptors,
				qat_qp_conf->cookie_size, 64, 0,
				NULL, NULL, NULL, NULL,
				qat_dev->pci_dev->device.numa_node,
				0);
	if (qp->op_cookie_pool == NULL) {
		QAT_LOG(ERR, "QAT PMD Cannot create op mempool");
		goto create_err;
	}

	for (i = 0; i < qp->nb_descriptors; i++) {
		if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
			QAT_LOG(ERR, "QAT PMD Cannot get op_cookie");
			goto create_err;
		}
		memset(qp->op_cookies[i], 0, qat_qp_conf->cookie_size);
	}

	qp->qat_dev_gen = qat_dev->qat_dev_gen;
	qp->build_request = qat_qp_conf->build_request;
	qp->service_type = qat_qp_conf->hw->service_type;
	qp->qat_dev = qat_dev;

	QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s",
			queue_pair_id, op_cookie_pool_name);

	*qp_addr = qp;
	return 0;

create_err:
	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);
	rte_free(qp->op_cookies);
	rte_free(qp);
	return -EFAULT;
}
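
/*
 * A note on the op_cookies array populated above: there is exactly one
 * cookie per descriptor slot, and the enqueue/dequeue paths below find a
 * request's cookie by its slot index (ring byte offset / msg_size).
 * Cookies hold per-operation scratch memory (scatter-gather lists, for
 * example) that must remain valid while a request is in flight, so they
 * are only handed back to the mempool in qat_qp_release().
 */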

int qat_qp_release(struct qat_qp **qp_addr)
{
	struct qat_qp *qp = *qp_addr;
	uint32_t i;

	if (qp == NULL) {
		QAT_LOG(DEBUG, "qp already freed");
		return 0;
	}

	QAT_LOG(DEBUG, "Free qp on qat_pci device %d",
				qp->qat_dev->qat_dev_id);

	/* Don't free memory if there are still responses to be processed */
	if ((qp->enqueued - qp->dequeued) == 0) {
		qat_queue_delete(&(qp->tx_q));
		qat_queue_delete(&(qp->rx_q));
	} else {
		return -EAGAIN;
	}

	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr,
					&qp->qat_dev->arb_csr_lock);

	for (i = 0; i < qp->nb_descriptors; i++)
		rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);

	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);

	rte_free(qp->op_cookies);
	rte_free(qp);
	*qp_addr = NULL;
	return 0;
}


static void qat_queue_delete(struct qat_queue *queue)
{
	const struct rte_memzone *mz;
	int status = 0;

	if (queue == NULL) {
		QAT_LOG(DEBUG, "Invalid queue");
		return;
	}
	QAT_LOG(DEBUG, "Free ring %d, memzone: %s",
			queue->hw_queue_number, queue->memz_name);

	mz = rte_memzone_lookup(queue->memz_name);
	if (mz != NULL) {
		/* Write an unused pattern to the queue memory. */
		memset(queue->base_addr, 0x7F, queue->queue_size);
		status = rte_memzone_free(mz);
		if (status != 0)
			QAT_LOG(ERR, "Error %d on freeing queue %s",
					status, queue->memz_name);
	} else {
		QAT_LOG(DEBUG, "queue %s doesn't exist",
				queue->memz_name);
	}
}

static int
qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
		struct qat_qp_config *qp_conf, uint8_t dir)
{
	uint64_t queue_base;
	void *io_addr;
	const struct rte_memzone *qp_mz;
	struct rte_pci_device *pci_dev = qat_dev->pci_dev;
	int ret = 0;
	uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
			qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
	uint32_t queue_size_bytes = qp_conf->nb_descriptors * desc_size;

	queue->hw_bundle_number = qp_conf->hw->hw_bundle_num;
	queue->hw_queue_number = (dir == ADF_RING_DIR_TX ?
			qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num);

	if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		QAT_LOG(ERR, "Invalid descriptor size %d", desc_size);
		return -EINVAL;
	}

	/*
	 * Allocate a memzone for the queue - create a unique name.
	 */
	snprintf(queue->memz_name, sizeof(queue->memz_name),
			"%s_%d_%s_%s_%d_%d",
		pci_dev->driver->driver.name, qat_dev->qat_dev_id,
		qp_conf->service_str, "qp_mem",
		queue->hw_bundle_number, queue->hw_queue_number);
	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
			qat_dev->pci_dev->device.numa_node);
	if (qp_mz == NULL) {
		QAT_LOG(ERR, "Failed to allocate ring memzone");
		return -ENOMEM;
	}

	queue->base_addr = (char *)qp_mz->addr;
	queue->base_phys_addr = qp_mz->iova;
	if (qat_qp_check_queue_alignment(queue->base_phys_addr,
			queue_size_bytes)) {
		QAT_LOG(ERR, "Invalid alignment on queue create "
					"0x%"PRIx64,
					queue->base_phys_addr);
		ret = -EFAULT;
		goto queue_create_err;
	}

	if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors,
			&(queue->queue_size)) != 0) {
		QAT_LOG(ERR, "Invalid queue size");
		ret = -EINVAL;
		goto queue_create_err;
	}

	queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1;
	queue->head = 0;
	queue->tail = 0;
	queue->msg_size = desc_size;

	/*
	 * Write an unused pattern to the queue memory.
	 */
	memset(queue->base_addr, 0x7F, queue_size_bytes);

	queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
					queue->queue_size);

	io_addr = pci_dev->mem_resource[0].addr;

	WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_base);

	QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
		" nb msgs %u, msg_size %u, modulo mask %u",
			queue->memz_name,
			queue->queue_size, queue_size_bytes,
			qp_conf->nb_descriptors, desc_size,
			queue->modulo_mask);

	return 0;

queue_create_err:
	rte_memzone_free(qp_mz);
	return ret;
}

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}
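
/*
 * This works because queue_size_bytes is always a power of two here:
 * (size - 1) is a mask of the low address bits, all of which must be
 * zero for a naturally aligned base. For example, a 4096-byte ring
 * based at 0x3ff7000 passes (0x3ff7000 & 0xfff == 0), while one at
 * 0x3ff7800 would fail.
 */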

static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *p_queue_size_for_csr)
{
	uint8_t i = ADF_MIN_RING_SIZE;

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) ==
				(uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
			*p_queue_size_for_csr = i;
			return 0;
		}
	QAT_LOG(ERR, "Invalid ring size %u", msg_size * msg_num);
	return -EINVAL;
}
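
/*
 * The size programmed into the ring CSR is an encoded value rather than
 * a byte count, so this helper searches the valid encodings
 * (ADF_MIN_RING_SIZE..ADF_MAX_RING_SIZE) for one that decodes to exactly
 * msg_size * msg_num bytes. Since the decodable sizes are powers of two,
 * nb_descriptors and the per-service msg_size must multiply to a
 * power-of-two byte total or qp setup fails.
 */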

static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
					rte_spinlock_t *lock)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
							txq->hw_bundle_number);
	uint32_t value;

	rte_spinlock_lock(lock);
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value |= (0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
	rte_spinlock_unlock(lock);
}

static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
					rte_spinlock_t *lock)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
							txq->hw_bundle_number);
	uint32_t value;

	rte_spinlock_lock(lock);
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value &= ~(0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
	rte_spinlock_unlock(lock);
}

static void adf_configure_queues(struct qat_qp *qp)
{
	uint32_t queue_config;
	struct qat_queue *queue = &qp->tx_q;

	queue_config = BUILD_RING_CONFIG(queue->queue_size);

	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);

	queue = &qp->rx_q;
	queue_config =
			BUILD_RESP_RING_CONFIG(queue->queue_size,
					ADF_RING_NEAR_WATERMARK_512,
					ADF_RING_NEAR_WATERMARK_0);

	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);
}

static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
{
	return data & modulo_mask;
}
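
/*
 * Head and tail are byte offsets into the ring that wrap via this
 * power-of-two mask rather than a costly modulo. For instance, on a
 * 4096-byte ring (modulo_mask 0xfff) with 128-byte messages, a tail of
 * 3968 advances to (3968 + 128) & 0xfff == 0, i.e. back to the start.
 */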

static inline void
txq_write_tail(struct qat_qp *qp, struct qat_queue *q)
{
	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
			q->hw_queue_number, q->tail);
	q->csr_tail = q->tail;
}

static inline void
rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
	uint32_t old_head, new_head;
	uint32_t max_head;

	old_head = q->csr_head;
	new_head = q->head;
	max_head = qp->nb_descriptors * q->msg_size;

	/* write out free descriptors */
	void *cur_desc = (uint8_t *)q->base_addr + old_head;

	if (new_head < old_head) {
		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
		memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
	} else {
		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
	}
	q->nb_processed_responses = 0;
	q->csr_head = new_head;

	/* write current head to CSR */
	WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
			    q->hw_queue_number, new_head);
}
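
/*
 * Two details above are worth calling out. Freed slots are refilled with
 * ADF_RING_EMPTY_SIG_BYTE so the dequeue poll loop can recognize them as
 * empty again, using two memsets when the freed region wraps past the
 * end of the ring. And the head CSR write is batched:
 * qat_dequeue_op_burst() only calls this helper after more than
 * QAT_CSR_HEAD_WRITE_THRESH responses have accumulated, trading prompt
 * slot reuse for fewer MMIO writes.
 */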

uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register int ret;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	{
		/* dequeued can only be written by one thread, but it may not
		 * be this thread. As it's 4-byte aligned it will be read
		 * atomically here by any Intel CPU.
		 * enqueued can wrap before dequeued, but cannot
		 * lap it as var size of enq/deq (uint32_t) > var size of
		 * max_inflights (uint16_t). In reality inflights is never
		 * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
		 * On wrapping, the calculation still returns the correct
		 * positive value as all three vars are unsigned.
		 */
		uint32_t inflights =
			tmp_qp->enqueued - tmp_qp->dequeued;

		if ((inflights + nb_ops) > tmp_qp->max_inflights) {
			nb_ops_possible = tmp_qp->max_inflights - inflights;
			if (nb_ops_possible == 0)
				return 0;
		}
		/* QAT has plenty of work queued already, so don't waste cycles
		 * enqueueing, wait until the application has gathered a bigger
		 * burst or some completed ops have been dequeued
		 */
		if (tmp_qp->min_enq_burst_threshold && inflights >
				QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
				tmp_qp->min_enq_burst_threshold) {
			tmp_qp->stats.threshold_hit_count++;
			return 0;
		}
	}

	while (nb_ops_sent != nb_ops_possible) {
		ret = tmp_qp->build_request(*ops, base_addr + tail,
				tmp_qp->op_cookies[tail / queue->msg_size],
				tmp_qp->qat_dev_gen);
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			/* This message cannot be enqueued */
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		}

		tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
		ops++;
		nb_ops_sent++;
	}
kick_tail:
	queue->tail = tail;
	tmp_qp->enqueued += nb_ops_sent;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	txq_write_tail(tmp_qp, queue);
	return nb_ops_sent;
}
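
/*
 * Note the partial-burst semantics: if build_request() fails part-way
 * through, the requests already written to the ring are still submitted
 * via kick_tail and counted in the return value; only a failure on the
 * very first op returns 0 without touching the tail CSR. Callers should
 * therefore compare the return value against nb_ops and resubmit any
 * remainder.
 */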

uint16_t
qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	struct qat_queue *rx_queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t head;
	uint32_t resp_counter = 0;
	uint8_t *resp_msg;

	rx_queue = &(tmp_qp->rx_q);
	head = rx_queue->head;
	resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;

	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			resp_counter != nb_ops) {

		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
			qat_sym_process_response(ops, resp_msg);
		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
			qat_comp_process_response(ops, resp_msg,
				tmp_qp->op_cookies[head / rx_queue->msg_size],
				&tmp_qp->stats.dequeue_err_count);
		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
#ifdef BUILD_QAT_ASYM
			qat_asym_process_response(ops, resp_msg,
				tmp_qp->op_cookies[head / rx_queue->msg_size]);
#endif
		}

		head = adf_modulo(head + rx_queue->msg_size,
				  rx_queue->modulo_mask);

		resp_msg = (uint8_t *)rx_queue->base_addr + head;
		ops++;
		resp_counter++;
	}
	if (resp_counter > 0) {
		rx_queue->head = head;
		tmp_qp->dequeued += resp_counter;
		tmp_qp->stats.dequeued_count += resp_counter;
		rx_queue->nb_processed_responses += resp_counter;

		if (rx_queue->nb_processed_responses >
						QAT_CSR_HEAD_WRITE_THRESH)
			rxq_free_desc(tmp_qp, rx_queue);
	}

	return resp_counter;
}
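
/*
 * Weak stub so this common file links in builds without the compression
 * service compiled in; the real qat_comp_process_response() overrides it
 * when the compression code is present.
 */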
__rte_weak int
qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
			  void *op_cookie __rte_unused,
			  uint64_t *dequeue_err_count __rte_unused)
{
	return 0;
}