xref: /dpdk/drivers/common/qat/qat_qp.h (revision 68a03efeed657e6e05f281479b33b51102797e15)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */
#ifndef _QAT_QP_H_
#define _QAT_QP_H_

#include "qat_common.h"
#include "adf_transport_access_macros.h"

struct qat_pci_device;

#define QAT_CSR_HEAD_WRITE_THRESH 32U
/* number of requests to accumulate before writing head CSR */

#define QAT_QP_MIN_INFL_THRESHOLD	256
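
/*
 * Illustrative sketch, not part of the header: the response ring head CSR
 * is assumed to be written back in batches rather than per response, so
 * that at most QAT_CSR_HEAD_WRITE_THRESH responses accumulate before the
 * hardware is told how far the head has advanced, roughly:
 *
 *	rxq->nb_processed_responses++;
 *	if (rxq->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
 *		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, rxq->hw_bundle_number,
 *				rxq->hw_queue_number, rxq->head);
 *		rxq->csr_head = rxq->head;
 *		rxq->nb_processed_responses = 0;
 *	}
 *
 * WRITE_CSR_RING_HEAD comes from adf_transport_access_macros.h; the exact
 * field usage above is an example only.
 */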

/**
 * Structure with the hardware bundle/ring data needed for creation of a
 * queue pair.
 */
struct qat_qp_hw_data {
	enum qat_service_type service_type;
	uint8_t hw_bundle_num;
	uint8_t tx_ring_num;
	uint8_t rx_ring_num;
	uint16_t tx_msg_size;
	uint16_t rx_msg_size;
};

/**
 * Structure with the configuration parameters needed for queue pair setup.
 */
struct qat_qp_config {
	const struct qat_qp_hw_data *hw;
	uint32_t nb_descriptors;
	uint32_t cookie_size;
	int socket_id;
	const char *service_str;
};
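
/*
 * Illustrative sketch, not part of the header: how a service PMD might fill
 * this structure before calling qat_qp_setup() (declared below). The lookup
 * into qat_gen1_qps[] by service type and queue pair id, the cookie type and
 * the descriptor count are assumptions for the example only.
 *
 *	struct qat_qp *qp = NULL;
 *	struct qat_qp_config qp_conf = {
 *		.hw = &qat_gen1_qps[QAT_SERVICE_SYMMETRIC][qp_id],
 *		.nb_descriptors = 4096,
 *		.cookie_size = sizeof(struct qat_sym_op_cookie),
 *		.socket_id = rte_socket_id(),
 *		.service_str = "sym",
 *	};
 *	int ret = qat_qp_setup(qat_dev, &qp, qp_id, &qp_conf);
 */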

/**
 * Structure associated with each queue.
 */
struct qat_queue {
	char		memz_name[RTE_MEMZONE_NAMESIZE];
	void		*base_addr;		/* Base address */
	rte_iova_t	base_phys_addr;		/* Queue physical address */
	uint32_t	head;			/* Shadow copy of the head */
	uint32_t	tail;			/* Shadow copy of the tail */
	uint32_t	modulo_mask;
	uint32_t	msg_size;
	uint32_t	queue_size;
	uint8_t		trailz;
	uint8_t		hw_bundle_number;
	uint8_t		hw_queue_number;
	/* HW queue aka ring offset on bundle */
	uint32_t	csr_head;		/* last written head value */
	uint32_t	csr_tail;		/* last written tail value */
	uint16_t	nb_processed_responses;
	/* number of responses processed since last CSR head write */
};
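
/*
 * Illustrative sketch, not part of the header: the shadow head/tail are
 * assumed to advance one message at a time and wrap using the power-of-two
 * modulo mask, e.g.
 *
 *	q->tail = (q->tail + q->msg_size) & q->modulo_mask;
 *
 * while csr_head/csr_tail hold the values last written to the hardware
 * CSRs, so a CSR write can be skipped when nothing has moved since.
 */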

struct qat_qp {
	void			*mmap_bar_addr;
	struct qat_queue	tx_q;
	struct qat_queue	rx_q;
	struct qat_common_stats stats;
	struct rte_mempool *op_cookie_pool;
	void **op_cookies;
	uint32_t nb_descriptors;
	enum qat_device_gen qat_dev_gen;
	enum qat_service_type service_type;
	struct qat_pci_device *qat_dev;
	/**< qat device this qp is on */
	uint32_t enqueued;
	uint32_t dequeued __rte_aligned(4);
	uint16_t max_inflights;
	uint16_t min_enq_burst_threshold;
} __rte_cache_aligned;

extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
extern const struct qat_qp_hw_data qat_gen3_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];

uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);

uint16_t
qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);

uint16_t
qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
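
/*
 * Illustrative sketch, not part of the header: a typical datapath loop over
 * one queue pair. The ops array and its size are assumptions for the
 * example; enqueue may accept fewer ops than requested when the ring is
 * close to its in-flight limit.
 *
 *	uint16_t enq = qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
 *	uint16_t deq = 0;
 *
 *	while (deq < enq)
 *		deq += qat_dequeue_op_burst(qp, (void **)&ops[deq],
 *				enq - deq);
 */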

int
qat_qp_release(struct qat_qp **qp_addr);

int
qat_qp_setup(struct qat_pci_device *qat_dev,
		struct qat_qp **qp_addr, uint16_t queue_pair_id,
		struct qat_qp_config *qat_qp_conf);
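
/*
 * Illustrative sketch, not part of the header: tearing down a queue pair
 * created with qat_qp_setup(). qat_qp_release() is assumed to free the
 * queue memory and cookie pool and clear the caller's pointer.
 *
 *	ret = qat_qp_release(&qp);
 */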

int
qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
			enum qat_service_type service);

int
qat_cq_get_fw_version(struct qat_qp *qp);

/* Needed for the weak function */
int
qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
			  void *op_cookie __rte_unused,
			  uint64_t *dequeue_err_count __rte_unused);

#endif /* _QAT_QP_H_ */