/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#include <stdint.h>

#include <rte_dev.h>
#include <rte_pci.h>

#include "qat_device.h"
#include "qat_qp.h"
#include "adf_transport_access_macros_gen4vf.h"
#include "adf_pf2vf_msg.h"
#include "qat_pf2vf.h"

/* QAT GEN 4 specific macros */
#define QAT_GEN4_BUNDLE_NUM             4
#define QAT_GEN4_QPS_PER_BUNDLE_NUM     1

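/*
 * GEN4-specific private device data: the hw queue pair configuration
 * of each bundle, filled in from the PF at config-read time.
 */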
struct qat_dev_gen4_extra {
	struct qat_qp_hw_data qp_gen4_data[QAT_GEN4_BUNDLE_NUM]
		[QAT_GEN4_QPS_PER_BUNDLE_NUM];
};

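/* PF to VF messaging register layout for GEN4 (4xxx) VF devices */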
static struct qat_pf2vf_dev qat_pf2vf_gen4 = {
	.pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
	.vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
	.pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
	.pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
	.pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
	.pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
};

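/*
 * Query the ring-to-service map from the PF via a small block
 * request; the reply is two bytes, read into *val.
 */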
static int
qat_query_svc_gen4(struct qat_pci_device *qat_dev, uint8_t *val)
{
	struct qat_pf2vf_msg pf2vf_msg;

	pf2vf_msg.msg_type = ADF_VF2PF_MSGTYPE_GET_SMALL_BLOCK_REQ;
	pf2vf_msg.block_hdr = ADF_VF2PF_BLOCK_MSG_GET_RING_TO_SVC_REQ;
	pf2vf_msg.msg_data = 2;
	return qat_pf2vf_exch_msg(qat_dev, pf2vf_msg, 2, val);
}

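/*
 * Map a logical qp_id onto the n-th bundle configured for the given
 * service. Returns the bundle index, or -1 if no such bundle exists.
 */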
static int
qat_select_valid_queue_gen4(struct qat_pci_device *qat_dev, int qp_id,
			enum qat_service_type service_type)
{
	int i, valid_qps = 0;
	struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;

	for (i = 0; i < QAT_GEN4_BUNDLE_NUM; i++) {
		if (dev_extra->qp_gen4_data[i][0].service_type ==
				service_type) {
			if (valid_qps == qp_id)
				return i;
			++valid_qps;
		}
	}
	return -1;
}

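/* Return the hw queue pair data backing (service_type, qp_id), if any */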
static const struct qat_qp_hw_data *
qat_qp_get_hw_data_gen4(struct qat_pci_device *qat_dev,
		enum qat_service_type service_type, uint16_t qp_id)
{
	struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;
	int ring_pair = qat_select_valid_queue_gen4(qat_dev, qp_id,
			service_type);

	if (ring_pair < 0)
		return NULL;

	return &dev_extra->qp_gen4_data[ring_pair][0];
}

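/* Count how many bundles are configured for the given service */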
static int
qat_qp_rings_per_service_gen4(struct qat_pci_device *qat_dev,
		enum qat_service_type service)
{
	int i, count = 0;
	struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;

	for (i = 0; i < QAT_GEN4_BUNDLE_NUM; i++)
		if (dev_extra->qp_gen4_data[i][0].service_type == service)
			count++;
	return count;
}

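/* Translate a 3-bit hw service id from the ring-to-service map */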
static enum qat_service_type
gen4_pick_service(uint8_t hw_service)
{
	switch (hw_service) {
	case QAT_SVC_SYM:
		return QAT_SERVICE_SYMMETRIC;
	case QAT_SVC_COMPRESSION:
		return QAT_SERVICE_COMPRESSION;
	case QAT_SVC_ASYM:
		return QAT_SERVICE_ASYMMETRIC;
	default:
		return QAT_SERVICE_INVALID;
	}
}

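/*
 * Read the ring-to-service map (one 3-bit service id per bundle) and
 * derive per-bundle queue pair parameters: message sizes depend on
 * the service, and each bundle exposes one tx/rx ring pair (rings 0
 * and 1).
 */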
static int
qat_dev_read_config_gen4(struct qat_pci_device *qat_dev)
{
	int i;
	uint16_t svc = 0;
	struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;
	struct qat_qp_hw_data *hw_data;
	enum qat_service_type service_type;
	uint8_t hw_service;

	if (qat_query_svc_gen4(qat_dev, (uint8_t *)&svc))
		return -EFAULT;
	for (i = 0; i < QAT_GEN4_BUNDLE_NUM; i++) {
		hw_service = (svc >> (3 * i)) & 0x7;
		service_type = gen4_pick_service(hw_service);
		if (service_type == QAT_SERVICE_INVALID) {
			QAT_LOG(ERR, "Unrecognized service on bundle %d", i);
			return -ENOTSUP;
		}
		hw_data = &dev_extra->qp_gen4_data[i][0];
		memset(hw_data, 0, sizeof(*hw_data));
		hw_data->service_type = service_type;
		if (service_type == QAT_SERVICE_ASYMMETRIC) {
			hw_data->tx_msg_size = 64;
			hw_data->rx_msg_size = 32;
		} else if (service_type == QAT_SERVICE_SYMMETRIC ||
				service_type == QAT_SERVICE_COMPRESSION) {
			hw_data->tx_msg_size = 128;
			hw_data->rx_msg_size = 32;
		}
		hw_data->tx_ring_num = 0;
		hw_data->rx_ring_num = 1;
		hw_data->hw_bundle_num = i;
	}
	return 0;
}

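/* Program the ring base address CSR for one queue */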
static void
qat_qp_build_ring_base_gen4(void *io_addr,
			struct qat_queue *queue)
{
	uint64_t queue_base;

	queue_base = BUILD_RING_BASE_ADDR_GEN4(queue->base_phys_addr,
			queue->queue_size);
	WRITE_CSR_RING_BASE_GEN4VF(io_addr, queue->hw_bundle_number,
		queue->hw_queue_number, queue_base);
}

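/* Set the tx ring's bit in the per-bundle service arbiter enable CSR */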
static void
qat_qp_adf_arb_enable_gen4(const struct qat_queue *txq,
			void *base_addr, rte_spinlock_t *lock)
{
	uint32_t arb_csr_offset, value;

	rte_spinlock_lock(lock);
	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
			(ADF_RING_BUNDLE_SIZE_GEN4 *
			txq->hw_bundle_number);
	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN4VF,
			arb_csr_offset);
	value |= (0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN4VF,
			arb_csr_offset, value);
	rte_spinlock_unlock(lock);
}

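/* Clear the tx ring's bit in the per-bundle service arbiter enable CSR */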
static void
qat_qp_adf_arb_disable_gen4(const struct qat_queue *txq,
			void *base_addr, rte_spinlock_t *lock)
{
	uint32_t arb_csr_offset, value;

	rte_spinlock_lock(lock);
	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
			(ADF_RING_BUNDLE_SIZE_GEN4 *
			txq->hw_bundle_number);
	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN4VF,
			arb_csr_offset);
	value &= ~(0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN4VF,
			arb_csr_offset, value);
	rte_spinlock_unlock(lock);
}

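/*
 * Program the ring-size config for the tx ring and the ring-size plus
 * near-watermark config for the rx (response) ring of a queue pair.
 */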
static void
qat_qp_adf_configure_queues_gen4(struct qat_qp *qp)
{
	uint32_t q_tx_config, q_resp_config;
	struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;

	q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
	q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
			ADF_RING_NEAR_WATERMARK_512,
			ADF_RING_NEAR_WATERMARK_0);

	WRITE_CSR_RING_CONFIG_GEN4VF(qp->mmap_bar_addr,
		q_tx->hw_bundle_number, q_tx->hw_queue_number,
		q_tx_config);
	WRITE_CSR_RING_CONFIG_GEN4VF(qp->mmap_bar_addr,
		q_rx->hw_bundle_number, q_rx->hw_queue_number,
		q_resp_config);
}

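/* Publish the shadow tail to the ring tail CSR */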
static void
qat_qp_csr_write_tail_gen4(struct qat_qp *qp, struct qat_queue *q)
{
	WRITE_CSR_RING_TAIL_GEN4VF(qp->mmap_bar_addr,
		q->hw_bundle_number, q->hw_queue_number, q->tail);
}

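/* Write the new head pointer to the ring head CSR */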
static void
qat_qp_csr_write_head_gen4(struct qat_qp *qp, struct qat_queue *q,
			uint32_t new_head)
{
	WRITE_CSR_RING_HEAD_GEN4VF(qp->mmap_bar_addr,
			q->hw_bundle_number, q->hw_queue_number, new_head);
}

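/*
 * Full CSR setup for a queue pair: ring base addresses, ring config
 * and arbitration enable for the tx ring.
 */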
static void
qat_qp_csr_setup_gen4(struct qat_pci_device *qat_dev,
			void *io_addr, struct qat_qp *qp)
{
	qat_qp_build_ring_base_gen4(io_addr, &qp->tx_q);
	qat_qp_build_ring_base_gen4(io_addr, &qp->rx_q);
	qat_qp_adf_configure_queues_gen4(qp);
	qat_qp_adf_arb_enable_gen4(&qp->tx_q, qp->mmap_bar_addr,
					&qat_dev->arb_csr_lock);
}

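/* GEN4 implementations of the generation-agnostic queue pair ops */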
static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen4 = {
	.qat_qp_rings_per_service = qat_qp_rings_per_service_gen4,
	.qat_qp_build_ring_base = qat_qp_build_ring_base_gen4,
	.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen4,
	.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen4,
	.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen4,
	.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen4,
	.qat_qp_csr_write_head = qat_qp_csr_write_head_gen4,
	.qat_qp_csr_setup = qat_qp_csr_setup_gen4,
	.qat_qp_get_hw_data = qat_qp_get_hw_data_gen4,
};

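/* Ask the PF to reset every ring pair of this VF, one bundle at a time */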
static int
qat_reset_ring_pairs_gen4(struct qat_pci_device *qat_pci_dev)
{
	int ret = 0, i;
	uint8_t data[4];
	struct qat_pf2vf_msg pf2vf_msg;

	pf2vf_msg.msg_type = ADF_VF2PF_MSGTYPE_RP_RESET;
	pf2vf_msg.block_hdr = -1;
	for (i = 0; i < QAT_GEN4_BUNDLE_NUM; i++) {
		pf2vf_msg.msg_data = i;
		ret = qat_pf2vf_exch_msg(qat_pci_dev, pf2vf_msg, 1, data);
		if (ret) {
			QAT_LOG(ERR, "QAT error when resetting bundle %d",
				i);
			return ret;
		}
	}

	return 0;
}

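/* The GEN4 VF transport (ring) CSRs live in BAR 0 */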
static const struct rte_mem_resource *
qat_dev_get_transport_bar_gen4(struct rte_pci_device *pci_dev)
{
	return &pci_dev->mem_resource[0];
}

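/* The GEN4 VF misc CSRs live in BAR 2 */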
static int
qat_dev_get_misc_bar_gen4(struct rte_mem_resource **mem_resource,
		struct rte_pci_device *pci_dev)
{
	*mem_resource = &pci_dev->mem_resource[2];
	return 0;
}

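/* Size of the GEN4-specific private data appended to the device */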
static int
qat_dev_get_extra_size_gen4(void)
{
	return sizeof(struct qat_dev_gen4_extra);
}

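/* GEN4 implementations of the generation-agnostic device ops */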
static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen4 = {
	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen4,
	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen4,
	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen4,
	.qat_dev_read_config = qat_dev_read_config_gen4,
	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen4,
};

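/* Register the GEN4 hooks in the per-generation tables at load time */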
RTE_INIT(qat_dev_gen_4_init)
{
	qat_qp_hw_spec[QAT_GEN4] = &qat_qp_hw_spec_gen4;
	qat_dev_hw_spec[QAT_GEN4] = &qat_dev_hw_spec_gen4;
	qat_gen_config[QAT_GEN4].dev_gen = QAT_GEN4;
	qat_gen_config[QAT_GEN4].pf2vf_dev = &qat_pf2vf_gen4;
}