xref: /dpdk/drivers/common/qat/dev/qat_dev_gen4.c (revision 2e98e808b99d9893aba4d64754f381b4cb0715ea)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2021 Intel Corporation
3  */
4 
5 #include <dev_driver.h>
6 #include <rte_pci.h>
7 
8 #include "qat_device.h"
9 #include "qat_qp.h"
10 #include "adf_transport_access_macros_gen4vf.h"
11 #include "adf_pf2vf_msg.h"
12 #include "qat_pf2vf.h"
13 #include "qat_dev_gens.h"
14 
15 #include <stdint.h>
16 
/* QAT GEN 4 specific macros */
#define QAT_GEN4_BUNDLE_NUM             4
#define QAT_GEN4_QPS_PER_BUNDLE_NUM     1

/*
 * GEN4 per-device private data, reached via qat_pci_device::dev_private.
 * Holds one queue-pair hardware configuration slot per (bundle, qp)
 * position; GEN4 exposes a single qp per bundle.
 */
struct qat_dev_gen4_extra {
	struct qat_qp_hw_data qp_gen4_data[QAT_GEN4_BUNDLE_NUM]
		[QAT_GEN4_QPS_PER_BUNDLE_NUM];
};
25 
/*
 * PF<->VF mailbox parameters for GEN4 (4xxx) devices: CSR offsets for
 * both message directions plus the shift/mask pairs used to pack the
 * message type and data fields. Registered for QAT_GEN4 in
 * qat_dev_gen_4_init().
 */
static struct qat_pf2vf_dev qat_pf2vf_gen4 = {
	.pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
	.vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
	.pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
	.pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
	.pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
	.pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
};
34 
35 static int
qat_query_svc_gen4(struct qat_pci_device * qat_dev,uint8_t * val)36 qat_query_svc_gen4(struct qat_pci_device *qat_dev, uint8_t *val)
37 {
38 	struct qat_pf2vf_msg pf2vf_msg;
39 
40 	pf2vf_msg.msg_type = ADF_VF2PF_MSGTYPE_GET_SMALL_BLOCK_REQ;
41 	pf2vf_msg.block_hdr = ADF_VF2PF_BLOCK_MSG_GET_RING_TO_SVC_REQ;
42 	pf2vf_msg.msg_data = 2;
43 	return qat_pf2vf_exch_msg(qat_dev, pf2vf_msg, 2, val);
44 }
45 
46 static int
qat_select_valid_queue_gen4(struct qat_pci_device * qat_dev,int qp_id,enum qat_service_type service_type)47 qat_select_valid_queue_gen4(struct qat_pci_device *qat_dev, int qp_id,
48 			enum qat_service_type service_type)
49 {
50 	int i = 0, valid_qps = 0;
51 	struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;
52 
53 	for (; i < QAT_GEN4_BUNDLE_NUM; i++) {
54 		if (dev_extra->qp_gen4_data[i][0].service_type ==
55 			service_type) {
56 			if (valid_qps == qp_id)
57 				return i;
58 			++valid_qps;
59 		}
60 	}
61 	return -1;
62 }
63 
64 const struct qat_qp_hw_data *
qat_qp_get_hw_data_gen4(struct qat_pci_device * qat_dev,enum qat_service_type service_type,uint16_t qp_id)65 qat_qp_get_hw_data_gen4(struct qat_pci_device *qat_dev,
66 		enum qat_service_type service_type, uint16_t qp_id)
67 {
68 	struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;
69 	int ring_pair = qat_select_valid_queue_gen4(qat_dev, qp_id,
70 			service_type);
71 
72 	if (ring_pair < 0)
73 		return NULL;
74 
75 	return &dev_extra->qp_gen4_data[ring_pair][0];
76 }
77 
78 int
qat_qp_rings_per_service_gen4(struct qat_pci_device * qat_dev,enum qat_service_type service)79 qat_qp_rings_per_service_gen4(struct qat_pci_device *qat_dev,
80 		enum qat_service_type service)
81 {
82 	int i = 0, count = 0, max_ops_per_srv = 0;
83 	struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;
84 
85 	max_ops_per_srv = QAT_GEN4_BUNDLE_NUM;
86 	for (i = 0, count = 0; i < max_ops_per_srv; i++)
87 		if (dev_extra->qp_gen4_data[i][0].service_type == service)
88 			count++;
89 	return count;
90 }
91 
92 static enum qat_service_type
gen4_pick_service(uint8_t hw_service)93 gen4_pick_service(uint8_t hw_service)
94 {
95 	switch (hw_service) {
96 	case QAT_SVC_SYM:
97 		return QAT_SERVICE_SYMMETRIC;
98 	case QAT_SVC_COMPRESSION:
99 		return QAT_SERVICE_COMPRESSION;
100 	case QAT_SVC_ASYM:
101 		return QAT_SERVICE_ASYMMETRIC;
102 	default:
103 		return QAT_SERVICE_INVALID;
104 	}
105 }
106 
107 int
qat_dev_read_config_gen4(struct qat_pci_device * qat_dev)108 qat_dev_read_config_gen4(struct qat_pci_device *qat_dev)
109 {
110 	int i = 0;
111 	uint16_t svc = 0;
112 	struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;
113 	struct qat_qp_hw_data *hw_data;
114 	enum qat_service_type service_type;
115 	uint8_t hw_service;
116 
117 	if (qat_query_svc_gen4(qat_dev, (uint8_t *)&svc))
118 		return -EFAULT;
119 	for (; i < QAT_GEN4_BUNDLE_NUM; i++) {
120 		hw_service = (svc >> (3 * i)) & 0x7;
121 		service_type = gen4_pick_service(hw_service);
122 		if (service_type == QAT_SERVICE_INVALID) {
123 			QAT_LOG(ERR,
124 				"Unrecognized service on bundle %d",
125 				i);
126 			return -ENOTSUP;
127 		}
128 		hw_data = &dev_extra->qp_gen4_data[i][0];
129 		memset(hw_data, 0, sizeof(*hw_data));
130 		hw_data->service_type = service_type;
131 		if (service_type == QAT_SERVICE_ASYMMETRIC) {
132 			hw_data->tx_msg_size = 64;
133 			hw_data->rx_msg_size = 32;
134 		} else if (service_type == QAT_SERVICE_SYMMETRIC ||
135 				service_type ==
136 					QAT_SERVICE_COMPRESSION) {
137 			hw_data->tx_msg_size = 128;
138 			hw_data->rx_msg_size = 32;
139 		}
140 		hw_data->tx_ring_num = 0;
141 		hw_data->rx_ring_num = 1;
142 		hw_data->hw_bundle_num = i;
143 	}
144 	return 0;
145 }
146 
147 static int
qat_dev_read_config_vqat(struct qat_pci_device * qat_dev)148 qat_dev_read_config_vqat(struct qat_pci_device *qat_dev)
149 {
150 	int i = 0;
151 	struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;
152 	struct qat_qp_hw_data *hw_data;
153 	struct qat_device_info *qat_dev_instance =
154 			&qat_pci_devs[qat_dev->qat_dev_id];
155 	uint16_t sub_id = qat_dev_instance->pci_dev->id.subsystem_device_id;
156 
157 	for (; i < QAT_GEN4_BUNDLE_NUM; i++) {
158 		hw_data = &dev_extra->qp_gen4_data[i][0];
159 		memset(hw_data, 0, sizeof(*hw_data));
160 		if (sub_id == ADF_VQAT_SYM_PCI_SUBSYSTEM_ID) {
161 			hw_data->service_type = QAT_SERVICE_SYMMETRIC;
162 			hw_data->tx_msg_size = 128;
163 			hw_data->rx_msg_size = 32;
164 		} else if (sub_id == ADF_VQAT_ASYM_PCI_SUBSYSTEM_ID) {
165 			hw_data->service_type = QAT_SERVICE_ASYMMETRIC;
166 			hw_data->tx_msg_size = 64;
167 			hw_data->rx_msg_size = 32;
168 		} else if (sub_id == ADF_VQAT_DC_PCI_SUBSYSTEM_ID) {
169 			hw_data->service_type = QAT_SERVICE_COMPRESSION;
170 			hw_data->tx_msg_size = 128;
171 			hw_data->rx_msg_size = 32;
172 		} else {
173 			QAT_LOG(ERR, "Unrecognized subsystem id %hu", sub_id);
174 			return -EINVAL;
175 		}
176 		hw_data->tx_ring_num = 0;
177 		hw_data->rx_ring_num = 1;
178 		hw_data->hw_bundle_num = i;
179 	}
180 	return 0;
181 }
182 
183 void
qat_qp_build_ring_base_gen4(void * io_addr,struct qat_queue * queue)184 qat_qp_build_ring_base_gen4(void *io_addr,
185 			struct qat_queue *queue)
186 {
187 	uint64_t queue_base;
188 
189 	queue_base = BUILD_RING_BASE_ADDR_GEN4(queue->base_phys_addr,
190 			queue->queue_size);
191 	WRITE_CSR_RING_BASE_GEN4VF(io_addr, queue->hw_bundle_number,
192 		queue->hw_queue_number, queue_base);
193 }
194 
195 void
qat_qp_adf_arb_enable_gen4(const struct qat_queue * txq,void * base_addr,rte_spinlock_t * lock)196 qat_qp_adf_arb_enable_gen4(const struct qat_queue *txq,
197 			void *base_addr, rte_spinlock_t *lock)
198 {
199 	uint32_t arb_csr_offset = 0, value;
200 
201 	rte_spinlock_lock(lock);
202 	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
203 			(ADF_RING_BUNDLE_SIZE_GEN4 *
204 			txq->hw_bundle_number);
205 	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN4VF,
206 			arb_csr_offset);
207 	value |= (0x01 << txq->hw_queue_number);
208 	ADF_CSR_WR(base_addr, arb_csr_offset, value);
209 	rte_spinlock_unlock(lock);
210 }
211 
212 void
qat_qp_adf_arb_disable_gen4(const struct qat_queue * txq,void * base_addr,rte_spinlock_t * lock)213 qat_qp_adf_arb_disable_gen4(const struct qat_queue *txq,
214 			void *base_addr, rte_spinlock_t *lock)
215 {
216 	uint32_t arb_csr_offset = 0, value;
217 
218 	rte_spinlock_lock(lock);
219 	arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
220 			(ADF_RING_BUNDLE_SIZE_GEN4 *
221 			txq->hw_bundle_number);
222 	value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN4VF,
223 			arb_csr_offset);
224 	value &= ~(0x01 << txq->hw_queue_number);
225 	ADF_CSR_WR(base_addr, arb_csr_offset, value);
226 	rte_spinlock_unlock(lock);
227 }
228 
229 void
qat_qp_adf_configure_queues_gen4(struct qat_qp * qp)230 qat_qp_adf_configure_queues_gen4(struct qat_qp *qp)
231 {
232 	uint32_t q_tx_config, q_resp_config;
233 	struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
234 
235 	q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
236 	q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
237 			ADF_RING_NEAR_WATERMARK_512,
238 			ADF_RING_NEAR_WATERMARK_0);
239 
240 	WRITE_CSR_RING_CONFIG_GEN4VF(qp->mmap_bar_addr,
241 		q_tx->hw_bundle_number,	q_tx->hw_queue_number,
242 		q_tx_config);
243 	WRITE_CSR_RING_CONFIG_GEN4VF(qp->mmap_bar_addr,
244 		q_rx->hw_bundle_number,	q_rx->hw_queue_number,
245 		q_resp_config);
246 }
247 
/* Publish @q's tail pointer to hardware via the GEN4 VF CSR window. */
void
qat_qp_csr_write_tail_gen4(struct qat_qp *qp, struct qat_queue *q)
{
	WRITE_CSR_RING_TAIL_GEN4VF(qp->mmap_bar_addr,
		q->hw_bundle_number, q->hw_queue_number, q->tail);
}
254 
/*
 * Publish a new head pointer for @q to hardware via the GEN4 VF CSR
 * window.
 */
void
qat_qp_csr_write_head_gen4(struct qat_qp *qp, struct qat_queue *q,
			uint32_t new_head)
{
	WRITE_CSR_RING_HEAD_GEN4VF(qp->mmap_bar_addr,
			q->hw_bundle_number, q->hw_queue_number, new_head);
}
262 
/*
 * One-shot CSR setup for a newly created queue pair: program both ring
 * base addresses, write the ring-config registers, then enable
 * arbitration on the TX ring so requests start being serviced.
 */
void
qat_qp_csr_setup_gen4(struct qat_pci_device *qat_dev,
			void *io_addr, struct qat_qp *qp)
{
	qat_qp_build_ring_base_gen4(io_addr, &qp->tx_q);
	qat_qp_build_ring_base_gen4(io_addr, &qp->rx_q);
	qat_qp_adf_configure_queues_gen4(qp);
	qat_qp_adf_arb_enable_gen4(&qp->tx_q, qp->mmap_bar_addr,
					&qat_dev->arb_csr_lock);
}
273 
/*
 * GEN4 queue-pair operations table; registered for both QAT_GEN4 and
 * QAT_VQAT in qat_dev_gen_4_init().
 */
static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen4 = {
	.qat_qp_rings_per_service = qat_qp_rings_per_service_gen4,
	.qat_qp_build_ring_base = qat_qp_build_ring_base_gen4,
	.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen4,
	.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen4,
	.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen4,
	.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen4,
	.qat_qp_csr_write_head = qat_qp_csr_write_head_gen4,
	.qat_qp_csr_setup = qat_qp_csr_setup_gen4,
	.qat_qp_get_hw_data = qat_qp_get_hw_data_gen4,
};
285 
286 int
qat_reset_ring_pairs_gen4(struct qat_pci_device * qat_pci_dev)287 qat_reset_ring_pairs_gen4(struct qat_pci_device *qat_pci_dev)
288 {
289 	int ret = 0, i;
290 	uint8_t data[4];
291 	struct qat_pf2vf_msg pf2vf_msg;
292 
293 	pf2vf_msg.msg_type = ADF_VF2PF_MSGTYPE_RP_RESET;
294 	pf2vf_msg.block_hdr = -1;
295 	for (i = 0; i < QAT_GEN4_BUNDLE_NUM; i++) {
296 		pf2vf_msg.msg_data = i;
297 		ret = qat_pf2vf_exch_msg(qat_pci_dev, pf2vf_msg, 1, data);
298 		if (ret) {
299 			QAT_LOG(ERR, "QAT error when reset bundle no %d",
300 				i);
301 			return ret;
302 		}
303 	}
304 
305 	return 0;
306 }
307 
/*
 * VQAT hook for ring-pair reset: a deliberate no-op that always
 * reports success (presumably the host side owns the reset for
 * virtual devices - confirm).
 */
static int
qat_reset_ring_pairs_vqat(struct qat_pci_device *qat_pci_dev __rte_unused)
{
	return 0;
}
313 
/* Transport (ring CSR) BAR for GEN4/VQAT devices: PCI BAR 0. */
const struct rte_mem_resource *
qat_dev_get_transport_bar_gen4(struct rte_pci_device *pci_dev)
{
	return &pci_dev->mem_resource[0];
}
319 
/*
 * Misc BAR for GEN4/VQAT devices: PCI BAR 2. Always succeeds and
 * stores the resource pointer through @mem_resource.
 */
int
qat_dev_get_misc_bar_gen4(struct rte_mem_resource **mem_resource,
		struct rte_pci_device *pci_dev)
{
	*mem_resource = &pci_dev->mem_resource[2];
	return 0;
}
327 
/*
 * Slice-map query is not implemented for GEN4: leaves @map untouched
 * and reports success.
 */
int
qat_dev_get_slice_map_gen4(uint32_t *map __rte_unused,
	const struct rte_pci_device *pci_dev __rte_unused)
{
	return 0;
}
334 
/*
 * Size of the GEN4 private area the core allocates for
 * qat_pci_device::dev_private.
 */
int
qat_dev_get_extra_size_gen4(void)
{
	return sizeof(struct qat_dev_gen4_extra);
}
340 
/* Device-level operations for physical GEN4 devices. */
static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen4 = {
	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen4,
	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen4,
	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen4,
	.qat_dev_read_config = qat_dev_read_config_gen4,
	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen4,
	.qat_dev_get_slice_map = qat_dev_get_slice_map_gen4,
};
349 
/*
 * Device-level operations for VQAT devices: shares the GEN4 hooks
 * except for ring-pair reset (no-op) and config read (subsystem-id
 * based).
 */
static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_vqat = {
	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_vqat,
	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen4,
	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen4,
	.qat_dev_read_config = qat_dev_read_config_vqat,
	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen4,
	.qat_dev_get_slice_map = qat_dev_get_slice_map_gen4,
};
358 
RTE_INIT(qat_dev_gen_4_init)359 RTE_INIT(qat_dev_gen_4_init)
360 {
361 	qat_qp_hw_spec[QAT_VQAT] = qat_qp_hw_spec[QAT_GEN4] = &qat_qp_hw_spec_gen4;
362 	qat_dev_hw_spec[QAT_GEN4] = &qat_dev_hw_spec_gen4;
363 	qat_dev_hw_spec[QAT_VQAT] = &qat_dev_hw_spec_vqat;
364 	qat_gen_config[QAT_GEN4].dev_gen = QAT_GEN4;
365 	qat_gen_config[QAT_VQAT].dev_gen = QAT_VQAT;
366 	qat_gen_config[QAT_GEN4].pf2vf_dev = &qat_pf2vf_gen4;
367 }
368