/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#include "qat_device.h"
#include "qat_qp.h"
#include "adf_transport_access_macros.h"
#include "qat_dev_gens.h"

#include <stdint.h>

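/*
 * Fixed queue pair layout for GEN3 devices: each service gets one queue
 * pair on hardware bundle 0, with dedicated TX/RX ring numbers and
 * request/response message sizes.
 */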
__extension__
const struct qat_qp_hw_data qat_gen3_qps[QAT_MAX_SERVICES]
					 [ADF_MAX_QPS_ON_ANY_SERVICE] = {
	/* queue pairs which provide an asymmetric crypto service */
	[QAT_SERVICE_ASYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_ASYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 0,
			.rx_ring_num = 4,
			.tx_msg_size = 64,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a symmetric crypto service */
	[QAT_SERVICE_SYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_SYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 1,
			.rx_ring_num = 5,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a compression service */
	[QAT_SERVICE_COMPRESSION] = {
		{
			.service_type = QAT_SERVICE_COMPRESSION,
			.hw_bundle_num = 0,
			.tx_ring_num = 3,
			.rx_ring_num = 7,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	}
};

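/* Look up the static ring configuration for the given service and queue pair. */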
static const struct qat_qp_hw_data *
qat_qp_get_hw_data_gen3(struct qat_pci_device *dev __rte_unused,
		enum qat_service_type service_type, uint16_t qp_id)
{
	return qat_gen3_qps[service_type] + qp_id;
}

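/*
 * GEN3 queue pair ops: the GEN1 CSR and arbiter helpers are reused,
 * only the hardware data lookup is GEN3-specific.
 */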
static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen3 = {
	.qat_qp_rings_per_service = qat_qp_rings_per_service_gen1,
	.qat_qp_build_ring_base = qat_qp_csr_build_ring_base_gen1,
	.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen1,
	.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen1,
	.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen1,
	.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen1,
	.qat_qp_csr_write_head = qat_qp_csr_write_head_gen1,
	.qat_qp_csr_setup = qat_qp_csr_setup_gen1,
	.qat_qp_get_hw_data = qat_qp_get_hw_data_gen3
};

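/* Read the slice map from the VF legfuses word in PCI config space. */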
static int
qat_dev_get_slice_map_gen3(uint32_t *map,
	const struct rte_pci_device *pci_dev)
{
	if (rte_pci_read_config(pci_dev, map,
			ADF_C4XXXIOV_VFLEGFUSES_LEN,
			ADF_C4XXXIOV_VFLEGFUSES_OFFSET) < 0) {
		return -1;
	}
	return 0;
}

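/*
 * GEN3 device ops: GEN1 implementations throughout, except for the
 * slice map read, which uses the GEN3 PCI config offset above.
 */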
static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen3 = {
	.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen1,
	.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen1,
	.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen1,
	.qat_dev_read_config = qat_dev_read_config_gen1,
	.qat_dev_get_extra_size = qat_dev_get_extra_size_gen1,
	.qat_dev_get_slice_map = qat_dev_get_slice_map_gen3,
};

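/* Register the GEN3 queue pair and device ops tables at constructor time. */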
RTE_INIT(qat_dev_gen_gen3_init)
{
	qat_qp_hw_spec[QAT_GEN3] = &qat_qp_hw_spec_gen3;
	qat_dev_hw_spec[QAT_GEN3] = &qat_dev_hw_spec_gen3;
	qat_gen_config[QAT_GEN3].dev_gen = QAT_GEN3;
}