/*	$NetBSD: qat_c3xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $	*/

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *   Copyright(c) 2014 Intel Corporation.
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qat_c3xxx.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "qatreg.h"
#include "qat_hw17reg.h"
#include "qat_c3xxxreg.h"
#include "qatvar.h"
#include "qat_hw17var.h"

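/*
 * Compute the mask of enabled accelerators (SSMs) from the fuse control
 * and softstrap registers: bits set in either register mark units that
 * are disabled, so the result is the inverted combination of the two.
 */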
static uint32_t
qat_c3xxx_get_accel_mask(struct qat_softc *sc)
{
	pcireg_t fusectl, strap;

	fusectl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, FUSECTL_REG);
	strap = pci_conf_read(sc->sc_pc, sc->sc_pcitag, SOFTSTRAP_REG_C3XXX);

	return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_C3XXX) &
	    ACCEL_MASK_C3XXX);
}

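/*
 * Compute the mask of enabled Accel Engines (MEs).  Each SSM that is
 * fused or strapped off takes its two associated MEs down with it, so
 * those bits are folded into the strap value before inverting.
 */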
static uint32_t
qat_c3xxx_get_ae_mask(struct qat_softc *sc)
{
	pcireg_t fusectl, me_strap, me_disable, ssms_disabled;

	fusectl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, FUSECTL_REG);
	me_strap = pci_conf_read(sc->sc_pc, sc->sc_pcitag, SOFTSTRAP_REG_C3XXX);

	/* If SSMs are disabled, then disable the corresponding MEs */
	ssms_disabled = (~qat_c3xxx_get_accel_mask(sc)) & ACCEL_MASK_C3XXX;
	me_disable = 0x3;
	while (ssms_disabled) {
		if (ssms_disabled & 1)
			me_strap |= me_disable;
		ssms_disabled >>= 1;
		me_disable <<= 2;
	}

	return (~(fusectl | me_strap)) & AE_MASK_C3XXX;
}

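/* Determine the device SKU from the number of enabled Accel Engines. */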
static enum qat_sku
qat_c3xxx_get_sku(struct qat_softc *sc)
{
	switch (sc->sc_ae_num) {
	case MAX_AE_C3XXX:
		return QAT_SKU_4;
	}

	return QAT_SKU_UNKNOWN;
}

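/*
 * Report the acceleration capabilities of this part.  Start from the
 * full feature set and clear whatever the legacy fuse register or the
 * softstrap power-gating bits indicate is unavailable.
 */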
static uint32_t
qat_c3xxx_get_accel_cap(struct qat_softc *sc)
{
	uint32_t cap;
	pcireg_t legfuse, strap;

	legfuse = pci_conf_read(sc->sc_pc, sc->sc_pcitag, LEGFUSE_REG);
	strap = pci_conf_read(sc->sc_pc, sc->sc_pcitag, SOFTSTRAP_REG_C3XXX);

	cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
		QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
		QAT_ACCEL_CAP_CIPHER +
		QAT_ACCEL_CAP_AUTHENTICATION +
		QAT_ACCEL_CAP_COMPRESSION +
		QAT_ACCEL_CAP_ZUC +
		QAT_ACCEL_CAP_SHA3;

	if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
		cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
		cap &= ~QAT_ACCEL_CAP_CIPHER;
	}
	if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
		cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
	if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
		cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
	if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
		cap &= ~QAT_ACCEL_CAP_COMPRESSION;
	if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
		cap &= ~QAT_ACCEL_CAP_ZUC;

	if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_PKE_C3XXX)
		cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
	if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_CY_C3XXX)
		cap &= ~QAT_ACCEL_CAP_COMPRESSION;

	return cap;
}

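/* Name of the microcode (UOF) image loaded into the Accel Engines. */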
static const char *
qat_c3xxx_get_fw_uof_name(struct qat_softc *sc)
{

	return AE_FW_UOF_NAME_C3XXX;
}

static void
qat_c3xxx_enable_intr(struct qat_softc *sc)
{

	/* Enable bundle and misc interrupts */
	qat_misc_write_4(sc, SMIAPF0_C3XXX, SMIA0_MASK_C3XXX);
	qat_misc_write_4(sc, SMIAPF1_C3XXX, SMIA1_MASK_C3XXX);
}

/* Worker thread to service arbiter mappings */
static uint32_t thrd_to_arb_map[] = {
	0x12222AAA, 0x11222AAA, 0x12222AAA,
	0x11222AAA, 0x12222AAA, 0x11222AAA
};

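/*
 * Hand back the thread-to-arbiter mapping table, zeroing the entries
 * of any Accel Engines that are absent from the enabled AE mask.
 */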
static void
qat_c3xxx_get_arb_mapping(struct qat_softc *sc, const uint32_t **arb_map_config)
{
	int i;

	for (i = 1; i < MAX_AE_C3XXX; i++) {
		if ((~sc->sc_ae_mask) & (1 << i))
			thrd_to_arb_map[i] = 0;
	}
	*arb_map_config = thrd_to_arb_map;
}

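/* Unmask correctable-error interrupts and enable CPP/CFC error reporting. */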
static void
qat_c3xxx_enable_error_interrupts(struct qat_softc *sc)
{
	qat_misc_write_4(sc, ERRMSK0, ERRMSK0_CERR_C3XXX); /* ME0-ME3 */
	qat_misc_write_4(sc, ERRMSK1, ERRMSK1_CERR_C3XXX); /* ME4-ME5 */
	qat_misc_write_4(sc, ERRMSK5, ERRMSK5_CERR_C3XXX); /* SSM2 */

	/* Reset everything except VFtoPF1_16. */
	qat_misc_read_write_and_4(sc, ERRMSK3, VF2PF1_16_C3XXX);

	/* RI CPP bus interface error detection and reporting. */
	qat_misc_write_4(sc, RICPPINTCTL_C3XXX, RICPP_EN_C3XXX);

	/* TI CPP bus interface error detection and reporting. */
	qat_misc_write_4(sc, TICPPINTCTL_C3XXX, TICPP_EN_C3XXX);

	/* Enable CFC Error interrupts and logging. */
	qat_misc_write_4(sc, CPP_CFC_ERR_CTRL_C3XXX, CPP_CFC_UE_C3XXX);
}

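/* Mask both the uncorrectable and correctable error interrupt sources. */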
static void
qat_c3xxx_disable_error_interrupts(struct qat_softc *sc)
{
	/* ME0-ME3 */
	qat_misc_write_4(sc, ERRMSK0, ERRMSK0_UERR_C3XXX | ERRMSK0_CERR_C3XXX);
	/* ME4-ME5 */
	qat_misc_write_4(sc, ERRMSK1, ERRMSK1_UERR_C3XXX | ERRMSK1_CERR_C3XXX);
	/* CPP Push Pull, RI, TI, SSM0-SSM1, CFC */
	qat_misc_write_4(sc, ERRMSK3, ERRMSK3_UERR_C3XXX);
	/* SSM2 */
	qat_misc_write_4(sc, ERRMSK5, ERRMSK5_UERR_C3XXX);
}

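/*
 * Turn on ECC and parity error detection/correction for every enabled
 * Accel Engine and shared-memory unit, then unmask the related error
 * interrupts.
 */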
static void
qat_c3xxx_enable_error_correction(struct qat_softc *sc)
{
	u_int i, mask;

	/* Enable Accel Engine error detection & correction */
	for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_C3XXX(i),
		    ENABLE_AE_ECC_ERR_C3XXX);
		qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_C3XXX(i),
		    ENABLE_AE_ECC_PARITY_CORR_C3XXX);
	}

	/* Enable shared memory error detection & correction */
	for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
		if (!(mask & 1))
			continue;

		qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_C3XXX);
		qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_C3XXX);
		qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_C3XXX);
	}

	qat_c3xxx_enable_error_interrupts(sc);
}

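/*
 * Hardware description and operation vectors for C3XXX-class QAT
 * devices, consumed by the chipset-independent qat(4) code.
 */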
const struct qat_hw qat_hw_c3xxx = {
	.qhw_sram_bar_id = BAR_SRAM_ID_C3XXX,
	.qhw_misc_bar_id = BAR_PMISC_ID_C3XXX,
	.qhw_etr_bar_id = BAR_ETR_ID_C3XXX,
	.qhw_cap_global_offset = CAP_GLOBAL_OFFSET_C3XXX,
	.qhw_ae_offset = AE_OFFSET_C3XXX,
	.qhw_ae_local_offset = AE_LOCAL_OFFSET_C3XXX,
	.qhw_etr_bundle_size = ETR_BUNDLE_SIZE_C3XXX,
	.qhw_num_banks = ETR_MAX_BANKS_C3XXX,
	.qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
	.qhw_num_accel = MAX_ACCEL_C3XXX,
	.qhw_num_engines = MAX_AE_C3XXX,
	.qhw_tx_rx_gap = ETR_TX_RX_GAP_C3XXX,
	.qhw_tx_rings_mask = ETR_TX_RINGS_MASK_C3XXX,
	.qhw_clock_per_sec = CLOCK_PER_SEC_C3XXX,
	.qhw_fw_auth = true,
	.qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17,
	.qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17,
	.qhw_ring_asym_tx = 0,
	.qhw_ring_asym_rx = 8,
	.qhw_ring_sym_tx = 2,
	.qhw_ring_sym_rx = 10,
	.qhw_mof_fwname = AE_FW_MOF_NAME_C3XXX,
	.qhw_mmp_fwname = AE_FW_MMP_NAME_C3XXX,
	.qhw_prod_type = AE_FW_PROD_TYPE_C3XXX,
	.qhw_get_accel_mask = qat_c3xxx_get_accel_mask,
	.qhw_get_ae_mask = qat_c3xxx_get_ae_mask,
	.qhw_get_sku = qat_c3xxx_get_sku,
	.qhw_get_accel_cap = qat_c3xxx_get_accel_cap,
	.qhw_get_fw_uof_name = qat_c3xxx_get_fw_uof_name,
	.qhw_enable_intr = qat_c3xxx_enable_intr,
	.qhw_init_admin_comms = qat_adm_mailbox_init,
	.qhw_send_admin_init = qat_adm_mailbox_send_init,
	.qhw_init_arb = qat_arb_init,
	.qhw_get_arb_mapping = qat_c3xxx_get_arb_mapping,
	.qhw_enable_error_correction = qat_c3xxx_enable_error_correction,
	.qhw_disable_error_interrupts = qat_c3xxx_disable_error_interrupts,
	.qhw_set_ssm_wdtimer = qat_set_ssm_wdtimer,
	.qhw_check_slice_hang = qat_check_slice_hang,
	.qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc,
	.qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params,
	.qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data),
};