/* $NetBSD: qat_c62x.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright(c) 2014 Intel Corporation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qat_c62x.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "qatreg.h"
#include "qat_hw17reg.h"
#include "qat_c62xreg.h"
#include "qatvar.h"
#include "qat_hw17var.h"

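/*
 * qat_c62x_get_accel_mask: return the mask of enabled accelerators, i.e.
 * those that are neither fused off nor disabled by the soft straps.
 */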
static uint32_t
qat_c62x_get_accel_mask(struct qat_softc *sc)
{
        pcireg_t fusectl, strap;

        fusectl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, FUSECTL_REG);
        strap = pci_conf_read(sc->sc_pc, sc->sc_pcitag, SOFTSTRAP_REG_C62X);

        return (((~(fusectl | strap)) >> ACCEL_REG_OFFSET_C62X) &
            ACCEL_MASK_C62X);
}

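/*
 * qat_c62x_get_ae_mask: derive the mask of usable acceleration engines
 * (MEs) from the fuse control and soft strap registers.  MEs belonging to
 * a disabled SSM (two MEs per SSM) are excluded as well.
 */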
static uint32_t
qat_c62x_get_ae_mask(struct qat_softc *sc)
{
        pcireg_t fusectl, me_strap, me_disable, ssms_disabled;

        fusectl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, FUSECTL_REG);
        me_strap = pci_conf_read(sc->sc_pc, sc->sc_pcitag, SOFTSTRAP_REG_C62X);

        /* If SSMs are disabled, then disable the corresponding MEs */
        ssms_disabled = (~qat_c62x_get_accel_mask(sc)) & ACCEL_MASK_C62X;
        me_disable = 0x3;
        while (ssms_disabled) {
                if (ssms_disabled & 1)
                        me_strap |= me_disable;
                ssms_disabled >>= 1;
                me_disable <<= 2;
        }

        return (~(fusectl | me_strap)) & AE_MASK_C62X;
}

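/*
 * qat_c62x_get_sku: report the device SKU based on the number of
 * available acceleration engines.
 */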
static enum qat_sku
qat_c62x_get_sku(struct qat_softc *sc)
{
        switch (sc->sc_ae_num) {
        case 8:
                return QAT_SKU_2;
        case MAX_AE_C62X:
                return QAT_SKU_4;
        }

        return QAT_SKU_UNKNOWN;
}

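/*
 * qat_c62x_get_accel_cap: build the capability flags for this device,
 * starting from the full C62x feature set and clearing every capability
 * whose slice is disabled by the legacy fuses or soft straps.
 */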
static uint32_t
qat_c62x_get_accel_cap(struct qat_softc *sc)
{
        uint32_t cap;
        pcireg_t legfuse, strap;

        legfuse = pci_conf_read(sc->sc_pc, sc->sc_pcitag, LEGFUSE_REG);
        strap = pci_conf_read(sc->sc_pc, sc->sc_pcitag, SOFTSTRAP_REG_C62X);

        cap = QAT_ACCEL_CAP_CRYPTO_SYMMETRIC +
            QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC +
            QAT_ACCEL_CAP_CIPHER +
            QAT_ACCEL_CAP_AUTHENTICATION +
            QAT_ACCEL_CAP_COMPRESSION +
            QAT_ACCEL_CAP_ZUC +
            QAT_ACCEL_CAP_SHA3;

        if (legfuse & LEGFUSE_ACCEL_MASK_CIPHER_SLICE) {
                cap &= ~QAT_ACCEL_CAP_CRYPTO_SYMMETRIC;
                cap &= ~QAT_ACCEL_CAP_CIPHER;
        }
        if (legfuse & LEGFUSE_ACCEL_MASK_AUTH_SLICE)
                cap &= ~QAT_ACCEL_CAP_AUTHENTICATION;
        if (legfuse & LEGFUSE_ACCEL_MASK_PKE_SLICE)
                cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
        if (legfuse & LEGFUSE_ACCEL_MASK_COMPRESS_SLICE)
                cap &= ~QAT_ACCEL_CAP_COMPRESSION;
        if (legfuse & LEGFUSE_ACCEL_MASK_EIA3_SLICE)
                cap &= ~QAT_ACCEL_CAP_ZUC;

        if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_PKE_C62X)
                cap &= ~QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC;
        if ((strap | legfuse) & SOFTSTRAP_SS_POWERGATE_CY_C62X)
                cap &= ~QAT_ACCEL_CAP_COMPRESSION;

        return cap;
}

static const char *
qat_c62x_get_fw_uof_name(struct qat_softc *sc)
{

        return AE_FW_UOF_NAME_C62X;
}

static void
qat_c62x_enable_intr(struct qat_softc *sc)
{

        /* Enable bundle and misc interrupts */
        qat_misc_write_4(sc, SMIAPF0_C62X, SMIA0_MASK_C62X);
        qat_misc_write_4(sc, SMIAPF1_C62X, SMIA1_MASK_C62X);
}

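/*
 * One thread-to-arbiter configuration word per ME; the words of MEs that
 * are not present are zeroed in qat_c62x_get_arb_mapping() before the
 * table is handed to the caller.
 */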
/* Worker thread to service arbiter mappings */
static uint32_t thrd_to_arb_map[] = {
        0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
        0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
};

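/*
 * qat_c62x_get_arb_mapping: hand back the thread-to-arbiter mapping table,
 * zeroing the slot of each disabled ME (ME0's slot is always kept).
 */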
static void
qat_c62x_get_arb_mapping(struct qat_softc *sc, const uint32_t **arb_map_config)
{
        int i;

        for (i = 1; i < MAX_AE_C62X; i++) {
                if ((~sc->sc_ae_mask) & (1 << i))
                        thrd_to_arb_map[i] = 0;
        }
        *arb_map_config = thrd_to_arb_map;
}

static void
qat_c62x_enable_error_interrupts(struct qat_softc *sc)
{
        qat_misc_write_4(sc, ERRMSK0, ERRMSK0_CERR_C62X); /* ME0-ME3 */
        qat_misc_write_4(sc, ERRMSK1, ERRMSK1_CERR_C62X); /* ME4-ME7 */
        qat_misc_write_4(sc, ERRMSK4, ERRMSK4_CERR_C62X); /* ME8-ME9 */
        qat_misc_write_4(sc, ERRMSK5, ERRMSK5_CERR_C62X); /* SSM2-SSM4 */

        /* Reset everything except VFtoPF1_16. */
        qat_misc_read_write_and_4(sc, ERRMSK3, VF2PF1_16_C62X);
        /* Disable Secure RAM correctable error interrupt */
        qat_misc_read_write_or_4(sc, ERRMSK3, ERRMSK3_CERR_C62X);

        /* RI CPP bus interface error detection and reporting. */
        qat_misc_write_4(sc, RICPPINTCTL_C62X, RICPP_EN_C62X);

        /* TI CPP bus interface error detection and reporting. */
        qat_misc_write_4(sc, TICPPINTCTL_C62X, TICPP_EN_C62X);

        /* Enable CFC Error interrupts and logging. */
        qat_misc_write_4(sc, CPP_CFC_ERR_CTRL_C62X, CPP_CFC_UE_C62X);

        /* Enable SecureRAM to fix and log Correctable errors */
        qat_misc_write_4(sc, SECRAMCERR_C62X, SECRAM_CERR_C62X);

        /* Enable SecureRAM Uncorrectable error interrupts and logging */
        qat_misc_write_4(sc, SECRAMUERR, SECRAM_UERR_C62X);

        /* Enable Push/Pull Misc Uncorrectable error interrupts and logging */
        qat_misc_write_4(sc, CPPMEMTGTERR, TGT_UERR_C62X);
}

static void
qat_c62x_disable_error_interrupts(struct qat_softc *sc)
{
        /* ME0-ME3 */
        qat_misc_write_4(sc, ERRMSK0, ERRMSK0_UERR_C62X | ERRMSK0_CERR_C62X);
        /* ME4-ME7 */
        qat_misc_write_4(sc, ERRMSK1, ERRMSK1_UERR_C62X | ERRMSK1_CERR_C62X);
        /* Secure RAM, CPP Push Pull, RI, TI, SSM0-SSM1, CFC */
        qat_misc_write_4(sc, ERRMSK3, ERRMSK3_UERR_C62X | ERRMSK3_CERR_C62X);
        /* ME8-ME9 */
        qat_misc_write_4(sc, ERRMSK4, ERRMSK4_UERR_C62X | ERRMSK4_CERR_C62X);
        /* SSM2-SSM4 */
        qat_misc_write_4(sc, ERRMSK5, ERRMSK5_UERR_C62X | ERRMSK5_CERR_C62X);
}

static void
qat_c62x_enable_error_correction(struct qat_softc *sc)
{
        u_int i, mask;

        /* Enable Accel Engine error detection & correction */
        for (i = 0, mask = sc->sc_ae_mask; mask; i++, mask >>= 1) {
                if (!(mask & 1))
                        continue;
                qat_misc_read_write_or_4(sc, AE_CTX_ENABLES_C62X(i),
                    ENABLE_AE_ECC_ERR_C62X);
                qat_misc_read_write_or_4(sc, AE_MISC_CONTROL_C62X(i),
                    ENABLE_AE_ECC_PARITY_CORR_C62X);
        }

        /* Enable shared memory error detection & correction */
        for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
                if (!(mask & 1))
                        continue;

                qat_misc_read_write_or_4(sc, UERRSSMSH(i), ERRSSMSH_EN_C62X);
                qat_misc_read_write_or_4(sc, CERRSSMSH(i), ERRSSMSH_EN_C62X);
                qat_misc_read_write_or_4(sc, PPERR(i), PPERR_EN_C62X);
        }

        qat_c62x_enable_error_interrupts(sc);
}

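/*
 * Hardware description for C62x devices: BAR layout, ring and bank
 * geometry, firmware image names, and the C62x-specific methods above,
 * used by the common QAT code.
 */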
const struct qat_hw qat_hw_c62x = {
        .qhw_sram_bar_id = BAR_SRAM_ID_C62X,
        .qhw_misc_bar_id = BAR_PMISC_ID_C62X,
        .qhw_etr_bar_id = BAR_ETR_ID_C62X,
        .qhw_cap_global_offset = CAP_GLOBAL_OFFSET_C62X,
        .qhw_ae_offset = AE_OFFSET_C62X,
        .qhw_ae_local_offset = AE_LOCAL_OFFSET_C62X,
        .qhw_etr_bundle_size = ETR_BUNDLE_SIZE_C62X,
        .qhw_num_banks = ETR_MAX_BANKS_C62X,
        .qhw_num_rings_per_bank = ETR_MAX_RINGS_PER_BANK,
        .qhw_num_accel = MAX_ACCEL_C62X,
        .qhw_num_engines = MAX_AE_C62X,
        .qhw_tx_rx_gap = ETR_TX_RX_GAP_C62X,
        .qhw_tx_rings_mask = ETR_TX_RINGS_MASK_C62X,
        .qhw_clock_per_sec = CLOCK_PER_SEC_C62X,
        .qhw_fw_auth = true,
        .qhw_fw_req_size = FW_REQ_DEFAULT_SZ_HW17,
        .qhw_fw_resp_size = FW_RESP_DEFAULT_SZ_HW17,
        .qhw_ring_asym_tx = 0,
        .qhw_ring_asym_rx = 8,
        .qhw_ring_sym_tx = 2,
        .qhw_ring_sym_rx = 10,
        .qhw_mof_fwname = AE_FW_MOF_NAME_C62X,
        .qhw_mmp_fwname = AE_FW_MMP_NAME_C62X,
        .qhw_prod_type = AE_FW_PROD_TYPE_C62X,
        .qhw_get_accel_mask = qat_c62x_get_accel_mask,
        .qhw_get_ae_mask = qat_c62x_get_ae_mask,
        .qhw_get_sku = qat_c62x_get_sku,
        .qhw_get_accel_cap = qat_c62x_get_accel_cap,
        .qhw_get_fw_uof_name = qat_c62x_get_fw_uof_name,
        .qhw_enable_intr = qat_c62x_enable_intr,
        .qhw_init_admin_comms = qat_adm_mailbox_init,
        .qhw_send_admin_init = qat_adm_mailbox_send_init,
        .qhw_init_arb = qat_arb_init,
        .qhw_get_arb_mapping = qat_c62x_get_arb_mapping,
        .qhw_enable_error_correction = qat_c62x_enable_error_correction,
        .qhw_disable_error_interrupts = qat_c62x_disable_error_interrupts,
        .qhw_set_ssm_wdtimer = qat_set_ssm_wdtimer,
        .qhw_check_slice_hang = qat_check_slice_hang,
        .qhw_crypto_setup_desc = qat_hw17_crypto_setup_desc,
        .qhw_crypto_setup_req_params = qat_hw17_crypto_setup_req_params,
        .qhw_crypto_opaque_offset = offsetof(struct fw_la_resp, opaque_data),
};