/*	$NetBSD: qat_hw15.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $	*/

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *   Copyright(c) 2007-2013 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qat_hw15.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <opencrypto/xform.h>

/* XXX same as sys/arch/x86/x86/via_padlock.c */
#include <opencrypto/cryptosoft_xform.c>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "qatreg.h"
#include "qat_hw15reg.h"
#include "qatvar.h"
#include "qat_hw15var.h"

int		qat_adm_ring_init_ring_table(struct qat_softc *);
void		qat_adm_ring_build_slice_mask(uint16_t *, uint32_t, uint32_t);
void		qat_adm_ring_build_shram_mask(uint64_t *, uint32_t, uint32_t);
int		qat_adm_ring_build_ring_table(struct qat_softc *, uint32_t);
int		qat_adm_ring_build_init_msg(struct qat_softc *,
		    struct fw_init_req *, enum fw_init_cmd_id, uint32_t,
		    struct qat_accel_init_cb *);
int		qat_adm_ring_send_init_msg_sync(struct qat_softc *,
		    enum fw_init_cmd_id, uint32_t);
int		qat_adm_ring_send_init_msg(struct qat_softc *,
		    enum fw_init_cmd_id);
int		qat_adm_ring_intr(struct qat_softc *, void *, void *);

uint32_t	qat_crypto_setup_cipher_desc(struct qat_session *,
		    struct qat_crypto_desc *desc, struct cryptoini *,
		    struct fw_cipher_hdr *, uint8_t *, uint32_t, enum fw_slice);
uint32_t	qat_crypto_setup_auth_desc(struct qat_session *,
		    struct qat_crypto_desc *, struct cryptoini *,
		    struct fw_auth_hdr *, uint8_t *, uint32_t, enum fw_slice);

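/*
 * Populate the ARCH_IF request header: clear it, mark the message
 * valid, select the ET response ring type, and record the request
 * type and the rx ring that will carry the response.
 */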
void
qat_msg_req_type_populate(struct arch_if_req_hdr *msg, enum arch_if_req type,
    uint32_t rxring)
{

	memset(msg, 0, sizeof(struct arch_if_req_hdr));
	msg->flags = ARCH_IF_FLAGS_VALID_FLAG |
	    ARCH_IF_FLAGS_RESP_RING_TYPE_ET | ARCH_IF_FLAGS_RESP_TYPE_S;
	msg->req_type = type;
	msg->resp_pipe_id = rxring;
}

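/*
 * Fill in the common header of a lookaside bulk request with the
 * content descriptor address and sizes, the common request flags,
 * and the flow id.
 */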
void
qat_msg_cmn_hdr_populate(struct fw_la_bulk_req *msg, bus_addr_t desc_paddr,
    uint8_t hdrsz, uint8_t hwblksz, uint16_t comn_req_flags, uint32_t flow_id)
{
	struct fw_comn_req_hdr *hdr = &msg->comn_hdr;

	hdr->comn_req_flags = comn_req_flags;
	hdr->content_desc_params_sz = hwblksz;
	hdr->content_desc_hdr_sz = hdrsz;
	hdr->content_desc_addr = desc_paddr;
	msg->flow_id = flow_id;
}

void
qat_msg_service_cmd_populate(struct fw_la_bulk_req *msg, enum fw_la_cmd_id cmdid,
    uint16_t cmd_flags)
{
	msg->comn_la_req.la_cmd_id = cmdid;
	msg->comn_la_req.u.la_flags = cmd_flags;
}

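/*
 * Set the opaque cookie and the source/destination buffer addresses
 * of a request; with no separate destination, the operation is done
 * in place on the source buffer.
 */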
void
qat_msg_cmn_mid_populate(struct fw_comn_req_mid *msg, void *cookie,
    uint64_t src, uint64_t dst)
{

	msg->opaque_data = (uint64_t)(uintptr_t)cookie;
	msg->src_data_addr = src;
	if (dst == 0)
		msg->dest_data_addr = src;
	else
		msg->dest_data_addr = dst;
}

void
qat_msg_req_params_populate(struct fw_la_bulk_req *msg,
    bus_addr_t req_params_paddr, uint8_t req_params_sz)
{
	msg->req_params_addr = req_params_paddr;
	msg->comn_la_req.u1.req_params_blk_sz = req_params_sz / 8;
}

void
qat_msg_cmn_footer_populate(union fw_comn_req_ftr *msg, uint64_t next_addr)
{
	msg->next_request_addr = next_addr;
}

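/*
 * Pre-populate a session's cached bulk request from its crypto
 * descriptor.  Per-request fields (cookie, data buffer addresses,
 * request parameter address) are left zero and patched at request
 * time by qat_hw15_crypto_setup_req_params().
 */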
void
qat_msg_params_populate(struct fw_la_bulk_req *msg,
    struct qat_crypto_desc *desc, uint8_t req_params_sz,
    uint16_t service_cmd_flags, uint16_t comn_req_flags)
{
	qat_msg_cmn_hdr_populate(msg, desc->qcd_desc_paddr,
	    desc->qcd_hdr_sz, desc->qcd_hw_blk_sz, comn_req_flags, 0);
	qat_msg_service_cmd_populate(msg, desc->qcd_cmd_id, service_cmd_flags);
	qat_msg_cmn_mid_populate(&msg->comn_mid, NULL, 0, 0);
	qat_msg_req_params_populate(msg, 0, req_params_sz);
	qat_msg_cmn_footer_populate(&msg->comn_ftr, 0);
}

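/*
 * Attach each crypto service to its slot in the Master Ring Table:
 * a single AE runs crypto A alone; with two or four AEs, crypto A
 * and crypto B each get their own ring table.
 */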
int
qat_adm_ring_init_ring_table(struct qat_softc *sc)
{
	struct qat_admin_rings *qadr = &sc->sc_admin_rings;

	if (sc->sc_ae_num == 1) {
		qadr->qadr_cya_ring_tbl =
		    &qadr->qadr_master_ring_tbl[0];
		qadr->qadr_srv_mask[0] = QAT_SERVICE_CRYPTO_A;
	} else if (sc->sc_ae_num == 2 ||
	    sc->sc_ae_num == 4) {
		qadr->qadr_cya_ring_tbl =
		    &qadr->qadr_master_ring_tbl[0];
		qadr->qadr_srv_mask[0] = QAT_SERVICE_CRYPTO_A;
		qadr->qadr_cyb_ring_tbl =
		    &qadr->qadr_master_ring_tbl[1];
		qadr->qadr_srv_mask[1] = QAT_SERVICE_CRYPTO_B;
	}

	return 0;
}

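/*
 * Allocate the DMA-able Master Ring Table shared with the firmware,
 * give every bulk ring its default weight and priority, create the
 * admin tx/rx rings, and count the active AEs for shram partitioning.
 */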
int
qat_adm_ring_init(struct qat_softc *sc)
{
	struct qat_admin_rings *qadr = &sc->sc_admin_rings;
	int error, i, j;

	error = qat_alloc_dmamem(sc, &qadr->qadr_dma,
	    PAGE_SIZE, PAGE_SIZE);
	if (error)
		return error;

	qadr->qadr_master_ring_tbl = qadr->qadr_dma.qdm_dma_vaddr;

	KASSERT(sc->sc_ae_num *
	    sizeof(struct fw_init_ring_table) <= PAGE_SIZE);

	/* Initialize the Master Ring Table */
	for (i = 0; i < sc->sc_ae_num; i++) {
		struct fw_init_ring_table *firt =
		    &qadr->qadr_master_ring_tbl[i];

		for (j = 0; j < INIT_RING_TABLE_SZ; j++) {
			struct fw_init_ring_params *firp =
			    &firt->firt_bulk_rings[j];

			firp->firp_reserved = 0;
			firp->firp_curr_weight = QAT_DEFAULT_RING_WEIGHT;
			firp->firp_init_weight = QAT_DEFAULT_RING_WEIGHT;
			firp->firp_ring_pvl = QAT_DEFAULT_PVL;
		}
		memset(firt->firt_ring_mask, 0, sizeof(firt->firt_ring_mask));
	}

	error = qat_etr_setup_ring(sc, 0, RING_NUM_ADMIN_TX,
	    ADMIN_RING_SIZE, sc->sc_hw.qhw_fw_req_size,
	    NULL, NULL, "admin_tx", &qadr->qadr_admin_tx);
	if (error)
		return error;

	error = qat_etr_setup_ring(sc, 0, RING_NUM_ADMIN_RX,
	    ADMIN_RING_SIZE, sc->sc_hw.qhw_fw_resp_size,
	    qat_adm_ring_intr, qadr, "admin_rx", &qadr->qadr_admin_rx);
	if (error)
		return error;

	/*
	 * Finally set up the service indices into the Master Ring Table
	 * and convenient ring table pointers for each service enabled.
	 * Only the Admin rings are initialized.
	 */
	error = qat_adm_ring_init_ring_table(sc);
	if (error)
		return error;

	/*
	 * Calculate the number of active AEs per QAT
	 * needed for Shram partitioning.
	 */
	for (i = 0; i < sc->sc_ae_num; i++) {
		if (qadr->qadr_srv_mask[i])
			qadr->qadr_active_aes_per_accel++;
	}

	return 0;
}

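/*
 * Build the slice mask for the init message: enable the crypto 0
 * and/or crypto 1 slices for the services in srv_mask and, on
 * request, flag that shared RAM still needs to be initialized.
 */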
void
qat_adm_ring_build_slice_mask(uint16_t *slice_mask, uint32_t srv_mask,
    uint32_t init_shram)
{
	uint16_t shram = 0, comn_req = 0;

	if (init_shram)
		shram = COMN_REQ_SHRAM_INIT_REQUIRED;

	if (srv_mask & QAT_SERVICE_CRYPTO_A)
		comn_req |= COMN_REQ_CY0_ONLY(shram);
	if (srv_mask & QAT_SERVICE_CRYPTO_B)
		comn_req |= COMN_REQ_CY1_ONLY(shram);

	*slice_mask = comn_req;
}

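/*
 * Compute the shared-RAM ownership mask for one AE: a single active
 * AE owns the whole 64-bit mask, two active AEs split it 32/32, and
 * three active AEs split it 23/23/18.
 */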
void
qat_adm_ring_build_shram_mask(uint64_t *shram_mask, uint32_t active_aes,
    uint32_t ae)
{
	*shram_mask = 0;

	if (active_aes == 1) {
		*shram_mask = ~(*shram_mask);
	} else if (active_aes == 2) {
		if (ae == 1)
			*shram_mask = ((~(*shram_mask)) & 0xffffffff);
		else
			*shram_mask = ((~(*shram_mask)) & 0xffffffff00000000ull);
	} else if (active_aes == 3) {
		if (ae == 0)
			*shram_mask = ((~(*shram_mask)) & 0x7fffff);
		else if (ae == 1)
			*shram_mask = ((~(*shram_mask)) & 0x3fffff800000ull);
		else
			*shram_mask = ((~(*shram_mask)) & 0xffffc00000000000ull);
	} else {
		panic("Only up to three active AEs are supported");
	}
}

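/*
 * Give the symmetric crypto tx ring of this AE's service a high
 * priority weight and mark it as in use in the service's ring table.
 */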
int
qat_adm_ring_build_ring_table(struct qat_softc *sc, uint32_t ae)
{
	struct qat_admin_rings *qadr = &sc->sc_admin_rings;
	struct fw_init_ring_table *tbl;
	struct fw_init_ring_params *param;
	uint8_t srv_mask = sc->sc_admin_rings.qadr_srv_mask[ae];

	if ((srv_mask & QAT_SERVICE_CRYPTO_A)) {
		tbl = qadr->qadr_cya_ring_tbl;
	} else if ((srv_mask & QAT_SERVICE_CRYPTO_B)) {
		tbl = qadr->qadr_cyb_ring_tbl;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "Invalid execution engine %d\n", ae);
		return EINVAL;
	}

	param = &tbl->firt_bulk_rings[sc->sc_hw.qhw_ring_sym_tx];
	param->firp_curr_weight = QAT_HI_PRIO_RING_WEIGHT;
	param->firp_init_weight = QAT_HI_PRIO_RING_WEIGHT;
	FW_INIT_RING_MASK_SET(tbl, sc->sc_hw.qhw_ring_sym_tx);

	return 0;
}

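/*
 * Build one firmware init request: SET_AE_INFO carries the slice
 * and shram masks plus target/cluster ids, SET_RING_INFO points the
 * firmware at this AE's entry in the Master Ring Table.
 */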
int
qat_adm_ring_build_init_msg(struct qat_softc *sc,
    struct fw_init_req *initmsg, enum fw_init_cmd_id cmd, uint32_t ae,
    struct qat_accel_init_cb *cb)
{
	struct fw_init_set_ae_info_hdr *aehdr;
	struct fw_init_set_ae_info *aeinfo;
	struct fw_init_set_ring_info_hdr *ringhdr;
	struct fw_init_set_ring_info *ringinfo;
	int init_shram = 0, tgt_id, cluster_id;
	uint32_t srv_mask;

	srv_mask = sc->sc_admin_rings.qadr_srv_mask[
	    ae % sc->sc_ae_num];

	memset(initmsg, 0, sizeof(struct fw_init_req));

	qat_msg_req_type_populate(&initmsg->comn_hdr.arch_if,
	    ARCH_IF_REQ_QAT_FW_INIT,
	    sc->sc_admin_rings.qadr_admin_rx->qr_ring_id);

	qat_msg_cmn_mid_populate(&initmsg->comn_mid, cb, 0, 0);

	switch (cmd) {
	case FW_INIT_CMD_SET_AE_INFO:
		if (ae % sc->sc_ae_num == 0)
			init_shram = 1;
		if (ae >= sc->sc_ae_num) {
			tgt_id = 1;
			cluster_id = 1;
		} else {
			cluster_id = 0;
			if (sc->sc_ae_mask)
				tgt_id = 0;
			else
				tgt_id = 1;
		}
		aehdr = &initmsg->u.set_ae_info;
		aeinfo = &initmsg->u1.set_ae_info;

		aehdr->init_cmd_id = cmd;
		/* XXX does not support a sparse ae_mask */
		aehdr->init_trgt_id = ae;
		aehdr->init_ring_cluster_id = cluster_id;
		aehdr->init_qat_id = tgt_id;

		qat_adm_ring_build_slice_mask(&aehdr->init_slice_mask, srv_mask,
		    init_shram);

		qat_adm_ring_build_shram_mask(&aeinfo->init_shram_mask,
		    sc->sc_admin_rings.qadr_active_aes_per_accel,
		    ae % sc->sc_ae_num);

		break;
	case FW_INIT_CMD_SET_RING_INFO:
		ringhdr = &initmsg->u.set_ring_info;
		ringinfo = &initmsg->u1.set_ring_info;

		ringhdr->init_cmd_id = cmd;
		/* XXX does not support a sparse ae_mask */
		ringhdr->init_trgt_id = ae;

		/* XXX */
		qat_adm_ring_build_ring_table(sc,
		    ae % sc->sc_ae_num);

		ringhdr->init_ring_tbl_sz = sizeof(struct fw_init_ring_table);

		ringinfo->init_ring_table_ptr =
		    sc->sc_admin_rings.qadr_dma.qdm_dma_seg.ds_addr +
		    ((ae % sc->sc_ae_num) *
		    sizeof(struct fw_init_ring_table));

		break;
	default:
		return ENOTSUP;
	}

	return 0;
}

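/*
 * Post an init message on the admin tx ring and sleep until the
 * response interrupt wakes us, or fail after a 1.5 second timeout.
 */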
int
qat_adm_ring_send_init_msg_sync(struct qat_softc *sc,
    enum fw_init_cmd_id cmd, uint32_t ae)
{
	struct fw_init_req initmsg;
	struct qat_accel_init_cb cb;
	int error;

	error = qat_adm_ring_build_init_msg(sc, &initmsg, cmd, ae, &cb);
	if (error)
		return error;

	error = qat_etr_put_msg(sc, sc->sc_admin_rings.qadr_admin_tx,
	    (uint32_t *)&initmsg);
	if (error)
		return error;

	error = tsleep(&cb, PZERO, "qat_init", hz * 3 / 2);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "Timed out waiting for firmware initialization: %d\n",
		    error);
		return error;
	}
	if (cb.qaic_status) {
		aprint_error_dev(sc->sc_dev, "Failed to initialize firmware\n");
		return EIO;
	}

	return error;
}

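/*
 * Send an init command to every AE that hosts a matching service;
 * the TRNG commands only go to AEs providing crypto A.
 */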
int
qat_adm_ring_send_init_msg(struct qat_softc *sc,
    enum fw_init_cmd_id cmd)
{
	struct qat_admin_rings *qadr = &sc->sc_admin_rings;
	uint32_t ae;
	int error;

	for (ae = 0; ae < sc->sc_ae_num; ae++) {
		uint8_t srv_mask = qadr->qadr_srv_mask[ae];
		switch (cmd) {
		case FW_INIT_CMD_SET_AE_INFO:
		case FW_INIT_CMD_SET_RING_INFO:
			if (!srv_mask)
				continue;
			break;
		case FW_INIT_CMD_TRNG_ENABLE:
		case FW_INIT_CMD_TRNG_DISABLE:
			if (!(srv_mask & QAT_SERVICE_CRYPTO_A))
				continue;
			break;
		default:
			return ENOTSUP;
		}

		error = qat_adm_ring_send_init_msg_sync(sc, cmd, ae);
		if (error)
			return error;
	}

	return 0;
}

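/*
 * Perform the two-step firmware handshake: first describe the AEs,
 * then hand out the ring tables.
 */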
int
qat_adm_ring_send_init(struct qat_softc *sc)
{
	int error;

	error = qat_adm_ring_send_init_msg(sc, FW_INIT_CMD_SET_AE_INFO);
	if (error)
		return error;

	error = qat_adm_ring_send_init_msg(sc, FW_INIT_CMD_SET_RING_INFO);
	if (error)
		return error;

	aprint_verbose_dev(sc->sc_dev, "Initialization completed\n");

	return 0;
}

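/*
 * Admin rx ring handler: for firmware init responses, propagate the
 * status into the waiter's callback structure and wake it up.
 */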
int
qat_adm_ring_intr(struct qat_softc *sc, void *arg, void *msg)
{
	struct arch_if_resp_hdr *resp;
	struct fw_init_resp *init_resp;
	struct qat_accel_init_cb *init_cb;
	int handled = 0;

	resp = (struct arch_if_resp_hdr *)msg;

	switch (resp->resp_type) {
	case ARCH_IF_REQ_QAT_FW_INIT:
		init_resp = (struct fw_init_resp *)msg;
		init_cb = (struct qat_accel_init_cb *)
		    (uintptr_t)init_resp->comn_resp.opaque_data;
		init_cb->qaic_status =
		    __SHIFTOUT(init_resp->comn_resp.comn_status,
		    COMN_RESP_INIT_ADMIN_STATUS);
		wakeup(init_cb);
		handled = 1;
		break;
	default:
		aprint_error_dev(sc->sc_dev,
		    "unknown resp type %d\n", resp->resp_type);
		break;
	}

	return handled;
}

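/*
 * Common request flags: strict ordering, SGL buffers, and the auth
 * and cipher slices of crypto engine 0 or 1, selected by "ae".
 */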
static inline uint16_t
qat_hw15_get_comn_req_flags(uint8_t ae)
{
	if (ae == 0) {
		return COMN_REQ_ORD_STRICT | COMN_REQ_PTR_TYPE_SGL |
		    COMN_REQ_AUTH0_SLICE_REQUIRED |
		    COMN_REQ_CIPHER0_SLICE_REQUIRED;
	} else {
		return COMN_REQ_ORD_STRICT | COMN_REQ_PTR_TYPE_SGL |
		    COMN_REQ_AUTH1_SLICE_REQUIRED |
		    COMN_REQ_CIPHER1_SLICE_REQUIRED;
	}
}

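/*
 * Write the cipher configuration and key into the hardware block of
 * the content descriptor and fill the cipher control header; returns
 * the number of bytes added to the hardware block.
 */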
uint32_t
qat_crypto_setup_cipher_desc(struct qat_session *qs,
    struct qat_crypto_desc *desc, struct cryptoini *crie,
    struct fw_cipher_hdr *cipher_hdr, uint8_t *hw_blk_ptr,
    uint32_t hw_blk_offset, enum fw_slice next_slice)
{
	struct hw_cipher_config *cipher_config = (struct hw_cipher_config *)
	    (hw_blk_ptr + hw_blk_offset);
	uint32_t hw_blk_size;
	uint8_t *cipher_key = (uint8_t *)(cipher_config + 1);

	cipher_config->val = qat_crypto_load_cipher_cryptoini(desc, crie);
	cipher_config->reserved = 0;

	cipher_hdr->state_padding_sz = 0;
	cipher_hdr->key_sz = crie->cri_klen / 64; /* bits to quad words */

	cipher_hdr->state_sz = desc->qcd_cipher_blk_sz / 8;

	cipher_hdr->next_id = next_slice;
	cipher_hdr->curr_id = FW_SLICE_CIPHER;
	cipher_hdr->offset = hw_blk_offset / 8;
	cipher_hdr->resrvd = 0;

	hw_blk_size = sizeof(struct hw_cipher_config);

	memcpy(cipher_key, crie->cri_key, crie->cri_klen / 8);
	hw_blk_size += crie->cri_klen / 8;

	return hw_blk_size;
}

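/*
 * Write the auth setup and the precomputed inner hash states into
 * the hardware block, fill the auth control header, and return the
 * number of bytes added to the hardware block.
 */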
uint32_t
qat_crypto_setup_auth_desc(struct qat_session *qs, struct qat_crypto_desc *desc,
    struct cryptoini *cria, struct fw_auth_hdr *auth_hdr, uint8_t *hw_blk_ptr,
    uint32_t hw_blk_offset, enum fw_slice next_slice)
{
	struct qat_sym_hash_def const *hash_def;
	const struct swcr_auth_hash *sah;
	struct hw_auth_setup *auth_setup;
	uint32_t hw_blk_size;
	uint8_t *state1, *state2;
	uint32_t state_size;

	auth_setup = (struct hw_auth_setup *)(hw_blk_ptr + hw_blk_offset);

	auth_setup->auth_config.config =
	    qat_crypto_load_auth_cryptoini(desc, cria, &hash_def);
	sah = hash_def->qshd_alg->qshai_sah;
	auth_setup->auth_config.reserved = 0;

	/*
	 * For HMAC in mode 1, the auth counter is the block size;
	 * otherwise the auth counter is 0.  The firmware expects the
	 * counter in big-endian form.
	 */
	auth_setup->auth_counter.counter =
	    htonl(hash_def->qshd_qat->qshqi_auth_counter);
	auth_setup->auth_counter.reserved = 0;

	auth_hdr->next_id = next_slice;
	auth_hdr->curr_id = FW_SLICE_AUTH;
	auth_hdr->offset = hw_blk_offset / 8;
	auth_hdr->resrvd = 0;

	auth_hdr->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED;
	auth_hdr->u.inner_prefix_sz = 0;
	auth_hdr->outer_prefix_sz = 0;
	auth_hdr->final_sz = sah->auth_hash->authsize;
	auth_hdr->inner_state1_sz =
	    roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
	auth_hdr->inner_res_sz = hash_def->qshd_alg->qshai_digest_len;
	auth_hdr->inner_state2_sz =
	    roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
	auth_hdr->inner_state2_off = auth_hdr->offset +
	    ((sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz) / 8);

	hw_blk_size = sizeof(struct hw_auth_setup) + auth_hdr->inner_state1_sz +
	    auth_hdr->inner_state2_sz;

	auth_hdr->outer_config_off = 0;
	auth_hdr->outer_state1_sz = 0;
	auth_hdr->outer_res_sz = 0;
	auth_hdr->outer_prefix_off = 0;

	state1 = (uint8_t *)(auth_setup + 1);
	state2 = state1 + auth_hdr->inner_state1_sz;

	state_size = hash_def->qshd_alg->qshai_state_size;
	if (hash_def->qshd_qat->qshqi_algo_enc == HW_AUTH_ALGO_SHA1) {
		uint32_t state1_pad_len = auth_hdr->inner_state1_sz -
		    state_size;
		uint32_t state2_pad_len = auth_hdr->inner_state2_sz -
		    state_size;
		if (state1_pad_len > 0)
			memset(state1 + state_size, 0, state1_pad_len);
		if (state2_pad_len > 0)
			memset(state2 + state_size, 0, state2_pad_len);
	}

	desc->qcd_state_storage_sz = (sizeof(struct hw_auth_counter) +
	    roundup(state_size, 8)) / 8;

	qat_crypto_hmac_precompute(desc, cria, hash_def, state1, state2);

	return hw_blk_size;
}

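/*
 * Build the per-session content descriptor by walking the slice
 * chain (cipher and/or auth ending in a DRAM write), then prime the
 * cached bulk request and sync the session memory for the device.
 */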
void
qat_hw15_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs,
    struct qat_crypto_desc *desc,
    struct cryptoini *crie, struct cryptoini *cria)
{
	struct fw_cipher_hdr *cipher_hdr;
	struct fw_auth_hdr *auth_hdr;
	struct fw_la_bulk_req *req_cache;
	uint32_t ctrl_blk_size = 0, ctrl_blk_offset = 0, hw_blk_offset = 0;
	int i;
	uint16_t la_cmd_flags = 0;
	uint8_t req_params_sz = 0;
	uint8_t *ctrl_blk_ptr;
	uint8_t *hw_blk_ptr;

	if (crie != NULL)
		ctrl_blk_size += sizeof(struct fw_cipher_hdr);
	if (cria != NULL)
		ctrl_blk_size += sizeof(struct fw_auth_hdr);

	ctrl_blk_ptr = desc->qcd_content_desc;
	hw_blk_ptr = ctrl_blk_ptr + ctrl_blk_size;

	for (i = 0; i < MAX_FW_SLICE; i++) {
		switch (desc->qcd_slices[i]) {
		case FW_SLICE_CIPHER:
			cipher_hdr = (struct fw_cipher_hdr *)(ctrl_blk_ptr +
			    ctrl_blk_offset);
			ctrl_blk_offset += sizeof(struct fw_cipher_hdr);
			hw_blk_offset += qat_crypto_setup_cipher_desc(qs, desc,
			    crie, cipher_hdr, hw_blk_ptr, hw_blk_offset,
			    desc->qcd_slices[i + 1]);
			req_params_sz += sizeof(struct fw_la_cipher_req_params);
			break;
		case FW_SLICE_AUTH:
			auth_hdr = (struct fw_auth_hdr *)(ctrl_blk_ptr +
			    ctrl_blk_offset);
			ctrl_blk_offset += sizeof(struct fw_auth_hdr);
			hw_blk_offset += qat_crypto_setup_auth_desc(qs, desc,
			    cria, auth_hdr, hw_blk_ptr, hw_blk_offset,
			    desc->qcd_slices[i + 1]);
			req_params_sz += sizeof(struct fw_la_auth_req_params);
			la_cmd_flags |= LA_FLAGS_RET_AUTH_RES;
			/* no digest verify */
			break;
		case FW_SLICE_DRAM_WR:
			i = MAX_FW_SLICE; /* end of chain */
			break;
		default:
			KASSERT(0);
			break;
		}
	}

	desc->qcd_hdr_sz = ctrl_blk_offset / 8;
	desc->qcd_hw_blk_sz = hw_blk_offset / 8;

	req_cache = (struct fw_la_bulk_req *)desc->qcd_req_cache;
	qat_msg_req_type_populate(
	    &req_cache->comn_hdr.arch_if,
	    ARCH_IF_REQ_QAT_FW_LA, 0);

	la_cmd_flags |= LA_FLAGS_PROTO_NO;

	qat_msg_params_populate(req_cache,
	    desc, req_params_sz, la_cmd_flags, 0);

#ifdef QAT_DUMP
	qat_dump_raw(QAT_DUMP_DESC, "qcd_content_desc",
	    desc->qcd_content_desc, sizeof(desc->qcd_content_desc));
	qat_dump_raw(QAT_DUMP_DESC, "qcd_req_cache",
	    &desc->qcd_req_cache, sizeof(desc->qcd_req_cache));
#endif

	bus_dmamap_sync(qcy->qcy_sc->sc_dmat,
	    qcy->qcy_session_dmamems[qs->qs_lid].qdm_dma_map, 0,
	    sizeof(struct qat_session),
	    BUS_DMASYNC_PREWRITE);
}

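/*
 * Per-request setup: copy the session's cached bulk request into the
 * cookie's message buffer, patch in the response ring, buffer list
 * and cookie addresses, then fill the per-slice request parameters
 * (cipher offset/length and IV address, auth offset/length and
 * digest address).
 */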
void
qat_hw15_crypto_setup_req_params(struct qat_crypto_bank *qcb, struct qat_session *qs,
    struct qat_crypto_desc const *desc, struct qat_sym_cookie *qsc,
    struct cryptodesc *crde, struct cryptodesc *crda, bus_addr_t icv_paddr)
{
	struct qat_sym_bulk_cookie *qsbc;
	struct fw_la_bulk_req *bulk_req;
	struct fw_la_cipher_req_params *cipher_req;
	struct fw_la_auth_req_params *auth_req;
	uint32_t req_params_offset = 0;
	uint8_t *req_params_ptr;
	enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;
	enum fw_slice next_slice;

	qsbc = &qsc->u.qsc_bulk_cookie;

	bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;
	memcpy(bulk_req, &desc->qcd_req_cache, QAT_HW15_SESSION_REQ_CACHE_SIZE);
	bulk_req->comn_hdr.arch_if.resp_pipe_id = qcb->qcb_sym_rx->qr_ring_id;
	bulk_req->comn_hdr.comn_req_flags =
	    qat_hw15_get_comn_req_flags(qcb->qcb_bank % 2);
	bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
	bulk_req->comn_mid.dest_data_addr = qsc->qsc_buffer_list_desc_paddr;
	bulk_req->req_params_addr = qsc->qsc_bulk_req_params_buf_paddr;
	bulk_req->comn_ftr.next_request_addr = 0;
	bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;

	if (icv_paddr != 0)
		bulk_req->comn_la_req.u.la_flags |= LA_FLAGS_DIGEST_IN_BUFFER;

	req_params_ptr = qsbc->qsbc_req_params_buf;

	if (cmd_id != FW_LA_CMD_AUTH) {
		cipher_req = (struct fw_la_cipher_req_params *)
		    (req_params_ptr + req_params_offset);
		req_params_offset += sizeof(struct fw_la_cipher_req_params);

		if (cmd_id == FW_LA_CMD_CIPHER || cmd_id == FW_LA_CMD_HASH_CIPHER)
			next_slice = FW_SLICE_DRAM_WR;
		else
			next_slice = FW_SLICE_AUTH;

		cipher_req->resrvd = 0;

		cipher_req->cipher_state_sz = desc->qcd_cipher_blk_sz / 8;

		cipher_req->curr_id = FW_SLICE_CIPHER;
		cipher_req->next_id = next_slice;

		cipher_req->resrvd1 = 0;

		cipher_req->cipher_off = crde->crd_skip;
		cipher_req->cipher_len = crde->crd_len;
		cipher_req->state_address = qsc->qsc_iv_buf_paddr;
	}
	if (cmd_id != FW_LA_CMD_CIPHER) {
		auth_req = (struct fw_la_auth_req_params *)
		    (req_params_ptr + req_params_offset);
		req_params_offset += sizeof(struct fw_la_auth_req_params);

		if (cmd_id == FW_LA_CMD_HASH_CIPHER)
			next_slice = FW_SLICE_CIPHER;
		else
			next_slice = FW_SLICE_DRAM_WR;

		auth_req->next_id = next_slice;
		auth_req->curr_id = FW_SLICE_AUTH;

		auth_req->auth_res_address = icv_paddr;
		auth_req->auth_res_sz = 0; /* no digest verify */

		auth_req->auth_len = crda->crd_len;
		auth_req->auth_off = crda->crd_skip;

		auth_req->hash_state_sz = 0;
		auth_req->u1.prefix_addr = desc->qcd_hash_state_paddr +
		    desc->qcd_state_storage_sz;

		auth_req->u.resrvd = 0;
	}

#ifdef QAT_DUMP
	qat_dump_raw(QAT_DUMP_DESC, "req_params", req_params_ptr,
	    req_params_offset);
#endif
}
808