1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
97 return (bus_space_read_4(sc->pci_bus_tag, sc->pci_bus_handle, offset)); in ccp_read_4()
103 bus_space_write_4(sc->pci_bus_tag, sc->pci_bus_handle, offset, value); in ccp_write_4()
125 ccp_write_queue_4(qp->cq_softc, qp->cq_qindex, CMD_Q_TAIL_LO_BASE, in ccp_queue_write_tail()
126 ((uint32_t)qp->desc_ring_bus_addr) + (Q_DESC_SIZE * qp->cq_tail)); in ccp_queue_write_tail()
136 return ((qp->private_lsb * LSB_REGION_LENGTH + lsb_entry)); in ccp_queue_lsb_entry()
152 * LSB - Local Storage Block
166 * "Pass-through" mode
169 * Pass-through is a generic DMA engine, much like ioat(4). Some nice
172 * - Supports byte-swapping for endian conversion (32- or 256-bit words)
173 * - AND, OR, XOR with fixed 256-bit mask
174 * - CRC32 of data (may be used in tandem with bswap, but not bit operations)
175 * - Read/write of LSB
176 * - Memset
181 * If byte-swapping is enabled, input must be a multiple of the word size.
183 * Zlib mode -- only usable from one queue at a time, single job at a time.
186 * Only usable from private host, aka PSP? Not host processor?
194 * NIST SP 800-90B Repetition Count and Adaptive Proportion health checks are
195 * implemented on the raw input stream and may be enabled to verify min-entropy
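The SP 800-90B health checks named above are simple to state in code. Below is a minimal userspace sketch of the Repetition Count Test only; the cutoff is a made-up example (a real cutoff derives from the claimed min-entropy and an acceptable false-positive rate), and nothing here is read from CCP registers:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RCT_CUTOFF 41   /* illustrative cutoff, not a CCP value */

/* Fail if any sample value repeats RCT_CUTOFF or more times in a row. */
static bool
rct_healthy(const uint8_t *samples, size_t n)
{
    size_t i, run;

    if (n == 0)
        return (true);
    run = 1;
    for (i = 1; i < n; i++) {
        if (samples[i] == samples[i - 1]) {
            if (++run >= RCT_CUTOFF)
                return (false);
        } else
            run = 1;
    }
    return (true);
}

int
main(void)
{
    uint8_t stuck[64];

    memset(stuck, 0xaa, sizeof(stuck)); /* a stuck-at entropy source */
    printf("healthy: %d\n", rct_healthy(stuck, sizeof(stuck))); /* 0 */
    return (0);
}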
206 *baddr = segs->ds_addr; in ccp_dmamap_cb()
220 qp = &sc->queues[queue]; in ccp_hw_attach_queue()
226 if ((sc->valid_queues & (1 << queue)) == 0) in ccp_hw_attach_queue()
232 if (qp->lsb_mask == 0) { in ccp_hw_attach_queue()
235 sc->valid_queues &= ~(1 << queue); in ccp_hw_attach_queue()
239 num_descriptors = 1 << sc->ring_size_order; in ccp_hw_attach_queue()
243 * "Queue_Size" is order - 1. in ccp_hw_attach_queue()
248 1 << (5 + sc->ring_size_order), in ccp_hw_attach_queue()
255 ringsz, 0, NULL, NULL, &qp->ring_desc_tag); in ccp_hw_attach_queue()
259 error = bus_dmamem_alloc(qp->ring_desc_tag, &desc, in ccp_hw_attach_queue()
260 BUS_DMA_ZERO | BUS_DMA_WAITOK, &qp->ring_desc_map); in ccp_hw_attach_queue()
264 error = bus_dmamap_load(qp->ring_desc_tag, qp->ring_desc_map, desc, in ccp_hw_attach_queue()
265 ringsz, ccp_dmamap_cb, &qp->desc_ring_bus_addr, BUS_DMA_WAITOK); in ccp_hw_attach_queue()
269 qp->desc_ring = desc; in ccp_hw_attach_queue()
270 qp->completions_ring = malloc(num_descriptors * in ccp_hw_attach_queue()
271 sizeof(*qp->completions_ring), M_CCP, M_ZERO | M_WAITOK); in ccp_hw_attach_queue()
274 qp->qcontrol = 0; in ccp_hw_attach_queue()
275 ccp_write_queue_4(sc, queue, CMD_Q_CONTROL_BASE, qp->qcontrol); in ccp_hw_attach_queue()
282 qp->qcontrol |= (sc->ring_size_order - 1) << CMD_Q_SIZE_SHIFT; in ccp_hw_attach_queue()
285 (uint32_t)qp->desc_ring_bus_addr); in ccp_hw_attach_queue()
287 (uint32_t)qp->desc_ring_bus_addr); in ccp_hw_attach_queue()
297 qp->qcontrol |= (qp->desc_ring_bus_addr >> 32) << CMD_Q_PTR_HI_SHIFT; in ccp_hw_attach_queue()
298 qp->qcontrol |= CMD_Q_RUN; in ccp_hw_attach_queue()
299 ccp_write_queue_4(sc, queue, CMD_Q_CONTROL_BASE, qp->qcontrol); in ccp_hw_attach_queue()
303 if (qp->desc_ring != NULL) in ccp_hw_attach_queue()
304 bus_dmamap_unload(qp->ring_desc_tag, in ccp_hw_attach_queue()
305 qp->ring_desc_map); in ccp_hw_attach_queue()
307 bus_dmamem_free(qp->ring_desc_tag, desc, in ccp_hw_attach_queue()
308 qp->ring_desc_map); in ccp_hw_attach_queue()
309 if (qp->ring_desc_tag != NULL) in ccp_hw_attach_queue()
310 bus_dma_tag_destroy(qp->ring_desc_tag); in ccp_hw_attach_queue()
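The queue-control setup above is plain bit arithmetic: the encoded size is order minus 1, the low 32 bits of the ring's bus address go to the base registers, and the upper address bits ride inside the control word. A compilable sketch, with placeholder values standing in for the real CMD_Q_* shift and flag definitions:

#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the real CMD_Q_* definitions live in the driver. */
#define Q_DESC_SIZE        32
#define CMD_Q_SIZE_SHIFT   3    /* assumed position of the size field */
#define CMD_Q_PTR_HI_SHIFT 16   /* assumed position of the high address bits */
#define CMD_Q_RUN          (1u << 0)    /* assumed run flag */

int
main(void)
{
    uint64_t ring_bus_addr = 0x1234567000ULL;  /* example DMA address */
    unsigned order = 6;                        /* 64-descriptor ring */
    uint32_t qcontrol = 0;

    /* "Queue_Size" is encoded as order - 1. */
    qcontrol |= (order - 1) << CMD_Q_SIZE_SHIFT;
    /* Low 32 bits go to the base registers; upper bits into qcontrol. */
    qcontrol |= (uint32_t)((ring_bus_addr >> 32) << CMD_Q_PTR_HI_SHIFT);
    qcontrol |= CMD_Q_RUN;

    printf("base_lo=0x%08x qcontrol=0x%08x\n",
        (unsigned)(uint32_t)ring_bus_addr, (unsigned)qcontrol);
    return (0);
}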
322 qp = &sc->queues[queue]; in ccp_hw_detach_queue()
328 if ((sc->valid_queues & (1 << queue)) == 0) in ccp_hw_detach_queue()
331 free(qp->completions_ring, M_CCP); in ccp_hw_detach_queue()
332 bus_dmamap_unload(qp->ring_desc_tag, qp->ring_desc_map); in ccp_hw_detach_queue()
333 bus_dmamem_free(qp->ring_desc_tag, qp->desc_ring, qp->ring_desc_map); in ccp_hw_detach_queue()
334 bus_dma_tag_destroy(qp->ring_desc_tag); in ccp_hw_detach_queue()
344 sc->pci_resource_id = PCIR_BAR(2); in ccp_map_pci_bar()
345 sc->pci_resource = bus_alloc_resource_any(dev, SYS_RES_MEMORY, in ccp_map_pci_bar()
346 &sc->pci_resource_id, RF_ACTIVE); in ccp_map_pci_bar()
347 if (sc->pci_resource == NULL) { in ccp_map_pci_bar()
352 sc->pci_resource_id_msix = PCIR_BAR(5); in ccp_map_pci_bar()
353 sc->pci_resource_msix = bus_alloc_resource_any(dev, SYS_RES_MEMORY, in ccp_map_pci_bar()
354 &sc->pci_resource_id_msix, RF_ACTIVE); in ccp_map_pci_bar()
355 if (sc->pci_resource_msix == NULL) { in ccp_map_pci_bar()
357 bus_release_resource(dev, SYS_RES_MEMORY, sc->pci_resource_id, in ccp_map_pci_bar()
358 sc->pci_resource); in ccp_map_pci_bar()
362 sc->pci_bus_tag = rman_get_bustag(sc->pci_resource); in ccp_map_pci_bar()
363 sc->pci_bus_handle = rman_get_bushandle(sc->pci_resource); in ccp_map_pci_bar()
374 bus_release_resource(dev, SYS_RES_MEMORY, sc->pci_resource_id_msix, in ccp_unmap_pci_bar()
375 sc->pci_resource_msix); in ccp_unmap_pci_bar()
376 bus_release_resource(dev, SYS_RES_MEMORY, sc->pci_resource_id, in ccp_unmap_pci_bar()
377 sc->pci_resource); in ccp_unmap_pci_bar()
388 "A non-supported function type was specified" },
390 "A non-supported function mode was specified" },
394 "A non-supported function size was specified.\n"
395 "AES-CFB: Size was not 127 or 7;\n"
396 "3DES-CFB: Size was not 7;\n"
413 "Pointer with Fixed=1 is not 32-bit aligned; or\n"
414 "Pointer with Fixed=1 attempted to reference non-AXI1 (local) memory."
464 sc = qp->cq_softc; in ccp_intr_handle_error()
465 q = qp->cq_qindex; in ccp_intr_handle_error()
483 device_printf(sc->dev, "Error: %s (%u) Source: %u Faulting LSB block: %u\n", in ccp_intr_handle_error()
484 (ec != NULL) ? ec->ce_name : "(reserved)", error, esource, in ccp_intr_handle_error()
487 device_printf(sc->dev, "Error description: %s\n", ec->ce_desc); in ccp_intr_handle_error()
490 idx = desc - qp->desc_ring; in ccp_intr_handle_error()
491 DPRINTF(sc->dev, "Bad descriptor index: %u contents: %32D\n", idx, in ccp_intr_handle_error()
501 memset(&qp->desc_ring[idx], 0, sizeof(qp->desc_ring[idx])); in ccp_intr_handle_error()
503 cctx = &qp->completions_ring[idx]; in ccp_intr_handle_error()
512 idx = (idx + 1) % (1 << sc->ring_size_order); in ccp_intr_handle_error()
515 if (cctx->callback_fn != NULL) { in ccp_intr_handle_error()
519 errno = ec->ce_errno; in ccp_intr_handle_error()
521 cctx->callback_fn(qp, cctx->session, cctx->callback_arg, errno); in ccp_intr_handle_error()
522 cctx->callback_fn = NULL; in ccp_intr_handle_error()
527 qp->cq_head = idx; in ccp_intr_handle_error()
528 qp->cq_waiting = false; in ccp_intr_handle_error()
529 wakeup(&qp->cq_tail); in ccp_intr_handle_error()
530 DPRINTF(sc->dev, "%s: wrote sw head:%u\n", __func__, qp->cq_head); in ccp_intr_handle_error()
532 (uint32_t)qp->desc_ring_bus_addr + (idx * Q_DESC_SIZE)); in ccp_intr_handle_error()
533 ccp_write_queue_4(sc, q, CMD_Q_CONTROL_BASE, qp->qcontrol); in ccp_intr_handle_error()
534 DPRINTF(sc->dev, "%s: Restarted queue\n", __func__); in ccp_intr_handle_error()
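The error path above resolves the hardware error code through a table carrying a name, a mapped errno, and a description (the ce_name/ce_errno/ce_desc fields used in the printfs). A sketch of that lookup, with illustrative table entries rather than the driver's full table:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct ccp_error_code {
    unsigned    ce_code;
    const char *ce_name;
    int         ce_errno;
    const char *ce_desc;
};

/* Illustrative entries only; the codes here are placeholders, and the
 * descriptions echo strings quoted earlier in this file. */
static const struct ccp_error_code ccp_error_codes[] = {
    { 0x03, "ILLEGAL_FUNCTION_TYPE", EIO,
      "A non-supported function type was specified" },
    { 0x04, "ILLEGAL_FUNCTION_MODE", EIO,
      "A non-supported function mode was specified" },
};

static const struct ccp_error_code *
ccp_lookup_error(unsigned error)
{
    size_t i;

    for (i = 0; i < sizeof(ccp_error_codes) / sizeof(ccp_error_codes[0]); i++)
        if (ccp_error_codes[i].ce_code == error)
            return (&ccp_error_codes[i]);
    return (NULL);
}

int
main(void)
{
    const struct ccp_error_code *ec = ccp_lookup_error(0x03);

    printf("%s\n", (ec != NULL) ? ec->ce_name : "(reserved)");
    return (0);
}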
546 sc = qp->cq_softc; in ccp_intr_run_completions()
547 q = qp->cq_qindex; in ccp_intr_run_completions()
549 mtx_lock(&qp->cq_lock); in ccp_intr_run_completions()
557 idx = (headlo - (uint32_t)qp->desc_ring_bus_addr) / Q_DESC_SIZE; in ccp_intr_run_completions()
559 DPRINTF(sc->dev, "%s: hw head:%u sw head:%u\n", __func__, idx, in ccp_intr_run_completions()
560 qp->cq_head); in ccp_intr_run_completions()
562 while (qp->cq_head != idx) { in ccp_intr_run_completions()
563 DPRINTF(sc->dev, "%s: completing:%u\n", __func__, qp->cq_head); in ccp_intr_run_completions()
565 cctx = &qp->completions_ring[qp->cq_head]; in ccp_intr_run_completions()
566 if (cctx->callback_fn != NULL) { in ccp_intr_run_completions()
567 cctx->callback_fn(qp, cctx->session, in ccp_intr_run_completions()
568 cctx->callback_arg, 0); in ccp_intr_run_completions()
569 cctx->callback_fn = NULL; in ccp_intr_run_completions()
573 memset(&qp->desc_ring[qp->cq_head], 0, in ccp_intr_run_completions()
574 sizeof(qp->desc_ring[qp->cq_head])); in ccp_intr_run_completions()
576 qp->cq_head = (qp->cq_head + 1) % (1 << sc->ring_size_order); in ccp_intr_run_completions()
580 qp->cq_waiting = false; in ccp_intr_run_completions()
581 wakeup(&qp->cq_tail); in ccp_intr_run_completions()
584 DPRINTF(sc->dev, "%s: wrote sw head:%u\n", __func__, qp->cq_head); in ccp_intr_run_completions()
591 desc = &qp->desc_ring[idx]; in ccp_intr_run_completions()
595 mtx_unlock(&qp->cq_lock); in ccp_intr_run_completions()
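Both the error path and the completion path recover the hardware's position the same way: the reported head is a bus address, so subtracting the ring base and dividing by the descriptor size yields a ring index, and the software head then walks up to it modulo the ring size. A standalone sketch of that arithmetic with example numbers:

#include <stdint.h>
#include <stdio.h>

#define Q_DESC_SIZE 32

int
main(void)
{
    uint32_t ring_base = 0x34567000u;   /* example bus address, low 32 bits */
    unsigned order = 6;                 /* 64-entry ring */
    uint32_t headlo = ring_base + 5 * Q_DESC_SIZE;
    unsigned hw_idx, sw_head = 62;

    /* Convert the hardware head pointer back to a ring index. */
    hw_idx = (headlo - ring_base) / Q_DESC_SIZE;
    /* Complete entries up to it, wrapping at the ring size. */
    while (sw_head != hw_idx) {
        /* would run completions_ring[sw_head] callback here */
        sw_head = (sw_head + 1) % (1u << order);
    }
    printf("caught up at index %u\n", sw_head); /* prints 5 */
    return (0);
}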
605 DPRINTF(sc->dev, "%s: interrupt\n", __func__); in ccp_intr_handler()
612 for (i = 0; i < nitems(sc->queues); i++) { in ccp_intr_handler()
613 if ((sc->valid_queues & (1 << i)) == 0) in ccp_intr_handler()
621 DPRINTF(sc->dev, "%s: %x interrupts on queue %zu\n", __func__, in ccp_intr_handler()
633 ccp_intr_run_completions(&sc->queues[i], ints); in ccp_intr_handler()
636 device_printf(sc->dev, "%s: queue %zu stopped\n", in ccp_intr_handler()
640 /* Re-enable interrupts after processing */ in ccp_intr_handler()
641 for (i = 0; i < nitems(sc->queues); i++) { in ccp_intr_handler()
642 if ((sc->valid_queues & (1 << i)) == 0) in ccp_intr_handler()
656 for (i = 0; i < nitems(sc->queues); i++) { in ccp_intr_filter()
657 if ((sc->valid_queues & (1 << i)) == 0) in ccp_intr_filter()
673 n = pci_msix_count(sc->dev); in ccp_setup_interrupts()
675 device_printf(sc->dev, "%s: msix_count: %d\n", __func__, n); in ccp_setup_interrupts()
680 error = pci_alloc_msix(sc->dev, &nvec); in ccp_setup_interrupts()
682 device_printf(sc->dev, "%s: alloc_msix error: %d\n", __func__, in ccp_setup_interrupts()
687 device_printf(sc->dev, "%s: alloc_msix: 0 vectors\n", in ccp_setup_interrupts()
691 if (nvec > nitems(sc->intr_res)) { in ccp_setup_interrupts()
692 device_printf(sc->dev, "%s: too many vectors: %u\n", __func__, in ccp_setup_interrupts()
694 nvec = nitems(sc->intr_res); in ccp_setup_interrupts()
699 sc->intr_res[rid - 1] = bus_alloc_resource_any(sc->dev, in ccp_setup_interrupts()
701 if (sc->intr_res[rid - 1] == NULL) { in ccp_setup_interrupts()
702 device_printf(sc->dev, "%s: Failed to alloc IRQ resource\n", in ccp_setup_interrupts()
707 sc->intr_tag[rid - 1] = NULL; in ccp_setup_interrupts()
708 error = bus_setup_intr(sc->dev, sc->intr_res[rid - 1], in ccp_setup_interrupts()
710 ccp_intr_handler, sc, &sc->intr_tag[rid - 1]); in ccp_setup_interrupts()
712 device_printf(sc->dev, "%s: setup_intr: %d\n", in ccp_setup_interrupts()
715 sc->intr_count = nvec; in ccp_setup_interrupts()
725 for (i = 0; i < sc->intr_count; i++) { in ccp_release_interrupts()
726 if (sc->intr_tag[i] != NULL) in ccp_release_interrupts()
727 bus_teardown_intr(sc->dev, sc->intr_res[i], in ccp_release_interrupts()
728 sc->intr_tag[i]); in ccp_release_interrupts()
729 if (sc->intr_res[i] != NULL) in ccp_release_interrupts()
730 bus_release_resource(sc->dev, SYS_RES_IRQ, in ccp_release_interrupts()
731 rman_get_rid(sc->intr_res[i]), sc->intr_res[i]); in ccp_release_interrupts()
734 pci_release_msi(sc->dev); in ccp_release_interrupts()
765 sc->ring_size_order = g_ccp_ring_order; in ccp_hw_attach()
766 if (sc->ring_size_order < 6 || sc->ring_size_order > 16) { in ccp_hw_attach()
771 sc->valid_queues = ccp_read_4(sc, CMD_QUEUE_MASK_OFFSET); in ccp_hw_attach()
786 sc->hw_version = version & VERSION_NUM_MASK; in ccp_hw_attach()
787 sc->num_queues = (version >> VERSION_NUMVQM_SHIFT) & in ccp_hw_attach()
789 sc->num_lsb_entries = (version >> VERSION_LSBSIZE_SHIFT) & in ccp_hw_attach()
791 sc->hw_features = version & VERSION_CAP_MASK; in ccp_hw_attach()
804 for (; queue_idx < nitems(sc->queues); queue_idx++) { in ccp_hw_attach()
820 if (sc->ring_size_order != 0) in ccp_hw_attach()
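The version register is unpacked with shift-and-mask arithmetic. A sketch with placeholder shift and mask values standing in for the real VERSION_* constants:

#include <stdint.h>
#include <stdio.h>

/* Assumed field positions, for illustration only. */
#define VERSION_NUM_MASK      0x3f
#define VERSION_NUMVQM_SHIFT  15
#define VERSION_NUMVQM_MASK   0xf
#define VERSION_LSBSIZE_SHIFT 19
#define VERSION_LSBSIZE_MASK  0x3ff

int
main(void)
{
    uint32_t version = (8u << VERSION_LSBSIZE_SHIFT) |
        (5u << VERSION_NUMVQM_SHIFT) | 3u;  /* fabricated register value */

    printf("hw_version=%u queues=%u lsb_entries=%u\n",
        version & VERSION_NUM_MASK,
        (version >> VERSION_NUMVQM_SHIFT) & VERSION_NUMVQM_MASK,
        (version >> VERSION_LSBSIZE_SHIFT) & VERSION_LSBSIZE_MASK);
    return (0);
}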
836 for (i = 0; i < nitems(sc->queues); i++) in ccp_hw_detach()
856 desc = &qp->desc_ring[qp->cq_tail]; in ccp_passthrough()
859 desc->engine = CCP_ENGINE_PASSTHRU; in ccp_passthrough()
861 desc->pt.ioc = interrupt; in ccp_passthrough()
862 desc->pt.byteswap = swapmode; in ccp_passthrough()
863 desc->pt.bitwise = bitmode; in ccp_passthrough()
864 desc->length = len; in ccp_passthrough()
866 desc->src_lo = (uint32_t)src; in ccp_passthrough()
867 desc->src_hi = src >> 32; in ccp_passthrough()
868 desc->src_mem = src_type; in ccp_passthrough()
870 desc->dst_lo = (uint32_t)dst; in ccp_passthrough()
871 desc->dst_hi = dst >> 32; in ccp_passthrough()
872 desc->dst_mem = dst_type; in ccp_passthrough()
875 desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_KEY); in ccp_passthrough()
878 memcpy(&qp->completions_ring[qp->cq_tail], cctx, sizeof(*cctx)); in ccp_passthrough()
880 qp->cq_tail = (qp->cq_tail + 1) % (1 << qp->cq_softc->ring_size_order); in ccp_passthrough()
894 for (i = 0; i < sgl->sg_nseg && remain != 0; i++) { in ccp_passthrough_sgl()
895 seg = &sgl->sg_segs[i]; in ccp_passthrough_sgl()
896 /* crp lengths are int, so 32-bit min() is ok. */ in ccp_passthrough_sgl()
897 nb = min(remain, seg->ss_len); in ccp_passthrough_sgl()
901 seg->ss_paddr, CCP_MEMTYPE_SYSTEM, nb, in ccp_passthrough_sgl()
906 error = ccp_passthrough(qp, seg->ss_paddr, in ccp_passthrough_sgl()
914 remain -= nb; in ccp_passthrough_sgl()
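The pass-through SGL walk above consumes up to a byte budget from a scatter/gather list, clamping each chunk to the current segment. A minimal model of that loop:

#include <stdio.h>

struct seg {
    unsigned long ss_paddr;
    unsigned      ss_len;
};

int
main(void)
{
    struct seg segs[] = { { 0x1000, 4096 }, { 0x8000, 4096 } };
    unsigned remain = 6000, nb, i;

    for (i = 0; i < 2 && remain != 0; i++) {
        /* Clamp this chunk to the segment length. */
        nb = (remain < segs[i].ss_len) ? remain : segs[i].ss_len;
        printf("xfer %u bytes @ 0x%lx\n", nb, segs[i].ss_paddr);
        remain -= nb;
    }
    return (0);
}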
988 CTASSERT(PAGE_SIZE - ((uintptr_t)&SHA_H % PAGE_SIZE) >= sizeof(SHA_H));
1046 desc = &qp->desc_ring[qp->cq_tail]; in ccp_sha_single_desc()
1049 desc->engine = CCP_ENGINE_SHA; in ccp_sha_single_desc()
1050 desc->som = start; in ccp_sha_single_desc()
1051 desc->eom = end; in ccp_sha_single_desc()
1053 desc->sha.type = defn->engine_type; in ccp_sha_single_desc()
1054 desc->length = len; in ccp_sha_single_desc()
1057 desc->sha_len_lo = (uint32_t)msgbits; in ccp_sha_single_desc()
1058 desc->sha_len_hi = msgbits >> 32; in ccp_sha_single_desc()
1061 desc->src_lo = (uint32_t)addr; in ccp_sha_single_desc()
1062 desc->src_hi = addr >> 32; in ccp_sha_single_desc()
1063 desc->src_mem = CCP_MEMTYPE_SYSTEM; in ccp_sha_single_desc()
1065 desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_SHA); in ccp_sha_single_desc()
1067 qp->cq_tail = (qp->cq_tail + 1) % (1 << qp->cq_softc->ring_size_order); in ccp_sha_single_desc()
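SHA padding requires the total message length in bits, which the descriptor carries as two 32-bit halves (sha_len_lo/sha_len_hi above). A sketch of that split; the shift by 3 converting bytes to bits is the obvious step, though the listing does not show the driver computing it:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t msgsize = 0x123456789aULL;     /* bytes hashed, example */
    uint64_t msgbits = msgsize << 3;        /* length in bits */

    printf("sha_len_lo=0x%08x sha_len_hi=0x%08x\n",
        (unsigned)(uint32_t)msgbits, (unsigned)(msgbits >> 32));
    return (0);
}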
1093 CCP_MEMTYPE_SB, pmap_kextract((vm_offset_t)defn->H_vectors), in ccp_sha()
1094 CCP_MEMTYPE_SYSTEM, roundup2(defn->H_size, LSB_ENTRY_SIZE), in ccp_sha()
1102 for (i = 0; i < sgl_src->sg_nseg; i++) { in ccp_sha()
1103 seg = &sgl_src->sg_segs[i]; in ccp_sha()
1104 msgsize += seg->ss_len; in ccp_sha()
1105 error = ccp_sha_single_desc(qp, defn, seg->ss_paddr, in ccp_sha()
1106 seg->ss_len, i == 0, i == sgl_src->sg_nseg - 1, in ccp_sha()
1113 remaining = roundup2(defn->H_size, LSB_ENTRY_SIZE); in ccp_sha()
1115 for (i = 0; i < sgl_dst->sg_nseg; i++) { in ccp_sha()
1116 seg = &sgl_dst->sg_segs[i]; in ccp_sha()
1117 /* crp lengths are int, so 32-bit min() is ok. */ in ccp_sha()
1118 nb = min(remaining, seg->ss_len); in ccp_sha()
1120 error = ccp_passthrough(qp, seg->ss_paddr, CCP_MEMTYPE_SYSTEM, in ccp_sha()
1128 remaining -= nb; in ccp_sha()
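roundup2() above rounds up to a power-of-2 boundary using the usual sys/param.h idiom. A small demonstration, assuming the 32-byte (256-bit) LSB entry size:

#include <stdio.h>

#define roundup2(x, y)  (((x) + ((y) - 1)) & (~((y) - 1)))
#define LSB_ENTRY_SIZE  32  /* assumed: one LSB entry is 256 bits */

int
main(void)
{
    printf("%d %d %d\n", roundup2(20, LSB_ENTRY_SIZE),
        roundup2(32, LSB_ENTRY_SIZE), roundup2(48, LSB_ENTRY_SIZE));
    /* prints 32 32 64 */
    return (0);
}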
1152 * Translate CCP internal LSB hash format into a standard hash output.
1170 /* Swap the 256-bit value manually -- DMA engine can, but with limitations */ in ccp_sha_copy_result()
1172 if (defn->axf->hashsize > LSB_ENTRY_SIZE) in ccp_sha_copy_result()
1175 switch (defn->version) { in ccp_sha_copy_result()
1177 memcpy(output, buffer + 12, defn->axf->hashsize); in ccp_sha_copy_result()
1181 memcpy(output, buffer + XXX, defn->axf->hashsize); in ccp_sha_copy_result()
1185 memcpy(output, buffer, defn->axf->hashsize); in ccp_sha_copy_result()
1189 buffer + LSB_ENTRY_SIZE * 3 - defn->axf->hashsize, in ccp_sha_copy_result()
1190 defn->axf->hashsize - LSB_ENTRY_SIZE); in ccp_sha_copy_result()
1191 memcpy(output + defn->axf->hashsize - LSB_ENTRY_SIZE, buffer, in ccp_sha_copy_result()
1209 axf = s->hmac.auth_hash; in ccp_do_hmac_done()
1211 s->pending--; in ccp_do_hmac_done()
1214 crp->crp_etype = error; in ccp_do_hmac_done()
1219 axf->Init(&auth_ctx); in ccp_do_hmac_done()
1220 axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize); in ccp_do_hmac_done()
1221 ccp_sha_copy_result(ihash, s->hmac.res, s->hmac.auth_mode); in ccp_do_hmac_done()
1226 axf->Update(&auth_ctx, ihash, axf->hashsize); in ccp_do_hmac_done()
1227 axf->Final(s->hmac.res, &auth_ctx); in ccp_do_hmac_done()
1229 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { in ccp_do_hmac_done()
1230 crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len, in ccp_do_hmac_done()
1232 if (timingsafe_bcmp(s->hmac.res, ihash, s->hmac.hash_len) != 0) in ccp_do_hmac_done()
1233 crp->crp_etype = EBADMSG; in ccp_do_hmac_done()
1235 crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len, in ccp_do_hmac_done()
1236 s->hmac.res); in ccp_do_hmac_done()
1240 explicit_bzero(s->hmac.res, sizeof(s->hmac.res)); in ccp_do_hmac_done()
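The split above is the standard HMAC construction: the engine computes the inner digest H(ipad || data), and the host finishes with the outer digest H(opad || inner). A self-contained toy model of that structure, using a placeholder hash in place of the driver's SHA-2 transforms:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCKSIZE 64
#define HASHSIZE  4

/* FNV-1a as a stand-in hash, only to keep the example self-contained. */
static void
toyhash(const uint8_t *data, size_t len, uint8_t out[HASHSIZE])
{
    uint32_t h = 2166136261u;
    size_t i;

    for (i = 0; i < len; i++)
        h = (h ^ data[i]) * 16777619u;
    memcpy(out, &h, HASHSIZE);
}

int
main(void)
{
    uint8_t key[BLOCKSIZE] = { 0x0b }, ipad[BLOCKSIZE], opad[BLOCKSIZE];
    uint8_t msgbuf[BLOCKSIZE + 64], inner[HASHSIZE], outer[HASHSIZE];
    const char *msg = "hello";
    size_t i, msglen = strlen(msg);

    for (i = 0; i < BLOCKSIZE; i++) {
        ipad[i] = key[i] ^ 0x36;
        opad[i] = key[i] ^ 0x5c;
    }
    /* Inner hash: H(ipad || message); the CCP queue computes this part. */
    memcpy(msgbuf, ipad, BLOCKSIZE);
    memcpy(msgbuf + BLOCKSIZE, msg, msglen);
    toyhash(msgbuf, BLOCKSIZE + msglen, inner);
    /* Outer hash: H(opad || inner); done on the host, as above. */
    memcpy(msgbuf, opad, BLOCKSIZE);
    memcpy(msgbuf + BLOCKSIZE, inner, HASHSIZE);
    toyhash(msgbuf, BLOCKSIZE + HASHSIZE, outer);
    printf("%02x%02x%02x%02x\n", outer[0], outer[1], outer[2], outer[3]);
    return (0);
}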
1264 dev = qp->cq_softc->dev; in ccp_do_hmac()
1265 axf = s->hmac.auth_hash; in ccp_do_hmac()
1272 sglist_reset(qp->cq_sg_ulptx); in ccp_do_hmac()
1273 error = sglist_append(qp->cq_sg_ulptx, s->hmac.ipad, axf->blocksize); in ccp_do_hmac()
1276 if (crp->crp_aad_length != 0) { in ccp_do_hmac()
1277 error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp, in ccp_do_hmac()
1278 crp->crp_aad_start, crp->crp_aad_length); in ccp_do_hmac()
1282 error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp, in ccp_do_hmac()
1283 crp->crp_payload_start, crp->crp_payload_length); in ccp_do_hmac()
1288 /* Populate SGL for output -- use hmac.res buffer. */ in ccp_do_hmac()
1289 sglist_reset(qp->cq_sg_dst); in ccp_do_hmac()
1290 error = sglist_append(qp->cq_sg_dst, s->hmac.res, in ccp_do_hmac()
1291 roundup2(axf->hashsize, LSB_ENTRY_SIZE)); in ccp_do_hmac()
1295 error = ccp_sha(qp, s->hmac.auth_mode, qp->cq_sg_ulptx, qp->cq_sg_dst, in ccp_do_hmac()
1322 len--; in ccp_byteswap()
1323 for (i = 0; i < len; i++, len--) { in ccp_byteswap()
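The loop above reverses a buffer in place by walking both ends inward. The same idiom as a standalone program, with a guard added for very short buffers:

#include <stdio.h>
#include <string.h>

static void
reverse(char *data, size_t len)
{
    size_t i;
    char t;

    if (len < 2)
        return;
    len--;
    for (i = 0; i < len; i++, len--) {
        t = data[i];
        data[i] = data[len];
        data[len] = t;
    }
}

int
main(void)
{
    char buf[] = "0123456789abcdef";

    reverse(buf, strlen(buf));
    printf("%s\n", buf);    /* prints fedcba9876543210 */
    return (0);
}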
1336 explicit_bzero(&s->blkcipher.iv, sizeof(s->blkcipher.iv)); in ccp_blkcipher_done()
1340 s->pending--; in ccp_blkcipher_done()
1343 crp->crp_etype = error; in ccp_blkcipher_done()
1345 DPRINTF(qp->cq_softc->dev, "%s: qp=%p crp=%p\n", __func__, qp, crp); in ccp_blkcipher_done()
1359 if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) in ccp_collect_iv()
1362 if (csp->csp_cipher_alg == CRYPTO_AES_XTS && in ccp_collect_iv()
1363 csp->csp_ivlen < AES_BLOCK_LEN) in ccp_collect_iv()
1364 memset(&iv[csp->csp_ivlen], 0, AES_BLOCK_LEN - csp->csp_ivlen); in ccp_collect_iv()
1368 csp->csp_ivlen); in ccp_collect_iv()
1374 if (csp->csp_cipher_alg != CRYPTO_AES_XTS) in ccp_collect_iv()
1384 sglist_reset(qp->cq_sg_ulptx); in ccp_do_pst_to_lsb()
1385 error = sglist_append(qp->cq_sg_ulptx, __DECONST(void *, src), len); in ccp_do_pst_to_lsb()
1389 error = ccp_passthrough_sgl(qp, lsbaddr, true, qp->cq_sg_ulptx, len, in ccp_do_pst_to_lsb()
1405 dev = qp->cq_softc->dev; in ccp_do_xts()
1409 crp->crp_payload_length) { in ccp_do_xts()
1416 for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) { in ccp_do_xts()
1419 seg = &qp->cq_sg_ulptx->sg_segs[i]; in ccp_do_xts()
1421 desc = &qp->desc_ring[qp->cq_tail]; in ccp_do_xts()
1422 desc->engine = CCP_ENGINE_XTS_AES; in ccp_do_xts()
1423 desc->som = (i == 0); in ccp_do_xts()
1424 desc->eom = (i == qp->cq_sg_ulptx->sg_nseg - 1); in ccp_do_xts()
1425 desc->ioc = (desc->eom && cctx != NULL); in ccp_do_xts()
1427 __func__, qp->cq_tail, (int)desc->som, (int)desc->eom, in ccp_do_xts()
1428 (int)desc->ioc, (int)dir); in ccp_do_xts()
1430 if (desc->ioc) in ccp_do_xts()
1431 memcpy(&qp->completions_ring[qp->cq_tail], cctx, in ccp_do_xts()
1434 desc->aes_xts.encrypt = dir; in ccp_do_xts()
1435 desc->aes_xts.type = s->blkcipher.cipher_type; in ccp_do_xts()
1436 desc->aes_xts.size = usize; in ccp_do_xts()
1439 qp->cq_tail, (unsigned)desc->aes_xts.type, in ccp_do_xts()
1440 (unsigned)desc->aes_xts.size); in ccp_do_xts()
1442 desc->length = seg->ss_len; in ccp_do_xts()
1443 desc->src_lo = (uint32_t)seg->ss_paddr; in ccp_do_xts()
1444 desc->src_hi = (seg->ss_paddr >> 32); in ccp_do_xts()
1445 desc->src_mem = CCP_MEMTYPE_SYSTEM; in ccp_do_xts()
1447 /* Crypt in-place */ in ccp_do_xts()
1448 desc->dst_lo = desc->src_lo; in ccp_do_xts()
1449 desc->dst_hi = desc->src_hi; in ccp_do_xts()
1450 desc->dst_mem = desc->src_mem; in ccp_do_xts()
1452 desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY); in ccp_do_xts()
1453 desc->key_hi = 0; in ccp_do_xts()
1454 desc->key_mem = CCP_MEMTYPE_SB; in ccp_do_xts()
1456 desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV); in ccp_do_xts()
1458 qp->cq_tail = (qp->cq_tail + 1) % in ccp_do_xts()
1459 (1 << qp->cq_softc->ring_size_order); in ccp_do_xts()
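Multi-descriptor jobs mark only the first descriptor start-of-message and only the last end-of-message, and request an interrupt only on the final one when a completion context is queued. A trivial model of the flag assignment:

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    unsigned i, nseg = 3;
    bool have_cctx = true;

    for (i = 0; i < nseg; i++) {
        bool som = (i == 0);
        bool eom = (i == nseg - 1);
        bool ioc = eom && have_cctx;

        printf("seg %u: som=%d eom=%d ioc=%d\n", i, som, eom, ioc);
    }
    return (0);
}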
1477 dev = qp->cq_softc->dev; in ccp_do_blkcipher()
1479 if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0) { in ccp_do_blkcipher()
1483 if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0) { in ccp_do_blkcipher()
1485 crp->crp_payload_length); in ccp_do_blkcipher()
1491 * to process it. Non-compliant inputs aren't bogus, just not doable in ccp_do_blkcipher()
1494 for (i = 0; i < qp->cq_sg_crp->sg_nseg; i++) in ccp_do_blkcipher()
1495 if ((qp->cq_sg_crp->sg_segs[i].ss_len % AES_BLOCK_LEN) != 0) { in ccp_do_blkcipher()
1497 qp->cq_sg_crp->sg_segs[i].ss_len); in ccp_do_blkcipher()
1502 csp = crypto_get_params(crp->crp_session); in ccp_do_blkcipher()
1503 ccp_collect_iv(crp, csp, s->blkcipher.iv); in ccp_do_blkcipher()
1504 iv_len = csp->csp_ivlen; in ccp_do_blkcipher()
1505 if (csp->csp_cipher_alg == CRYPTO_AES_XTS) in ccp_do_blkcipher()
1508 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) in ccp_do_blkcipher()
1515 s->blkcipher.iv, iv_len); in ccp_do_blkcipher()
1526 switch (csp->csp_cipher_alg) { in ccp_do_blkcipher()
1530 crp->crp_payload_length) in ccp_do_blkcipher()
1535 __func__, crp->crp_payload_length); in ccp_do_blkcipher()
1541 keydata = s->blkcipher.enckey; in ccp_do_blkcipher()
1542 keydata_len = s->blkcipher.key_len; in ccp_do_blkcipher()
1548 if (csp->csp_cipher_alg == CRYPTO_AES_XTS) in ccp_do_blkcipher()
1555 if (csp->csp_cipher_alg == CRYPTO_AES_XTS) { in ccp_do_blkcipher()
1557 * XTS mode uses 2 256-bit vectors for the primary key and the in ccp_do_blkcipher()
1558 * tweak key. For 128-bit keys, the vectors are zero-padded. in ccp_do_blkcipher()
1560 * After byteswapping the combined OCF-provided K1:K2 vector in ccp_do_blkcipher()
1573 /* Zero-pad 128-bit keys */ in ccp_do_blkcipher()
1597 sglist_reset(qp->cq_sg_ulptx); in ccp_do_blkcipher()
1598 error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp, in ccp_do_blkcipher()
1599 crp->crp_payload_start, crp->crp_payload_length); in ccp_do_blkcipher()
1604 (void *)PHYS_TO_DMAP(qp->cq_sg_ulptx->sg_segs[0].ss_paddr), " "); in ccp_do_blkcipher()
1606 DPRINTF(dev, "%s: starting AES ops @ %u\n", __func__, qp->cq_tail); in ccp_do_blkcipher()
1608 if (ccp_queue_get_ring_space(qp) < qp->cq_sg_ulptx->sg_nseg) in ccp_do_blkcipher()
1611 if (csp->csp_cipher_alg == CRYPTO_AES_XTS) in ccp_do_blkcipher()
1614 for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) { in ccp_do_blkcipher()
1617 seg = &qp->cq_sg_ulptx->sg_segs[i]; in ccp_do_blkcipher()
1619 desc = &qp->desc_ring[qp->cq_tail]; in ccp_do_blkcipher()
1620 desc->engine = CCP_ENGINE_AES; in ccp_do_blkcipher()
1621 desc->som = (i == 0); in ccp_do_blkcipher()
1622 desc->eom = (i == qp->cq_sg_ulptx->sg_nseg - 1); in ccp_do_blkcipher()
1623 desc->ioc = (desc->eom && cctx != NULL); in ccp_do_blkcipher()
1625 __func__, qp->cq_tail, (int)desc->som, (int)desc->eom, in ccp_do_blkcipher()
1626 (int)desc->ioc, (int)dir); in ccp_do_blkcipher()
1628 if (desc->ioc) in ccp_do_blkcipher()
1629 memcpy(&qp->completions_ring[qp->cq_tail], cctx, in ccp_do_blkcipher()
1632 desc->aes.encrypt = dir; in ccp_do_blkcipher()
1633 desc->aes.mode = s->blkcipher.cipher_mode; in ccp_do_blkcipher()
1634 desc->aes.type = s->blkcipher.cipher_type; in ccp_do_blkcipher()
1635 if (csp->csp_cipher_alg == CRYPTO_AES_ICM) in ccp_do_blkcipher()
1637 * Size of CTR value in bits, minus 1. ICM mode uses all in ccp_do_blkcipher()
1640 desc->aes.size = 127; in ccp_do_blkcipher()
1643 qp->cq_tail, (unsigned)desc->aes.mode, in ccp_do_blkcipher()
1644 (unsigned)desc->aes.type, (unsigned)desc->aes.size); in ccp_do_blkcipher()
1646 desc->length = seg->ss_len; in ccp_do_blkcipher()
1647 desc->src_lo = (uint32_t)seg->ss_paddr; in ccp_do_blkcipher()
1648 desc->src_hi = (seg->ss_paddr >> 32); in ccp_do_blkcipher()
1649 desc->src_mem = CCP_MEMTYPE_SYSTEM; in ccp_do_blkcipher()
1651 /* Crypt in-place */ in ccp_do_blkcipher()
1652 desc->dst_lo = desc->src_lo; in ccp_do_blkcipher()
1653 desc->dst_hi = desc->src_hi; in ccp_do_blkcipher()
1654 desc->dst_mem = desc->src_mem; in ccp_do_blkcipher()
1656 desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY); in ccp_do_blkcipher()
1657 desc->key_hi = 0; in ccp_do_blkcipher()
1658 desc->key_mem = CCP_MEMTYPE_SB; in ccp_do_blkcipher()
1660 desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV); in ccp_do_blkcipher()
1662 qp->cq_tail = (qp->cq_tail + 1) % in ccp_do_blkcipher()
1663 (1 << qp->cq_softc->ring_size_order); in ccp_do_blkcipher()
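The submission paths above first check that enough ring slots are free for every segment. The driver's ccp_queue_get_ring_space() is not among these matches; one common way to compute free slots in a power-of-2 ring, keeping a single slot unused so that head equal to tail means empty, is sketched below (the formula is an assumption, not lifted from the driver):

#include <stdio.h>

static unsigned
ring_space(unsigned head, unsigned tail, unsigned order)
{
    unsigned mask = (1u << order) - 1;

    /* Free slots; one slot stays unused to distinguish empty from full. */
    return ((head - tail - 1) & mask);
}

int
main(void)
{
    printf("empty: %u\n", ring_space(5, 5, 6)); /* 63 */
    printf("full:  %u\n", ring_space(5, 4, 6)); /* 0 */
    return (0);
}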
1686 explicit_bzero(&s->blkcipher.iv, sizeof(s->blkcipher.iv)); in ccp_authenc_done()
1704 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) in ccp_authenc()
1712 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) in ccp_authenc()
1726 if (ccp_queue_get_ring_space(qp) < qp->cq_sg_ulptx->sg_nseg) in ccp_do_ghash_aad()
1729 for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) { in ccp_do_ghash_aad()
1730 seg = &qp->cq_sg_ulptx->sg_segs[i]; in ccp_do_ghash_aad()
1732 desc = &qp->desc_ring[qp->cq_tail]; in ccp_do_ghash_aad()
1734 desc->engine = CCP_ENGINE_AES; in ccp_do_ghash_aad()
1735 desc->aes.mode = CCP_AES_MODE_GHASH; in ccp_do_ghash_aad()
1736 desc->aes.type = s->blkcipher.cipher_type; in ccp_do_ghash_aad()
1737 desc->aes.encrypt = CCP_AES_MODE_GHASH_AAD; in ccp_do_ghash_aad()
1739 desc->som = (i == 0); in ccp_do_ghash_aad()
1740 desc->length = seg->ss_len; in ccp_do_ghash_aad()
1742 desc->src_lo = (uint32_t)seg->ss_paddr; in ccp_do_ghash_aad()
1743 desc->src_hi = (seg->ss_paddr >> 32); in ccp_do_ghash_aad()
1744 desc->src_mem = CCP_MEMTYPE_SYSTEM; in ccp_do_ghash_aad()
1746 desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV); in ccp_do_ghash_aad()
1748 desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY); in ccp_do_ghash_aad()
1749 desc->key_mem = CCP_MEMTYPE_SB; in ccp_do_ghash_aad()
1751 qp->cq_tail = (qp->cq_tail + 1) % in ccp_do_ghash_aad()
1752 (1 << qp->cq_softc->ring_size_order); in ccp_do_ghash_aad()
1766 desc = &qp->desc_ring[qp->cq_tail]; in ccp_do_gctr()
1768 desc->engine = CCP_ENGINE_AES; in ccp_do_gctr()
1769 desc->aes.mode = CCP_AES_MODE_GCTR; in ccp_do_gctr()
1770 desc->aes.type = s->blkcipher.cipher_type; in ccp_do_gctr()
1771 desc->aes.encrypt = dir; in ccp_do_gctr()
1772 desc->aes.size = 8 * (seg->ss_len % GMAC_BLOCK_LEN) - 1; in ccp_do_gctr()
1774 desc->som = som; in ccp_do_gctr()
1775 desc->eom = eom; in ccp_do_gctr()
1778 desc->length = roundup2(seg->ss_len, GMAC_BLOCK_LEN); in ccp_do_gctr()
1780 desc->dst_lo = desc->src_lo = (uint32_t)seg->ss_paddr; in ccp_do_gctr()
1781 desc->dst_hi = desc->src_hi = seg->ss_paddr >> 32; in ccp_do_gctr()
1782 desc->dst_mem = desc->src_mem = CCP_MEMTYPE_SYSTEM; in ccp_do_gctr()
1784 desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV); in ccp_do_gctr()
1786 desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY); in ccp_do_gctr()
1787 desc->key_mem = CCP_MEMTYPE_SB; in ccp_do_gctr()
1789 qp->cq_tail = (qp->cq_tail + 1) % in ccp_do_gctr()
1790 (1 << qp->cq_softc->ring_size_order); in ccp_do_gctr()
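The GCTR size field above encodes the count of valid bits minus one for the final partial block. For a segment length that is an exact block multiple, the expression yields -1, which wraps to the all-ones encoding in the descriptor's unsigned bitfield, plausibly meaning the full 128-bit width (the 7-bit field width here is an assumption):

#include <stdio.h>

#define GMAC_BLOCK_LEN 16

int
main(void)
{
    unsigned ss_len;

    for (ss_len = 15; ss_len <= 17; ss_len++)
        printf("ss_len=%u size=%d (7-bit field=%u)\n", ss_len,
            8 * (int)(ss_len % GMAC_BLOCK_LEN) - 1,
            (8 * (ss_len % GMAC_BLOCK_LEN) - 1) & 0x7f);
    /* ss_len=16 prints size=-1, field=127 */
    return (0);
}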
1802 desc = &qp->desc_ring[qp->cq_tail]; in ccp_do_ghash_final()
1804 desc->engine = CCP_ENGINE_AES; in ccp_do_ghash_final()
1805 desc->aes.mode = CCP_AES_MODE_GHASH; in ccp_do_ghash_final()
1806 desc->aes.type = s->blkcipher.cipher_type; in ccp_do_ghash_final()
1807 desc->aes.encrypt = CCP_AES_MODE_GHASH_FINAL; in ccp_do_ghash_final()
1809 desc->length = GMAC_BLOCK_LEN; in ccp_do_ghash_final()
1811 desc->src_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_GHASH_IN); in ccp_do_ghash_final()
1812 desc->src_mem = CCP_MEMTYPE_SB; in ccp_do_ghash_final()
1814 desc->lsb_ctx_id = ccp_queue_lsb_entry(qp, LSB_ENTRY_IV); in ccp_do_ghash_final()
1816 desc->key_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_KEY); in ccp_do_ghash_final()
1817 desc->key_mem = CCP_MEMTYPE_SB; in ccp_do_ghash_final()
1819 desc->dst_lo = ccp_queue_lsb_address(qp, LSB_ENTRY_GHASH); in ccp_do_ghash_final()
1820 desc->dst_mem = CCP_MEMTYPE_SB; in ccp_do_ghash_final()
1822 qp->cq_tail = (qp->cq_tail + 1) % in ccp_do_ghash_final()
1823 (1 << qp->cq_softc->ring_size_order); in ccp_do_ghash_final()
1836 s->pending--; in ccp_gcm_done()
1839 crp->crp_etype = error; in ccp_gcm_done()
1844 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) in ccp_gcm_done()
1848 crypto_copydata(crp, crp->crp_digest_start, s->gmac.hash_len, tag); in ccp_gcm_done()
1851 if (timingsafe_bcmp(tag, s->gmac.final_block, s->gmac.hash_len) != 0) in ccp_gcm_done()
1852 crp->crp_etype = EBADMSG; in ccp_gcm_done()
1855 explicit_bzero(&s->blkcipher.iv, sizeof(s->blkcipher.iv)); in ccp_gcm_done()
1856 explicit_bzero(&s->gmac.final_block, sizeof(s->gmac.final_block)); in ccp_gcm_done()
1870 if (s->blkcipher.key_len == 0) in ccp_gcm()
1873 dev = qp->cq_softc->dev; in ccp_gcm()
1875 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) in ccp_gcm()
1881 memset(s->blkcipher.iv, 0, sizeof(s->blkcipher.iv)); in ccp_gcm()
1884 csp = crypto_get_params(crp->crp_session); in ccp_gcm()
1885 ccp_collect_iv(crp, csp, s->blkcipher.iv); in ccp_gcm()
1888 ccp_byteswap(s->blkcipher.enckey, s->blkcipher.key_len); in ccp_gcm()
1891 be64enc(s->gmac.final_block, (uint64_t)crp->crp_aad_length * 8); in ccp_gcm()
1892 be64enc(&s->gmac.final_block[8], (uint64_t)crp->crp_payload_length * 8); in ccp_gcm()
1896 s->blkcipher.iv, 32); in ccp_gcm()
1900 s->blkcipher.enckey, s->blkcipher.key_len); in ccp_gcm()
1904 ccp_queue_lsb_address(qp, LSB_ENTRY_GHASH_IN), s->gmac.final_block, in ccp_gcm()
1909 /* First step - compute GHASH over AAD */ in ccp_gcm()
1910 if (crp->crp_aad_length != 0) { in ccp_gcm()
1911 sglist_reset(qp->cq_sg_ulptx); in ccp_gcm()
1912 error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp, in ccp_gcm()
1913 crp->crp_aad_start, crp->crp_aad_length); in ccp_gcm()
1917 /* This engine cannot process non-block-multiple AAD data. */ in ccp_gcm()
1918 for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) in ccp_gcm()
1919 if ((qp->cq_sg_ulptx->sg_segs[i].ss_len % in ccp_gcm()
1923 qp->cq_sg_ulptx->sg_segs[i].ss_len); in ccp_gcm()
1933 sglist_reset(qp->cq_sg_ulptx); in ccp_gcm()
1934 error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp, in ccp_gcm()
1935 crp->crp_payload_start, crp->crp_payload_length); in ccp_gcm()
1941 * size for the HW to process it. Non-compliant inputs aren't bogus, in ccp_gcm()
1945 * segment inputs, but it will still write out a block-sized plaintext in ccp_gcm()
1949 for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) in ccp_gcm()
1950 if ((qp->cq_sg_ulptx->sg_segs[i].ss_len % AES_BLOCK_LEN) != 0) { in ccp_gcm()
1952 qp->cq_sg_ulptx->sg_segs[i].ss_len); in ccp_gcm()
1956 for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) { in ccp_gcm()
1959 seg = &qp->cq_sg_ulptx->sg_segs[i]; in ccp_gcm()
1961 (i == 0 && crp->crp_aad_length == 0), in ccp_gcm()
1962 i == (qp->cq_sg_ulptx->sg_nseg - 1)); in ccp_gcm()
1969 s->blkcipher.iv, AES_BLOCK_LEN); in ccp_gcm()
1983 sglist_reset(qp->cq_sg_ulptx); in ccp_gcm()
1985 error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp, in ccp_gcm()
1986 crp->crp_digest_start, s->gmac.hash_len); in ccp_gcm()
1992 error = sglist_append(qp->cq_sg_ulptx, s->gmac.final_block, in ccp_gcm()
1993 s->gmac.hash_len); in ccp_gcm()
1997 ccp_queue_lsb_address(qp, LSB_ENTRY_GHASH), false, qp->cq_sg_ulptx, in ccp_gcm()
1998 s->gmac.hash_len, true, &ctx); in ccp_gcm()
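The final GHASH block written above is the GCM spec's length block: the big-endian 64-bit lengths, in bits, of the AAD and the ciphertext. A sketch, with a local stand-in for the sys/endian.h be64enc():

#include <stdint.h>
#include <stdio.h>

static void
be64enc_buf(uint8_t *p, uint64_t v)     /* stand-in for be64enc() */
{
    int i;

    for (i = 0; i < 8; i++)
        p[i] = v >> (56 - 8 * i);
}

int
main(void)
{
    uint8_t final_block[16];
    uint64_t aad_len = 20, payload_len = 64;    /* bytes, example */
    int i;

    be64enc_buf(final_block, aad_len * 8);          /* len(A) in bits */
    be64enc_buf(final_block + 8, payload_len * 8);  /* len(C) in bits */
    for (i = 0; i < 16; i++)
        printf("%02x", final_block[i]);
    printf("\n");   /* prints 00000000000000a00000000000000200 */
    return (0);
}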
2012 for (i = c; i > 0; i -= sizeof(*buf)) { in random_ccp_read()
2058 sc = qp->cq_softc; in db_ccp_show_queue_hw()
2059 q = qp->cq_qindex; in db_ccp_show_queue_hw()
2095 (ec != NULL) ? ec->ce_name : "(reserved)", error, esource, in db_ccp_show_queue_hw()
2098 db_printf(" Error description: %s\n", ec->ce_desc); in db_ccp_show_queue_hw()
2100 i = (headlo - (uint32_t)qp->desc_ring_bus_addr) / Q_DESC_SIZE; in db_ccp_show_queue_hw()
2102 (void *)&qp->desc_ring[i], " "); in db_ccp_show_queue_hw()