111600SVikram.Hegde@Sun.COM /*
211600SVikram.Hegde@Sun.COM * CDDL HEADER START
311600SVikram.Hegde@Sun.COM *
411600SVikram.Hegde@Sun.COM * The contents of this file are subject to the terms of the
511600SVikram.Hegde@Sun.COM * Common Development and Distribution License (the "License").
611600SVikram.Hegde@Sun.COM * You may not use this file except in compliance with the License.
711600SVikram.Hegde@Sun.COM *
811600SVikram.Hegde@Sun.COM * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
911600SVikram.Hegde@Sun.COM * or http://www.opensolaris.org/os/licensing.
1011600SVikram.Hegde@Sun.COM * See the License for the specific language governing permissions
1111600SVikram.Hegde@Sun.COM * and limitations under the License.
1211600SVikram.Hegde@Sun.COM *
1311600SVikram.Hegde@Sun.COM * When distributing Covered Code, include this CDDL HEADER in each
1411600SVikram.Hegde@Sun.COM * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
1511600SVikram.Hegde@Sun.COM * If applicable, add the following below this CDDL HEADER, with the
1611600SVikram.Hegde@Sun.COM * fields enclosed by brackets "[]" replaced with your own identifying
1711600SVikram.Hegde@Sun.COM * information: Portions Copyright [yyyy] [name of copyright owner]
1811600SVikram.Hegde@Sun.COM *
1911600SVikram.Hegde@Sun.COM * CDDL HEADER END
2011600SVikram.Hegde@Sun.COM */
2111600SVikram.Hegde@Sun.COM /*
2212513Sfrank.van.der.linden@oracle.com * Portions Copyright (c) 2010, Oracle and/or its affiliates.
2312513Sfrank.van.der.linden@oracle.com * All rights reserved.
2411600SVikram.Hegde@Sun.COM */
2511600SVikram.Hegde@Sun.COM
2611600SVikram.Hegde@Sun.COM /*
2711600SVikram.Hegde@Sun.COM * Copyright (c) 2009, Intel Corporation.
2811600SVikram.Hegde@Sun.COM * All rights reserved.
2911600SVikram.Hegde@Sun.COM */
3011600SVikram.Hegde@Sun.COM
3111600SVikram.Hegde@Sun.COM #include <sys/ddi.h>
3211600SVikram.Hegde@Sun.COM #include <sys/archsystm.h>
3311600SVikram.Hegde@Sun.COM #include <vm/hat_i86.h>
3411600SVikram.Hegde@Sun.COM #include <sys/types.h>
35*13050Sfrank.van.der.linden@oracle.com #include <sys/cpu.h>
3611600SVikram.Hegde@Sun.COM #include <sys/sysmacros.h>
3711600SVikram.Hegde@Sun.COM #include <sys/immu.h>
3811600SVikram.Hegde@Sun.COM
/* invalidation queue table entry size: each descriptor is 128 bits (16 bytes) */
#define	QINV_ENTRY_SIZE		0x10

/* max value of Queue Size field of Invalidation Queue Address Register */
#define	QINV_MAX_QUEUE_SIZE	0x7

/* status data size of invalidation wait descriptor (one DWORD per slot) */
#define	QINV_SYNC_DATA_SIZE	0x4

/*
 * invalidation queue head and tail:
 * the head/tail registers hold a byte offset; bits 18:4 index 16-byte
 * descriptors, so the tail index is shifted left by 4 before being written.
 */
#define	QINV_IQA_HEAD(QH)	BITX((QH), 18, 4)
#define	QINV_IQA_TAIL_SHIFT	4
5111600SVikram.Hegde@Sun.COM
/*
 * invalidation queue entry structure: one 128-bit queued-invalidation
 * descriptor as fetched by the IOMMU hardware.  The low 4 bits of 'lo'
 * encode the descriptor type (see INV_DSC_TYPE below).
 */
typedef struct qinv_inv_dsc {
	uint64_t lo;	/* low quadword: type field plus type-specific bits */
	uint64_t hi;	/* high quadword: address/status-specific bits */
} qinv_dsc_t;
5711600SVikram.Hegde@Sun.COM
/*
 * physically contiguous pages for an invalidation queue region
 * (used both for the descriptor table and the wait-status memory).
 */
typedef struct qinv_mem {
	kmutex_t	qinv_mem_lock;		/* serializes head/tail updates */
	ddi_dma_handle_t qinv_mem_dma_hdl;	/* DMA handle for the region */
	ddi_acc_handle_t qinv_mem_acc_hdl;	/* access handle for the region */
	caddr_t		qinv_mem_vaddr;		/* kernel virtual address */
	paddr_t		qinv_mem_paddr;		/* physical base, programmed into hw */
	uint_t		qinv_mem_size;		/* number of entries, not bytes */
	uint16_t	qinv_mem_head;		/* sw copy of hw head index */
	uint16_t	qinv_mem_tail;		/* next free slot index */
} qinv_mem_t;
6911600SVikram.Hegde@Sun.COM

/*
 * invalidation queue state
 * This structure describes the state information of the
 * invalidation queue table and related status memory for
 * invalidation wait descriptor
 *
 * qinv_table - invalidation queue table
 * qinv_sync - sync status memory for invalidation wait descriptor
 */
typedef struct qinv {
	qinv_mem_t		qinv_table;	/* descriptor ring read by hw */
	qinv_mem_t		qinv_sync;	/* status DWORDs written by hw */
} qinv_t;
8411600SVikram.Hegde@Sun.COM
static void immu_qinv_inv_wait(immu_inv_wait_t *iwp);

/*
 * Flush-operations vector installed on an IOMMU once queued invalidation
 * is running (see immu_qinv_startup()); callers flush through this table.
 */
static struct immu_flushops immu_qinv_flushops = {
	immu_qinv_context_fsi,	/* context cache, function-selective */
	immu_qinv_context_dsi,	/* context cache, domain-selective */
	immu_qinv_context_gbl,	/* context cache, global */
	immu_qinv_iotlb_psi,	/* IOTLB, page-selective */
	immu_qinv_iotlb_dsi,	/* IOTLB, domain-selective */
	immu_qinv_iotlb_gbl,	/* IOTLB, global */
	immu_qinv_inv_wait	/* wait for a previously posted flush */
};
9611600SVikram.Hegde@Sun.COM
/* helper macro for making queue invalidation descriptor */
/* descriptor type is encoded in the low 4 bits of the low quadword */
#define	INV_DSC_TYPE(dsc)	((dsc)->lo & 0xF)

/* context-cache invalidate descriptor (type 1): fm/sid/did/granularity */
#define	CC_INV_DSC_HIGH (0)
#define	CC_INV_DSC_LOW(fm, sid, did, g)	(((uint64_t)(fm) << 48) | \
	((uint64_t)(sid) << 32) | \
	((uint64_t)(did) << 16) | \
	((uint64_t)(g) << 4) | \
	1)

/* IOTLB invalidate descriptor (type 2): address, invalidate-hint, addr-mask */
#define	IOTLB_INV_DSC_HIGH(addr, ih, am) (((uint64_t)(addr)) | \
	((uint64_t)(ih) << 6) |	\
	((uint64_t)(am)))

/* dr/dw = drain-reads/drain-writes, g = granularity */
#define	IOTLB_INV_DSC_LOW(did, dr, dw, g) (((uint64_t)(did) << 16) | \
	((uint64_t)(dr) << 7) | \
	((uint64_t)(dw) << 6) | \
	((uint64_t)(g) << 4) | \
	2)

/* device-IOTLB invalidate descriptor (type 3) */
#define	DEV_IOTLB_INV_DSC_HIGH(addr, s) (((uint64_t)(addr)) | (s))

#define	DEV_IOTLB_INV_DSC_LOW(sid, max_invs_pd) ( \
	((uint64_t)(sid) << 32) | \
	((uint64_t)(max_invs_pd) << 16) | \
	3)

/* interrupt entry cache invalidate descriptor (type 4): index/index-mask */
#define	IEC_INV_DSC_HIGH (0)
#define	IEC_INV_DSC_LOW(idx, im, g)	(((uint64_t)(idx) << 32) | \
	((uint64_t)(im) << 27) | \
	((uint64_t)(g) << 4) | \
	4)

/* invalidation wait descriptor (type 5): hi holds the status DWORD address */
#define	INV_WAIT_DSC_HIGH(saddr) ((uint64_t)(saddr))

/* sdata = status written on completion, fn = fence, sw = status write, */
/* iflag = interrupt on completion */
#define	INV_WAIT_DSC_LOW(sdata, fn, sw, iflag) (((uint64_t)(sdata) << 32) | \
	((uint64_t)(fn) << 6) | \
	((uint64_t)(sw) << 5) | \
	((uint64_t)(iflag) << 4) | \
	5)
13611600SVikram.Hegde@Sun.COM
/*
 * QS field of Invalidation Queue Address Register
 * the size of invalidation queue is 1 << (qinv_iqa_qs + 8)
 * (default 6 => 16384 entries; clamped to QINV_MAX_QUEUE_SIZE in qinv_setup)
 */
static uint_t qinv_iqa_qs = 6;
14211600SVikram.Hegde@Sun.COM
/*
 * the invalidate descriptor type of queued invalidation interface;
 * indexed by the INV_DSC_TYPE() value, last slot is the out-of-range catchall
 */
static char *qinv_dsc_type[] = {
	"Reserved",
	"Context Cache Invalidate Descriptor",
	"IOTLB Invalidate Descriptor",
	"Device-IOTLB Invalidate Descriptor",
	"Interrupt Entry Cache Invalidate Descriptor",
	"Invalidation Wait Descriptor",
	"Incorrect queue invalidation type"
};

#define	QINV_MAX_DSC_TYPE	(sizeof (qinv_dsc_type) / sizeof (char *))
15711600SVikram.Hegde@Sun.COM
15811600SVikram.Hegde@Sun.COM /*
15911600SVikram.Hegde@Sun.COM * the queued invalidation interface functions
16011600SVikram.Hegde@Sun.COM */
16111600SVikram.Hegde@Sun.COM static void qinv_submit_inv_dsc(immu_t *immu, qinv_dsc_t *dsc);
16211600SVikram.Hegde@Sun.COM static void qinv_context_common(immu_t *immu, uint8_t function_mask,
16311600SVikram.Hegde@Sun.COM uint16_t source_id, uint_t domain_id, ctt_inv_g_t type);
16411600SVikram.Hegde@Sun.COM static void qinv_iotlb_common(immu_t *immu, uint_t domain_id,
16511600SVikram.Hegde@Sun.COM uint64_t addr, uint_t am, uint_t hint, tlb_inv_g_t type);
16611600SVikram.Hegde@Sun.COM static void qinv_iec_common(immu_t *immu, uint_t iidx,
16711600SVikram.Hegde@Sun.COM uint_t im, uint_t g);
168*13050Sfrank.van.der.linden@oracle.com static void immu_qinv_inv_wait(immu_inv_wait_t *iwp);
169*13050Sfrank.van.der.linden@oracle.com static void qinv_wait_sync(immu_t *immu, immu_inv_wait_t *iwp);
17011600SVikram.Hegde@Sun.COM /*LINTED*/
17111600SVikram.Hegde@Sun.COM static void qinv_dev_iotlb_common(immu_t *immu, uint16_t sid,
17211600SVikram.Hegde@Sun.COM uint64_t addr, uint_t size, uint_t max_invs_pd);
17311600SVikram.Hegde@Sun.COM
17411600SVikram.Hegde@Sun.COM
/*
 * Submit one invalidation request descriptor to the hardware queue.
 *
 * Copies *dsc into the next free ring slot and advances the tail register
 * so the IOMMU fetches it.  If the ring is full, busy-waits (with the ring
 * lock held) re-reading the hardware head register until a slot frees up.
 * The copy must complete before the tail-register write, which is what
 * publishes the descriptor to hardware.
 */
static void
qinv_submit_inv_dsc(immu_t *immu, qinv_dsc_t *dsc)
{
	qinv_t *qinv;
	qinv_mem_t *qinv_table;
	uint_t tail;
#ifdef DEBUG
	uint_t count = 0;	/* full-ring spin iterations, for the probe */
#endif

	qinv = (qinv_t *)immu->immu_qinv;
	qinv_table = &(qinv->qinv_table);

	mutex_enter(&qinv_table->qinv_mem_lock);
	/* reserve slot 'tail', then advance with wraparound */
	tail = qinv_table->qinv_mem_tail;
	qinv_table->qinv_mem_tail++;

	if (qinv_table->qinv_mem_tail == qinv_table->qinv_mem_size)
		qinv_table->qinv_mem_tail = 0;

	/* head == tail here means the advanced tail caught up: ring is full */
	while (qinv_table->qinv_mem_head == qinv_table->qinv_mem_tail) {
#ifdef DEBUG
		count++;
#endif
		/*
		 * inv queue table exhausted, wait hardware to fetch
		 * next descriptor
		 */
		qinv_table->qinv_mem_head = QINV_IQA_HEAD(
		    immu_regs_get64(immu, IMMU_REG_INVAL_QH));
	}

	/*
	 * NOTE(review): 'count' only exists under DEBUG; IMMU_DPROBE3
	 * presumably expands to nothing in non-DEBUG builds — confirm.
	 */
	IMMU_DPROBE3(immu__qinv__sub, uint64_t, dsc->lo, uint64_t, dsc->hi,
	    uint_t, count);

	bcopy(dsc, qinv_table->qinv_mem_vaddr + tail * QINV_ENTRY_SIZE,
	    QINV_ENTRY_SIZE);

	/* publish: hardware starts fetching once the tail register moves */
	immu_regs_put64(immu, IMMU_REG_INVAL_QT,
	    qinv_table->qinv_mem_tail << QINV_IQA_TAIL_SHIFT);

	mutex_exit(&qinv_table->qinv_mem_lock);
}
21911600SVikram.Hegde@Sun.COM
22011600SVikram.Hegde@Sun.COM /* queued invalidation interface -- invalidate context cache */
22111600SVikram.Hegde@Sun.COM static void
qinv_context_common(immu_t * immu,uint8_t function_mask,uint16_t source_id,uint_t domain_id,ctt_inv_g_t type)22211600SVikram.Hegde@Sun.COM qinv_context_common(immu_t *immu, uint8_t function_mask,
22311600SVikram.Hegde@Sun.COM uint16_t source_id, uint_t domain_id, ctt_inv_g_t type)
22411600SVikram.Hegde@Sun.COM {
22511600SVikram.Hegde@Sun.COM qinv_dsc_t dsc;
22611600SVikram.Hegde@Sun.COM
22711600SVikram.Hegde@Sun.COM dsc.lo = CC_INV_DSC_LOW(function_mask, source_id, domain_id, type);
22811600SVikram.Hegde@Sun.COM dsc.hi = CC_INV_DSC_HIGH;
22911600SVikram.Hegde@Sun.COM
23011600SVikram.Hegde@Sun.COM qinv_submit_inv_dsc(immu, &dsc);
23111600SVikram.Hegde@Sun.COM }
23211600SVikram.Hegde@Sun.COM
23311600SVikram.Hegde@Sun.COM /* queued invalidation interface -- invalidate iotlb */
23411600SVikram.Hegde@Sun.COM static void
qinv_iotlb_common(immu_t * immu,uint_t domain_id,uint64_t addr,uint_t am,uint_t hint,tlb_inv_g_t type)23511600SVikram.Hegde@Sun.COM qinv_iotlb_common(immu_t *immu, uint_t domain_id,
23611600SVikram.Hegde@Sun.COM uint64_t addr, uint_t am, uint_t hint, tlb_inv_g_t type)
23711600SVikram.Hegde@Sun.COM {
23811600SVikram.Hegde@Sun.COM qinv_dsc_t dsc;
23911600SVikram.Hegde@Sun.COM uint8_t dr = 0;
24011600SVikram.Hegde@Sun.COM uint8_t dw = 0;
24111600SVikram.Hegde@Sun.COM
24211600SVikram.Hegde@Sun.COM if (IMMU_CAP_GET_DRD(immu->immu_regs_cap))
24311600SVikram.Hegde@Sun.COM dr = 1;
24411600SVikram.Hegde@Sun.COM if (IMMU_CAP_GET_DWD(immu->immu_regs_cap))
24511600SVikram.Hegde@Sun.COM dw = 1;
24611600SVikram.Hegde@Sun.COM
24711600SVikram.Hegde@Sun.COM switch (type) {
24811600SVikram.Hegde@Sun.COM case TLB_INV_G_PAGE:
24911600SVikram.Hegde@Sun.COM if (!IMMU_CAP_GET_PSI(immu->immu_regs_cap) ||
25011600SVikram.Hegde@Sun.COM am > IMMU_CAP_GET_MAMV(immu->immu_regs_cap) ||
25111600SVikram.Hegde@Sun.COM addr & IMMU_PAGEOFFSET) {
25211600SVikram.Hegde@Sun.COM type = TLB_INV_G_DOMAIN;
25311600SVikram.Hegde@Sun.COM goto qinv_ignore_psi;
25411600SVikram.Hegde@Sun.COM }
25511600SVikram.Hegde@Sun.COM dsc.lo = IOTLB_INV_DSC_LOW(domain_id, dr, dw, type);
25611600SVikram.Hegde@Sun.COM dsc.hi = IOTLB_INV_DSC_HIGH(addr, hint, am);
25711600SVikram.Hegde@Sun.COM break;
25811600SVikram.Hegde@Sun.COM
25911600SVikram.Hegde@Sun.COM qinv_ignore_psi:
26011600SVikram.Hegde@Sun.COM case TLB_INV_G_DOMAIN:
26111600SVikram.Hegde@Sun.COM dsc.lo = IOTLB_INV_DSC_LOW(domain_id, dr, dw, type);
26211600SVikram.Hegde@Sun.COM dsc.hi = 0;
26311600SVikram.Hegde@Sun.COM break;
26411600SVikram.Hegde@Sun.COM
26511600SVikram.Hegde@Sun.COM case TLB_INV_G_GLOBAL:
26611600SVikram.Hegde@Sun.COM dsc.lo = IOTLB_INV_DSC_LOW(0, dr, dw, type);
26711600SVikram.Hegde@Sun.COM dsc.hi = 0;
26811600SVikram.Hegde@Sun.COM break;
26911600SVikram.Hegde@Sun.COM default:
27011600SVikram.Hegde@Sun.COM ddi_err(DER_WARN, NULL, "incorrect iotlb flush type");
27111600SVikram.Hegde@Sun.COM return;
27211600SVikram.Hegde@Sun.COM }
27311600SVikram.Hegde@Sun.COM
27411600SVikram.Hegde@Sun.COM qinv_submit_inv_dsc(immu, &dsc);
27511600SVikram.Hegde@Sun.COM }
27611600SVikram.Hegde@Sun.COM
27711600SVikram.Hegde@Sun.COM /* queued invalidation interface -- invalidate dev_iotlb */
27811600SVikram.Hegde@Sun.COM static void
qinv_dev_iotlb_common(immu_t * immu,uint16_t sid,uint64_t addr,uint_t size,uint_t max_invs_pd)27911600SVikram.Hegde@Sun.COM qinv_dev_iotlb_common(immu_t *immu, uint16_t sid,
28011600SVikram.Hegde@Sun.COM uint64_t addr, uint_t size, uint_t max_invs_pd)
28111600SVikram.Hegde@Sun.COM {
28211600SVikram.Hegde@Sun.COM qinv_dsc_t dsc;
28311600SVikram.Hegde@Sun.COM
28411600SVikram.Hegde@Sun.COM dsc.lo = DEV_IOTLB_INV_DSC_LOW(sid, max_invs_pd);
28511600SVikram.Hegde@Sun.COM dsc.hi = DEV_IOTLB_INV_DSC_HIGH(addr, size);
28611600SVikram.Hegde@Sun.COM
28711600SVikram.Hegde@Sun.COM qinv_submit_inv_dsc(immu, &dsc);
28811600SVikram.Hegde@Sun.COM }
28911600SVikram.Hegde@Sun.COM
29011600SVikram.Hegde@Sun.COM /* queued invalidation interface -- invalidate interrupt entry cache */
29111600SVikram.Hegde@Sun.COM static void
qinv_iec_common(immu_t * immu,uint_t iidx,uint_t im,uint_t g)29211600SVikram.Hegde@Sun.COM qinv_iec_common(immu_t *immu, uint_t iidx, uint_t im, uint_t g)
29311600SVikram.Hegde@Sun.COM {
29411600SVikram.Hegde@Sun.COM qinv_dsc_t dsc;
29511600SVikram.Hegde@Sun.COM
29611600SVikram.Hegde@Sun.COM dsc.lo = IEC_INV_DSC_LOW(iidx, im, g);
29711600SVikram.Hegde@Sun.COM dsc.hi = IEC_INV_DSC_HIGH;
29811600SVikram.Hegde@Sun.COM
29911600SVikram.Hegde@Sun.COM qinv_submit_inv_dsc(immu, &dsc);
30011600SVikram.Hegde@Sun.COM }
30111600SVikram.Hegde@Sun.COM
/*
 * queued invalidation interface -- invalidation wait descriptor
 * wait until the invalidation request finished
 *
 * Arms the caller's wait tracker (iwp), posts an invalidation-wait
 * descriptor, and — only if iwp->iwp_sync is set — spins until the
 * hardware writes IMMU_INV_DATA_DONE to the status DWORD.  Async
 * callers poll later via immu_qinv_inv_wait().
 */
static void
qinv_wait_sync(immu_t *immu, immu_inv_wait_t *iwp)
{
	qinv_dsc_t dsc;
	volatile uint32_t *status;	/* written by hardware, hence volatile */
	uint64_t paddr;
#ifdef DEBUG
	uint_t count;	/* poll iterations, reported via DTrace probe */
#endif

	status = &iwp->iwp_vstatus;
	paddr = iwp->iwp_pstatus;

	/* mark pending before the descriptor is visible to hardware */
	*status = IMMU_INV_DATA_PENDING;
	membar_producer();

	/*
	 * sdata = IMMU_INV_DATA_DONE, fence = 1, sw = 1, if = 0
	 * indicate the invalidation wait descriptor completion by
	 * performing a coherent DWORD write to the status address,
	 * not by generating an invalidation completion event
	 */
	dsc.lo = INV_WAIT_DSC_LOW(IMMU_INV_DATA_DONE, 1, 1, 0);
	dsc.hi = INV_WAIT_DSC_HIGH(paddr);

	qinv_submit_inv_dsc(immu, &dsc);

	if (iwp->iwp_sync) {
#ifdef DEBUG
		count = 0;
		while (*status != IMMU_INV_DATA_DONE) {
			count++;
			ht_pause();
		}
		DTRACE_PROBE2(immu__wait__sync, const char *, iwp->iwp_name,
		    uint_t, count);
#else
		while (*status != IMMU_INV_DATA_DONE)
			ht_pause();
#endif
	}
}
34811600SVikram.Hegde@Sun.COM
/*
 * Flush-ops wait entry point: spin until the hardware completes a
 * previously posted invalidation-wait descriptor (see qinv_wait_sync(),
 * called with iwp_sync == B_FALSE) by writing IMMU_INV_DATA_DONE to
 * the status DWORD tracked by iwp.
 */
static void
immu_qinv_inv_wait(immu_inv_wait_t *iwp)
{
	volatile uint32_t *status = &iwp->iwp_vstatus;
#ifdef DEBUG
	uint_t count;	/* poll iterations, reported via DTrace probe */

	count = 0;
	while (*status != IMMU_INV_DATA_DONE) {
		count++;
		ht_pause();
	}
	DTRACE_PROBE2(immu__wait__async, const char *, iwp->iwp_name,
	    uint_t, count);
#else

	while (*status != IMMU_INV_DATA_DONE)
		ht_pause();
#endif
}
36911600SVikram.Hegde@Sun.COM
/*
 * call ddi_dma_mem_alloc to allocate physically contiguous
 * pages for invalidation queue table
 *
 * Allocates and zeroes both regions used by queued invalidation: the
 * descriptor ring (qinv_table) and the wait-status memory (qinv_sync),
 * records their physical base addresses, and hangs the resulting qinv_t
 * off immu->immu_qinv.  Returns DDI_SUCCESS if QI is unsupported or
 * disabled (setup simply skipped), DDI_FAILURE on allocation failure.
 *
 * Error handling uses a fall-through goto chain: each label frees the
 * resources acquired before the failing step and falls into the next.
 */
static int
qinv_setup(immu_t *immu)
{
	qinv_t *qinv;
	size_t size;

	/* page-aligned, physically contiguous (sgllen = 1) DMA memory */
	ddi_dma_attr_t qinv_dma_attr = {
		DMA_ATTR_V0,
		0U,
		0xffffffffffffffffULL,
		0xffffffffU,
		MMU_PAGESIZE, /* page aligned */
		0x1,
		0x1,
		0xffffffffU,
		0xffffffffffffffffULL,
		1,
		4,
		0
	};

	ddi_device_acc_attr_t qinv_acc_attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_STRICTORDER_ACC
	};

	mutex_init(&(immu->immu_qinv_lock), NULL, MUTEX_DRIVER, NULL);


	mutex_enter(&(immu->immu_qinv_lock));

	immu->immu_qinv = NULL;
	/* not an error if the unit lacks QI or QI is administratively off */
	if (!IMMU_ECAP_GET_QI(immu->immu_regs_excap) ||
	    immu_qinv_enable == B_FALSE) {
		mutex_exit(&(immu->immu_qinv_lock));
		return (DDI_SUCCESS);
	}

	/* clamp the tunable to the architectural maximum QS value */
	if (qinv_iqa_qs > QINV_MAX_QUEUE_SIZE)
		qinv_iqa_qs = QINV_MAX_QUEUE_SIZE;

	qinv = kmem_zalloc(sizeof (qinv_t), KM_SLEEP);

	if (ddi_dma_alloc_handle(root_devinfo,
	    &qinv_dma_attr, DDI_DMA_SLEEP, NULL,
	    &(qinv->qinv_table.qinv_mem_dma_hdl)) != DDI_SUCCESS) {
		ddi_err(DER_WARN, root_devinfo,
		    "alloc invalidation queue table handler failed");
		goto queue_table_handle_failed;
	}

	if (ddi_dma_alloc_handle(root_devinfo,
	    &qinv_dma_attr, DDI_DMA_SLEEP, NULL,
	    &(qinv->qinv_sync.qinv_mem_dma_hdl)) != DDI_SUCCESS) {
		ddi_err(DER_WARN, root_devinfo,
		    "alloc invalidation queue sync mem handler failed");
		goto sync_table_handle_failed;
	}

	/* ring entry count per the QS encoding: 1 << (qs + 8) descriptors */
	qinv->qinv_table.qinv_mem_size = (1 << (qinv_iqa_qs + 8));
	size = qinv->qinv_table.qinv_mem_size * QINV_ENTRY_SIZE;

	/* alloc physical contiguous pages for invalidation queue */
	if (ddi_dma_mem_alloc(qinv->qinv_table.qinv_mem_dma_hdl,
	    size,
	    &qinv_acc_attr,
	    DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
	    DDI_DMA_SLEEP,
	    NULL,
	    &(qinv->qinv_table.qinv_mem_vaddr),
	    &size,
	    &(qinv->qinv_table.qinv_mem_acc_hdl)) != DDI_SUCCESS) {
		ddi_err(DER_WARN, root_devinfo,
		    "alloc invalidation queue table failed");
		goto queue_table_mem_failed;
	}

	ASSERT(!((uintptr_t)qinv->qinv_table.qinv_mem_vaddr & MMU_PAGEOFFSET));
	bzero(qinv->qinv_table.qinv_mem_vaddr, size);

	/* get the base physical address of invalidation request queue */
	qinv->qinv_table.qinv_mem_paddr = pfn_to_pa(
	    hat_getpfnum(kas.a_hat, qinv->qinv_table.qinv_mem_vaddr));

	qinv->qinv_table.qinv_mem_head = qinv->qinv_table.qinv_mem_tail = 0;

	/* one status DWORD per ring entry */
	qinv->qinv_sync.qinv_mem_size = qinv->qinv_table.qinv_mem_size;
	size = qinv->qinv_sync.qinv_mem_size * QINV_SYNC_DATA_SIZE;

	/* alloc status memory for invalidation wait descriptor */
	if (ddi_dma_mem_alloc(qinv->qinv_sync.qinv_mem_dma_hdl,
	    size,
	    &qinv_acc_attr,
	    DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
	    DDI_DMA_SLEEP,
	    NULL,
	    &(qinv->qinv_sync.qinv_mem_vaddr),
	    &size,
	    &(qinv->qinv_sync.qinv_mem_acc_hdl)) != DDI_SUCCESS) {
		ddi_err(DER_WARN, root_devinfo,
		    "alloc invalidation queue sync mem failed");
		goto sync_table_mem_failed;
	}

	ASSERT(!((uintptr_t)qinv->qinv_sync.qinv_mem_vaddr & MMU_PAGEOFFSET));
	bzero(qinv->qinv_sync.qinv_mem_vaddr, size);
	qinv->qinv_sync.qinv_mem_paddr = pfn_to_pa(
	    hat_getpfnum(kas.a_hat, qinv->qinv_sync.qinv_mem_vaddr));

	qinv->qinv_sync.qinv_mem_head = qinv->qinv_sync.qinv_mem_tail = 0;

	mutex_init(&(qinv->qinv_table.qinv_mem_lock), NULL, MUTEX_DRIVER, NULL);
	mutex_init(&(qinv->qinv_sync.qinv_mem_lock), NULL, MUTEX_DRIVER, NULL);

	immu->immu_qinv = qinv;

	mutex_exit(&(immu->immu_qinv_lock));

	return (DDI_SUCCESS);

	/* unwind chain: each label falls through to release earlier steps */
sync_table_mem_failed:
	ddi_dma_mem_free(&(qinv->qinv_table.qinv_mem_acc_hdl));

queue_table_mem_failed:
	ddi_dma_free_handle(&(qinv->qinv_sync.qinv_mem_dma_hdl));

sync_table_handle_failed:
	ddi_dma_free_handle(&(qinv->qinv_table.qinv_mem_dma_hdl));

queue_table_handle_failed:
	kmem_free(qinv, sizeof (qinv_t));

	mutex_exit(&(immu->immu_qinv_lock));

	return (DDI_FAILURE);
}
51111600SVikram.Hegde@Sun.COM
51211600SVikram.Hegde@Sun.COM /*
51311600SVikram.Hegde@Sun.COM * ###########################################################################
51411600SVikram.Hegde@Sun.COM *
51511600SVikram.Hegde@Sun.COM * Functions exported by immu_qinv.c
51611600SVikram.Hegde@Sun.COM *
51711600SVikram.Hegde@Sun.COM * ###########################################################################
51811600SVikram.Hegde@Sun.COM */
51911600SVikram.Hegde@Sun.COM
52011600SVikram.Hegde@Sun.COM /*
52111600SVikram.Hegde@Sun.COM * initialize invalidation request queue structure.
52211600SVikram.Hegde@Sun.COM */
52312716Sfrank.van.der.linden@oracle.com int
immu_qinv_setup(list_t * listp)52411600SVikram.Hegde@Sun.COM immu_qinv_setup(list_t *listp)
52511600SVikram.Hegde@Sun.COM {
52611600SVikram.Hegde@Sun.COM immu_t *immu;
52712716Sfrank.van.der.linden@oracle.com int nerr;
52811600SVikram.Hegde@Sun.COM
52911600SVikram.Hegde@Sun.COM if (immu_qinv_enable == B_FALSE) {
53012716Sfrank.van.der.linden@oracle.com return (DDI_FAILURE);
53111600SVikram.Hegde@Sun.COM }
53211600SVikram.Hegde@Sun.COM
53312716Sfrank.van.der.linden@oracle.com nerr = 0;
53411600SVikram.Hegde@Sun.COM immu = list_head(listp);
53511600SVikram.Hegde@Sun.COM for (; immu; immu = list_next(listp, immu)) {
53611600SVikram.Hegde@Sun.COM if (qinv_setup(immu) == DDI_SUCCESS) {
53711600SVikram.Hegde@Sun.COM immu->immu_qinv_setup = B_TRUE;
53812716Sfrank.van.der.linden@oracle.com } else {
53912716Sfrank.van.der.linden@oracle.com nerr++;
54012716Sfrank.van.der.linden@oracle.com break;
54111600SVikram.Hegde@Sun.COM }
54211600SVikram.Hegde@Sun.COM }
54312716Sfrank.van.der.linden@oracle.com
54412716Sfrank.van.der.linden@oracle.com return (nerr > 0 ? DDI_FAILURE : DDI_SUCCESS);
54511600SVikram.Hegde@Sun.COM }
54611600SVikram.Hegde@Sun.COM
54711600SVikram.Hegde@Sun.COM void
immu_qinv_startup(immu_t * immu)54811600SVikram.Hegde@Sun.COM immu_qinv_startup(immu_t *immu)
54911600SVikram.Hegde@Sun.COM {
55011600SVikram.Hegde@Sun.COM qinv_t *qinv;
55111600SVikram.Hegde@Sun.COM uint64_t qinv_reg_value;
55211600SVikram.Hegde@Sun.COM
55311600SVikram.Hegde@Sun.COM if (immu->immu_qinv_setup == B_FALSE) {
55411600SVikram.Hegde@Sun.COM return;
55511600SVikram.Hegde@Sun.COM }
55611600SVikram.Hegde@Sun.COM
55711600SVikram.Hegde@Sun.COM qinv = (qinv_t *)immu->immu_qinv;
55811600SVikram.Hegde@Sun.COM qinv_reg_value = qinv->qinv_table.qinv_mem_paddr | qinv_iqa_qs;
55911600SVikram.Hegde@Sun.COM immu_regs_qinv_enable(immu, qinv_reg_value);
56012513Sfrank.van.der.linden@oracle.com immu->immu_flushops = &immu_qinv_flushops;
56111600SVikram.Hegde@Sun.COM immu->immu_qinv_running = B_TRUE;
56211600SVikram.Hegde@Sun.COM }
56311600SVikram.Hegde@Sun.COM
56411600SVikram.Hegde@Sun.COM /*
56511600SVikram.Hegde@Sun.COM * queued invalidation interface
56611600SVikram.Hegde@Sun.COM * function based context cache invalidation
56711600SVikram.Hegde@Sun.COM */
56811600SVikram.Hegde@Sun.COM void
immu_qinv_context_fsi(immu_t * immu,uint8_t function_mask,uint16_t source_id,uint_t domain_id,immu_inv_wait_t * iwp)56911600SVikram.Hegde@Sun.COM immu_qinv_context_fsi(immu_t *immu, uint8_t function_mask,
570*13050Sfrank.van.der.linden@oracle.com uint16_t source_id, uint_t domain_id, immu_inv_wait_t *iwp)
57111600SVikram.Hegde@Sun.COM {
57211600SVikram.Hegde@Sun.COM qinv_context_common(immu, function_mask, source_id,
57311600SVikram.Hegde@Sun.COM domain_id, CTT_INV_G_DEVICE);
574*13050Sfrank.van.der.linden@oracle.com qinv_wait_sync(immu, iwp);
57511600SVikram.Hegde@Sun.COM }
57611600SVikram.Hegde@Sun.COM
57711600SVikram.Hegde@Sun.COM /*
57811600SVikram.Hegde@Sun.COM * queued invalidation interface
57911600SVikram.Hegde@Sun.COM * domain based context cache invalidation
58011600SVikram.Hegde@Sun.COM */
58111600SVikram.Hegde@Sun.COM void
immu_qinv_context_dsi(immu_t * immu,uint_t domain_id,immu_inv_wait_t * iwp)582*13050Sfrank.van.der.linden@oracle.com immu_qinv_context_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
58311600SVikram.Hegde@Sun.COM {
58411600SVikram.Hegde@Sun.COM qinv_context_common(immu, 0, 0, domain_id, CTT_INV_G_DOMAIN);
585*13050Sfrank.van.der.linden@oracle.com qinv_wait_sync(immu, iwp);
58611600SVikram.Hegde@Sun.COM }
58711600SVikram.Hegde@Sun.COM
58811600SVikram.Hegde@Sun.COM /*
58911600SVikram.Hegde@Sun.COM * queued invalidation interface
59011600SVikram.Hegde@Sun.COM * invalidation global context cache
59111600SVikram.Hegde@Sun.COM */
59211600SVikram.Hegde@Sun.COM void
immu_qinv_context_gbl(immu_t * immu,immu_inv_wait_t * iwp)593*13050Sfrank.van.der.linden@oracle.com immu_qinv_context_gbl(immu_t *immu, immu_inv_wait_t *iwp)
59411600SVikram.Hegde@Sun.COM {
59511600SVikram.Hegde@Sun.COM qinv_context_common(immu, 0, 0, 0, CTT_INV_G_GLOBAL);
596*13050Sfrank.van.der.linden@oracle.com qinv_wait_sync(immu, iwp);
59711600SVikram.Hegde@Sun.COM }
59811600SVikram.Hegde@Sun.COM
59911600SVikram.Hegde@Sun.COM /*
60011600SVikram.Hegde@Sun.COM * queued invalidation interface
60111600SVikram.Hegde@Sun.COM * paged based iotlb invalidation
60211600SVikram.Hegde@Sun.COM */
60311600SVikram.Hegde@Sun.COM void
immu_qinv_iotlb_psi(immu_t * immu,uint_t domain_id,uint64_t dvma,uint_t count,uint_t hint,immu_inv_wait_t * iwp)60412513Sfrank.van.der.linden@oracle.com immu_qinv_iotlb_psi(immu_t *immu, uint_t domain_id,
605*13050Sfrank.van.der.linden@oracle.com uint64_t dvma, uint_t count, uint_t hint, immu_inv_wait_t *iwp)
60611600SVikram.Hegde@Sun.COM {
60711600SVikram.Hegde@Sun.COM uint_t am = 0;
60811600SVikram.Hegde@Sun.COM uint_t max_am;
60911600SVikram.Hegde@Sun.COM
61011600SVikram.Hegde@Sun.COM max_am = IMMU_CAP_GET_MAMV(immu->immu_regs_cap);
61111600SVikram.Hegde@Sun.COM
61211600SVikram.Hegde@Sun.COM /* choose page specified invalidation */
61311600SVikram.Hegde@Sun.COM if (IMMU_CAP_GET_PSI(immu->immu_regs_cap)) {
61411600SVikram.Hegde@Sun.COM while (am <= max_am) {
61511600SVikram.Hegde@Sun.COM if ((ADDR_AM_OFFSET(IMMU_BTOP(dvma), am) + count)
61611600SVikram.Hegde@Sun.COM <= ADDR_AM_MAX(am)) {
61711600SVikram.Hegde@Sun.COM qinv_iotlb_common(immu, domain_id,
61811600SVikram.Hegde@Sun.COM dvma, am, hint, TLB_INV_G_PAGE);
61911600SVikram.Hegde@Sun.COM break;
62011600SVikram.Hegde@Sun.COM }
62111600SVikram.Hegde@Sun.COM am++;
62211600SVikram.Hegde@Sun.COM }
62311600SVikram.Hegde@Sun.COM if (am > max_am) {
62411600SVikram.Hegde@Sun.COM qinv_iotlb_common(immu, domain_id,
62511600SVikram.Hegde@Sun.COM dvma, 0, hint, TLB_INV_G_DOMAIN);
62611600SVikram.Hegde@Sun.COM }
62711600SVikram.Hegde@Sun.COM
62811600SVikram.Hegde@Sun.COM /* choose domain invalidation */
62911600SVikram.Hegde@Sun.COM } else {
63011600SVikram.Hegde@Sun.COM qinv_iotlb_common(immu, domain_id, dvma,
63111600SVikram.Hegde@Sun.COM 0, hint, TLB_INV_G_DOMAIN);
63211600SVikram.Hegde@Sun.COM }
633*13050Sfrank.van.der.linden@oracle.com
634*13050Sfrank.van.der.linden@oracle.com qinv_wait_sync(immu, iwp);
63511600SVikram.Hegde@Sun.COM }
63611600SVikram.Hegde@Sun.COM
63711600SVikram.Hegde@Sun.COM /*
63811600SVikram.Hegde@Sun.COM * queued invalidation interface
63911600SVikram.Hegde@Sun.COM * domain based iotlb invalidation
64011600SVikram.Hegde@Sun.COM */
64111600SVikram.Hegde@Sun.COM void
immu_qinv_iotlb_dsi(immu_t * immu,uint_t domain_id,immu_inv_wait_t * iwp)642*13050Sfrank.van.der.linden@oracle.com immu_qinv_iotlb_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp)
64311600SVikram.Hegde@Sun.COM {
64411600SVikram.Hegde@Sun.COM qinv_iotlb_common(immu, domain_id, 0, 0, 0, TLB_INV_G_DOMAIN);
645*13050Sfrank.van.der.linden@oracle.com qinv_wait_sync(immu, iwp);
64611600SVikram.Hegde@Sun.COM }
64711600SVikram.Hegde@Sun.COM
64811600SVikram.Hegde@Sun.COM /*
64911600SVikram.Hegde@Sun.COM * queued invalidation interface
65011600SVikram.Hegde@Sun.COM * global iotlb invalidation
65111600SVikram.Hegde@Sun.COM */
65211600SVikram.Hegde@Sun.COM void
immu_qinv_iotlb_gbl(immu_t * immu,immu_inv_wait_t * iwp)653*13050Sfrank.van.der.linden@oracle.com immu_qinv_iotlb_gbl(immu_t *immu, immu_inv_wait_t *iwp)
65411600SVikram.Hegde@Sun.COM {
65511600SVikram.Hegde@Sun.COM qinv_iotlb_common(immu, 0, 0, 0, 0, TLB_INV_G_GLOBAL);
656*13050Sfrank.van.der.linden@oracle.com qinv_wait_sync(immu, iwp);
65711600SVikram.Hegde@Sun.COM }
65811600SVikram.Hegde@Sun.COM
65911600SVikram.Hegde@Sun.COM /* queued invalidation interface -- global invalidate interrupt entry cache */
66011600SVikram.Hegde@Sun.COM void
immu_qinv_intr_global(immu_t * immu,immu_inv_wait_t * iwp)661*13050Sfrank.van.der.linden@oracle.com immu_qinv_intr_global(immu_t *immu, immu_inv_wait_t *iwp)
66211600SVikram.Hegde@Sun.COM {
66311600SVikram.Hegde@Sun.COM qinv_iec_common(immu, 0, 0, IEC_INV_GLOBAL);
664*13050Sfrank.van.der.linden@oracle.com qinv_wait_sync(immu, iwp);
66511600SVikram.Hegde@Sun.COM }
66611600SVikram.Hegde@Sun.COM
66711600SVikram.Hegde@Sun.COM /* queued invalidation interface -- invalidate single interrupt entry cache */
66811600SVikram.Hegde@Sun.COM void
immu_qinv_intr_one_cache(immu_t * immu,uint_t iidx,immu_inv_wait_t * iwp)669*13050Sfrank.van.der.linden@oracle.com immu_qinv_intr_one_cache(immu_t *immu, uint_t iidx, immu_inv_wait_t *iwp)
67011600SVikram.Hegde@Sun.COM {
67111600SVikram.Hegde@Sun.COM qinv_iec_common(immu, iidx, 0, IEC_INV_INDEX);
672*13050Sfrank.van.der.linden@oracle.com qinv_wait_sync(immu, iwp);
67311600SVikram.Hegde@Sun.COM }
67411600SVikram.Hegde@Sun.COM
67511600SVikram.Hegde@Sun.COM /* queued invalidation interface -- invalidate interrupt entry caches */
67611600SVikram.Hegde@Sun.COM void
immu_qinv_intr_caches(immu_t * immu,uint_t iidx,uint_t cnt,immu_inv_wait_t * iwp)677*13050Sfrank.van.der.linden@oracle.com immu_qinv_intr_caches(immu_t *immu, uint_t iidx, uint_t cnt,
678*13050Sfrank.van.der.linden@oracle.com immu_inv_wait_t *iwp)
67911600SVikram.Hegde@Sun.COM {
68011600SVikram.Hegde@Sun.COM uint_t i, mask = 0;
68111600SVikram.Hegde@Sun.COM
68211600SVikram.Hegde@Sun.COM ASSERT(cnt != 0);
68311600SVikram.Hegde@Sun.COM
68411600SVikram.Hegde@Sun.COM /* requested interrupt count is not a power of 2 */
68511600SVikram.Hegde@Sun.COM if (!ISP2(cnt)) {
68611600SVikram.Hegde@Sun.COM for (i = 0; i < cnt; i++) {
68711600SVikram.Hegde@Sun.COM qinv_iec_common(immu, iidx + cnt, 0, IEC_INV_INDEX);
68811600SVikram.Hegde@Sun.COM }
689*13050Sfrank.van.der.linden@oracle.com qinv_wait_sync(immu, iwp);
69011600SVikram.Hegde@Sun.COM return;
69111600SVikram.Hegde@Sun.COM }
69211600SVikram.Hegde@Sun.COM
69311600SVikram.Hegde@Sun.COM while ((2 << mask) < cnt) {
69411600SVikram.Hegde@Sun.COM mask++;
69511600SVikram.Hegde@Sun.COM }
69611600SVikram.Hegde@Sun.COM
69711600SVikram.Hegde@Sun.COM if (mask > IMMU_ECAP_GET_MHMV(immu->immu_regs_excap)) {
69811600SVikram.Hegde@Sun.COM for (i = 0; i < cnt; i++) {
69911600SVikram.Hegde@Sun.COM qinv_iec_common(immu, iidx + cnt, 0, IEC_INV_INDEX);
70011600SVikram.Hegde@Sun.COM }
701*13050Sfrank.van.der.linden@oracle.com qinv_wait_sync(immu, iwp);
70211600SVikram.Hegde@Sun.COM return;
70311600SVikram.Hegde@Sun.COM }
70411600SVikram.Hegde@Sun.COM
70511600SVikram.Hegde@Sun.COM qinv_iec_common(immu, iidx, mask, IEC_INV_INDEX);
70611600SVikram.Hegde@Sun.COM
707*13050Sfrank.van.der.linden@oracle.com qinv_wait_sync(immu, iwp);
70811600SVikram.Hegde@Sun.COM }
70911600SVikram.Hegde@Sun.COM
71011600SVikram.Hegde@Sun.COM void
immu_qinv_report_fault(immu_t * immu)71111600SVikram.Hegde@Sun.COM immu_qinv_report_fault(immu_t *immu)
71211600SVikram.Hegde@Sun.COM {
71311600SVikram.Hegde@Sun.COM uint16_t head;
71411600SVikram.Hegde@Sun.COM qinv_dsc_t *dsc;
71511600SVikram.Hegde@Sun.COM qinv_t *qinv;
71611600SVikram.Hegde@Sun.COM
71711600SVikram.Hegde@Sun.COM /* access qinv data */
71811600SVikram.Hegde@Sun.COM mutex_enter(&(immu->immu_qinv_lock));
71911600SVikram.Hegde@Sun.COM
72011600SVikram.Hegde@Sun.COM qinv = (qinv_t *)(immu->immu_qinv);
72111600SVikram.Hegde@Sun.COM
72211600SVikram.Hegde@Sun.COM head = QINV_IQA_HEAD(
72311600SVikram.Hegde@Sun.COM immu_regs_get64(immu, IMMU_REG_INVAL_QH));
72411600SVikram.Hegde@Sun.COM
72511600SVikram.Hegde@Sun.COM dsc = (qinv_dsc_t *)(qinv->qinv_table.qinv_mem_vaddr
72611600SVikram.Hegde@Sun.COM + (head * QINV_ENTRY_SIZE));
72711600SVikram.Hegde@Sun.COM
72811600SVikram.Hegde@Sun.COM /* report the error */
72911600SVikram.Hegde@Sun.COM ddi_err(DER_WARN, immu->immu_dip,
73011600SVikram.Hegde@Sun.COM "generated a fault when fetching a descriptor from the"
73111600SVikram.Hegde@Sun.COM "\tinvalidation queue, or detects that the fetched"
73211600SVikram.Hegde@Sun.COM "\tdescriptor is invalid. The head register is "
73311600SVikram.Hegde@Sun.COM "0x%" PRIx64
73411600SVikram.Hegde@Sun.COM "\tthe type is %s",
73511600SVikram.Hegde@Sun.COM head,
73611600SVikram.Hegde@Sun.COM qinv_dsc_type[MIN(INV_DSC_TYPE(dsc), QINV_MAX_DSC_TYPE)]);
73711600SVikram.Hegde@Sun.COM
73811600SVikram.Hegde@Sun.COM mutex_exit(&(immu->immu_qinv_lock));
73911600SVikram.Hegde@Sun.COM }
740