/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Portions Copyright (c) 2010, Oracle and/or its affiliates.
 * All rights reserved.
 */
/*
 * Copyright (c) 2009, Intel Corporation.
 * All rights reserved.
 */

/*
 * DVMA code
 * This file contains Intel IOMMU code that deals with DVMA
 * i.e. DMA remapping.
 */

#include <sys/sysmacros.h>
#include <sys/pcie.h>
#include <sys/pci_cfgspace.h>
#include <vm/hat_i86.h>
#include <sys/memlist.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/modhash.h>
#include <sys/immu.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>

#undef TEST

/*
 * Macros based on PCI spec
 */
#define	IMMU_PCI_REV2CLASS(r)	((r) >> 8)  /* classcode from revid */
#define	IMMU_PCI_CLASS2BASE(c)	((c) >> 16) /* baseclass from classcode */
#define	IMMU_PCI_CLASS2SUB(c)	(((c) >> 8) & 0xff) /* subclass from classcode */

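/*
 * IMMU_CONTIG_PADDR(d, p) is true when dcookie 'd' is in use (non-zero
 * dck_paddr) and physical address 'p' is exactly one IMMU page beyond
 * d's dck_paddr, i.e. the new page is physically contiguous with the
 * page already recorded in that cookie.
 */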
#define	IMMU_CONTIG_PADDR(d, p) \
	((d).dck_paddr && ((d).dck_paddr + IMMU_PAGESIZE) == (p))

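/*
 * State shared with the devinfo ancestor-walk callbacks (e.g. match_lpc(),
 * get_branch_domain()) invoked via immu_walk_ancestor(); carries the
 * requesting rdip, the domain and domain-dip discovered so far, and a
 * walk error code.
 */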
typedef struct dvma_arg {
	immu_t *dva_immu;
	dev_info_t *dva_rdip;
	dev_info_t *dva_ddip;
	domain_t *dva_domain;
	int dva_level;
	immu_flags_t dva_flags;
	list_t *dva_list;
	int dva_error;
} dvma_arg_t;

static domain_t *domain_create(immu_t *immu, dev_info_t *ddip,
    dev_info_t *rdip, immu_flags_t immu_flags);
static immu_devi_t *create_immu_devi(dev_info_t *rdip, int bus,
    int dev, int func, immu_flags_t immu_flags);
static void destroy_immu_devi(immu_devi_t *immu_devi);
static boolean_t dvma_map(domain_t *domain, uint64_t sdvma,
    uint64_t nvpages, immu_dcookie_t *dcookies, int dcount, dev_info_t *rdip,
    immu_flags_t immu_flags);

/* Extern globals */
extern struct memlist *phys_install;

/*
 * iommulib interface functions.
 */
static int immu_probe(iommulib_handle_t unitp, dev_info_t *dip);
static int immu_allochdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep);
static int immu_freehdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
static int immu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, struct ddi_dma_req *dma_req,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int immu_unbindhdl(iommulib_handle_t handle,
    dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
static int immu_sync(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off, size_t len,
    uint_t cachefl);
static int immu_win(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
    off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int immu_mapobject(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
    struct ddi_dma_req *dmareq, ddi_dma_obj_t *dmao);
static int immu_unmapobject(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao);
static int immu_map(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, struct ddi_dma_req *dmareq,
    ddi_dma_handle_t *dma_handle);
static int immu_mctl(iommulib_handle_t handle, dev_info_t *dip,
    dev_info_t *rdip, ddi_dma_handle_t dma_handle,
    enum ddi_dma_ctlops request, off_t *offp, size_t *lenp,
    caddr_t *objpp, uint_t cachefl);

/* static Globals */

/*
 * Used to setup DMA objects (memory regions)
 * for DMA reads by IOMMU units
 */
static ddi_dma_attr_t immu_dma_attr = {
	DMA_ATTR_V0,
	0U,				/* dma_attr_addr_lo */
	0xffffffffffffffffULL,		/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	MMU_PAGESIZE,			/* dma_attr_align: MMU page aligned */
	0x1,				/* dma_attr_burstsizes */
	0x1,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffffffffffULL,		/* dma_attr_seg */
	1,				/* dma_attr_sgllen */
	4,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};

static ddi_device_acc_attr_t immu_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

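/*
 * DVMA ops vector handed to the iommulib framework. iommulib calls back
 * through these entry points to service DDI DMA operations (handle
 * alloc/free, bind/unbind, sync, windows) for devices that sit behind an
 * Intel IOMMU unit.
 */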
struct iommulib_ops immulib_ops = {
	IOMMU_OPS_VERSION,
	INTEL_IOMMU,
	"Intel IOMMU",
	NULL,
	immu_probe,
	immu_allochdl,
	immu_freehdl,
	immu_bindhdl,
	immu_unbindhdl,
	immu_sync,
	immu_win,
	immu_mapobject,
	immu_unmapobject,
	immu_map,
	immu_mctl
};

/*
 * Fake physical address range used to set up initial prealloc mappings.
 * This memory is never actually accessed. It is mapped read-only,
 * and is overwritten as soon as the first DMA bind operation is
 * performed. Since 0 is a special case, just start at the 2nd
 * physical page.
 */

static immu_dcookie_t immu_precookie = { MMU_PAGESIZE, IMMU_NPREPTES };

/* globals private to this file */
static kmutex_t immu_domain_lock;
static list_t immu_unity_domain_list;
static list_t immu_xlate_domain_list;

/* structure used to store idx into each level of the page tables */
typedef struct xlate {
	int xlt_level;
	uint_t xlt_idx;
	pgtable_t *xlt_pgtable;
} xlate_t;

/* 0 is reserved by the VT-d spec. Solaris reserves 1 */
#define	IMMU_UNITY_DID	1

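/*
 * Hash mapping a (segment, bus, dev/func) key to the domain that the
 * device with that BDF has been assigned to; see bdf_domain_lookup()
 * and bdf_domain_insert() below.
 */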
static mod_hash_t *bdf_domain_hash;

int immu_use_alh;
int immu_use_tm;

static domain_t *
bdf_domain_lookup(immu_devi_t *immu_devi)
{
	domain_t *domain;
	int16_t seg = immu_devi->imd_seg;
	int16_t bus = immu_devi->imd_bus;
	int16_t devfunc = immu_devi->imd_devfunc;
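	/*
	 * Pack segment, bus and dev/func into a single hash key. A negative
	 * value in any field means the device has no valid BDF, so there
	 * can be no entry for it in the hash.
	 */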
	uintptr_t bdf = (seg << 16 | bus << 8 | devfunc);

	if (seg < 0 || bus < 0 || devfunc < 0) {
		return (NULL);
	}

	domain = NULL;
	if (mod_hash_find(bdf_domain_hash,
	    (void *)bdf, (void *)&domain) == 0) {
		ASSERT(domain);
		ASSERT(domain->dom_did > 0);
		return (domain);
	} else {
		return (NULL);
	}
}

static void
bdf_domain_insert(immu_devi_t *immu_devi, domain_t *domain)
{
	int16_t seg = immu_devi->imd_seg;
	int16_t bus = immu_devi->imd_bus;
	int16_t devfunc = immu_devi->imd_devfunc;
	uintptr_t bdf = (seg << 16 | bus << 8 | devfunc);

	if (seg < 0 || bus < 0 || devfunc < 0) {
		return;
	}

	(void) mod_hash_insert(bdf_domain_hash, (void *)bdf, (void *)domain);
}

static int
match_lpc(dev_info_t *pdip, void *arg)
{
	immu_devi_t *immu_devi;
	dvma_arg_t *dvap = (dvma_arg_t *)arg;

	if (list_is_empty(dvap->dva_list)) {
		return (DDI_WALK_TERMINATE);
	}

	immu_devi = list_head(dvap->dva_list);
	for (; immu_devi; immu_devi = list_next(dvap->dva_list,
	    immu_devi)) {
		if (immu_devi->imd_dip == pdip) {
			dvap->dva_ddip = pdip;
			dvap->dva_error = DDI_SUCCESS;
			return (DDI_WALK_TERMINATE);
		}
	}

	return (DDI_WALK_CONTINUE);
}

static void
immu_devi_set_spclist(dev_info_t *dip, immu_t *immu)
{
	list_t *spclist = NULL;
	immu_devi_t *immu_devi;

	immu_devi = IMMU_DEVI(dip);
	if (immu_devi->imd_display == B_TRUE) {
		spclist = &(immu->immu_dvma_gfx_list);
	} else if (immu_devi->imd_lpc == B_TRUE) {
		spclist = &(immu->immu_dvma_lpc_list);
	}

	if (spclist) {
		mutex_enter(&(immu->immu_lock));
		list_insert_head(spclist, immu_devi);
		mutex_exit(&(immu->immu_lock));
	}
}

/*
 * Set the immu_devi struct in the immu_devi field of a devinfo node
 */
int
immu_devi_set(dev_info_t *dip, immu_flags_t immu_flags)
{
	int bus, dev, func;
	immu_devi_t *new_imd;
	immu_devi_t *immu_devi;

	immu_devi = immu_devi_get(dip);
	if (immu_devi != NULL) {
		return (DDI_SUCCESS);
	}

	bus = dev = func = -1;

	/*
	 * Assume a new immu_devi struct is needed
	 */
	if (!DEVI_IS_PCI(dip) || acpica_get_bdf(dip, &bus, &dev, &func) != 0) {
		/*
		 * No BDF. Set bus = -1 to indicate this.
		 * We still need to create an immu_devi struct
		 * though
		 */
		bus = -1;
		dev = 0;
		func = 0;
	}

	new_imd = create_immu_devi(dip, bus, dev, func, immu_flags);
	if (new_imd == NULL) {
		ddi_err(DER_WARN, dip, "Failed to create immu_devi "
		    "structure");
		return (DDI_FAILURE);
	}

	/*
	 * Check if some other thread allocated an immu_devi while we
	 * didn't own the lock.
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	if (IMMU_DEVI(dip) == NULL) {
		IMMU_DEVI_SET(dip, new_imd);
	} else {
		destroy_immu_devi(new_imd);
	}
	mutex_exit(&(DEVI(dip)->devi_lock));

	return (DDI_SUCCESS);
}

static dev_info_t *
get_lpc_devinfo(immu_t *immu, dev_info_t *rdip, immu_flags_t immu_flags)
{
	dvma_arg_t dvarg = {0};
	dvarg.dva_list = &(immu->immu_dvma_lpc_list);
	dvarg.dva_rdip = rdip;
	dvarg.dva_error = DDI_FAILURE;

	if (immu_walk_ancestor(rdip, NULL, match_lpc,
	    &dvarg, NULL, immu_flags) != DDI_SUCCESS) {
		ddi_err(DER_MODE, rdip, "Could not walk ancestors to "
		    "find lpc_devinfo for ISA device");
		return (NULL);
	}

	if (dvarg.dva_error != DDI_SUCCESS || dvarg.dva_ddip == NULL) {
		ddi_err(DER_MODE, rdip, "Could not find lpc_devinfo for "
		    "ISA device");
		return (NULL);
	}

	return (dvarg.dva_ddip);
}

static dev_info_t *
get_gfx_devinfo(dev_info_t *rdip)
{
	immu_t *immu;
	immu_devi_t *immu_devi;
	list_t *list_gfx;

	/*
	 * The GFX device may not be on the same iommu unit as "agpgart"
	 * so search globally
	 */
	immu_devi = NULL;
	immu = list_head(&immu_list);
	for (; immu; immu = list_next(&immu_list, immu)) {
		list_gfx = &(immu->immu_dvma_gfx_list);
		if (!list_is_empty(list_gfx)) {
			immu_devi = list_head(list_gfx);
			break;
		}
	}

	if (immu_devi == NULL) {
		ddi_err(DER_WARN, rdip, "iommu: No GFX device. "
		    "Cannot redirect agpgart");
		return (NULL);
	}

	ddi_err(DER_LOG, rdip, "iommu: GFX redirect to %s",
	    ddi_node_name(immu_devi->imd_dip));

	return (immu_devi->imd_dip);
}

static immu_flags_t
dma_to_immu_flags(struct ddi_dma_req *dmareq)
{
	immu_flags_t flags = 0;

	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
		flags |= IMMU_FLAGS_SLEEP;
	} else {
		flags |= IMMU_FLAGS_NOSLEEP;
	}

#ifdef BUGGY_DRIVERS

	flags |= (IMMU_FLAGS_READ | IMMU_FLAGS_WRITE);

#else
	/*
	 * Read and write flags need to be reversed.
	 * DMA_READ means read from device and write
	 * to memory. So DMA read means DVMA write.
	 */
	if (dmareq->dmar_flags & DDI_DMA_READ)
		flags |= IMMU_FLAGS_WRITE;

	if (dmareq->dmar_flags & DDI_DMA_WRITE)
		flags |= IMMU_FLAGS_READ;

	/*
	 * Some buggy drivers specify neither READ nor WRITE.
	 * For such drivers set both read and write permissions
	 */
	if ((dmareq->dmar_flags & (DDI_DMA_READ | DDI_DMA_WRITE)) == 0) {
		flags |= (IMMU_FLAGS_READ | IMMU_FLAGS_WRITE);
	}
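
	/*
	 * Example: a bind with only DDI_DMA_READ set (a device-to-memory
	 * transfer) yields IMMU_FLAGS_WRITE, since the device must be
	 * permitted to write to the mapped pages through the IOMMU.
	 */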
#endif

	return (flags);
}

/*ARGSUSED*/
int
pgtable_ctor(void *buf, void *arg, int kmflag)
{
	size_t actual_size = 0;
	pgtable_t *pgtable;
	int (*dmafp)(caddr_t);
	caddr_t vaddr;
	void *next;
	uint_t flags;
	immu_t *immu = arg;

	pgtable = (pgtable_t *)buf;

	dmafp = (kmflag & KM_NOSLEEP) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	next = kmem_zalloc(IMMU_PAGESIZE, kmflag);
	if (next == NULL) {
		return (-1);
	}

	if (ddi_dma_alloc_handle(root_devinfo, &immu_dma_attr,
	    dmafp, NULL, &pgtable->hwpg_dmahdl) != DDI_SUCCESS) {
		kmem_free(next, IMMU_PAGESIZE);
		return (-1);
	}

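	/*
	 * If this IOMMU cannot snoop the CPU caches during page-table
	 * walks (immu_dvma_coherent is false), allocate the page-table
	 * page uncached/write-combining so that PTE updates made by the
	 * CPU are not stranded in the caches where the hardware cannot
	 * see them.
	 */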
	flags = DDI_DMA_CONSISTENT;
	if (!immu->immu_dvma_coherent)
		flags |= IOMEM_DATA_UC_WR_COMBINE;

	if (ddi_dma_mem_alloc(pgtable->hwpg_dmahdl, IMMU_PAGESIZE,
	    &immu_acc_attr, flags,
	    dmafp, NULL, &vaddr, &actual_size,
	    &pgtable->hwpg_memhdl) != DDI_SUCCESS) {
		ddi_dma_free_handle(&pgtable->hwpg_dmahdl);
		kmem_free(next, IMMU_PAGESIZE);
		return (-1);
	}

	/*
	 * Memory allocation failure. Maybe a temporary condition
	 * so return error rather than panic, so we can try again
	 */
	if (actual_size < IMMU_PAGESIZE) {
		ddi_dma_mem_free(&pgtable->hwpg_memhdl);
		ddi_dma_free_handle(&pgtable->hwpg_dmahdl);
		kmem_free(next, IMMU_PAGESIZE);
		return (-1);
	}

	pgtable->hwpg_paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, vaddr));
	pgtable->hwpg_vaddr = vaddr;
	pgtable->swpg_next_array = next;

	rw_init(&(pgtable->swpg_rwlock), NULL, RW_DEFAULT, NULL);

	return (0);
}

/*ARGSUSED*/
void
pgtable_dtor(void *buf, void *arg)
{
	pgtable_t *pgtable;

	pgtable = (pgtable_t *)buf;

	/* destroy will panic if lock is held. */
	rw_destroy(&(pgtable->swpg_rwlock));

	ddi_dma_mem_free(&pgtable->hwpg_memhdl);
	ddi_dma_free_handle(&pgtable->hwpg_dmahdl);
	kmem_free(pgtable->swpg_next_array, IMMU_PAGESIZE);
}

/*
 * pgtable_alloc()
 *	alloc an IOMMU pgtable structure.
 *	This same struct is used for root and context tables as well.
 *	This routine allocates the following:
 *	- a pgtable_t struct
 *	- a HW page which holds PTEs/entries which is accessed by HW
 *	  so we set up DMA for this page
 *	- a SW page which is only for our bookkeeping
 *	  (for example to hold pointers to the next level pgtable).
 *	  So a simple kmem_alloc suffices
 */
static pgtable_t *
pgtable_alloc(immu_t *immu, immu_flags_t immu_flags)
{
	pgtable_t *pgtable;
	int kmflags;

	kmflags = (immu_flags & IMMU_FLAGS_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;

	pgtable = kmem_cache_alloc(immu->immu_pgtable_cache, kmflags);
	if (pgtable == NULL) {
		return (NULL);
	}
	return (pgtable);
}

static void
pgtable_zero(pgtable_t *pgtable)
{
	bzero(pgtable->hwpg_vaddr, IMMU_PAGESIZE);
	bzero(pgtable->swpg_next_array, IMMU_PAGESIZE);
}

static void
pgtable_free(immu_t *immu, pgtable_t *pgtable)
{
	kmem_cache_free(immu->immu_pgtable_cache, pgtable);
}

/*
 * Function to identify a display device from the PCI class code
 */
static boolean_t
device_is_display(uint_t classcode)
{
	static uint_t disp_classes[] = {
		0x000100,
		0x030000,
		0x030001
	};
	int i, nclasses = sizeof (disp_classes) / sizeof (uint_t);

	for (i = 0; i < nclasses; i++) {
		if (classcode == disp_classes[i])
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Function that determines if device is PCIEX and/or PCIEX bridge
 */
static boolean_t
device_is_pciex(
    uchar_t bus, uchar_t dev, uchar_t func, boolean_t *is_pcib)
{
	ushort_t cap;
	ushort_t capsp;
	ushort_t cap_count = PCI_CAP_MAX_PTR;
	ushort_t status;
	boolean_t is_pciex = B_FALSE;

	*is_pcib = B_FALSE;

	status = pci_getw_func(bus, dev, func, PCI_CONF_STAT);
	if (!(status & PCI_STAT_CAP))
		return (B_FALSE);

	capsp = pci_getb_func(bus, dev, func, PCI_CONF_CAP_PTR);
	while (cap_count-- && capsp >= PCI_CAP_PTR_OFF) {
		capsp &= PCI_CAP_PTR_MASK;
		cap = pci_getb_func(bus, dev, func, capsp);

		if (cap == PCI_CAP_ID_PCI_E) {
			status = pci_getw_func(bus, dev, func, capsp + 2);
			/*
			 * See section 7.8.2 of PCI-Express Base Spec v1.0a
			 * for Device/Port Type.
			 * PCIE_PCIECAP_DEV_TYPE_PCIE2PCI implies that the
			 * device is a PCIE2PCI bridge
			 */
			*is_pcib =
			    ((status & PCIE_PCIECAP_DEV_TYPE_MASK) ==
			    PCIE_PCIECAP_DEV_TYPE_PCIE2PCI) ? B_TRUE : B_FALSE;
			is_pciex = B_TRUE;
		}

		capsp = (*pci_getb_func)(bus, dev, func,
		    capsp + PCI_CAP_NEXT_PTR);
	}

	return (is_pciex);
}

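/*
 * Devices in the PCI network class have their DVMA space pre-mapped
 * (imd_use_premap), presumably to avoid per-bind mapping overhead for
 * high-rate DMA; all other classes map on demand.
 */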
static boolean_t
device_use_premap(uint_t classcode)
{
	if (IMMU_PCI_CLASS2BASE(classcode) == PCI_CLASS_NET)
		return (B_TRUE);
	return (B_FALSE);
}


/*
 * immu_dvma_get_immu()
 *	get the immu unit structure for a dev_info node
 */
immu_t *
immu_dvma_get_immu(dev_info_t *dip, immu_flags_t immu_flags)
{
	immu_devi_t *immu_devi;
	immu_t *immu;

	/*
	 * check if immu unit was already found earlier.
	 * If yes, then it will be stashed in immu_devi struct.
	 */
	immu_devi = immu_devi_get(dip);
	if (immu_devi == NULL) {
		if (immu_devi_set(dip, immu_flags) != DDI_SUCCESS) {
			/*
			 * May fail because of low memory. Return error rather
			 * than panic as we want the driver to retry later
			 */
			ddi_err(DER_PANIC, dip, "immu_dvma_get_immu: "
			    "No immu_devi structure");
			/*NOTREACHED*/
		}
		immu_devi = immu_devi_get(dip);
	}

	mutex_enter(&(DEVI(dip)->devi_lock));
	if (immu_devi->imd_immu) {
		immu = immu_devi->imd_immu;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (immu);
	}
	mutex_exit(&(DEVI(dip)->devi_lock));

	immu = immu_dmar_get_immu(dip);
	if (immu == NULL) {
		ddi_err(DER_PANIC, dip, "immu_dvma_get_immu: "
		    "Cannot find immu_t for device");
		/*NOTREACHED*/
	}

	/*
	 * Check if some other thread found immu
	 * while lock was not held
	 */
	immu_devi = immu_devi_get(dip);
	/* immu_devi should be present as we found it earlier */
	if (immu_devi == NULL) {
		ddi_err(DER_PANIC, dip,
		    "immu_dvma_get_immu: No immu_devi structure");
		/*NOTREACHED*/
	}

	mutex_enter(&(DEVI(dip)->devi_lock));
	if (immu_devi->imd_immu == NULL) {
		/* nobody else set it, so we should do it */
		immu_devi->imd_immu = immu;
		immu_devi_set_spclist(dip, immu);
	} else {
		/*
		 * if some other thread got immu before
		 * us, it should get the same results
		 */
		if (immu_devi->imd_immu != immu) {
			ddi_err(DER_PANIC, dip, "Multiple "
			    "immu units found for device. Expected (%p), "
			    "actual (%p)", (void *)immu,
			    (void *)immu_devi->imd_immu);
			mutex_exit(&(DEVI(dip)->devi_lock));
			/*NOTREACHED*/
		}
	}
	mutex_exit(&(DEVI(dip)->devi_lock));

	return (immu);
}


/* ############################# IMMU_DEVI code ############################ */

/*
 * Allocate an immu_devi structure and initialize it
 */
static immu_devi_t *
create_immu_devi(dev_info_t *rdip, int bus, int dev, int func,
    immu_flags_t immu_flags)
{
	uchar_t baseclass, subclass;
	uint_t classcode, revclass;
	immu_devi_t *immu_devi;
	boolean_t pciex = B_FALSE;
	int kmflags;
	boolean_t is_pcib = B_FALSE;

	/* bus == -1 indicates a non-PCI device (no BDF) */
	ASSERT(bus == -1 || bus >= 0);
	ASSERT(dev >= 0);
	ASSERT(func >= 0);

	kmflags = (immu_flags & IMMU_FLAGS_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
	immu_devi = kmem_zalloc(sizeof (immu_devi_t), kmflags);
	if (immu_devi == NULL) {
		ddi_err(DER_WARN, rdip, "Failed to allocate memory for "
		    "Intel IOMMU immu_devi structure");
		return (NULL);
	}
	immu_devi->imd_dip = rdip;
	immu_devi->imd_seg = 0; /* Currently seg can only be 0 */
	immu_devi->imd_bus = bus;
	immu_devi->imd_pcib_type = IMMU_PCIB_BAD;

	if (bus == -1) {
		immu_devi->imd_pcib_type = IMMU_PCIB_NOBDF;
		return (immu_devi);
	}

	immu_devi->imd_devfunc = IMMU_PCI_DEVFUNC(dev, func);
	immu_devi->imd_sec = 0;
	immu_devi->imd_sub = 0;

	revclass = pci_getl_func(bus, dev, func, PCI_CONF_REVID);

	classcode = IMMU_PCI_REV2CLASS(revclass);
	baseclass = IMMU_PCI_CLASS2BASE(classcode);
	subclass = IMMU_PCI_CLASS2SUB(classcode);

	if (baseclass == PCI_CLASS_BRIDGE && subclass == PCI_BRIDGE_PCI) {

		immu_devi->imd_sec = pci_getb_func(bus, dev, func,
		    PCI_BCNF_SECBUS);
		immu_devi->imd_sub = pci_getb_func(bus, dev, func,
		    PCI_BCNF_SUBBUS);

		pciex = device_is_pciex(bus, dev, func, &is_pcib);
		if (pciex == B_TRUE && is_pcib == B_TRUE) {
			immu_devi->imd_pcib_type = IMMU_PCIB_PCIE_PCI;
		} else if (pciex == B_TRUE) {
			immu_devi->imd_pcib_type = IMMU_PCIB_PCIE_PCIE;
		} else {
			immu_devi->imd_pcib_type = IMMU_PCIB_PCI_PCI;
		}
	} else {
		immu_devi->imd_pcib_type = IMMU_PCIB_ENDPOINT;
	}

	/* check for certain special devices */
	immu_devi->imd_display = device_is_display(classcode);
	immu_devi->imd_lpc = ((baseclass == PCI_CLASS_BRIDGE) &&
	    (subclass == PCI_BRIDGE_ISA)) ? B_TRUE : B_FALSE;
	immu_devi->imd_use_premap = device_use_premap(classcode);

	immu_devi->imd_domain = NULL;

	immu_devi->imd_dvma_flags = immu_global_dvma_flags;

	return (immu_devi);
}

static void
destroy_immu_devi(immu_devi_t *immu_devi)
{
	kmem_free(immu_devi, sizeof (immu_devi_t));
}

static domain_t *
immu_devi_domain(dev_info_t *rdip, dev_info_t **ddipp)
{
	immu_devi_t *immu_devi;
	domain_t *domain;
	dev_info_t *ddip;

	*ddipp = NULL;

	immu_devi = immu_devi_get(rdip);
	if (immu_devi == NULL) {
		return (NULL);
	}

	mutex_enter(&(DEVI(rdip)->devi_lock));
	domain = immu_devi->imd_domain;
	ddip = immu_devi->imd_ddip;
	mutex_exit(&(DEVI(rdip)->devi_lock));

	if (domain)
		*ddipp = ddip;

	return (domain);

}

/* ############################# END IMMU_DEVI code ######################## */
/* ############################# DOMAIN code ############################### */

/*
 * This routine always succeeds
 */
static int
did_alloc(immu_t *immu, dev_info_t *rdip,
    dev_info_t *ddip, immu_flags_t immu_flags)
{
	int did;

	did = (uintptr_t)vmem_alloc(immu->immu_did_arena, 1,
	    (immu_flags & IMMU_FLAGS_NOSLEEP) ? VM_NOSLEEP : VM_SLEEP);

	if (did == 0) {
		ddi_err(DER_WARN, rdip, "device domain-id alloc error"
		    " domain-device: %s%d. immu unit is %s. Using "
		    "unity domain with domain-id (%d)",
		    ddi_driver_name(ddip), ddi_get_instance(ddip),
		    immu->immu_name, immu->immu_unity_domain->dom_did);
		did = immu->immu_unity_domain->dom_did;
	}

	return (did);
}

static int
get_branch_domain(dev_info_t *pdip, void *arg)
{
	immu_devi_t *immu_devi;
	domain_t *domain;
	dev_info_t *ddip;
	immu_t *immu;
	dvma_arg_t *dvp = (dvma_arg_t *)arg;

	/*
	 * The field dvp->dva_rdip is a work-in-progress
	 * and gets updated as we walk up the ancestor
	 * tree. The final ddip is set only when we reach
	 * the top of the tree. So the dvp->dva_ddip field cannot
	 * be relied on until we reach the top of the tree.
	 */

	/* immu_devi may not be set. */
	immu_devi = immu_devi_get(pdip);
	if (immu_devi == NULL) {
		if (immu_devi_set(pdip, dvp->dva_flags) != DDI_SUCCESS) {
			dvp->dva_error = DDI_FAILURE;
			return (DDI_WALK_TERMINATE);
		}
	}

	immu_devi = immu_devi_get(pdip);
	immu = immu_devi->imd_immu;
	if (immu == NULL)
		immu = immu_dvma_get_immu(pdip, dvp->dva_flags);

	/*
	 * If we encounter a PCIE_PCIE bridge *ANCESTOR* we need to
	 * terminate the walk (since the device under the PCIE bridge
	 * is a PCIE device and has an independent entry in the
	 * root/context table)
	 */
	if (dvp->dva_rdip != pdip &&
	    immu_devi->imd_pcib_type == IMMU_PCIB_PCIE_PCIE) {
		return (DDI_WALK_TERMINATE);
	}

	/*
	 * In order to be a domain-dip, it must be a PCI device i.e.
	 * must have a valid BDF. This also eliminates the root complex.
	 */
	if (immu_devi->imd_pcib_type != IMMU_PCIB_BAD &&
	    immu_devi->imd_pcib_type != IMMU_PCIB_NOBDF) {
		ASSERT(immu_devi->imd_bus >= 0);
		ASSERT(immu_devi->imd_devfunc >= 0);
		dvp->dva_ddip = pdip;
	}

	if (immu_devi->imd_display == B_TRUE ||
	    (dvp->dva_flags & IMMU_FLAGS_UNITY)) {
		dvp->dva_domain = immu->immu_unity_domain;
		/* continue walking to find ddip */
		return (DDI_WALK_CONTINUE);
	}

	mutex_enter(&(DEVI(pdip)->devi_lock));
	domain = immu_devi->imd_domain;
	ddip = immu_devi->imd_ddip;
	mutex_exit(&(DEVI(pdip)->devi_lock));

	if (domain && ddip) {
		/* if domain is set, it must be the same */
		if (dvp->dva_domain) {
			ASSERT(domain == dvp->dva_domain);
		}
		dvp->dva_domain = domain;
		dvp->dva_ddip = ddip;
		return (DDI_WALK_TERMINATE);
	}

	/* Domain may already be set, continue walking so that ddip gets set */
	if (dvp->dva_domain) {
		return (DDI_WALK_CONTINUE);
	}

	/* domain is not set in either immu_devi or dvp */
	domain = bdf_domain_lookup(immu_devi);
	if (domain == NULL) {
		return (DDI_WALK_CONTINUE);
	}

	/* ok, the BDF hash had a domain for this BDF. */

	/* Grab lock again to check if something else set immu_devi fields */
	mutex_enter(&(DEVI(pdip)->devi_lock));
	if (immu_devi->imd_domain != NULL) {
		dvp->dva_domain = domain;
	} else {
		dvp->dva_domain = domain;
	}
	mutex_exit(&(DEVI(pdip)->devi_lock));

	/*
	 * walk upwards until the topmost PCI bridge is found
	 */
	return (DDI_WALK_CONTINUE);

}

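/*
 * Identity-map the unity (passthrough) domain: each range of installed
 * physical memory, plus the BIOS reserved ranges, is mapped so that
 * DVMA equals physical address for devices attached to this domain.
 */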
static void
map_unity_domain(domain_t *domain)
{
	struct memlist *mp;
	uint64_t start;
	uint64_t npages;
	immu_dcookie_t dcookies[1] = {0};
	int dcount = 0;

	/*
	 * UNITY arenas are a mirror of the physical memory
	 * installed on the system.
	 */

#ifdef BUGGY_DRIVERS
	/*
	 * Dont skip page0. Some broken HW/FW access it.
	 */
	dcookies[0].dck_paddr = 0;
	dcookies[0].dck_npages = 1;
	dcount = 1;
	(void) dvma_map(domain, 0, 1, dcookies, dcount, NULL,
	    IMMU_FLAGS_READ | IMMU_FLAGS_WRITE | IMMU_FLAGS_PAGE1);
#endif

	memlist_read_lock();

	mp = phys_install;

	if (mp->ml_address == 0) {
		/* since we already mapped page1 above */
		start = IMMU_PAGESIZE;
	} else {
		start = mp->ml_address;
	}
	npages = mp->ml_size/IMMU_PAGESIZE + 1;

	dcookies[0].dck_paddr = start;
	dcookies[0].dck_npages = npages;
	dcount = 1;
	(void) dvma_map(domain, start, npages, dcookies,
	    dcount, NULL, IMMU_FLAGS_READ | IMMU_FLAGS_WRITE);

	ddi_err(DER_LOG, domain->dom_dip, "iommu: mapping PHYS span [0x%" PRIx64
	    " - 0x%" PRIx64 "]", start, start + mp->ml_size);

	mp = mp->ml_next;
	while (mp) {
		ddi_err(DER_LOG, domain->dom_dip,
		    "iommu: mapping PHYS span [0x%" PRIx64 " - 0x%" PRIx64 "]",
		    mp->ml_address, mp->ml_address + mp->ml_size);

		start = mp->ml_address;
		npages = mp->ml_size/IMMU_PAGESIZE + 1;

		dcookies[0].dck_paddr = start;
		dcookies[0].dck_npages = npages;
		dcount = 1;
		(void) dvma_map(domain, start, npages,
		    dcookies, dcount, NULL, IMMU_FLAGS_READ | IMMU_FLAGS_WRITE);
		mp = mp->ml_next;
	}

	mp = bios_rsvd;
	while (mp) {
		ddi_err(DER_LOG, domain->dom_dip,
		    "iommu: mapping PHYS span [0x%" PRIx64 " - 0x%" PRIx64 "]",
		    mp->ml_address, mp->ml_address + mp->ml_size);

		start = mp->ml_address;
		npages = mp->ml_size/IMMU_PAGESIZE + 1;

		dcookies[0].dck_paddr = start;
		dcookies[0].dck_npages = npages;
		dcount = 1;
		(void) dvma_map(domain, start, npages,
		    dcookies, dcount, NULL, IMMU_FLAGS_READ | IMMU_FLAGS_WRITE);

		mp = mp->ml_next;
	}

	memlist_read_unlock();
}

101911600SVikram.Hegde@Sun.COM /*
102011600SVikram.Hegde@Sun.COM * create_xlate_arena()
102111600SVikram.Hegde@Sun.COM * Create the dvma arena for a domain with translation
102211600SVikram.Hegde@Sun.COM * mapping
102311600SVikram.Hegde@Sun.COM */
102411600SVikram.Hegde@Sun.COM static void
102511600SVikram.Hegde@Sun.COM create_xlate_arena(immu_t *immu, domain_t *domain,
102611600SVikram.Hegde@Sun.COM dev_info_t *rdip, immu_flags_t immu_flags)
102711600SVikram.Hegde@Sun.COM {
102811600SVikram.Hegde@Sun.COM char *arena_name;
102911600SVikram.Hegde@Sun.COM struct memlist *mp;
103011600SVikram.Hegde@Sun.COM int vmem_flags;
103111600SVikram.Hegde@Sun.COM uint64_t start;
103211600SVikram.Hegde@Sun.COM uint_t mgaw;
103311600SVikram.Hegde@Sun.COM uint64_t size;
103411600SVikram.Hegde@Sun.COM uint64_t maxaddr;
103511600SVikram.Hegde@Sun.COM void *vmem_ret;
103611600SVikram.Hegde@Sun.COM
103711600SVikram.Hegde@Sun.COM arena_name = domain->dom_dvma_arena_name;
103811600SVikram.Hegde@Sun.COM
103911600SVikram.Hegde@Sun.COM /* Note, don't do sizeof (arena_name) - it is just a pointer */
104011600SVikram.Hegde@Sun.COM (void) snprintf(arena_name,
104111600SVikram.Hegde@Sun.COM sizeof (domain->dom_dvma_arena_name),
104211600SVikram.Hegde@Sun.COM "%s-domain-%d-xlate-DVMA-arena", immu->immu_name,
104311600SVikram.Hegde@Sun.COM domain->dom_did);
104411600SVikram.Hegde@Sun.COM
104511600SVikram.Hegde@Sun.COM vmem_flags = (immu_flags & IMMU_FLAGS_NOSLEEP) ? VM_NOSLEEP : VM_SLEEP;
104611600SVikram.Hegde@Sun.COM
104711600SVikram.Hegde@Sun.COM /* Restrict mgaddr (max guest addr) to MGAW */
104811600SVikram.Hegde@Sun.COM mgaw = IMMU_CAP_MGAW(immu->immu_regs_cap);
104911600SVikram.Hegde@Sun.COM
105011600SVikram.Hegde@Sun.COM /*
105111600SVikram.Hegde@Sun.COM * To ensure we avoid ioapic and PCI MMIO ranges we just
105211600SVikram.Hegde@Sun.COM * use the physical memory address range of the system as the
105311600SVikram.Hegde@Sun.COM * range
105411600SVikram.Hegde@Sun.COM */
105511600SVikram.Hegde@Sun.COM maxaddr = ((uint64_t)1 << mgaw);
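	/*
	 * Illustrative example: a unit reporting an MGAW of 48 bits
	 * yields maxaddr = 1ULL << 48 (256TB); memlist spans that
	 * extend past this ceiling are clipped below when the arena
	 * is populated.
	 */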
105611600SVikram.Hegde@Sun.COM
105711658SVikram.Hegde@Sun.COM memlist_read_lock();
105811658SVikram.Hegde@Sun.COM
105911658SVikram.Hegde@Sun.COM mp = phys_install;
106011658SVikram.Hegde@Sun.COM
106111658SVikram.Hegde@Sun.COM if (mp->ml_address == 0)
106211600SVikram.Hegde@Sun.COM start = MMU_PAGESIZE;
106311658SVikram.Hegde@Sun.COM else
106411658SVikram.Hegde@Sun.COM start = mp->ml_address;
106511658SVikram.Hegde@Sun.COM
106611658SVikram.Hegde@Sun.COM if (start + mp->ml_size > maxaddr)
106711600SVikram.Hegde@Sun.COM size = maxaddr - start;
106811658SVikram.Hegde@Sun.COM else
106911658SVikram.Hegde@Sun.COM size = mp->ml_size;
107011658SVikram.Hegde@Sun.COM
107111658SVikram.Hegde@Sun.COM ddi_err(DER_VERB, rdip,
1072*13050Sfrank.van.der.linden@oracle.com "iommu: %s: Creating dvma vmem arena [0x%" PRIx64
107311658SVikram.Hegde@Sun.COM " - 0x%" PRIx64 "]", arena_name, start, start + size);
107411658SVikram.Hegde@Sun.COM
107511658SVikram.Hegde@Sun.COM /*
107611658SVikram.Hegde@Sun.COM * We always allocate in quanta of IMMU_PAGESIZE
107711658SVikram.Hegde@Sun.COM */
107811658SVikram.Hegde@Sun.COM domain->dom_dvma_arena = vmem_create(arena_name,
107911658SVikram.Hegde@Sun.COM (void *)(uintptr_t)start, /* start addr */
108011658SVikram.Hegde@Sun.COM size, /* size */
108111658SVikram.Hegde@Sun.COM IMMU_PAGESIZE, /* quantum */
108211658SVikram.Hegde@Sun.COM NULL, /* afunc */
108311658SVikram.Hegde@Sun.COM NULL, /* ffunc */
108411658SVikram.Hegde@Sun.COM NULL, /* source */
108511658SVikram.Hegde@Sun.COM 0, /* qcache_max */
108611658SVikram.Hegde@Sun.COM vmem_flags);
108711658SVikram.Hegde@Sun.COM
108811658SVikram.Hegde@Sun.COM if (domain->dom_dvma_arena == NULL) {
108911658SVikram.Hegde@Sun.COM ddi_err(DER_PANIC, rdip,
109011658SVikram.Hegde@Sun.COM "Failed to allocate DVMA arena(%s) "
109111658SVikram.Hegde@Sun.COM "for domain ID (%d)", arena_name, domain->dom_did);
109211658SVikram.Hegde@Sun.COM /*NOTREACHED*/
109311658SVikram.Hegde@Sun.COM }
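	/*
	 * Usage sketch (illustrative only; the real allocation happens
	 * in the DVMA bind path and may use vmem_xalloc() with explicit
	 * alignment constraints):
	 *
	 *	dvma = (uint64_t)(uintptr_t)vmem_alloc(
	 *	    domain->dom_dvma_arena, npages * IMMU_PAGESIZE,
	 *	    VM_NOSLEEP);
	 *
	 * Because IMMU_PAGESIZE is the arena quantum, DVMA space is
	 * always carved out in whole IOMMU pages.
	 */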
109411658SVikram.Hegde@Sun.COM
109511658SVikram.Hegde@Sun.COM mp = mp->ml_next;
109611658SVikram.Hegde@Sun.COM while (mp) {
109711600SVikram.Hegde@Sun.COM
109811600SVikram.Hegde@Sun.COM if (mp->ml_address == 0)
109911600SVikram.Hegde@Sun.COM start = MMU_PAGESIZE;
110011600SVikram.Hegde@Sun.COM else
110111600SVikram.Hegde@Sun.COM start = mp->ml_address;
110211600SVikram.Hegde@Sun.COM
110311600SVikram.Hegde@Sun.COM if (start + mp->ml_size > maxaddr)
110411600SVikram.Hegde@Sun.COM size = maxaddr - start;
110511600SVikram.Hegde@Sun.COM else
110611600SVikram.Hegde@Sun.COM size = mp->ml_size;
110711600SVikram.Hegde@Sun.COM
110811600SVikram.Hegde@Sun.COM ddi_err(DER_VERB, rdip,
1109*13050Sfrank.van.der.linden@oracle.com "iommu: %s: Adding dvma vmem span [0x%" PRIx64
111011658SVikram.Hegde@Sun.COM " - 0x%" PRIx64 "]", arena_name, start,
111111658SVikram.Hegde@Sun.COM start + size);
111211658SVikram.Hegde@Sun.COM
111311658SVikram.Hegde@Sun.COM vmem_ret = vmem_add(domain->dom_dvma_arena,
111411658SVikram.Hegde@Sun.COM (void *)(uintptr_t)start, size, vmem_flags);
111511658SVikram.Hegde@Sun.COM
111611658SVikram.Hegde@Sun.COM if (vmem_ret == NULL) {
111711600SVikram.Hegde@Sun.COM ddi_err(DER_PANIC, rdip,
111811600SVikram.Hegde@Sun.COM "Failed to allocate DVMA arena(%s) "
111911658SVikram.Hegde@Sun.COM "for domain ID (%d)",
112011658SVikram.Hegde@Sun.COM arena_name, domain->dom_did);
112111600SVikram.Hegde@Sun.COM /*NOTREACHED*/
112211600SVikram.Hegde@Sun.COM }
112311600SVikram.Hegde@Sun.COM mp = mp->ml_next;
112411600SVikram.Hegde@Sun.COM }
112511658SVikram.Hegde@Sun.COM memlist_read_unlock();
112611600SVikram.Hegde@Sun.COM }
112711600SVikram.Hegde@Sun.COM
112811600SVikram.Hegde@Sun.COM /* ################################### DOMAIN CODE ######################### */
112911600SVikram.Hegde@Sun.COM
113011600SVikram.Hegde@Sun.COM /*
113111600SVikram.Hegde@Sun.COM * Set the domain and domain-dip for a dip
113211600SVikram.Hegde@Sun.COM */
113311600SVikram.Hegde@Sun.COM static void
113411600SVikram.Hegde@Sun.COM set_domain(
113511600SVikram.Hegde@Sun.COM dev_info_t *dip,
113611600SVikram.Hegde@Sun.COM dev_info_t *ddip,
113711600SVikram.Hegde@Sun.COM domain_t *domain)
113811600SVikram.Hegde@Sun.COM {
113911600SVikram.Hegde@Sun.COM immu_devi_t *immu_devi;
114011600SVikram.Hegde@Sun.COM domain_t *fdomain;
114111600SVikram.Hegde@Sun.COM dev_info_t *fddip;
114211600SVikram.Hegde@Sun.COM
114311600SVikram.Hegde@Sun.COM immu_devi = immu_devi_get(dip);
114411600SVikram.Hegde@Sun.COM
114511600SVikram.Hegde@Sun.COM mutex_enter(&(DEVI(dip)->devi_lock));
114611600SVikram.Hegde@Sun.COM fddip = immu_devi->imd_ddip;
114711600SVikram.Hegde@Sun.COM fdomain = immu_devi->imd_domain;
114811600SVikram.Hegde@Sun.COM
114911600SVikram.Hegde@Sun.COM if (fddip) {
115011600SVikram.Hegde@Sun.COM ASSERT(fddip == ddip);
115111600SVikram.Hegde@Sun.COM } else {
115211600SVikram.Hegde@Sun.COM immu_devi->imd_ddip = ddip;
115311600SVikram.Hegde@Sun.COM }
115411600SVikram.Hegde@Sun.COM
115511600SVikram.Hegde@Sun.COM if (fdomain) {
115611600SVikram.Hegde@Sun.COM ASSERT(fdomain == domain);
115711600SVikram.Hegde@Sun.COM } else {
115811600SVikram.Hegde@Sun.COM immu_devi->imd_domain = domain;
115911600SVikram.Hegde@Sun.COM }
116011600SVikram.Hegde@Sun.COM mutex_exit(&(DEVI(dip)->devi_lock));
116111600SVikram.Hegde@Sun.COM }
116211600SVikram.Hegde@Sun.COM
116311600SVikram.Hegde@Sun.COM /*
116411600SVikram.Hegde@Sun.COM * device_domain()
116511600SVikram.Hegde@Sun.COM * Get domain for a device. The domain may be global in which case it
116611600SVikram.Hegde@Sun.COM * is shared between all IOMMU units. Due to potential AGAW differences
116711600SVikram.Hegde@Sun.COM * between IOMMU units, such global domains *have to be* UNITY mapping
116811600SVikram.Hegde@Sun.COM  * domains. Alternatively, the domain may be local to an IOMMU unit.
116911600SVikram.Hegde@Sun.COM  * Local domains may be shared or specific to a single immu_devi
117011600SVikram.Hegde@Sun.COM  * (device); the scope of sharing is restricted to devices
117111600SVikram.Hegde@Sun.COM  * controlled by the IOMMU unit to which the domain belongs. If
117211600SVikram.Hegde@Sun.COM  * shared, they (currently) have to be UNITY domains. If
117311600SVikram.Hegde@Sun.COM  * device-specific, a domain may be either a UNITY or a
117411600SVikram.Hegde@Sun.COM  * translation (XLATE) domain.
117511600SVikram.Hegde@Sun.COM */
117611600SVikram.Hegde@Sun.COM static domain_t *
117711600SVikram.Hegde@Sun.COM device_domain(dev_info_t *rdip, dev_info_t **ddipp, immu_flags_t immu_flags)
117811600SVikram.Hegde@Sun.COM {
117911600SVikram.Hegde@Sun.COM dev_info_t *ddip; /* topmost dip in domain i.e. domain owner */
118011600SVikram.Hegde@Sun.COM immu_t *immu;
118111600SVikram.Hegde@Sun.COM domain_t *domain;
118211600SVikram.Hegde@Sun.COM dvma_arg_t dvarg = {0};
118311600SVikram.Hegde@Sun.COM int level;
118411600SVikram.Hegde@Sun.COM
118511600SVikram.Hegde@Sun.COM *ddipp = NULL;
118611600SVikram.Hegde@Sun.COM
118711600SVikram.Hegde@Sun.COM /*
118811600SVikram.Hegde@Sun.COM * Check if the domain is already set. This is usually true
118911600SVikram.Hegde@Sun.COM * if this is not the first DVMA transaction.
119011600SVikram.Hegde@Sun.COM */
119111600SVikram.Hegde@Sun.COM ddip = NULL;
119211600SVikram.Hegde@Sun.COM domain = immu_devi_domain(rdip, &ddip);
119311600SVikram.Hegde@Sun.COM if (domain) {
119411600SVikram.Hegde@Sun.COM *ddipp = ddip;
119511600SVikram.Hegde@Sun.COM return (domain);
119611600SVikram.Hegde@Sun.COM }
119711600SVikram.Hegde@Sun.COM
119811600SVikram.Hegde@Sun.COM immu = immu_dvma_get_immu(rdip, immu_flags);
119911600SVikram.Hegde@Sun.COM if (immu == NULL) {
120011600SVikram.Hegde@Sun.COM /*
120111600SVikram.Hegde@Sun.COM * possible that there is no IOMMU unit for this device
120211600SVikram.Hegde@Sun.COM * - BIOS bugs are one example.
120311600SVikram.Hegde@Sun.COM */
1204*13050Sfrank.van.der.linden@oracle.com ddi_err(DER_WARN, rdip, "No iommu unit found for device");
120511600SVikram.Hegde@Sun.COM return (NULL);
120611600SVikram.Hegde@Sun.COM }
120711600SVikram.Hegde@Sun.COM
120812465Sfrank.van.der.linden@oracle.com immu_flags |= immu_devi_get(rdip)->imd_dvma_flags;
120912465Sfrank.van.der.linden@oracle.com
121011658SVikram.Hegde@Sun.COM dvarg.dva_rdip = rdip;
121111600SVikram.Hegde@Sun.COM dvarg.dva_ddip = NULL;
121211600SVikram.Hegde@Sun.COM dvarg.dva_domain = NULL;
121311600SVikram.Hegde@Sun.COM dvarg.dva_flags = immu_flags;
121411600SVikram.Hegde@Sun.COM level = 0;
121511658SVikram.Hegde@Sun.COM if (immu_walk_ancestor(rdip, NULL, get_branch_domain,
121611600SVikram.Hegde@Sun.COM &dvarg, &level, immu_flags) != DDI_SUCCESS) {
121711600SVikram.Hegde@Sun.COM /*
121811600SVikram.Hegde@Sun.COM * maybe low memory. return error,
121911600SVikram.Hegde@Sun.COM * so driver tries again later
122011600SVikram.Hegde@Sun.COM */
122111600SVikram.Hegde@Sun.COM return (NULL);
122211600SVikram.Hegde@Sun.COM }
122311600SVikram.Hegde@Sun.COM
122411600SVikram.Hegde@Sun.COM /* should have walked at least 1 dip (i.e. edip) */
122511600SVikram.Hegde@Sun.COM ASSERT(level > 0);
122611600SVikram.Hegde@Sun.COM
122711600SVikram.Hegde@Sun.COM ddip = dvarg.dva_ddip; /* must be present */
122811600SVikram.Hegde@Sun.COM domain = dvarg.dva_domain; /* may be NULL */
122911600SVikram.Hegde@Sun.COM
123011600SVikram.Hegde@Sun.COM /*
123111600SVikram.Hegde@Sun.COM * We may find the domain during our ancestor walk on any one of our
123211600SVikram.Hegde@Sun.COM  * ancestor dips. If the domain is found, then the domain-dip
123311600SVikram.Hegde@Sun.COM * (i.e. ddip) will also be found in the same immu_devi struct.
123411600SVikram.Hegde@Sun.COM * The domain-dip is the highest ancestor dip which shares the
123511600SVikram.Hegde@Sun.COM * same domain with edip.
123611600SVikram.Hegde@Sun.COM * The domain may or may not be found, but the domain dip must
123711600SVikram.Hegde@Sun.COM * be found.
123811600SVikram.Hegde@Sun.COM */
123911600SVikram.Hegde@Sun.COM if (ddip == NULL) {
124011658SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip, "Cannot find domain dip for device.");
124111600SVikram.Hegde@Sun.COM return (NULL);
124211600SVikram.Hegde@Sun.COM }
124311600SVikram.Hegde@Sun.COM
124411600SVikram.Hegde@Sun.COM /*
124511600SVikram.Hegde@Sun.COM * Did we find a domain ?
124611600SVikram.Hegde@Sun.COM */
124711600SVikram.Hegde@Sun.COM if (domain) {
124811600SVikram.Hegde@Sun.COM goto found;
124911600SVikram.Hegde@Sun.COM }
125011600SVikram.Hegde@Sun.COM
125111600SVikram.Hegde@Sun.COM /* nope, so allocate */
125211600SVikram.Hegde@Sun.COM domain = domain_create(immu, ddip, rdip, immu_flags);
125311600SVikram.Hegde@Sun.COM if (domain == NULL) {
125411600SVikram.Hegde@Sun.COM return (NULL);
125511600SVikram.Hegde@Sun.COM }
125611600SVikram.Hegde@Sun.COM
125711600SVikram.Hegde@Sun.COM /*FALLTHROUGH*/
125811600SVikram.Hegde@Sun.COM found:
125911600SVikram.Hegde@Sun.COM /*
126011600SVikram.Hegde@Sun.COM * We know *domain *is* the right domain, so panic if
126111600SVikram.Hegde@Sun.COM * another domain is set for either the request-dip or
126211600SVikram.Hegde@Sun.COM * effective dip.
126311600SVikram.Hegde@Sun.COM */
126411600SVikram.Hegde@Sun.COM set_domain(ddip, ddip, domain);
126511600SVikram.Hegde@Sun.COM set_domain(rdip, ddip, domain);
126611600SVikram.Hegde@Sun.COM
126711600SVikram.Hegde@Sun.COM *ddipp = ddip;
126811600SVikram.Hegde@Sun.COM return (domain);
126911600SVikram.Hegde@Sun.COM }
127011600SVikram.Hegde@Sun.COM
127111600SVikram.Hegde@Sun.COM static void
127211600SVikram.Hegde@Sun.COM create_unity_domain(immu_t *immu)
127311600SVikram.Hegde@Sun.COM {
127411600SVikram.Hegde@Sun.COM domain_t *domain;
127511600SVikram.Hegde@Sun.COM
127611600SVikram.Hegde@Sun.COM 	/* domain is created during boot, so always use the sleep flag */
127711600SVikram.Hegde@Sun.COM domain = kmem_zalloc(sizeof (domain_t), KM_SLEEP);
127811600SVikram.Hegde@Sun.COM
127911600SVikram.Hegde@Sun.COM rw_init(&(domain->dom_pgtable_rwlock), NULL, RW_DEFAULT, NULL);
128011600SVikram.Hegde@Sun.COM
128111600SVikram.Hegde@Sun.COM domain->dom_did = IMMU_UNITY_DID;
128211600SVikram.Hegde@Sun.COM domain->dom_maptype = IMMU_MAPTYPE_UNITY;
128311600SVikram.Hegde@Sun.COM
128411600SVikram.Hegde@Sun.COM domain->dom_immu = immu;
128511600SVikram.Hegde@Sun.COM immu->immu_unity_domain = domain;
128611600SVikram.Hegde@Sun.COM
128711600SVikram.Hegde@Sun.COM /*
128811600SVikram.Hegde@Sun.COM * Setup the domain's initial page table
128911600SVikram.Hegde@Sun.COM * should never fail.
129011600SVikram.Hegde@Sun.COM */
129111658SVikram.Hegde@Sun.COM domain->dom_pgtable_root = pgtable_alloc(immu, IMMU_FLAGS_SLEEP);
1292*13050Sfrank.van.der.linden@oracle.com pgtable_zero(domain->dom_pgtable_root);
129311600SVikram.Hegde@Sun.COM
129412990Sfrank.van.der.linden@oracle.com /*
129512990Sfrank.van.der.linden@oracle.com 	 * Only map all physical memory into the unity domain
129612990Sfrank.van.der.linden@oracle.com * if passthrough is not supported. If it is supported,
129712990Sfrank.van.der.linden@oracle.com * passthrough is set in the context entry instead.
129812990Sfrank.van.der.linden@oracle.com */
129912990Sfrank.van.der.linden@oracle.com if (!IMMU_ECAP_GET_PT(immu->immu_regs_excap))
130012990Sfrank.van.der.linden@oracle.com map_unity_domain(domain);
130112990Sfrank.van.der.linden@oracle.com
130211600SVikram.Hegde@Sun.COM
130311600SVikram.Hegde@Sun.COM /*
130411600SVikram.Hegde@Sun.COM * put it on the system-wide UNITY domain list
130511600SVikram.Hegde@Sun.COM */
130611600SVikram.Hegde@Sun.COM mutex_enter(&(immu_domain_lock));
130711600SVikram.Hegde@Sun.COM list_insert_tail(&immu_unity_domain_list, domain);
130811600SVikram.Hegde@Sun.COM mutex_exit(&(immu_domain_lock));
130911600SVikram.Hegde@Sun.COM }
131011600SVikram.Hegde@Sun.COM
131111600SVikram.Hegde@Sun.COM /*
131211600SVikram.Hegde@Sun.COM * ddip is the domain-dip - the topmost dip in a domain
131311600SVikram.Hegde@Sun.COM * rdip is the requesting-dip - the device which is
131411600SVikram.Hegde@Sun.COM * requesting DVMA setup
131511600SVikram.Hegde@Sun.COM * if domain is a non-shared domain rdip == ddip
131611600SVikram.Hegde@Sun.COM */
131711600SVikram.Hegde@Sun.COM static domain_t *
131811600SVikram.Hegde@Sun.COM domain_create(immu_t *immu, dev_info_t *ddip, dev_info_t *rdip,
131911600SVikram.Hegde@Sun.COM immu_flags_t immu_flags)
132011600SVikram.Hegde@Sun.COM {
132111600SVikram.Hegde@Sun.COM int kmflags;
132211600SVikram.Hegde@Sun.COM domain_t *domain;
132311600SVikram.Hegde@Sun.COM char mod_hash_name[128];
132411600SVikram.Hegde@Sun.COM immu_devi_t *immu_devi;
132511600SVikram.Hegde@Sun.COM int did;
1326*13050Sfrank.van.der.linden@oracle.com immu_dcookie_t dcookies[1] = {0};
132711658SVikram.Hegde@Sun.COM int dcount = 0;
132811600SVikram.Hegde@Sun.COM
132911600SVikram.Hegde@Sun.COM immu_devi = immu_devi_get(rdip);
133011600SVikram.Hegde@Sun.COM
133111600SVikram.Hegde@Sun.COM /*
133211600SVikram.Hegde@Sun.COM * First allocate a domainid.
133311600SVikram.Hegde@Sun.COM * This routine will never fail, since if we run out
133411600SVikram.Hegde@Sun.COM * of domains the unity domain will be allocated.
133511600SVikram.Hegde@Sun.COM */
133611600SVikram.Hegde@Sun.COM did = did_alloc(immu, rdip, ddip, immu_flags);
133711600SVikram.Hegde@Sun.COM if (did == IMMU_UNITY_DID) {
133811600SVikram.Hegde@Sun.COM /* domain overflow */
133911600SVikram.Hegde@Sun.COM ASSERT(immu->immu_unity_domain);
134011600SVikram.Hegde@Sun.COM return (immu->immu_unity_domain);
134111600SVikram.Hegde@Sun.COM }
134211600SVikram.Hegde@Sun.COM
134311600SVikram.Hegde@Sun.COM kmflags = (immu_flags & IMMU_FLAGS_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
134411600SVikram.Hegde@Sun.COM domain = kmem_zalloc(sizeof (domain_t), kmflags);
134511600SVikram.Hegde@Sun.COM if (domain == NULL) {
134611600SVikram.Hegde@Sun.COM ddi_err(DER_PANIC, rdip, "Failed to alloc DVMA domain "
134711600SVikram.Hegde@Sun.COM "structure for device. IOMMU unit: %s", immu->immu_name);
134811600SVikram.Hegde@Sun.COM /*NOTREACHED*/
134911600SVikram.Hegde@Sun.COM }
135011600SVikram.Hegde@Sun.COM
135111600SVikram.Hegde@Sun.COM rw_init(&(domain->dom_pgtable_rwlock), NULL, RW_DEFAULT, NULL);
135211600SVikram.Hegde@Sun.COM
135311600SVikram.Hegde@Sun.COM (void) snprintf(mod_hash_name, sizeof (mod_hash_name),
135411600SVikram.Hegde@Sun.COM "immu%s-domain%d-pava-hash", immu->immu_name, did);
135511600SVikram.Hegde@Sun.COM
135611600SVikram.Hegde@Sun.COM domain->dom_did = did;
135711600SVikram.Hegde@Sun.COM domain->dom_immu = immu;
135811600SVikram.Hegde@Sun.COM domain->dom_maptype = IMMU_MAPTYPE_XLATE;
1359*13050Sfrank.van.der.linden@oracle.com domain->dom_dip = ddip;
136011600SVikram.Hegde@Sun.COM
136111600SVikram.Hegde@Sun.COM /*
136211600SVikram.Hegde@Sun.COM * Create xlate DVMA arena for this domain.
136311600SVikram.Hegde@Sun.COM */
136411600SVikram.Hegde@Sun.COM create_xlate_arena(immu, domain, rdip, immu_flags);
136511600SVikram.Hegde@Sun.COM
136611600SVikram.Hegde@Sun.COM /*
136711600SVikram.Hegde@Sun.COM * Setup the domain's initial page table
136811600SVikram.Hegde@Sun.COM */
136911658SVikram.Hegde@Sun.COM domain->dom_pgtable_root = pgtable_alloc(immu, immu_flags);
137011600SVikram.Hegde@Sun.COM if (domain->dom_pgtable_root == NULL) {
137111600SVikram.Hegde@Sun.COM ddi_err(DER_PANIC, rdip, "Failed to alloc root "
137211600SVikram.Hegde@Sun.COM "pgtable for domain (%d). IOMMU unit: %s",
137311600SVikram.Hegde@Sun.COM domain->dom_did, immu->immu_name);
137411600SVikram.Hegde@Sun.COM /*NOTREACHED*/
137511600SVikram.Hegde@Sun.COM }
1376*13050Sfrank.van.der.linden@oracle.com pgtable_zero(domain->dom_pgtable_root);
137711600SVikram.Hegde@Sun.COM
137811600SVikram.Hegde@Sun.COM /*
137911600SVikram.Hegde@Sun.COM * Since this is a immu unit-specific domain, put it on
138011600SVikram.Hegde@Sun.COM * the per-immu domain list.
138111600SVikram.Hegde@Sun.COM */
138211600SVikram.Hegde@Sun.COM mutex_enter(&(immu->immu_lock));
138311600SVikram.Hegde@Sun.COM list_insert_head(&immu->immu_domain_list, domain);
138411600SVikram.Hegde@Sun.COM mutex_exit(&(immu->immu_lock));
138511600SVikram.Hegde@Sun.COM
138611600SVikram.Hegde@Sun.COM /*
138711600SVikram.Hegde@Sun.COM * Also put it on the system-wide xlate domain list
138811600SVikram.Hegde@Sun.COM */
138911600SVikram.Hegde@Sun.COM mutex_enter(&(immu_domain_lock));
139011600SVikram.Hegde@Sun.COM list_insert_head(&immu_xlate_domain_list, domain);
139111600SVikram.Hegde@Sun.COM mutex_exit(&(immu_domain_lock));
139211600SVikram.Hegde@Sun.COM
139311600SVikram.Hegde@Sun.COM bdf_domain_insert(immu_devi, domain);
139411600SVikram.Hegde@Sun.COM
139511600SVikram.Hegde@Sun.COM #ifdef BUGGY_DRIVERS
139611600SVikram.Hegde@Sun.COM /*
139711600SVikram.Hegde@Sun.COM * Map page0. Some broken HW/FW access it.
139811600SVikram.Hegde@Sun.COM */
139911658SVikram.Hegde@Sun.COM dcookies[0].dck_paddr = 0;
140011658SVikram.Hegde@Sun.COM dcookies[0].dck_npages = 1;
140111658SVikram.Hegde@Sun.COM dcount = 1;
1402*13050Sfrank.van.der.linden@oracle.com (void) dvma_map(domain, 0, 1, dcookies, dcount, NULL,
140311600SVikram.Hegde@Sun.COM IMMU_FLAGS_READ | IMMU_FLAGS_WRITE | IMMU_FLAGS_PAGE1);
140411600SVikram.Hegde@Sun.COM #endif
140511600SVikram.Hegde@Sun.COM return (domain);
140611600SVikram.Hegde@Sun.COM }
140711600SVikram.Hegde@Sun.COM
140811600SVikram.Hegde@Sun.COM /*
140911600SVikram.Hegde@Sun.COM * Create domainid arena.
141011600SVikram.Hegde@Sun.COM  * Domainid 0 is reserved by the VT-d spec and cannot be used by
141111600SVikram.Hegde@Sun.COM * system software.
141211600SVikram.Hegde@Sun.COM  * Domainid 1 is reserved by Solaris and used for *all* of the following:
141311600SVikram.Hegde@Sun.COM * as the "uninitialized" domain - For devices not yet controlled
141411600SVikram.Hegde@Sun.COM * by Solaris
141511600SVikram.Hegde@Sun.COM * as the "unity" domain - For devices that will always belong
141611600SVikram.Hegde@Sun.COM * to the unity domain
141711600SVikram.Hegde@Sun.COM * as the "overflow" domain - Used for any new device after we
141811600SVikram.Hegde@Sun.COM * run out of domains
141911600SVikram.Hegde@Sun.COM * All of the above domains map into a single domain with
142011600SVikram.Hegde@Sun.COM * domainid 1 and UNITY DVMA mapping
142111600SVikram.Hegde@Sun.COM  * Each IMMU unit has its own unity/uninit/overflow domain
142211600SVikram.Hegde@Sun.COM */
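/*
 * Illustrative example: on a unit that supports 256 domain-ids, the
 * arena below hands out ids starting at IMMU_UNITY_DID + 1 (i.e. 2);
 * ids 0 and 1 are never allocated to translation domains.
 */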
142311600SVikram.Hegde@Sun.COM static void
142411600SVikram.Hegde@Sun.COM did_init(immu_t *immu)
142511600SVikram.Hegde@Sun.COM {
142611600SVikram.Hegde@Sun.COM (void) snprintf(immu->immu_did_arena_name,
142711600SVikram.Hegde@Sun.COM sizeof (immu->immu_did_arena_name),
142811600SVikram.Hegde@Sun.COM "%s_domainid_arena", immu->immu_name);
142911600SVikram.Hegde@Sun.COM
1430*13050Sfrank.van.der.linden@oracle.com ddi_err(DER_VERB, immu->immu_dip, "creating domainid arena %s",
1431*13050Sfrank.van.der.linden@oracle.com immu->immu_did_arena_name);
143211600SVikram.Hegde@Sun.COM
143311600SVikram.Hegde@Sun.COM immu->immu_did_arena = vmem_create(
143411600SVikram.Hegde@Sun.COM immu->immu_did_arena_name,
143511600SVikram.Hegde@Sun.COM (void *)(uintptr_t)(IMMU_UNITY_DID + 1), /* start addr */
143611600SVikram.Hegde@Sun.COM immu->immu_max_domains - IMMU_UNITY_DID,
143711600SVikram.Hegde@Sun.COM 1, /* quantum */
143811600SVikram.Hegde@Sun.COM NULL, /* afunc */
143911600SVikram.Hegde@Sun.COM NULL, /* ffunc */
144011600SVikram.Hegde@Sun.COM NULL, /* source */
144111600SVikram.Hegde@Sun.COM 0, /* qcache_max */
144211600SVikram.Hegde@Sun.COM VM_SLEEP);
144311600SVikram.Hegde@Sun.COM
144411600SVikram.Hegde@Sun.COM /* Even with SLEEP flag, vmem_create() can fail */
144511600SVikram.Hegde@Sun.COM if (immu->immu_did_arena == NULL) {
144611600SVikram.Hegde@Sun.COM ddi_err(DER_PANIC, NULL, "%s: Failed to create Intel "
144711600SVikram.Hegde@Sun.COM "IOMMU domainid allocator: %s", immu->immu_name,
144811600SVikram.Hegde@Sun.COM immu->immu_did_arena_name);
144911600SVikram.Hegde@Sun.COM }
145011600SVikram.Hegde@Sun.COM }
145111600SVikram.Hegde@Sun.COM
145211600SVikram.Hegde@Sun.COM /* ######################### CONTEXT CODE ################################# */
145311600SVikram.Hegde@Sun.COM
145411600SVikram.Hegde@Sun.COM static void
145511600SVikram.Hegde@Sun.COM context_set(immu_t *immu, domain_t *domain, pgtable_t *root_table,
145611600SVikram.Hegde@Sun.COM int bus, int devfunc)
145711600SVikram.Hegde@Sun.COM {
145811600SVikram.Hegde@Sun.COM pgtable_t *context;
145911600SVikram.Hegde@Sun.COM pgtable_t *pgtable_root;
146011600SVikram.Hegde@Sun.COM hw_rce_t *hw_rent;
146111600SVikram.Hegde@Sun.COM hw_rce_t *hw_cent;
146211600SVikram.Hegde@Sun.COM hw_rce_t *ctxp;
146311658SVikram.Hegde@Sun.COM int sid;
146411658SVikram.Hegde@Sun.COM krw_t rwtype;
146511658SVikram.Hegde@Sun.COM boolean_t fill_root;
146611658SVikram.Hegde@Sun.COM boolean_t fill_ctx;
146711600SVikram.Hegde@Sun.COM
146811658SVikram.Hegde@Sun.COM pgtable_root = domain->dom_pgtable_root;
146911658SVikram.Hegde@Sun.COM
147011600SVikram.Hegde@Sun.COM ctxp = (hw_rce_t *)(root_table->swpg_next_array);
147111600SVikram.Hegde@Sun.COM context = *(pgtable_t **)(ctxp + bus);
147211600SVikram.Hegde@Sun.COM hw_rent = (hw_rce_t *)(root_table->hwpg_vaddr) + bus;
147311658SVikram.Hegde@Sun.COM
147411658SVikram.Hegde@Sun.COM fill_root = B_FALSE;
147511658SVikram.Hegde@Sun.COM fill_ctx = B_FALSE;
147611658SVikram.Hegde@Sun.COM
147711658SVikram.Hegde@Sun.COM /* Check the most common case first with reader lock */
147811658SVikram.Hegde@Sun.COM rw_enter(&(immu->immu_ctx_rwlock), RW_READER);
147911658SVikram.Hegde@Sun.COM rwtype = RW_READER;
148011658SVikram.Hegde@Sun.COM again:
148111600SVikram.Hegde@Sun.COM if (ROOT_GET_P(hw_rent)) {
148211658SVikram.Hegde@Sun.COM hw_cent = (hw_rce_t *)(context->hwpg_vaddr) + devfunc;
148311658SVikram.Hegde@Sun.COM if (CONT_GET_AVAIL(hw_cent) == IMMU_CONT_INITED) {
148411658SVikram.Hegde@Sun.COM rw_exit(&(immu->immu_ctx_rwlock));
148511658SVikram.Hegde@Sun.COM return;
148611658SVikram.Hegde@Sun.COM } else {
148711658SVikram.Hegde@Sun.COM fill_ctx = B_TRUE;
148811658SVikram.Hegde@Sun.COM }
148911600SVikram.Hegde@Sun.COM } else {
149011658SVikram.Hegde@Sun.COM fill_root = B_TRUE;
149111658SVikram.Hegde@Sun.COM fill_ctx = B_TRUE;
149211658SVikram.Hegde@Sun.COM }
149311658SVikram.Hegde@Sun.COM
149411658SVikram.Hegde@Sun.COM if (rwtype == RW_READER &&
149511658SVikram.Hegde@Sun.COM rw_tryupgrade(&(immu->immu_ctx_rwlock)) == 0) {
149611658SVikram.Hegde@Sun.COM rw_exit(&(immu->immu_ctx_rwlock));
149711658SVikram.Hegde@Sun.COM rw_enter(&(immu->immu_ctx_rwlock), RW_WRITER);
149811658SVikram.Hegde@Sun.COM rwtype = RW_WRITER;
149911658SVikram.Hegde@Sun.COM goto again;
150011658SVikram.Hegde@Sun.COM }
150111658SVikram.Hegde@Sun.COM rwtype = RW_WRITER;
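	/*
	 * At this point the lock is held as WRITER: either the
	 * tryupgrade above succeeded (no other writer intervened), or
	 * the lock was re-acquired as WRITER and the root/context
	 * entries were re-checked via the "again" loop, so the
	 * fill_root/fill_ctx decisions reflect the current state.
	 */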
150211658SVikram.Hegde@Sun.COM
150311658SVikram.Hegde@Sun.COM if (fill_root == B_TRUE) {
150411600SVikram.Hegde@Sun.COM ROOT_SET_CONT(hw_rent, context->hwpg_paddr);
150511600SVikram.Hegde@Sun.COM ROOT_SET_P(hw_rent);
150611600SVikram.Hegde@Sun.COM immu_regs_cpu_flush(immu, (caddr_t)hw_rent, sizeof (hw_rce_t));
150711600SVikram.Hegde@Sun.COM }
150811658SVikram.Hegde@Sun.COM
150911658SVikram.Hegde@Sun.COM if (fill_ctx == B_TRUE) {
151011658SVikram.Hegde@Sun.COM hw_cent = (hw_rce_t *)(context->hwpg_vaddr) + devfunc;
151111600SVikram.Hegde@Sun.COM /* need to disable context entry before reprogramming it */
151211600SVikram.Hegde@Sun.COM bzero(hw_cent, sizeof (hw_rce_t));
151311600SVikram.Hegde@Sun.COM
151411600SVikram.Hegde@Sun.COM /* flush caches */
151511600SVikram.Hegde@Sun.COM immu_regs_cpu_flush(immu, (caddr_t)hw_cent, sizeof (hw_rce_t));
151611658SVikram.Hegde@Sun.COM
151711658SVikram.Hegde@Sun.COM sid = ((bus << 8) | devfunc);
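		/*
		 * Illustrative example: a device at PCI bus 3, device
		 * 0x1c, function 2 has devfunc = (0x1c << 3) | 2 = 0xe2,
		 * giving sid = 0x03e2 -- the 16-bit source-id format
		 * (bus[15:8], dev[7:3], func[2:0]) used by the VT-d
		 * context-cache invalidation below.
		 */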
1518*13050Sfrank.van.der.linden@oracle.com immu_flush_context_fsi(immu, 0, sid, domain->dom_did,
1519*13050Sfrank.van.der.linden@oracle.com &immu->immu_ctx_inv_wait);
152011600SVikram.Hegde@Sun.COM
152111600SVikram.Hegde@Sun.COM CONT_SET_AVAIL(hw_cent, IMMU_CONT_INITED);
152211600SVikram.Hegde@Sun.COM CONT_SET_DID(hw_cent, domain->dom_did);
152311600SVikram.Hegde@Sun.COM CONT_SET_AW(hw_cent, immu->immu_dvma_agaw);
152411600SVikram.Hegde@Sun.COM CONT_SET_ASR(hw_cent, pgtable_root->hwpg_paddr);
152512990Sfrank.van.der.linden@oracle.com if (domain->dom_did == IMMU_UNITY_DID &&
152612990Sfrank.van.der.linden@oracle.com IMMU_ECAP_GET_PT(immu->immu_regs_excap))
152712990Sfrank.van.der.linden@oracle.com CONT_SET_TTYPE(hw_cent, TTYPE_PASSTHRU);
152812990Sfrank.van.der.linden@oracle.com else
152912990Sfrank.van.der.linden@oracle.com /*LINTED*/
153012990Sfrank.van.der.linden@oracle.com CONT_SET_TTYPE(hw_cent, TTYPE_XLATE_ONLY);
153111600SVikram.Hegde@Sun.COM CONT_SET_P(hw_cent);
1532*13050Sfrank.van.der.linden@oracle.com if (IMMU_ECAP_GET_CH(immu->immu_regs_excap)) {
1533*13050Sfrank.van.der.linden@oracle.com CONT_SET_EH(hw_cent);
1534*13050Sfrank.van.der.linden@oracle.com if (immu_use_alh)
1535*13050Sfrank.van.der.linden@oracle.com CONT_SET_ALH(hw_cent);
1536*13050Sfrank.van.der.linden@oracle.com }
153711600SVikram.Hegde@Sun.COM immu_regs_cpu_flush(immu, (caddr_t)hw_cent, sizeof (hw_rce_t));
153811600SVikram.Hegde@Sun.COM }
153911658SVikram.Hegde@Sun.COM rw_exit(&(immu->immu_ctx_rwlock));
154011600SVikram.Hegde@Sun.COM }
154111600SVikram.Hegde@Sun.COM
154211600SVikram.Hegde@Sun.COM static pgtable_t *
154311600SVikram.Hegde@Sun.COM context_create(immu_t *immu)
154411600SVikram.Hegde@Sun.COM {
154511600SVikram.Hegde@Sun.COM int bus;
154611600SVikram.Hegde@Sun.COM int devfunc;
154711600SVikram.Hegde@Sun.COM pgtable_t *root_table;
154811600SVikram.Hegde@Sun.COM pgtable_t *context;
154911600SVikram.Hegde@Sun.COM pgtable_t *pgtable_root;
155011600SVikram.Hegde@Sun.COM hw_rce_t *ctxp;
155111600SVikram.Hegde@Sun.COM hw_rce_t *hw_rent;
155211600SVikram.Hegde@Sun.COM hw_rce_t *hw_cent;
155311600SVikram.Hegde@Sun.COM
155411600SVikram.Hegde@Sun.COM 	/* Allocate a zeroed root table (one 4K page: 256 128-bit entries) */
155511658SVikram.Hegde@Sun.COM root_table = pgtable_alloc(immu, IMMU_FLAGS_SLEEP);
1556*13050Sfrank.van.der.linden@oracle.com pgtable_zero(root_table);
155711600SVikram.Hegde@Sun.COM
155811600SVikram.Hegde@Sun.COM /*
155911600SVikram.Hegde@Sun.COM * Setup context tables for all possible root table entries.
156011600SVikram.Hegde@Sun.COM * Start out with unity domains for all entries.
156111600SVikram.Hegde@Sun.COM */
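	/*
	 * Layout sketch: the root table has IMMU_ROOT_NUM (256, one per
	 * PCI bus) entries, each pointing at a context table with
	 * IMMU_CONT_NUM (256, one per dev/func) entries, so every
	 * possible source-id has a context entry from the start.
	 */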
156211600SVikram.Hegde@Sun.COM ctxp = (hw_rce_t *)(root_table->swpg_next_array);
156311600SVikram.Hegde@Sun.COM hw_rent = (hw_rce_t *)(root_table->hwpg_vaddr);
156411600SVikram.Hegde@Sun.COM for (bus = 0; bus < IMMU_ROOT_NUM; bus++, ctxp++, hw_rent++) {
156511658SVikram.Hegde@Sun.COM context = pgtable_alloc(immu, IMMU_FLAGS_SLEEP);
1566*13050Sfrank.van.der.linden@oracle.com pgtable_zero(context);
156711600SVikram.Hegde@Sun.COM ROOT_SET_P(hw_rent);
156811600SVikram.Hegde@Sun.COM ROOT_SET_CONT(hw_rent, context->hwpg_paddr);
156911600SVikram.Hegde@Sun.COM hw_cent = (hw_rce_t *)(context->hwpg_vaddr);
157011600SVikram.Hegde@Sun.COM for (devfunc = 0; devfunc < IMMU_CONT_NUM;
157111600SVikram.Hegde@Sun.COM devfunc++, hw_cent++) {
157211600SVikram.Hegde@Sun.COM pgtable_root =
157311600SVikram.Hegde@Sun.COM immu->immu_unity_domain->dom_pgtable_root;
157411600SVikram.Hegde@Sun.COM CONT_SET_DID(hw_cent,
157511600SVikram.Hegde@Sun.COM immu->immu_unity_domain->dom_did);
157611600SVikram.Hegde@Sun.COM CONT_SET_AW(hw_cent, immu->immu_dvma_agaw);
157711600SVikram.Hegde@Sun.COM CONT_SET_ASR(hw_cent, pgtable_root->hwpg_paddr);
157812990Sfrank.van.der.linden@oracle.com if (IMMU_ECAP_GET_PT(immu->immu_regs_excap))
157912990Sfrank.van.der.linden@oracle.com CONT_SET_TTYPE(hw_cent, TTYPE_PASSTHRU);
158012990Sfrank.van.der.linden@oracle.com else
158112990Sfrank.van.der.linden@oracle.com /*LINTED*/
158212990Sfrank.van.der.linden@oracle.com CONT_SET_TTYPE(hw_cent, TTYPE_XLATE_ONLY);
158311600SVikram.Hegde@Sun.COM CONT_SET_AVAIL(hw_cent, IMMU_CONT_UNINITED);
158411600SVikram.Hegde@Sun.COM CONT_SET_P(hw_cent);
158511600SVikram.Hegde@Sun.COM }
158611600SVikram.Hegde@Sun.COM immu_regs_cpu_flush(immu, context->hwpg_vaddr, IMMU_PAGESIZE);
158711600SVikram.Hegde@Sun.COM *((pgtable_t **)ctxp) = context;
158811600SVikram.Hegde@Sun.COM }
158911600SVikram.Hegde@Sun.COM
159011600SVikram.Hegde@Sun.COM return (root_table);
159111600SVikram.Hegde@Sun.COM }
159211600SVikram.Hegde@Sun.COM
159311600SVikram.Hegde@Sun.COM /*
159411600SVikram.Hegde@Sun.COM * Called during rootnex attach, so no locks needed
159511600SVikram.Hegde@Sun.COM */
159611600SVikram.Hegde@Sun.COM static void
159711600SVikram.Hegde@Sun.COM context_init(immu_t *immu)
159811600SVikram.Hegde@Sun.COM {
159911600SVikram.Hegde@Sun.COM rw_init(&(immu->immu_ctx_rwlock), NULL, RW_DEFAULT, NULL);
160011600SVikram.Hegde@Sun.COM
1601*13050Sfrank.van.der.linden@oracle.com immu_init_inv_wait(&immu->immu_ctx_inv_wait, "ctxglobal", B_TRUE);
1602*13050Sfrank.van.der.linden@oracle.com
160311600SVikram.Hegde@Sun.COM immu_regs_wbf_flush(immu);
160411600SVikram.Hegde@Sun.COM
160511600SVikram.Hegde@Sun.COM immu->immu_ctx_root = context_create(immu);
160611600SVikram.Hegde@Sun.COM
160711600SVikram.Hegde@Sun.COM immu_regs_set_root_table(immu);
160811600SVikram.Hegde@Sun.COM
160911600SVikram.Hegde@Sun.COM rw_enter(&(immu->immu_ctx_rwlock), RW_WRITER);
1610*13050Sfrank.van.der.linden@oracle.com immu_flush_context_gbl(immu, &immu->immu_ctx_inv_wait);
1611*13050Sfrank.van.der.linden@oracle.com immu_flush_iotlb_gbl(immu, &immu->immu_ctx_inv_wait);
161211600SVikram.Hegde@Sun.COM rw_exit(&(immu->immu_ctx_rwlock));
161311600SVikram.Hegde@Sun.COM }
161411600SVikram.Hegde@Sun.COM
161511600SVikram.Hegde@Sun.COM
161611600SVikram.Hegde@Sun.COM /*
161711600SVikram.Hegde@Sun.COM * Find top pcib
161811600SVikram.Hegde@Sun.COM */
161911600SVikram.Hegde@Sun.COM static int
162011600SVikram.Hegde@Sun.COM find_top_pcib(dev_info_t *dip, void *arg)
162111600SVikram.Hegde@Sun.COM {
162211600SVikram.Hegde@Sun.COM immu_devi_t *immu_devi;
162311600SVikram.Hegde@Sun.COM dev_info_t **pcibdipp = (dev_info_t **)arg;
162411600SVikram.Hegde@Sun.COM
162511600SVikram.Hegde@Sun.COM immu_devi = immu_devi_get(dip);
162611600SVikram.Hegde@Sun.COM
162711600SVikram.Hegde@Sun.COM if (immu_devi->imd_pcib_type == IMMU_PCIB_PCI_PCI) {
162811600SVikram.Hegde@Sun.COM *pcibdipp = dip;
162911600SVikram.Hegde@Sun.COM }
163011600SVikram.Hegde@Sun.COM
163111600SVikram.Hegde@Sun.COM return (DDI_WALK_CONTINUE);
163211600SVikram.Hegde@Sun.COM }
163311600SVikram.Hegde@Sun.COM
163411600SVikram.Hegde@Sun.COM static int
163511600SVikram.Hegde@Sun.COM immu_context_update(immu_t *immu, domain_t *domain, dev_info_t *ddip,
163611600SVikram.Hegde@Sun.COM dev_info_t *rdip, immu_flags_t immu_flags)
163711600SVikram.Hegde@Sun.COM {
163811600SVikram.Hegde@Sun.COM immu_devi_t *r_immu_devi;
163911600SVikram.Hegde@Sun.COM immu_devi_t *d_immu_devi;
164011600SVikram.Hegde@Sun.COM int r_bus;
164111600SVikram.Hegde@Sun.COM int d_bus;
164211600SVikram.Hegde@Sun.COM int r_devfunc;
164311600SVikram.Hegde@Sun.COM int d_devfunc;
164411600SVikram.Hegde@Sun.COM immu_pcib_t d_pcib_type;
164511600SVikram.Hegde@Sun.COM dev_info_t *pcibdip;
164611600SVikram.Hegde@Sun.COM
164711600SVikram.Hegde@Sun.COM if (ddip == NULL || rdip == NULL ||
164811600SVikram.Hegde@Sun.COM ddip == root_devinfo || rdip == root_devinfo) {
164911600SVikram.Hegde@Sun.COM 		ddi_err(DER_MODE, rdip, "immu_context_update: domain-dip or "
165011600SVikram.Hegde@Sun.COM 		    "request-dip is NULL or is the root devinfo");
165111600SVikram.Hegde@Sun.COM return (DDI_FAILURE);
165211600SVikram.Hegde@Sun.COM }
165311600SVikram.Hegde@Sun.COM
165411600SVikram.Hegde@Sun.COM /*
165511600SVikram.Hegde@Sun.COM * We need to set the context fields
165611600SVikram.Hegde@Sun.COM * based on what type of device rdip and ddip are.
165711600SVikram.Hegde@Sun.COM * To do that we need the immu_devi field.
165811600SVikram.Hegde@Sun.COM * Set the immu_devi field (if not already set)
165911600SVikram.Hegde@Sun.COM */
166011600SVikram.Hegde@Sun.COM if (immu_devi_set(ddip, immu_flags) == DDI_FAILURE) {
166111600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip,
166211600SVikram.Hegde@Sun.COM "immu_context_update: failed to set immu_devi for ddip");
166311600SVikram.Hegde@Sun.COM return (DDI_FAILURE);
166411600SVikram.Hegde@Sun.COM }
166511600SVikram.Hegde@Sun.COM
166611600SVikram.Hegde@Sun.COM if (immu_devi_set(rdip, immu_flags) == DDI_FAILURE) {
166711600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip,
166811600SVikram.Hegde@Sun.COM "immu_context_update: failed to set immu_devi for rdip");
166911600SVikram.Hegde@Sun.COM return (DDI_FAILURE);
167011600SVikram.Hegde@Sun.COM }
167111600SVikram.Hegde@Sun.COM
167211600SVikram.Hegde@Sun.COM d_immu_devi = immu_devi_get(ddip);
167311600SVikram.Hegde@Sun.COM r_immu_devi = immu_devi_get(rdip);
167411600SVikram.Hegde@Sun.COM
167511600SVikram.Hegde@Sun.COM d_bus = d_immu_devi->imd_bus;
167611600SVikram.Hegde@Sun.COM d_devfunc = d_immu_devi->imd_devfunc;
167711600SVikram.Hegde@Sun.COM d_pcib_type = d_immu_devi->imd_pcib_type;
167811600SVikram.Hegde@Sun.COM r_bus = r_immu_devi->imd_bus;
167911600SVikram.Hegde@Sun.COM r_devfunc = r_immu_devi->imd_devfunc;
168011600SVikram.Hegde@Sun.COM
168111600SVikram.Hegde@Sun.COM if (rdip == ddip) {
168211600SVikram.Hegde@Sun.COM /* rdip is a PCIE device. set context for it only */
168311600SVikram.Hegde@Sun.COM context_set(immu, domain, immu->immu_ctx_root, r_bus,
168411600SVikram.Hegde@Sun.COM r_devfunc);
168511600SVikram.Hegde@Sun.COM #ifdef BUGGY_DRIVERS
168611600SVikram.Hegde@Sun.COM } else if (r_immu_devi == d_immu_devi) {
168711600SVikram.Hegde@Sun.COM #ifdef TEST
168811600SVikram.Hegde@Sun.COM ddi_err(DER_WARN, rdip, "Driver bug: Devices 0x%lx and "
168911600SVikram.Hegde@Sun.COM "0x%lx are identical", rdip, ddip);
169011600SVikram.Hegde@Sun.COM #endif
169111600SVikram.Hegde@Sun.COM /* rdip is a PCIE device. set context for it only */
169211600SVikram.Hegde@Sun.COM context_set(immu, domain, immu->immu_ctx_root, r_bus,
169311600SVikram.Hegde@Sun.COM r_devfunc);
169411600SVikram.Hegde@Sun.COM #endif
169511600SVikram.Hegde@Sun.COM } else if (d_pcib_type == IMMU_PCIB_PCIE_PCI) {
169611600SVikram.Hegde@Sun.COM /*
169711600SVikram.Hegde@Sun.COM * ddip is a PCIE_PCI bridge. Set context for ddip's
169811600SVikram.Hegde@Sun.COM * secondary bus. If rdip is on ddip's secondary
169911600SVikram.Hegde@Sun.COM * bus, set context for rdip. Else, set context
170011600SVikram.Hegde@Sun.COM * for rdip's PCI bridge on ddip's secondary bus.
170111600SVikram.Hegde@Sun.COM */
170211600SVikram.Hegde@Sun.COM context_set(immu, domain, immu->immu_ctx_root,
170311600SVikram.Hegde@Sun.COM d_immu_devi->imd_sec, 0);
170411600SVikram.Hegde@Sun.COM if (d_immu_devi->imd_sec == r_bus) {
170511600SVikram.Hegde@Sun.COM context_set(immu, domain, immu->immu_ctx_root,
170611600SVikram.Hegde@Sun.COM r_bus, r_devfunc);
170711600SVikram.Hegde@Sun.COM } else {
170811600SVikram.Hegde@Sun.COM pcibdip = NULL;
170911600SVikram.Hegde@Sun.COM if (immu_walk_ancestor(rdip, ddip, find_top_pcib,
171011600SVikram.Hegde@Sun.COM &pcibdip, NULL, immu_flags) == DDI_SUCCESS &&
171111600SVikram.Hegde@Sun.COM pcibdip != NULL) {
171211600SVikram.Hegde@Sun.COM r_immu_devi = immu_devi_get(pcibdip);
171311600SVikram.Hegde@Sun.COM r_bus = r_immu_devi->imd_bus;
171411600SVikram.Hegde@Sun.COM r_devfunc = r_immu_devi->imd_devfunc;
171511600SVikram.Hegde@Sun.COM context_set(immu, domain, immu->immu_ctx_root,
171611600SVikram.Hegde@Sun.COM r_bus, r_devfunc);
171711600SVikram.Hegde@Sun.COM } else {
171811600SVikram.Hegde@Sun.COM ddi_err(DER_PANIC, rdip, "Failed to find PCI "
171911600SVikram.Hegde@Sun.COM 			    "bridge for PCI device");
172011600SVikram.Hegde@Sun.COM /*NOTREACHED*/
172111600SVikram.Hegde@Sun.COM }
172211600SVikram.Hegde@Sun.COM }
172311600SVikram.Hegde@Sun.COM } else if (d_pcib_type == IMMU_PCIB_PCI_PCI) {
172411600SVikram.Hegde@Sun.COM context_set(immu, domain, immu->immu_ctx_root, d_bus,
172511600SVikram.Hegde@Sun.COM d_devfunc);
172611600SVikram.Hegde@Sun.COM } else if (d_pcib_type == IMMU_PCIB_ENDPOINT) {
172711600SVikram.Hegde@Sun.COM /*
172811600SVikram.Hegde@Sun.COM * ddip is a PCIE device which has a non-PCI device under it
172911600SVikram.Hegde@Sun.COM * i.e. it is a PCI-nonPCI bridge. Example: pciicde-ata
173011600SVikram.Hegde@Sun.COM */
173111600SVikram.Hegde@Sun.COM context_set(immu, domain, immu->immu_ctx_root, d_bus,
173211600SVikram.Hegde@Sun.COM d_devfunc);
173311600SVikram.Hegde@Sun.COM } else {
173411600SVikram.Hegde@Sun.COM ddi_err(DER_PANIC, rdip, "unknown device type. Cannot "
1735*13050Sfrank.van.der.linden@oracle.com "set iommu context.");
173611600SVikram.Hegde@Sun.COM /*NOTREACHED*/
173711600SVikram.Hegde@Sun.COM }
173811600SVikram.Hegde@Sun.COM
173911600SVikram.Hegde@Sun.COM /* XXX do we need a membar_producer() here */
174011600SVikram.Hegde@Sun.COM return (DDI_SUCCESS);
174111600SVikram.Hegde@Sun.COM }
174211600SVikram.Hegde@Sun.COM
174311600SVikram.Hegde@Sun.COM /* ##################### END CONTEXT CODE ################################## */
174411600SVikram.Hegde@Sun.COM /* ##################### MAPPING CODE ################################## */
174511600SVikram.Hegde@Sun.COM
174611600SVikram.Hegde@Sun.COM
1747*13050Sfrank.van.der.linden@oracle.com #ifdef DEBUG
174811600SVikram.Hegde@Sun.COM static boolean_t
174911600SVikram.Hegde@Sun.COM PDTE_check(immu_t *immu, hw_pdte_t pdte, pgtable_t *next, paddr_t paddr,
175011600SVikram.Hegde@Sun.COM dev_info_t *rdip, immu_flags_t immu_flags)
175111600SVikram.Hegde@Sun.COM {
175211600SVikram.Hegde@Sun.COM /* The PDTE must be set i.e. present bit is set */
175311600SVikram.Hegde@Sun.COM if (!PDTE_P(pdte)) {
175411600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip, "No present flag");
175511600SVikram.Hegde@Sun.COM return (B_FALSE);
175611600SVikram.Hegde@Sun.COM }
175711600SVikram.Hegde@Sun.COM
175811600SVikram.Hegde@Sun.COM /*
175911600SVikram.Hegde@Sun.COM * Just assert to check most significant system software field
176011600SVikram.Hegde@Sun.COM 	 * (PDTE_SW4) as it is the same as the present bit and we
176111600SVikram.Hegde@Sun.COM * checked that above
176211600SVikram.Hegde@Sun.COM */
176311600SVikram.Hegde@Sun.COM ASSERT(PDTE_SW4(pdte));
176411600SVikram.Hegde@Sun.COM
176511600SVikram.Hegde@Sun.COM /*
176611600SVikram.Hegde@Sun.COM * TM field should be clear if not reserved.
176711600SVikram.Hegde@Sun.COM * non-leaf is always reserved
176811600SVikram.Hegde@Sun.COM */
176911658SVikram.Hegde@Sun.COM if (next == NULL && immu->immu_TM_reserved == B_FALSE) {
177011600SVikram.Hegde@Sun.COM if (PDTE_TM(pdte)) {
177111600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip, "TM flag set");
177211600SVikram.Hegde@Sun.COM return (B_FALSE);
177311600SVikram.Hegde@Sun.COM }
177411600SVikram.Hegde@Sun.COM }
177511600SVikram.Hegde@Sun.COM
177611600SVikram.Hegde@Sun.COM /*
177711600SVikram.Hegde@Sun.COM * The SW3 field is not used and must be clear
177811600SVikram.Hegde@Sun.COM */
177911600SVikram.Hegde@Sun.COM if (PDTE_SW3(pdte)) {
178011600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip, "SW3 set");
178111600SVikram.Hegde@Sun.COM return (B_FALSE);
178211600SVikram.Hegde@Sun.COM }
178311600SVikram.Hegde@Sun.COM
178411600SVikram.Hegde@Sun.COM /*
178511600SVikram.Hegde@Sun.COM * PFN (for PTE) or next level pgtable-paddr (for PDE) must be set
178611600SVikram.Hegde@Sun.COM */
178711600SVikram.Hegde@Sun.COM if (next == NULL) {
178811600SVikram.Hegde@Sun.COM ASSERT(paddr % IMMU_PAGESIZE == 0);
178911600SVikram.Hegde@Sun.COM if (PDTE_PADDR(pdte) != paddr) {
179011600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip,
179111600SVikram.Hegde@Sun.COM "PTE paddr mismatch: %lx != %lx",
179211600SVikram.Hegde@Sun.COM PDTE_PADDR(pdte), paddr);
179311600SVikram.Hegde@Sun.COM return (B_FALSE);
179411600SVikram.Hegde@Sun.COM }
179511600SVikram.Hegde@Sun.COM } else {
179611600SVikram.Hegde@Sun.COM if (PDTE_PADDR(pdte) != next->hwpg_paddr) {
179711600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip,
179811600SVikram.Hegde@Sun.COM "PDE paddr mismatch: %lx != %lx",
179911600SVikram.Hegde@Sun.COM PDTE_PADDR(pdte), next->hwpg_paddr);
180011600SVikram.Hegde@Sun.COM return (B_FALSE);
180111600SVikram.Hegde@Sun.COM }
180211600SVikram.Hegde@Sun.COM }
180311600SVikram.Hegde@Sun.COM
180411600SVikram.Hegde@Sun.COM /*
180511600SVikram.Hegde@Sun.COM * SNP field should be clear if not reserved.
180611600SVikram.Hegde@Sun.COM * non-leaf is always reserved
180711600SVikram.Hegde@Sun.COM */
180811658SVikram.Hegde@Sun.COM if (next == NULL && immu->immu_SNP_reserved == B_FALSE) {
180911600SVikram.Hegde@Sun.COM if (PDTE_SNP(pdte)) {
181011600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip, "SNP set");
181111600SVikram.Hegde@Sun.COM return (B_FALSE);
181211600SVikram.Hegde@Sun.COM }
181311600SVikram.Hegde@Sun.COM }
181411600SVikram.Hegde@Sun.COM
181511600SVikram.Hegde@Sun.COM /* second field available for system software should be clear */
181611600SVikram.Hegde@Sun.COM if (PDTE_SW2(pdte)) {
181711600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip, "SW2 set");
181811600SVikram.Hegde@Sun.COM return (B_FALSE);
181911600SVikram.Hegde@Sun.COM }
182011600SVikram.Hegde@Sun.COM
182111600SVikram.Hegde@Sun.COM /* Super pages field should be clear */
182211600SVikram.Hegde@Sun.COM if (PDTE_SP(pdte)) {
182311600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip, "SP set");
182411600SVikram.Hegde@Sun.COM return (B_FALSE);
182511600SVikram.Hegde@Sun.COM }
182611600SVikram.Hegde@Sun.COM
182711600SVikram.Hegde@Sun.COM /*
182811600SVikram.Hegde@Sun.COM * least significant field available for
182911600SVikram.Hegde@Sun.COM * system software should be clear
183011600SVikram.Hegde@Sun.COM */
183111600SVikram.Hegde@Sun.COM if (PDTE_SW1(pdte)) {
183211600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip, "SW1 set");
183311600SVikram.Hegde@Sun.COM return (B_FALSE);
183411600SVikram.Hegde@Sun.COM }
183511600SVikram.Hegde@Sun.COM
183611600SVikram.Hegde@Sun.COM if ((immu_flags & IMMU_FLAGS_READ) && !PDTE_READ(pdte)) {
183711600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip, "READ not set");
183811600SVikram.Hegde@Sun.COM return (B_FALSE);
183911600SVikram.Hegde@Sun.COM }
184011600SVikram.Hegde@Sun.COM
184111600SVikram.Hegde@Sun.COM if ((immu_flags & IMMU_FLAGS_WRITE) && !PDTE_WRITE(pdte)) {
184211600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip, "WRITE not set");
184311600SVikram.Hegde@Sun.COM return (B_FALSE);
184411600SVikram.Hegde@Sun.COM }
184511600SVikram.Hegde@Sun.COM
184611600SVikram.Hegde@Sun.COM return (B_TRUE);
184711600SVikram.Hegde@Sun.COM }
1848*13050Sfrank.van.der.linden@oracle.com #endif
1849*13050Sfrank.van.der.linden@oracle.com
185011600SVikram.Hegde@Sun.COM /*ARGSUSED*/
185111600SVikram.Hegde@Sun.COM static void
185211658SVikram.Hegde@Sun.COM PTE_clear_all(immu_t *immu, domain_t *domain, xlate_t *xlate,
185311658SVikram.Hegde@Sun.COM uint64_t *dvma_ptr, uint64_t *npages_ptr, dev_info_t *rdip)
185411600SVikram.Hegde@Sun.COM {
185511658SVikram.Hegde@Sun.COM uint64_t npages;
185611658SVikram.Hegde@Sun.COM uint64_t dvma;
185711658SVikram.Hegde@Sun.COM pgtable_t *pgtable;
185811600SVikram.Hegde@Sun.COM hw_pdte_t *hwp;
185911658SVikram.Hegde@Sun.COM hw_pdte_t *shwp;
186011600SVikram.Hegde@Sun.COM int idx;
186111600SVikram.Hegde@Sun.COM
186211658SVikram.Hegde@Sun.COM pgtable = xlate->xlt_pgtable;
186311600SVikram.Hegde@Sun.COM idx = xlate->xlt_idx;
186411658SVikram.Hegde@Sun.COM
186511658SVikram.Hegde@Sun.COM dvma = *dvma_ptr;
186611658SVikram.Hegde@Sun.COM npages = *npages_ptr;
186711658SVikram.Hegde@Sun.COM
186811600SVikram.Hegde@Sun.COM /*
186911658SVikram.Hegde@Sun.COM * since a caller gets a unique dvma for a physical address,
187011658SVikram.Hegde@Sun.COM * no other concurrent thread will be writing to the same
187111658SVikram.Hegde@Sun.COM * PTE even if it has the same paddr. So no locks needed.
187211600SVikram.Hegde@Sun.COM */
187311658SVikram.Hegde@Sun.COM shwp = (hw_pdte_t *)(pgtable->hwpg_vaddr) + idx;
187411658SVikram.Hegde@Sun.COM
187511658SVikram.Hegde@Sun.COM hwp = shwp;
187611658SVikram.Hegde@Sun.COM for (; npages > 0 && idx <= IMMU_PGTABLE_MAXIDX; idx++, hwp++) {
1877*13050Sfrank.van.der.linden@oracle.com PDTE_CLEAR_P(*hwp);
187811658SVikram.Hegde@Sun.COM dvma += IMMU_PAGESIZE;
187911658SVikram.Hegde@Sun.COM npages--;
188011658SVikram.Hegde@Sun.COM }
188111658SVikram.Hegde@Sun.COM
188211658SVikram.Hegde@Sun.COM *dvma_ptr = dvma;
188311658SVikram.Hegde@Sun.COM *npages_ptr = npages;
188411658SVikram.Hegde@Sun.COM
188511658SVikram.Hegde@Sun.COM xlate->xlt_idx = idx;
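	/*
	 * dvma/npages are handed back updated: if npages is still
	 * non-zero the walk ran off the end of this leaf page table
	 * (idx > IMMU_PGTABLE_MAXIDX) and the caller is expected to
	 * look up the next leaf pgtable and continue clearing there.
	 */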
188611600SVikram.Hegde@Sun.COM }
188711600SVikram.Hegde@Sun.COM
188811600SVikram.Hegde@Sun.COM static void
1889*13050Sfrank.van.der.linden@oracle.com xlate_setup(uint64_t dvma, xlate_t *xlate, int nlevels)
189011600SVikram.Hegde@Sun.COM {
189111600SVikram.Hegde@Sun.COM int level;
189211600SVikram.Hegde@Sun.COM uint64_t offbits;
189311600SVikram.Hegde@Sun.COM
189411600SVikram.Hegde@Sun.COM /*
189511600SVikram.Hegde@Sun.COM * Skip the first 12 bits which is the offset into
189611600SVikram.Hegde@Sun.COM * 4K PFN (phys page frame based on IMMU_PAGESIZE)
189711600SVikram.Hegde@Sun.COM */
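	/*
	 * Illustrative example, assuming the usual VT-d 9-bit stride
	 * per level (512-entry pgtables) and a 3-level (39-bit AGAW)
	 * domain: dvma 0x345678000 gives offbits = 0x345678, so
	 *	level 1 (leaf PTE)	idx = 0x345678 & 0x1ff		= 0x78
	 *	level 2			idx = (0x345678 >> 9) & 0x1ff	= 0x2b
	 *	level 3 (root)		idx = (0x345678 >> 18) & 0x1ff	= 0xd
	 */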
189811600SVikram.Hegde@Sun.COM offbits = dvma >> IMMU_PAGESHIFT;
189911600SVikram.Hegde@Sun.COM
190011600SVikram.Hegde@Sun.COM /* skip to level 1 i.e. leaf PTE */
190111600SVikram.Hegde@Sun.COM for (level = 1, xlate++; level <= nlevels; level++, xlate++) {
190211600SVikram.Hegde@Sun.COM xlate->xlt_level = level;
190311600SVikram.Hegde@Sun.COM xlate->xlt_idx = (offbits & IMMU_PGTABLE_LEVEL_MASK);
190411600SVikram.Hegde@Sun.COM ASSERT(xlate->xlt_idx <= IMMU_PGTABLE_MAXIDX);
190511600SVikram.Hegde@Sun.COM xlate->xlt_pgtable = NULL;
190611600SVikram.Hegde@Sun.COM offbits >>= IMMU_PGTABLE_LEVEL_STRIDE;
190711600SVikram.Hegde@Sun.COM }
190811600SVikram.Hegde@Sun.COM }
190911600SVikram.Hegde@Sun.COM
191011600SVikram.Hegde@Sun.COM /*
191111600SVikram.Hegde@Sun.COM * Read the pgtables
191211600SVikram.Hegde@Sun.COM */
1913*13050Sfrank.van.der.linden@oracle.com static boolean_t
1914*13050Sfrank.van.der.linden@oracle.com PDE_lookup(domain_t *domain, xlate_t *xlate, int nlevels)
191511600SVikram.Hegde@Sun.COM {
191611600SVikram.Hegde@Sun.COM pgtable_t *pgtable;
191711600SVikram.Hegde@Sun.COM pgtable_t *next;
191811600SVikram.Hegde@Sun.COM uint_t idx;
191911600SVikram.Hegde@Sun.COM
192011600SVikram.Hegde@Sun.COM /* start with highest level pgtable i.e. root */
192111600SVikram.Hegde@Sun.COM xlate += nlevels;
192211600SVikram.Hegde@Sun.COM
192311600SVikram.Hegde@Sun.COM if (xlate->xlt_pgtable == NULL) {
192411600SVikram.Hegde@Sun.COM xlate->xlt_pgtable = domain->dom_pgtable_root;
192511600SVikram.Hegde@Sun.COM }
192611600SVikram.Hegde@Sun.COM
192711600SVikram.Hegde@Sun.COM for (; xlate->xlt_level > 1; xlate--) {
192811600SVikram.Hegde@Sun.COM idx = xlate->xlt_idx;
192911600SVikram.Hegde@Sun.COM pgtable = xlate->xlt_pgtable;
193011600SVikram.Hegde@Sun.COM
193111600SVikram.Hegde@Sun.COM if ((xlate - 1)->xlt_pgtable) {
193211600SVikram.Hegde@Sun.COM continue;
193311600SVikram.Hegde@Sun.COM }
193411600SVikram.Hegde@Sun.COM
193511600SVikram.Hegde@Sun.COM /* Lock the pgtable in read mode */
193611600SVikram.Hegde@Sun.COM rw_enter(&(pgtable->swpg_rwlock), RW_READER);
193711600SVikram.Hegde@Sun.COM
193811600SVikram.Hegde@Sun.COM /*
193911600SVikram.Hegde@Sun.COM * since we are unmapping, the pgtable should
194011600SVikram.Hegde@Sun.COM * already point to a leafier pgtable.
194111600SVikram.Hegde@Sun.COM */
194211600SVikram.Hegde@Sun.COM next = *(pgtable->swpg_next_array + idx);
194311600SVikram.Hegde@Sun.COM (xlate - 1)->xlt_pgtable = next;
194411600SVikram.Hegde@Sun.COM rw_exit(&(pgtable->swpg_rwlock));
1945*13050Sfrank.van.der.linden@oracle.com if (next == NULL)
1946*13050Sfrank.van.der.linden@oracle.com return (B_FALSE);
194711600SVikram.Hegde@Sun.COM }
1948*13050Sfrank.van.der.linden@oracle.com
1949*13050Sfrank.van.der.linden@oracle.com return (B_TRUE);
1950*13050Sfrank.van.der.linden@oracle.com }
1951*13050Sfrank.van.der.linden@oracle.com
1952*13050Sfrank.van.der.linden@oracle.com static void
1953*13050Sfrank.van.der.linden@oracle.com immu_fault_walk(void *arg, void *base, size_t len)
1954*13050Sfrank.van.der.linden@oracle.com {
1955*13050Sfrank.van.der.linden@oracle.com uint64_t dvma, start;
1956*13050Sfrank.van.der.linden@oracle.com
1957*13050Sfrank.van.der.linden@oracle.com dvma = *(uint64_t *)arg;
1958*13050Sfrank.van.der.linden@oracle.com start = (uint64_t)(uintptr_t)base;
1959*13050Sfrank.van.der.linden@oracle.com
1960*13050Sfrank.van.der.linden@oracle.com if (dvma >= start && dvma < (start + len)) {
1961*13050Sfrank.van.der.linden@oracle.com ddi_err(DER_WARN, NULL,
1962*13050Sfrank.van.der.linden@oracle.com "faulting DVMA address is in vmem arena "
1963*13050Sfrank.van.der.linden@oracle.com "(%" PRIx64 "-%" PRIx64 ")",
1964*13050Sfrank.van.der.linden@oracle.com start, start + len);
1965*13050Sfrank.van.der.linden@oracle.com *(uint64_t *)arg = ~0ULL;
1966*13050Sfrank.van.der.linden@oracle.com }
1967*13050Sfrank.van.der.linden@oracle.com }
1968*13050Sfrank.van.der.linden@oracle.com
1969*13050Sfrank.van.der.linden@oracle.com void
1970*13050Sfrank.van.der.linden@oracle.com immu_print_fault_info(uint_t sid, uint64_t dvma)
1971*13050Sfrank.van.der.linden@oracle.com {
1972*13050Sfrank.van.der.linden@oracle.com int nlevels;
1973*13050Sfrank.van.der.linden@oracle.com xlate_t xlate[IMMU_PGTABLE_MAX_LEVELS + 1] = {0};
1974*13050Sfrank.van.der.linden@oracle.com xlate_t *xlatep;
1975*13050Sfrank.van.der.linden@oracle.com hw_pdte_t pte;
1976*13050Sfrank.van.der.linden@oracle.com domain_t *domain;
1977*13050Sfrank.van.der.linden@oracle.com immu_t *immu;
1978*13050Sfrank.van.der.linden@oracle.com uint64_t dvma_arg;
1979*13050Sfrank.van.der.linden@oracle.com
1980*13050Sfrank.van.der.linden@oracle.com if (mod_hash_find(bdf_domain_hash,
1981*13050Sfrank.van.der.linden@oracle.com (void *)(uintptr_t)sid, (void *)&domain) != 0) {
1982*13050Sfrank.van.der.linden@oracle.com ddi_err(DER_WARN, NULL,
1983*13050Sfrank.van.der.linden@oracle.com "no domain for faulting SID %08x", sid);
1984*13050Sfrank.van.der.linden@oracle.com return;
1985*13050Sfrank.van.der.linden@oracle.com }
1986*13050Sfrank.van.der.linden@oracle.com
1987*13050Sfrank.van.der.linden@oracle.com immu = domain->dom_immu;
1988*13050Sfrank.van.der.linden@oracle.com
1989*13050Sfrank.van.der.linden@oracle.com dvma_arg = dvma;
1990*13050Sfrank.van.der.linden@oracle.com vmem_walk(domain->dom_dvma_arena, VMEM_ALLOC, immu_fault_walk,
1991*13050Sfrank.van.der.linden@oracle.com (void *)&dvma_arg);
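	/*
	 * immu_fault_walk() sets dvma_arg to ~0ULL if it finds an
	 * allocated segment covering the faulting address, so an
	 * unchanged value here means the DVMA was never handed out
	 * from this domain's arena.
	 */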
1992*13050Sfrank.van.der.linden@oracle.com if (dvma_arg != ~0ULL)
1993*13050Sfrank.van.der.linden@oracle.com ddi_err(DER_WARN, domain->dom_dip,
1994*13050Sfrank.van.der.linden@oracle.com "faulting DVMA address is not in vmem arena");
1995*13050Sfrank.van.der.linden@oracle.com
1996*13050Sfrank.van.der.linden@oracle.com nlevels = immu->immu_dvma_nlevels;
1997*13050Sfrank.van.der.linden@oracle.com xlate_setup(dvma, xlate, nlevels);
1998*13050Sfrank.van.der.linden@oracle.com
1999*13050Sfrank.van.der.linden@oracle.com if (!PDE_lookup(domain, xlate, nlevels)) {
2000*13050Sfrank.van.der.linden@oracle.com ddi_err(DER_WARN, domain->dom_dip,
2001*13050Sfrank.van.der.linden@oracle.com "pte not found in domid %d for faulting addr %" PRIx64,
2002*13050Sfrank.van.der.linden@oracle.com domain->dom_did, dvma);
2003*13050Sfrank.van.der.linden@oracle.com return;
2004*13050Sfrank.van.der.linden@oracle.com }
2005*13050Sfrank.van.der.linden@oracle.com
2006*13050Sfrank.van.der.linden@oracle.com xlatep = &xlate[1];
2007*13050Sfrank.van.der.linden@oracle.com pte = *((hw_pdte_t *)
2008*13050Sfrank.van.der.linden@oracle.com (xlatep->xlt_pgtable->hwpg_vaddr) + xlatep->xlt_idx);
2009*13050Sfrank.van.der.linden@oracle.com
2010*13050Sfrank.van.der.linden@oracle.com ddi_err(DER_WARN, domain->dom_dip,
2011*13050Sfrank.van.der.linden@oracle.com "domid %d pte: %" PRIx64 "(paddr %" PRIx64 ")", domain->dom_did,
2012*13050Sfrank.van.der.linden@oracle.com (unsigned long long)pte, (unsigned long long)PDTE_PADDR(pte));
201311600SVikram.Hegde@Sun.COM }
201411600SVikram.Hegde@Sun.COM
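/*
 * Write a single leaf PTE. In non-DEBUG kernels the PTE is simply
 * composed from the per-IOMMU PTE mask and the page-aligned paddr.
 * In DEBUG kernels the existing entry is validated first, the
 * reserved TM/SNP fields and the software fields are cleared, and
 * then the paddr and the software "present" bit are set. Read/write
 * permission comes from immu_flags, or is forced on when
 * BUGGY_DRIVERS is defined.
 */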
201511658SVikram.Hegde@Sun.COM /*ARGSUSED*/
201611600SVikram.Hegde@Sun.COM static void
201711600SVikram.Hegde@Sun.COM PTE_set_one(immu_t *immu, hw_pdte_t *hwp, paddr_t paddr,
201811600SVikram.Hegde@Sun.COM dev_info_t *rdip, immu_flags_t immu_flags)
201911600SVikram.Hegde@Sun.COM {
202011600SVikram.Hegde@Sun.COM hw_pdte_t pte;
202111600SVikram.Hegde@Sun.COM
2022*13050Sfrank.van.der.linden@oracle.com #ifndef DEBUG
2023*13050Sfrank.van.der.linden@oracle.com pte = immu->immu_ptemask;
2024*13050Sfrank.van.der.linden@oracle.com PDTE_SET_PADDR(pte, paddr);
2025*13050Sfrank.van.der.linden@oracle.com #else
202611600SVikram.Hegde@Sun.COM pte = *hwp;
202711600SVikram.Hegde@Sun.COM
202811600SVikram.Hegde@Sun.COM if (PDTE_P(pte)) {
202911600SVikram.Hegde@Sun.COM if (PDTE_PADDR(pte) != paddr) {
203011600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip, "PTE paddr %lx != paddr %lx",
203111600SVikram.Hegde@Sun.COM PDTE_PADDR(pte), paddr);
203211600SVikram.Hegde@Sun.COM }
203311658SVikram.Hegde@Sun.COM #ifdef BUGGY_DRIVERS
203411658SVikram.Hegde@Sun.COM return;
203511658SVikram.Hegde@Sun.COM #else
203611600SVikram.Hegde@Sun.COM goto out;
203711658SVikram.Hegde@Sun.COM #endif
203811600SVikram.Hegde@Sun.COM }
203911600SVikram.Hegde@Sun.COM
204011600SVikram.Hegde@Sun.COM /* clear TM field if not reserved */
204111658SVikram.Hegde@Sun.COM if (immu->immu_TM_reserved == B_FALSE) {
204211600SVikram.Hegde@Sun.COM PDTE_CLEAR_TM(pte);
204311600SVikram.Hegde@Sun.COM }
204411600SVikram.Hegde@Sun.COM
204511600SVikram.Hegde@Sun.COM /* Clear 3rd field for system software - not used */
204611600SVikram.Hegde@Sun.COM PDTE_CLEAR_SW3(pte);
204711600SVikram.Hegde@Sun.COM
204811600SVikram.Hegde@Sun.COM /* Set paddr */
204911600SVikram.Hegde@Sun.COM ASSERT(paddr % IMMU_PAGESIZE == 0);
205011600SVikram.Hegde@Sun.COM PDTE_CLEAR_PADDR(pte);
205111600SVikram.Hegde@Sun.COM PDTE_SET_PADDR(pte, paddr);
205211600SVikram.Hegde@Sun.COM
205311600SVikram.Hegde@Sun.COM /* clear SNP field if not reserved. */
205411658SVikram.Hegde@Sun.COM if (immu->immu_SNP_reserved == B_FALSE) {
205511600SVikram.Hegde@Sun.COM PDTE_CLEAR_SNP(pte);
205611600SVikram.Hegde@Sun.COM }
205711600SVikram.Hegde@Sun.COM
205811600SVikram.Hegde@Sun.COM /* Clear SW2 field available for software */
205911600SVikram.Hegde@Sun.COM PDTE_CLEAR_SW2(pte);
2060*13050Sfrank.van.der.linden@oracle.com
2061*13050Sfrank.van.der.linden@oracle.com
206211600SVikram.Hegde@Sun.COM /* SP is don't care for PTEs. Clear it for cleanliness */
206311600SVikram.Hegde@Sun.COM PDTE_CLEAR_SP(pte);
2064*13050Sfrank.van.der.linden@oracle.com
206511600SVikram.Hegde@Sun.COM /* Clear SW1 field available for software */
206611600SVikram.Hegde@Sun.COM PDTE_CLEAR_SW1(pte);
206711600SVikram.Hegde@Sun.COM
206811600SVikram.Hegde@Sun.COM /*
206911600SVikram.Hegde@Sun.COM * Now that we are done writing the PTE
207011600SVikram.Hegde@Sun.COM * set the "present" flag. Note this present
207111600SVikram.Hegde@Sun.COM * flag is a bit in the PDE/PTE that the
207211600SVikram.Hegde@Sun.COM * spec says is available for system software.
207311600SVikram.Hegde@Sun.COM * This is an implementation detail of Solaris
207411600SVikram.Hegde@Sun.COM * bare-metal Intel IOMMU.
207511600SVikram.Hegde@Sun.COM * The present field in a PDE/PTE is not defined
207611600SVikram.Hegde@Sun.COM * by the Vt-d spec
207711600SVikram.Hegde@Sun.COM */
207811600SVikram.Hegde@Sun.COM
207911600SVikram.Hegde@Sun.COM PDTE_SET_P(pte);
208011600SVikram.Hegde@Sun.COM
2081*13050Sfrank.van.der.linden@oracle.com pte |= immu->immu_ptemask;
2082*13050Sfrank.van.der.linden@oracle.com
208311600SVikram.Hegde@Sun.COM out:
2084*13050Sfrank.van.der.linden@oracle.com #endif /* DEBUG */
208511658SVikram.Hegde@Sun.COM #ifdef BUGGY_DRIVERS
208611658SVikram.Hegde@Sun.COM PDTE_SET_READ(pte);
208711658SVikram.Hegde@Sun.COM PDTE_SET_WRITE(pte);
208811658SVikram.Hegde@Sun.COM #else
208911600SVikram.Hegde@Sun.COM if (immu_flags & IMMU_FLAGS_READ)
209011600SVikram.Hegde@Sun.COM PDTE_SET_READ(pte);
209111600SVikram.Hegde@Sun.COM if (immu_flags & IMMU_FLAGS_WRITE)
209211600SVikram.Hegde@Sun.COM PDTE_SET_WRITE(pte);
2093*13050Sfrank.van.der.linden@oracle.com #endif /* BUGGY_DRIVERS */
209411600SVikram.Hegde@Sun.COM
209511600SVikram.Hegde@Sun.COM *hwp = pte;
209611600SVikram.Hegde@Sun.COM }
209711600SVikram.Hegde@Sun.COM
209811600SVikram.Hegde@Sun.COM /*ARGSUSED*/
209911600SVikram.Hegde@Sun.COM static void
210011600SVikram.Hegde@Sun.COM PTE_set_all(immu_t *immu, domain_t *domain, xlate_t *xlate,
2101*13050Sfrank.van.der.linden@oracle.com uint64_t *dvma_ptr, uint64_t *nvpages_ptr, immu_dcookie_t *dcookies,
210211658SVikram.Hegde@Sun.COM int dcount, dev_info_t *rdip, immu_flags_t immu_flags)
210311600SVikram.Hegde@Sun.COM {
210411600SVikram.Hegde@Sun.COM paddr_t paddr;
210511658SVikram.Hegde@Sun.COM uint64_t nvpages;
210611658SVikram.Hegde@Sun.COM uint64_t nppages;
210711600SVikram.Hegde@Sun.COM uint64_t dvma;
210811600SVikram.Hegde@Sun.COM pgtable_t *pgtable;
210911600SVikram.Hegde@Sun.COM hw_pdte_t *hwp;
211011600SVikram.Hegde@Sun.COM hw_pdte_t *shwp;
2111*13050Sfrank.van.der.linden@oracle.com int idx, nset;
211211658SVikram.Hegde@Sun.COM int j;
211311600SVikram.Hegde@Sun.COM
211411600SVikram.Hegde@Sun.COM pgtable = xlate->xlt_pgtable;
211511600SVikram.Hegde@Sun.COM idx = xlate->xlt_idx;
211611600SVikram.Hegde@Sun.COM
211711600SVikram.Hegde@Sun.COM dvma = *dvma_ptr;
211811658SVikram.Hegde@Sun.COM nvpages = *nvpages_ptr;
211911658SVikram.Hegde@Sun.COM
212011600SVikram.Hegde@Sun.COM /*
212111658SVikram.Hegde@Sun.COM * since a caller gets a unique dvma for a physical address,
212211658SVikram.Hegde@Sun.COM * no other concurrent thread will be writing to the same
212311658SVikram.Hegde@Sun.COM * PTE even if it has the same paddr. So no locks needed.
212411600SVikram.Hegde@Sun.COM */
212511600SVikram.Hegde@Sun.COM shwp = (hw_pdte_t *)(pgtable->hwpg_vaddr) + idx;
212611600SVikram.Hegde@Sun.COM
212711600SVikram.Hegde@Sun.COM hwp = shwp;
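	/*
	 * Walk the cookie array backwards to find the cookie that
	 * contains the first of the remaining pages, and compute the
	 * physical address to resume at. This lets mapping continue
	 * in the middle of a cookie when the previous leaf pgtable
	 * filled up.
	 */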
212811658SVikram.Hegde@Sun.COM for (j = dcount - 1; j >= 0; j--) {
212911658SVikram.Hegde@Sun.COM if (nvpages <= dcookies[j].dck_npages)
213011658SVikram.Hegde@Sun.COM break;
213111658SVikram.Hegde@Sun.COM nvpages -= dcookies[j].dck_npages;
213211658SVikram.Hegde@Sun.COM }
213311658SVikram.Hegde@Sun.COM
213411658SVikram.Hegde@Sun.COM nppages = nvpages;
213511658SVikram.Hegde@Sun.COM paddr = dcookies[j].dck_paddr +
213611658SVikram.Hegde@Sun.COM (dcookies[j].dck_npages - nppages) * IMMU_PAGESIZE;
213711658SVikram.Hegde@Sun.COM
213811658SVikram.Hegde@Sun.COM nvpages = *nvpages_ptr;
2139*13050Sfrank.van.der.linden@oracle.com nset = 0;
214011658SVikram.Hegde@Sun.COM for (; nvpages > 0 && idx <= IMMU_PGTABLE_MAXIDX; idx++, hwp++) {
214111600SVikram.Hegde@Sun.COM PTE_set_one(immu, hwp, paddr, rdip, immu_flags);
2142*13050Sfrank.van.der.linden@oracle.com nset++;
214311600SVikram.Hegde@Sun.COM
214411600SVikram.Hegde@Sun.COM ASSERT(PDTE_check(immu, *hwp, NULL, paddr, rdip, immu_flags)
214511600SVikram.Hegde@Sun.COM == B_TRUE);
214611658SVikram.Hegde@Sun.COM nppages--;
214711658SVikram.Hegde@Sun.COM nvpages--;
214811600SVikram.Hegde@Sun.COM paddr += IMMU_PAGESIZE;
214911600SVikram.Hegde@Sun.COM dvma += IMMU_PAGESIZE;
215011658SVikram.Hegde@Sun.COM
215111658SVikram.Hegde@Sun.COM if (nppages == 0) {
215211658SVikram.Hegde@Sun.COM j++;
215311658SVikram.Hegde@Sun.COM }
215411658SVikram.Hegde@Sun.COM
2155*13050Sfrank.van.der.linden@oracle.com if (j == dcount)
215611658SVikram.Hegde@Sun.COM break;
2157*13050Sfrank.van.der.linden@oracle.com
215811658SVikram.Hegde@Sun.COM if (nppages == 0) {
215911658SVikram.Hegde@Sun.COM nppages = dcookies[j].dck_npages;
216011658SVikram.Hegde@Sun.COM paddr = dcookies[j].dck_paddr;
216111658SVikram.Hegde@Sun.COM }
216211600SVikram.Hegde@Sun.COM }
216311600SVikram.Hegde@Sun.COM
216411658SVikram.Hegde@Sun.COM if (nvpages) {
216511658SVikram.Hegde@Sun.COM *dvma_ptr = dvma;
216611658SVikram.Hegde@Sun.COM *nvpages_ptr = nvpages;
216711658SVikram.Hegde@Sun.COM } else {
216811658SVikram.Hegde@Sun.COM *dvma_ptr = 0;
216911658SVikram.Hegde@Sun.COM *nvpages_ptr = 0;
217011658SVikram.Hegde@Sun.COM }
217111658SVikram.Hegde@Sun.COM
217211600SVikram.Hegde@Sun.COM xlate->xlt_idx = idx;
217311600SVikram.Hegde@Sun.COM }
217411600SVikram.Hegde@Sun.COM
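/*
 * Write a single PDE so that it points at the next-level pgtable
 * (next->hwpg_paddr). Reserved fields are left alone, software fields
 * are cleared, and read/write permission is taken from immu_flags (or
 * forced on when BUGGY_DRIVERS is defined) before the software
 * "present" bit is set.
 */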
217511600SVikram.Hegde@Sun.COM /*ARGSUSED*/
217611600SVikram.Hegde@Sun.COM static void
217711600SVikram.Hegde@Sun.COM PDE_set_one(immu_t *immu, hw_pdte_t *hwp, pgtable_t *next,
217811600SVikram.Hegde@Sun.COM dev_info_t *rdip, immu_flags_t immu_flags)
217911600SVikram.Hegde@Sun.COM {
218011600SVikram.Hegde@Sun.COM hw_pdte_t pde;
218111600SVikram.Hegde@Sun.COM
218211600SVikram.Hegde@Sun.COM pde = *hwp;
218311600SVikram.Hegde@Sun.COM
218411600SVikram.Hegde@Sun.COM /* if PDE is already set, make sure it is correct */
218511600SVikram.Hegde@Sun.COM if (PDTE_P(pde)) {
218611600SVikram.Hegde@Sun.COM ASSERT(PDTE_PADDR(pde) == next->hwpg_paddr);
218711658SVikram.Hegde@Sun.COM #ifdef BUGGY_DRIVERS
218811658SVikram.Hegde@Sun.COM return;
218911658SVikram.Hegde@Sun.COM #else
219011600SVikram.Hegde@Sun.COM goto out;
219111658SVikram.Hegde@Sun.COM #endif
219211600SVikram.Hegde@Sun.COM }
219311600SVikram.Hegde@Sun.COM
219411600SVikram.Hegde@Sun.COM /* Don't touch SW4, it is the present bit */
219511600SVikram.Hegde@Sun.COM
219611600SVikram.Hegde@Sun.COM /* don't touch TM field it is reserved for PDEs */
219711600SVikram.Hegde@Sun.COM
219811600SVikram.Hegde@Sun.COM /* 3rd field available for system software is not used */
219911600SVikram.Hegde@Sun.COM PDTE_CLEAR_SW3(pde);
220011600SVikram.Hegde@Sun.COM
220111600SVikram.Hegde@Sun.COM /* Set next level pgtable-paddr for PDE */
220211600SVikram.Hegde@Sun.COM PDTE_CLEAR_PADDR(pde);
220311600SVikram.Hegde@Sun.COM PDTE_SET_PADDR(pde, next->hwpg_paddr);
220411600SVikram.Hegde@Sun.COM
220511600SVikram.Hegde@Sun.COM /* don't touch SNP field it is reserved for PDEs */
220611600SVikram.Hegde@Sun.COM
220711600SVikram.Hegde@Sun.COM /* Clear second field available for system software */
220811600SVikram.Hegde@Sun.COM PDTE_CLEAR_SW2(pde);
220911600SVikram.Hegde@Sun.COM
221011600SVikram.Hegde@Sun.COM /* No super pages for PDEs */
221111600SVikram.Hegde@Sun.COM PDTE_CLEAR_SP(pde);
221211600SVikram.Hegde@Sun.COM
221311600SVikram.Hegde@Sun.COM /* Clear SW1 for software */
221411600SVikram.Hegde@Sun.COM PDTE_CLEAR_SW1(pde);
221511600SVikram.Hegde@Sun.COM
221611600SVikram.Hegde@Sun.COM /*
221711600SVikram.Hegde@Sun.COM * Now that we are done writing the PDE
221811600SVikram.Hegde@Sun.COM * set the "present" flag. Note this present
221911600SVikram.Hegde@Sun.COM * flag is a bit in the PDE/PTE that the
222011600SVikram.Hegde@Sun.COM * spec says is available for system software.
222111600SVikram.Hegde@Sun.COM * This is an implementation detail of Solaris
222211600SVikram.Hegde@Sun.COM * bare-metal Intel IOMMU.
222311600SVikram.Hegde@Sun.COM * The present field in a PDE/PTE is not defined
222411600SVikram.Hegde@Sun.COM * by the Vt-d spec
222511600SVikram.Hegde@Sun.COM */
222611658SVikram.Hegde@Sun.COM
222711600SVikram.Hegde@Sun.COM out:
222811658SVikram.Hegde@Sun.COM #ifdef BUGGY_DRIVERS
222911658SVikram.Hegde@Sun.COM PDTE_SET_READ(pde);
223011658SVikram.Hegde@Sun.COM PDTE_SET_WRITE(pde);
223111658SVikram.Hegde@Sun.COM #else
223211600SVikram.Hegde@Sun.COM if (immu_flags & IMMU_FLAGS_READ)
223311600SVikram.Hegde@Sun.COM PDTE_SET_READ(pde);
223411600SVikram.Hegde@Sun.COM if (immu_flags & IMMU_FLAGS_WRITE)
223511600SVikram.Hegde@Sun.COM PDTE_SET_WRITE(pde);
223611600SVikram.Hegde@Sun.COM #endif
223711600SVikram.Hegde@Sun.COM
223811600SVikram.Hegde@Sun.COM PDTE_SET_P(pde);
223911600SVikram.Hegde@Sun.COM
224011600SVikram.Hegde@Sun.COM *hwp = pde;
224111600SVikram.Hegde@Sun.COM }
224211600SVikram.Hegde@Sun.COM
224311600SVikram.Hegde@Sun.COM /*
224411600SVikram.Hegde@Sun.COM * Used to set PDEs
224511600SVikram.Hegde@Sun.COM */
224611658SVikram.Hegde@Sun.COM static boolean_t
224711600SVikram.Hegde@Sun.COM PDE_set_all(immu_t *immu, domain_t *domain, xlate_t *xlate, int nlevels,
224811600SVikram.Hegde@Sun.COM dev_info_t *rdip, immu_flags_t immu_flags)
224911600SVikram.Hegde@Sun.COM {
225011600SVikram.Hegde@Sun.COM pgtable_t *pgtable;
225111600SVikram.Hegde@Sun.COM pgtable_t *new;
225211600SVikram.Hegde@Sun.COM pgtable_t *next;
225311600SVikram.Hegde@Sun.COM hw_pdte_t *hwp;
225411600SVikram.Hegde@Sun.COM int level;
225511600SVikram.Hegde@Sun.COM uint_t idx;
225611658SVikram.Hegde@Sun.COM krw_t rwtype;
225711658SVikram.Hegde@Sun.COM boolean_t set = B_FALSE;
225811600SVikram.Hegde@Sun.COM
225911600SVikram.Hegde@Sun.COM /* start with highest level pgtable i.e. root */
226011600SVikram.Hegde@Sun.COM xlate += nlevels;
226111600SVikram.Hegde@Sun.COM
226211600SVikram.Hegde@Sun.COM new = NULL;
226311600SVikram.Hegde@Sun.COM xlate->xlt_pgtable = domain->dom_pgtable_root;
226411600SVikram.Hegde@Sun.COM for (level = nlevels; level > 1; level--, xlate--) {
226511600SVikram.Hegde@Sun.COM idx = xlate->xlt_idx;
226611600SVikram.Hegde@Sun.COM pgtable = xlate->xlt_pgtable;
226711600SVikram.Hegde@Sun.COM
226811658SVikram.Hegde@Sun.COM /* Lock the pgtable in READ mode first */
226911658SVikram.Hegde@Sun.COM rw_enter(&(pgtable->swpg_rwlock), RW_READER);
227011658SVikram.Hegde@Sun.COM rwtype = RW_READER;
227111658SVikram.Hegde@Sun.COM again:
227211600SVikram.Hegde@Sun.COM hwp = (hw_pdte_t *)(pgtable->hwpg_vaddr) + idx;
227311600SVikram.Hegde@Sun.COM next = (pgtable->swpg_next_array)[idx];
227411600SVikram.Hegde@Sun.COM
227511600SVikram.Hegde@Sun.COM /*
227611600SVikram.Hegde@Sun.COM * check if the next (leafier) level already has a pgtable;
227711600SVikram.Hegde@Sun.COM * if yes, verify it
227811600SVikram.Hegde@Sun.COM */
227911600SVikram.Hegde@Sun.COM if (next == NULL) {
2280*13050Sfrank.van.der.linden@oracle.com if (new == NULL) {
2281*13050Sfrank.van.der.linden@oracle.com
2282*13050Sfrank.van.der.linden@oracle.com IMMU_DPROBE2(immu__pdp__alloc, dev_info_t *,
2283*13050Sfrank.van.der.linden@oracle.com rdip, int, level);
2284*13050Sfrank.van.der.linden@oracle.com
2285*13050Sfrank.van.der.linden@oracle.com new = pgtable_alloc(immu, immu_flags);
2286*13050Sfrank.van.der.linden@oracle.com if (new == NULL) {
2287*13050Sfrank.van.der.linden@oracle.com ddi_err(DER_PANIC, rdip,
2288*13050Sfrank.van.der.linden@oracle.com "pgtable alloc err");
2289*13050Sfrank.van.der.linden@oracle.com }
2290*13050Sfrank.van.der.linden@oracle.com pgtable_zero(new);
2291*13050Sfrank.van.der.linden@oracle.com }
2292*13050Sfrank.van.der.linden@oracle.com
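			/*
			 * The pgtable is currently held as READER. Try to
			 * upgrade to WRITER in place; if that fails, drop
			 * the lock, reacquire it as WRITER and re-read
			 * "next", since another thread may have installed
			 * the next-level pgtable in the meantime.
			 */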
229311658SVikram.Hegde@Sun.COM /* Change to a write lock */
229411658SVikram.Hegde@Sun.COM if (rwtype == RW_READER &&
229511658SVikram.Hegde@Sun.COM rw_tryupgrade(&(pgtable->swpg_rwlock)) == 0) {
229611658SVikram.Hegde@Sun.COM rw_exit(&(pgtable->swpg_rwlock));
229711658SVikram.Hegde@Sun.COM rw_enter(&(pgtable->swpg_rwlock), RW_WRITER);
229811658SVikram.Hegde@Sun.COM rwtype = RW_WRITER;
229911658SVikram.Hegde@Sun.COM goto again;
230011658SVikram.Hegde@Sun.COM }
230111658SVikram.Hegde@Sun.COM rwtype = RW_WRITER;
230211600SVikram.Hegde@Sun.COM next = new;
2303*13050Sfrank.van.der.linden@oracle.com (pgtable->swpg_next_array)[idx] = next;
230411600SVikram.Hegde@Sun.COM new = NULL;
230511600SVikram.Hegde@Sun.COM PDE_set_one(immu, hwp, next, rdip, immu_flags);
230611658SVikram.Hegde@Sun.COM set = B_TRUE;
230711658SVikram.Hegde@Sun.COM rw_downgrade(&(pgtable->swpg_rwlock));
230811658SVikram.Hegde@Sun.COM rwtype = RW_READER;
2309*13050Sfrank.van.der.linden@oracle.com }
2310*13050Sfrank.van.der.linden@oracle.com #ifndef BUGGY_DRIVERS
2311*13050Sfrank.van.der.linden@oracle.com else {
231211600SVikram.Hegde@Sun.COM hw_pdte_t pde = *hwp;
231311600SVikram.Hegde@Sun.COM
231411658SVikram.Hegde@Sun.COM /*
231511658SVikram.Hegde@Sun.COM * With BUGGY_DRIVERS, READ+WRITE permissions were
231611658SVikram.Hegde@Sun.COM * already set above, so there is nothing to do in that case.
231711658SVikram.Hegde@Sun.COM * XXX Check that read/write perms actually change before
231811658SVikram.Hegde@Sun.COM * setting perms. Also need to hold the lock.
231911658SVikram.Hegde@Sun.COM */
232011600SVikram.Hegde@Sun.COM if (immu_flags & IMMU_FLAGS_READ)
232111600SVikram.Hegde@Sun.COM PDTE_SET_READ(pde);
232211600SVikram.Hegde@Sun.COM if (immu_flags & IMMU_FLAGS_WRITE)
232311600SVikram.Hegde@Sun.COM PDTE_SET_WRITE(pde);
232411600SVikram.Hegde@Sun.COM
232511600SVikram.Hegde@Sun.COM *hwp = pde;
232611600SVikram.Hegde@Sun.COM }
2327*13050Sfrank.van.der.linden@oracle.com #endif
232811600SVikram.Hegde@Sun.COM
232911600SVikram.Hegde@Sun.COM ASSERT(PDTE_check(immu, *hwp, next, 0, rdip, immu_flags)
233011600SVikram.Hegde@Sun.COM == B_TRUE);
233111600SVikram.Hegde@Sun.COM
233211600SVikram.Hegde@Sun.COM (xlate - 1)->xlt_pgtable = next;
233311600SVikram.Hegde@Sun.COM rw_exit(&(pgtable->swpg_rwlock));
233411600SVikram.Hegde@Sun.COM }
233511600SVikram.Hegde@Sun.COM
233611600SVikram.Hegde@Sun.COM if (new) {
233711658SVikram.Hegde@Sun.COM pgtable_free(immu, new);
233811600SVikram.Hegde@Sun.COM }
233911658SVikram.Hegde@Sun.COM
234011658SVikram.Hegde@Sun.COM return (set);
234111600SVikram.Hegde@Sun.COM }
234211600SVikram.Hegde@Sun.COM
234311600SVikram.Hegde@Sun.COM /*
234411600SVikram.Hegde@Sun.COM * dvma_map()
234511600SVikram.Hegde@Sun.COM * map a contiguous range of DVMA pages
234611600SVikram.Hegde@Sun.COM *
234711600SVikram.Hegde@Sun.COM * domain: domain for the requesting device
234811600SVikram.Hegde@Sun.COM * sdvma: Starting dvma
234911600SVikram.Hegde@Sun.COM * snvpages: Number of pages to map
235011600SVikram.Hegde@Sun.COM * dcookies: Array of physical memory cookies
235111600SVikram.Hegde@Sun.COM * dcount: Number of cookies in the array
235211600SVikram.Hegde@Sun.COM * rdip: requesting device
235311600SVikram.Hegde@Sun.COM * immu_flags: flags
235411600SVikram.Hegde@Sun.COM */
235511658SVikram.Hegde@Sun.COM static boolean_t
2356*13050Sfrank.van.der.linden@oracle.com dvma_map(domain_t *domain, uint64_t sdvma, uint64_t snvpages,
2357*13050Sfrank.van.der.linden@oracle.com immu_dcookie_t *dcookies, int dcount, dev_info_t *rdip,
2358*13050Sfrank.van.der.linden@oracle.com immu_flags_t immu_flags)
235911600SVikram.Hegde@Sun.COM {
236011600SVikram.Hegde@Sun.COM uint64_t dvma;
236111600SVikram.Hegde@Sun.COM uint64_t n;
2362*13050Sfrank.van.der.linden@oracle.com immu_t *immu = domain->dom_immu;
236311600SVikram.Hegde@Sun.COM int nlevels = immu->immu_dvma_nlevels;
236411600SVikram.Hegde@Sun.COM xlate_t xlate[IMMU_PGTABLE_MAX_LEVELS + 1] = {0};
236511658SVikram.Hegde@Sun.COM boolean_t pde_set = B_FALSE;
236611600SVikram.Hegde@Sun.COM
236711658SVikram.Hegde@Sun.COM n = snvpages;
236811600SVikram.Hegde@Sun.COM dvma = sdvma;
236911600SVikram.Hegde@Sun.COM
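	/*
	 * Each iteration sets up the xlate array for the current dvma,
	 * makes sure all intermediate PDEs exist (allocating pgtables
	 * as needed), and then fills in as many leaf PTEs as fit in
	 * that leaf pgtable. PTE_set_all() advances dvma and decrements
	 * n, so the loop ends once all pages are mapped.
	 */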
237011600SVikram.Hegde@Sun.COM while (n > 0) {
2371*13050Sfrank.van.der.linden@oracle.com xlate_setup(dvma, xlate, nlevels);
237211600SVikram.Hegde@Sun.COM
237311600SVikram.Hegde@Sun.COM /* Lookup or allocate PGDIRs and PGTABLEs if necessary */
237411658SVikram.Hegde@Sun.COM if (PDE_set_all(immu, domain, xlate, nlevels, rdip, immu_flags)
237511658SVikram.Hegde@Sun.COM == B_TRUE) {
237611658SVikram.Hegde@Sun.COM pde_set = B_TRUE;
237711658SVikram.Hegde@Sun.COM }
237811600SVikram.Hegde@Sun.COM
237911600SVikram.Hegde@Sun.COM /* set all matching ptes that fit into this leaf pgtable */
238011658SVikram.Hegde@Sun.COM PTE_set_all(immu, domain, &xlate[1], &dvma, &n, dcookies,
238111658SVikram.Hegde@Sun.COM dcount, rdip, immu_flags);
238211600SVikram.Hegde@Sun.COM }
238311658SVikram.Hegde@Sun.COM
238411658SVikram.Hegde@Sun.COM return (pde_set);
238511600SVikram.Hegde@Sun.COM }
238611600SVikram.Hegde@Sun.COM
238711600SVikram.Hegde@Sun.COM /*
238811600SVikram.Hegde@Sun.COM * dvma_unmap()
238911600SVikram.Hegde@Sun.COM * unmap a range of DVMAs
239011600SVikram.Hegde@Sun.COM *
239111600SVikram.Hegde@Sun.COM * domain: domain for the requesting device
239211600SVikram.Hegde@Sun.COM * sdvma: starting DVMA
239311600SVikram.Hegde@Sun.COM * snpages: Number of IMMU pages to be unmapped
239411600SVikram.Hegde@Sun.COM * rdip: requesting device
239711600SVikram.Hegde@Sun.COM */
239811600SVikram.Hegde@Sun.COM static void
2399*13050Sfrank.van.der.linden@oracle.com dvma_unmap(domain_t *domain, uint64_t sdvma, uint64_t snpages,
240011600SVikram.Hegde@Sun.COM dev_info_t *rdip)
240111600SVikram.Hegde@Sun.COM {
2402*13050Sfrank.van.der.linden@oracle.com immu_t *immu = domain->dom_immu;
240311600SVikram.Hegde@Sun.COM int nlevels = immu->immu_dvma_nlevels;
240411600SVikram.Hegde@Sun.COM xlate_t xlate[IMMU_PGTABLE_MAX_LEVELS + 1] = {0};
240511658SVikram.Hegde@Sun.COM uint64_t n;
240611658SVikram.Hegde@Sun.COM uint64_t dvma;
240711600SVikram.Hegde@Sun.COM
240811658SVikram.Hegde@Sun.COM dvma = sdvma;
240911658SVikram.Hegde@Sun.COM n = snpages;
241011658SVikram.Hegde@Sun.COM
241111658SVikram.Hegde@Sun.COM while (n > 0) {
241211600SVikram.Hegde@Sun.COM /* setup the xlate array */
2413*13050Sfrank.van.der.linden@oracle.com xlate_setup(dvma, xlate, nlevels);
241411600SVikram.Hegde@Sun.COM
241511600SVikram.Hegde@Sun.COM /* just look up existing pgtables. Should never fail */
2416*13050Sfrank.van.der.linden@oracle.com if (!PDE_lookup(domain, xlate, nlevels))
2417*13050Sfrank.van.der.linden@oracle.com ddi_err(DER_PANIC, rdip,
2418*13050Sfrank.van.der.linden@oracle.com "PTE not found for addr %" PRIx64,
2419*13050Sfrank.van.der.linden@oracle.com (unsigned long long)dvma);
242011600SVikram.Hegde@Sun.COM
242111658SVikram.Hegde@Sun.COM /* clear all matching ptes that fit into this leaf pgtable */
242211658SVikram.Hegde@Sun.COM PTE_clear_all(immu, domain, &xlate[1], &dvma, &n, rdip);
242311600SVikram.Hegde@Sun.COM }
242411658SVikram.Hegde@Sun.COM
242511658SVikram.Hegde@Sun.COM /* No need to flush IOTLB after unmap */
242611600SVikram.Hegde@Sun.COM }
242711600SVikram.Hegde@Sun.COM
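/*
 * Allocate a DVMA range of "npages" IMMU pages from the domain's vmem
 * arena, honoring the alignment and address limits in the DMA
 * attributes. Returns 0 if the arena allocation fails (e.g. with
 * VM_NOSLEEP).
 */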
242811600SVikram.Hegde@Sun.COM static uint64_t
2429*13050Sfrank.van.der.linden@oracle.com dvma_alloc(domain_t *domain, ddi_dma_attr_t *dma_attr, uint_t npages, int kmf)
243011600SVikram.Hegde@Sun.COM {
243111600SVikram.Hegde@Sun.COM uint64_t dvma;
243211658SVikram.Hegde@Sun.COM size_t xsize, align;
243311600SVikram.Hegde@Sun.COM uint64_t minaddr, maxaddr;
243411600SVikram.Hegde@Sun.COM
243511600SVikram.Hegde@Sun.COM /* parameters */
243611600SVikram.Hegde@Sun.COM xsize = npages * IMMU_PAGESIZE;
243711600SVikram.Hegde@Sun.COM align = MAX((size_t)(dma_attr->dma_attr_align), IMMU_PAGESIZE);
243811600SVikram.Hegde@Sun.COM minaddr = dma_attr->dma_attr_addr_lo;
243911600SVikram.Hegde@Sun.COM maxaddr = dma_attr->dma_attr_addr_hi + 1;
244011600SVikram.Hegde@Sun.COM
244111600SVikram.Hegde@Sun.COM /* handle the rollover cases */
244211600SVikram.Hegde@Sun.COM if (maxaddr < dma_attr->dma_attr_addr_hi) {
244311600SVikram.Hegde@Sun.COM maxaddr = dma_attr->dma_attr_addr_hi;
244411600SVikram.Hegde@Sun.COM }
244511600SVikram.Hegde@Sun.COM
244611600SVikram.Hegde@Sun.COM /*
244711600SVikram.Hegde@Sun.COM * allocate from vmem arena.
244811600SVikram.Hegde@Sun.COM */
244911600SVikram.Hegde@Sun.COM dvma = (uint64_t)(uintptr_t)vmem_xalloc(domain->dom_dvma_arena,
245011658SVikram.Hegde@Sun.COM xsize, align, 0, 0, (void *)(uintptr_t)minaddr,
2451*13050Sfrank.van.der.linden@oracle.com (void *)(uintptr_t)maxaddr, kmf);
245211600SVikram.Hegde@Sun.COM
245311600SVikram.Hegde@Sun.COM return (dvma);
245411600SVikram.Hegde@Sun.COM }
245511600SVikram.Hegde@Sun.COM
245611600SVikram.Hegde@Sun.COM static void
2457*13050Sfrank.van.der.linden@oracle.com dvma_prealloc(dev_info_t *rdip, immu_hdl_priv_t *ihp, ddi_dma_attr_t *dma_attr)
2458*13050Sfrank.van.der.linden@oracle.com {
2459*13050Sfrank.van.der.linden@oracle.com int nlevels;
2460*13050Sfrank.van.der.linden@oracle.com xlate_t xlate[IMMU_PGTABLE_MAX_LEVELS + 1] = {0}, *xlp;
2461*13050Sfrank.van.der.linden@oracle.com uint64_t dvma, n;
2462*13050Sfrank.van.der.linden@oracle.com size_t xsize, align;
2463*13050Sfrank.van.der.linden@oracle.com uint64_t minaddr, maxaddr, dmamax;
2464*13050Sfrank.van.der.linden@oracle.com int on, npte, pindex;
2465*13050Sfrank.van.der.linden@oracle.com hw_pdte_t *shwp;
2466*13050Sfrank.van.der.linden@oracle.com immu_t *immu;
2467*13050Sfrank.van.der.linden@oracle.com domain_t *domain;
2468*13050Sfrank.van.der.linden@oracle.com
2469*13050Sfrank.van.der.linden@oracle.com /* parameters */
2470*13050Sfrank.van.der.linden@oracle.com domain = IMMU_DEVI(rdip)->imd_domain;
2471*13050Sfrank.van.der.linden@oracle.com immu = domain->dom_immu;
2472*13050Sfrank.van.der.linden@oracle.com nlevels = immu->immu_dvma_nlevels;
2473*13050Sfrank.van.der.linden@oracle.com xsize = IMMU_NPREPTES * IMMU_PAGESIZE;
2474*13050Sfrank.van.der.linden@oracle.com align = MAX((size_t)(dma_attr->dma_attr_align), IMMU_PAGESIZE);
2475*13050Sfrank.van.der.linden@oracle.com minaddr = dma_attr->dma_attr_addr_lo;
2476*13050Sfrank.van.der.linden@oracle.com if (dma_attr->dma_attr_flags & _DDI_DMA_BOUNCE_ON_SEG)
2477*13050Sfrank.van.der.linden@oracle.com dmamax = dma_attr->dma_attr_seg;
2478*13050Sfrank.van.der.linden@oracle.com else
2479*13050Sfrank.van.der.linden@oracle.com dmamax = dma_attr->dma_attr_addr_hi;
2480*13050Sfrank.van.der.linden@oracle.com maxaddr = dmamax + 1;
2481*13050Sfrank.van.der.linden@oracle.com
2482*13050Sfrank.van.der.linden@oracle.com if (maxaddr < dmamax)
2483*13050Sfrank.van.der.linden@oracle.com maxaddr = dmamax;
2484*13050Sfrank.van.der.linden@oracle.com
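	/*
	 * Reserve a small contiguous DVMA range (IMMU_NPREPTES pages)
	 * for this DMA handle and remember the leaf PTE pointers in
	 * ihp_preptes[], so that small binds can later be mapped by
	 * writing the cached PTEs directly instead of going through
	 * the vmem arena and a full page table walk.
	 */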
2485*13050Sfrank.van.der.linden@oracle.com dvma = (uint64_t)(uintptr_t)vmem_xalloc(domain->dom_dvma_arena,
2486*13050Sfrank.van.der.linden@oracle.com xsize, align, 0, dma_attr->dma_attr_seg + 1,
2487*13050Sfrank.van.der.linden@oracle.com (void *)(uintptr_t)minaddr, (void *)(uintptr_t)maxaddr, VM_NOSLEEP);
2488*13050Sfrank.van.der.linden@oracle.com
2489*13050Sfrank.van.der.linden@oracle.com ihp->ihp_predvma = dvma;
2490*13050Sfrank.van.der.linden@oracle.com ihp->ihp_npremapped = 0;
2491*13050Sfrank.van.der.linden@oracle.com if (dvma == 0)
2492*13050Sfrank.van.der.linden@oracle.com return;
2493*13050Sfrank.van.der.linden@oracle.com
2494*13050Sfrank.van.der.linden@oracle.com n = IMMU_NPREPTES;
2495*13050Sfrank.van.der.linden@oracle.com pindex = 0;
2496*13050Sfrank.van.der.linden@oracle.com
2497*13050Sfrank.van.der.linden@oracle.com /*
2498*13050Sfrank.van.der.linden@oracle.com * Set up a mapping at address 0, just so that all PDPs get allocated
2499*13050Sfrank.van.der.linden@oracle.com * now. Although this initial mapping should never be used,
2500*13050Sfrank.van.der.linden@oracle.com * explicitly set it to read-only, just to be safe.
2501*13050Sfrank.van.der.linden@oracle.com */
2502*13050Sfrank.van.der.linden@oracle.com while (n > 0) {
2503*13050Sfrank.van.der.linden@oracle.com xlate_setup(dvma, xlate, nlevels);
2504*13050Sfrank.van.der.linden@oracle.com
2505*13050Sfrank.van.der.linden@oracle.com (void) PDE_set_all(immu, domain, xlate, nlevels, rdip,
2506*13050Sfrank.van.der.linden@oracle.com IMMU_FLAGS_READ | IMMU_FLAGS_WRITE);
2507*13050Sfrank.van.der.linden@oracle.com
2508*13050Sfrank.van.der.linden@oracle.com xlp = &xlate[1];
2509*13050Sfrank.van.der.linden@oracle.com shwp = (hw_pdte_t *)(xlp->xlt_pgtable->hwpg_vaddr)
2510*13050Sfrank.van.der.linden@oracle.com + xlp->xlt_idx;
2511*13050Sfrank.van.der.linden@oracle.com on = n;
2512*13050Sfrank.van.der.linden@oracle.com
2513*13050Sfrank.van.der.linden@oracle.com PTE_set_all(immu, domain, xlp, &dvma, &n, &immu_precookie,
2514*13050Sfrank.van.der.linden@oracle.com 1, rdip, IMMU_FLAGS_READ);
2515*13050Sfrank.van.der.linden@oracle.com
2516*13050Sfrank.van.der.linden@oracle.com npte = on - n;
2517*13050Sfrank.van.der.linden@oracle.com
2518*13050Sfrank.van.der.linden@oracle.com while (npte > 0) {
2519*13050Sfrank.van.der.linden@oracle.com ihp->ihp_preptes[pindex++] = shwp;
2520*13050Sfrank.van.der.linden@oracle.com #ifdef BUGGY_DRIVERS
2521*13050Sfrank.van.der.linden@oracle.com PDTE_CLEAR_WRITE(*shwp);
2522*13050Sfrank.van.der.linden@oracle.com #endif
2523*13050Sfrank.van.der.linden@oracle.com shwp++;
2524*13050Sfrank.van.der.linden@oracle.com npte--;
2525*13050Sfrank.van.der.linden@oracle.com }
2526*13050Sfrank.van.der.linden@oracle.com }
2527*13050Sfrank.van.der.linden@oracle.com }
2528*13050Sfrank.van.der.linden@oracle.com
2529*13050Sfrank.van.der.linden@oracle.com static void
2530*13050Sfrank.van.der.linden@oracle.com dvma_prefree(dev_info_t *rdip, immu_hdl_priv_t *ihp)
2531*13050Sfrank.van.der.linden@oracle.com {
2532*13050Sfrank.van.der.linden@oracle.com domain_t *domain;
2533*13050Sfrank.van.der.linden@oracle.com
2534*13050Sfrank.van.der.linden@oracle.com domain = IMMU_DEVI(rdip)->imd_domain;
2535*13050Sfrank.van.der.linden@oracle.com
2536*13050Sfrank.van.der.linden@oracle.com if (ihp->ihp_predvma != 0) {
2537*13050Sfrank.van.der.linden@oracle.com dvma_unmap(domain, ihp->ihp_predvma, IMMU_NPREPTES, rdip);
2538*13050Sfrank.van.der.linden@oracle.com vmem_free(domain->dom_dvma_arena,
2539*13050Sfrank.van.der.linden@oracle.com (void *)(uintptr_t)ihp->ihp_predvma,
2540*13050Sfrank.van.der.linden@oracle.com IMMU_NPREPTES * IMMU_PAGESIZE);
2541*13050Sfrank.van.der.linden@oracle.com }
2542*13050Sfrank.van.der.linden@oracle.com }
2543*13050Sfrank.van.der.linden@oracle.com
2544*13050Sfrank.van.der.linden@oracle.com static void
254511600SVikram.Hegde@Sun.COM dvma_free(domain_t *domain, uint64_t dvma, uint64_t npages)
254611600SVikram.Hegde@Sun.COM {
254711600SVikram.Hegde@Sun.COM uint64_t size = npages * IMMU_PAGESIZE;
254811600SVikram.Hegde@Sun.COM
2549*13050Sfrank.van.der.linden@oracle.com if (domain->dom_maptype != IMMU_MAPTYPE_XLATE)
255011600SVikram.Hegde@Sun.COM return;
255111600SVikram.Hegde@Sun.COM
255211600SVikram.Hegde@Sun.COM vmem_free(domain->dom_dvma_arena, (void *)(uintptr_t)dvma, size);
255311600SVikram.Hegde@Sun.COM }
2554*13050Sfrank.van.der.linden@oracle.com
255511600SVikram.Hegde@Sun.COM static int
2556*13050Sfrank.van.der.linden@oracle.com immu_map_dvmaseg(dev_info_t *rdip, ddi_dma_handle_t handle,
2557*13050Sfrank.van.der.linden@oracle.com immu_hdl_priv_t *ihp, struct ddi_dma_req *dmareq,
2558*13050Sfrank.van.der.linden@oracle.com ddi_dma_obj_t *dma_out)
255911600SVikram.Hegde@Sun.COM {
2560*13050Sfrank.van.der.linden@oracle.com domain_t *domain;
2561*13050Sfrank.van.der.linden@oracle.com immu_t *immu;
2562*13050Sfrank.van.der.linden@oracle.com immu_flags_t immu_flags;
256311600SVikram.Hegde@Sun.COM ddi_dma_atyp_t buftype;
2564*13050Sfrank.van.der.linden@oracle.com ddi_dma_obj_t *dmar_object;
2565*13050Sfrank.van.der.linden@oracle.com ddi_dma_attr_t *attrp;
2566*13050Sfrank.van.der.linden@oracle.com uint64_t offset, paddr, dvma, sdvma, rwmask;
2567*13050Sfrank.van.der.linden@oracle.com size_t npages, npgalloc;
2568*13050Sfrank.van.der.linden@oracle.com uint_t psize, size, pcnt, dmax;
256911600SVikram.Hegde@Sun.COM page_t **pparray;
257011600SVikram.Hegde@Sun.COM caddr_t vaddr;
257111600SVikram.Hegde@Sun.COM page_t *page;
2572*13050Sfrank.van.der.linden@oracle.com struct as *vas;
2573*13050Sfrank.van.der.linden@oracle.com immu_dcookie_t *dcookies;
2574*13050Sfrank.van.der.linden@oracle.com int pde_set;
2575*13050Sfrank.van.der.linden@oracle.com
2576*13050Sfrank.van.der.linden@oracle.com domain = IMMU_DEVI(rdip)->imd_domain;
2577*13050Sfrank.van.der.linden@oracle.com immu = domain->dom_immu;
2578*13050Sfrank.van.der.linden@oracle.com immu_flags = dma_to_immu_flags(dmareq);
2579*13050Sfrank.van.der.linden@oracle.com
2580*13050Sfrank.van.der.linden@oracle.com attrp = &((ddi_dma_impl_t *)handle)->dmai_attr;
2581*13050Sfrank.van.der.linden@oracle.com
2582*13050Sfrank.van.der.linden@oracle.com dmar_object = &dmareq->dmar_object;
258311600SVikram.Hegde@Sun.COM pparray = dmar_object->dmao_obj.virt_obj.v_priv;
258411600SVikram.Hegde@Sun.COM vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
258511600SVikram.Hegde@Sun.COM buftype = dmar_object->dmao_type;
258611600SVikram.Hegde@Sun.COM size = dmar_object->dmao_size;
2587*13050Sfrank.van.der.linden@oracle.com
2588*13050Sfrank.van.der.linden@oracle.com IMMU_DPROBE3(immu__map__dvma, dev_info_t *, rdip, ddi_dma_atyp_t,
2589*13050Sfrank.van.der.linden@oracle.com buftype, uint_t, size);
2590*13050Sfrank.van.der.linden@oracle.com
2591*13050Sfrank.van.der.linden@oracle.com dcookies = &ihp->ihp_dcookies[0];
2592*13050Sfrank.van.der.linden@oracle.com
2593*13050Sfrank.van.der.linden@oracle.com pcnt = dmax = 0;
259411600SVikram.Hegde@Sun.COM
259511600SVikram.Hegde@Sun.COM /* retrieve paddr, psize, offset from dmareq */
259611600SVikram.Hegde@Sun.COM if (buftype == DMA_OTYP_PAGES) {
259711600SVikram.Hegde@Sun.COM page = dmar_object->dmao_obj.pp_obj.pp_pp;
259811600SVikram.Hegde@Sun.COM offset = dmar_object->dmao_obj.pp_obj.pp_offset &
259911600SVikram.Hegde@Sun.COM MMU_PAGEOFFSET;
260011600SVikram.Hegde@Sun.COM paddr = pfn_to_pa(page->p_pagenum) + offset;
260111600SVikram.Hegde@Sun.COM psize = MIN((MMU_PAGESIZE - offset), size);
260211600SVikram.Hegde@Sun.COM page = page->p_next;
260411600SVikram.Hegde@Sun.COM } else {
2603*13050Sfrank.van.der.linden@oracle.com vas = dmar_object->dmao_obj.virt_obj.v_as;
2605*13050Sfrank.van.der.linden@oracle.com if (vas == NULL) {
2606*13050Sfrank.van.der.linden@oracle.com vas = &kas;
260711600SVikram.Hegde@Sun.COM }
260811600SVikram.Hegde@Sun.COM offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
260911600SVikram.Hegde@Sun.COM if (pparray != NULL) {
261011600SVikram.Hegde@Sun.COM paddr = pfn_to_pa(pparray[pcnt]->p_pagenum) + offset;
261111600SVikram.Hegde@Sun.COM psize = MIN((MMU_PAGESIZE - offset), size);
261211600SVikram.Hegde@Sun.COM pcnt++;
261311600SVikram.Hegde@Sun.COM } else {
2614*13050Sfrank.van.der.linden@oracle.com paddr = pfn_to_pa(hat_getpfnum(vas->a_hat,
261511600SVikram.Hegde@Sun.COM vaddr)) + offset;
261611600SVikram.Hegde@Sun.COM psize = MIN(size, (MMU_PAGESIZE - offset));
261711600SVikram.Hegde@Sun.COM vaddr += psize;
261811600SVikram.Hegde@Sun.COM }
261911600SVikram.Hegde@Sun.COM }
262011600SVikram.Hegde@Sun.COM
2621*13050Sfrank.van.der.linden@oracle.com npgalloc = IMMU_BTOPR(size + offset);
2622*13050Sfrank.van.der.linden@oracle.com
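	/*
	 * Fast path: if the request fits in the preallocated PTEs for
	 * this handle, build the read/write mask and write the first
	 * cached PTE directly. Otherwise allocate a fresh DVMA range
	 * from the arena and start accumulating physical cookies.
	 */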
2623*13050Sfrank.van.der.linden@oracle.com if (npgalloc <= IMMU_NPREPTES && ihp->ihp_predvma != 0) {
2624*13050Sfrank.van.der.linden@oracle.com #ifdef BUGGY_DRIVERS
2625*13050Sfrank.van.der.linden@oracle.com rwmask = PDTE_MASK_R | PDTE_MASK_W | immu->immu_ptemask;
2626*13050Sfrank.van.der.linden@oracle.com #else
2627*13050Sfrank.van.der.linden@oracle.com rwmask = immu->immu_ptemask;
2628*13050Sfrank.van.der.linden@oracle.com if (immu_flags & IMMU_FLAGS_READ)
2629*13050Sfrank.van.der.linden@oracle.com rwmask |= PDTE_MASK_R;
2630*13050Sfrank.van.der.linden@oracle.com if (immu_flags & IMMU_FLAGS_WRITE)
2631*13050Sfrank.van.der.linden@oracle.com rwmask |= PDTE_MASK_W;
2632*13050Sfrank.van.der.linden@oracle.com #endif
2633*13050Sfrank.van.der.linden@oracle.com #ifdef DEBUG
2634*13050Sfrank.van.der.linden@oracle.com rwmask |= PDTE_MASK_P;
2635*13050Sfrank.van.der.linden@oracle.com #endif
2636*13050Sfrank.van.der.linden@oracle.com sdvma = ihp->ihp_predvma;
2637*13050Sfrank.van.der.linden@oracle.com ihp->ihp_npremapped = npgalloc;
2638*13050Sfrank.van.der.linden@oracle.com *ihp->ihp_preptes[0] =
2639*13050Sfrank.van.der.linden@oracle.com PDTE_PADDR(paddr & ~MMU_PAGEOFFSET) | rwmask;
2640*13050Sfrank.van.der.linden@oracle.com } else {
2641*13050Sfrank.van.der.linden@oracle.com ihp->ihp_npremapped = 0;
2642*13050Sfrank.van.der.linden@oracle.com sdvma = dvma_alloc(domain, attrp, npgalloc,
2643*13050Sfrank.van.der.linden@oracle.com dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
2644*13050Sfrank.van.der.linden@oracle.com if (sdvma == 0)
2645*13050Sfrank.van.der.linden@oracle.com return (DDI_DMA_NORESOURCES);
2646*13050Sfrank.van.der.linden@oracle.com
2647*13050Sfrank.van.der.linden@oracle.com dcookies[0].dck_paddr = (paddr & ~MMU_PAGEOFFSET);
2648*13050Sfrank.van.der.linden@oracle.com dcookies[0].dck_npages = 1;
2649*13050Sfrank.van.der.linden@oracle.com }
2650*13050Sfrank.van.der.linden@oracle.com
2651*13050Sfrank.van.der.linden@oracle.com IMMU_DPROBE3(immu__dvma__alloc, dev_info_t *, rdip, uint64_t, npgalloc,
2652*13050Sfrank.van.der.linden@oracle.com uint64_t, sdvma);
2653*13050Sfrank.van.der.linden@oracle.com
2654*13050Sfrank.van.der.linden@oracle.com dvma = sdvma;
2655*13050Sfrank.van.der.linden@oracle.com pde_set = 0;
2656*13050Sfrank.van.der.linden@oracle.com npages = 1;
265711600SVikram.Hegde@Sun.COM size -= psize;
265811600SVikram.Hegde@Sun.COM while (size > 0) {
265911600SVikram.Hegde@Sun.COM /* get the size for this page (i.e. partial or full page) */
266011600SVikram.Hegde@Sun.COM psize = MIN(size, MMU_PAGESIZE);
266111600SVikram.Hegde@Sun.COM if (buftype == DMA_OTYP_PAGES) {
266211600SVikram.Hegde@Sun.COM /* get the paddr from the page_t */
266311600SVikram.Hegde@Sun.COM paddr = pfn_to_pa(page->p_pagenum);
266411600SVikram.Hegde@Sun.COM page = page->p_next;
266511600SVikram.Hegde@Sun.COM } else if (pparray != NULL) {
266611600SVikram.Hegde@Sun.COM /* index into the array of page_t's to get the paddr */
266711600SVikram.Hegde@Sun.COM paddr = pfn_to_pa(pparray[pcnt]->p_pagenum);
266811600SVikram.Hegde@Sun.COM pcnt++;
266911600SVikram.Hegde@Sun.COM } else {
267011600SVikram.Hegde@Sun.COM /* call into the VM to get the paddr */
2671*13050Sfrank.van.der.linden@oracle.com paddr = pfn_to_pa(hat_getpfnum(vas->a_hat, vaddr));
267211600SVikram.Hegde@Sun.COM vaddr += psize;
267311600SVikram.Hegde@Sun.COM }
2674*13050Sfrank.van.der.linden@oracle.com
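		/*
		 * Either write the next cached PTE (prealloc case), extend
		 * the current cookie if this page is physically contiguous
		 * with it, or start a new cookie. When all IMMU_NDCK
		 * cookies are in use, map what has been collected so far
		 * and start over with an empty cookie array.
		 */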
2675*13050Sfrank.van.der.linden@oracle.com npages++;
2676*13050Sfrank.van.der.linden@oracle.com
2677*13050Sfrank.van.der.linden@oracle.com if (ihp->ihp_npremapped > 0) {
2678*13050Sfrank.van.der.linden@oracle.com *ihp->ihp_preptes[npages - 1] =
2679*13050Sfrank.van.der.linden@oracle.com PDTE_PADDR(paddr) | rwmask;
2680*13050Sfrank.van.der.linden@oracle.com } else if (IMMU_CONTIG_PADDR(dcookies[dmax], paddr)) {
2681*13050Sfrank.van.der.linden@oracle.com dcookies[dmax].dck_npages++;
2682*13050Sfrank.van.der.linden@oracle.com } else {
2683*13050Sfrank.van.der.linden@oracle.com /* No, we need a new dcookie */
2684*13050Sfrank.van.der.linden@oracle.com if (dmax == (IMMU_NDCK - 1)) {
2685*13050Sfrank.van.der.linden@oracle.com /*
2686*13050Sfrank.van.der.linden@oracle.com * Ran out of dcookies. Map them now.
2687*13050Sfrank.van.der.linden@oracle.com */
2688*13050Sfrank.van.der.linden@oracle.com if (dvma_map(domain, dvma,
2689*13050Sfrank.van.der.linden@oracle.com npages, dcookies, dmax + 1, rdip,
2690*13050Sfrank.van.der.linden@oracle.com immu_flags))
2691*13050Sfrank.van.der.linden@oracle.com pde_set++;
2692*13050Sfrank.van.der.linden@oracle.com
2693*13050Sfrank.van.der.linden@oracle.com IMMU_DPROBE4(immu__dvmamap__early,
2694*13050Sfrank.van.der.linden@oracle.com dev_info_t *, rdip, uint64_t, dvma,
2695*13050Sfrank.van.der.linden@oracle.com uint_t, npages, uint_t, dmax+1);
2696*13050Sfrank.van.der.linden@oracle.com
2697*13050Sfrank.van.der.linden@oracle.com dvma += (npages << IMMU_PAGESHIFT);
2698*13050Sfrank.van.der.linden@oracle.com npages = 0;
2699*13050Sfrank.van.der.linden@oracle.com dmax = 0;
2700*13050Sfrank.van.der.linden@oracle.com } else
2701*13050Sfrank.van.der.linden@oracle.com dmax++;
2702*13050Sfrank.van.der.linden@oracle.com dcookies[dmax].dck_paddr = paddr;
2703*13050Sfrank.van.der.linden@oracle.com dcookies[dmax].dck_npages = 1;
2704*13050Sfrank.van.der.linden@oracle.com }
270511600SVikram.Hegde@Sun.COM size -= psize;
270611600SVikram.Hegde@Sun.COM }
270711600SVikram.Hegde@Sun.COM
2708*13050Sfrank.van.der.linden@oracle.com /*
2709*13050Sfrank.van.der.linden@oracle.com * Finish up, mapping all, or all of the remaining,
2710*13050Sfrank.van.der.linden@oracle.com * physical memory ranges.
2711*13050Sfrank.van.der.linden@oracle.com */
2712*13050Sfrank.van.der.linden@oracle.com if (ihp->ihp_npremapped == 0 && npages > 0) {
2713*13050Sfrank.van.der.linden@oracle.com IMMU_DPROBE4(immu__dvmamap__late, dev_info_t *, rdip, \
2714*13050Sfrank.van.der.linden@oracle.com uint64_t, dvma, uint_t, npages, uint_t, dmax+1);
2715*13050Sfrank.van.der.linden@oracle.com
2716*13050Sfrank.van.der.linden@oracle.com if (dvma_map(domain, dvma, npages, dcookies,
2717*13050Sfrank.van.der.linden@oracle.com dmax + 1, rdip, immu_flags))
2718*13050Sfrank.van.der.linden@oracle.com pde_set++;
2719*13050Sfrank.van.der.linden@oracle.com }
2720*13050Sfrank.van.der.linden@oracle.com
2721*13050Sfrank.van.der.linden@oracle.com /* Invalidate the IOTLB */
2722*13050Sfrank.van.der.linden@oracle.com immu_flush_iotlb_psi(immu, domain->dom_did, sdvma, npgalloc,
2723*13050Sfrank.van.der.linden@oracle.com pde_set > 0 ? TLB_IVA_WHOLE : TLB_IVA_LEAF,
2724*13050Sfrank.van.der.linden@oracle.com &ihp->ihp_inv_wait);
2725*13050Sfrank.van.der.linden@oracle.com
2726*13050Sfrank.van.der.linden@oracle.com ihp->ihp_ndvseg = 1;
2727*13050Sfrank.van.der.linden@oracle.com ihp->ihp_dvseg[0].dvs_start = sdvma;
2728*13050Sfrank.van.der.linden@oracle.com ihp->ihp_dvseg[0].dvs_len = dmar_object->dmao_size;
2729*13050Sfrank.van.der.linden@oracle.com
2730*13050Sfrank.van.der.linden@oracle.com dma_out->dmao_size = dmar_object->dmao_size;
2731*13050Sfrank.van.der.linden@oracle.com dma_out->dmao_obj.dvma_obj.dv_off = offset & IMMU_PAGEOFFSET;
2732*13050Sfrank.van.der.linden@oracle.com dma_out->dmao_obj.dvma_obj.dv_nseg = 1;
2733*13050Sfrank.van.der.linden@oracle.com dma_out->dmao_obj.dvma_obj.dv_seg = &ihp->ihp_dvseg[0];
2734*13050Sfrank.van.der.linden@oracle.com dma_out->dmao_type = DMA_OTYP_DVADDR;
2735*13050Sfrank.van.der.linden@oracle.com
2736*13050Sfrank.van.der.linden@oracle.com return (DDI_DMA_MAPPED);
2737*13050Sfrank.van.der.linden@oracle.com }
2738*13050Sfrank.van.der.linden@oracle.com
2739*13050Sfrank.van.der.linden@oracle.com static int
2740*13050Sfrank.van.der.linden@oracle.com immu_unmap_dvmaseg(dev_info_t *rdip, ddi_dma_obj_t *dmao)
2741*13050Sfrank.van.der.linden@oracle.com {
2742*13050Sfrank.van.der.linden@oracle.com uint64_t dvma, npages;
2743*13050Sfrank.van.der.linden@oracle.com domain_t *domain;
2744*13050Sfrank.van.der.linden@oracle.com struct dvmaseg *dvs;
2745*13050Sfrank.van.der.linden@oracle.com
2746*13050Sfrank.van.der.linden@oracle.com domain = IMMU_DEVI(rdip)->imd_domain;
2747*13050Sfrank.van.der.linden@oracle.com dvs = dmao->dmao_obj.dvma_obj.dv_seg;
2748*13050Sfrank.van.der.linden@oracle.com
2749*13050Sfrank.van.der.linden@oracle.com dvma = dvs[0].dvs_start;
2750*13050Sfrank.van.der.linden@oracle.com npages = IMMU_BTOPR(dvs[0].dvs_len + dmao->dmao_obj.dvma_obj.dv_off);
2751*13050Sfrank.van.der.linden@oracle.com
2752*13050Sfrank.van.der.linden@oracle.com #ifdef DEBUG
2753*13050Sfrank.van.der.linden@oracle.com /* Unmap only in DEBUG mode */
2754*13050Sfrank.van.der.linden@oracle.com dvma_unmap(domain, dvma, npages, rdip);
2755*13050Sfrank.van.der.linden@oracle.com #endif
2756*13050Sfrank.van.der.linden@oracle.com dvma_free(domain, dvma, npages);
2757*13050Sfrank.van.der.linden@oracle.com
2758*13050Sfrank.van.der.linden@oracle.com IMMU_DPROBE3(immu__dvma__free, dev_info_t *, rdip, uint_t, npages,
2759*13050Sfrank.van.der.linden@oracle.com uint64_t, dvma);
2760*13050Sfrank.van.der.linden@oracle.com
2761*13050Sfrank.van.der.linden@oracle.com #ifdef DEBUG
2762*13050Sfrank.van.der.linden@oracle.com /*
2763*13050Sfrank.van.der.linden@oracle.com * In the DEBUG case, the unmap was actually done,
2764*13050Sfrank.van.der.linden@oracle.com * but an IOTLB flush was not done. So, an explicit
2765*13050Sfrank.van.der.linden@oracle.com * write back flush is needed.
2766*13050Sfrank.van.der.linden@oracle.com */
2767*13050Sfrank.van.der.linden@oracle.com immu_regs_wbf_flush(domain->dom_immu);
2768*13050Sfrank.van.der.linden@oracle.com #endif
276911600SVikram.Hegde@Sun.COM
277011600SVikram.Hegde@Sun.COM return (DDI_SUCCESS);
277111600SVikram.Hegde@Sun.COM }
277211600SVikram.Hegde@Sun.COM
277311600SVikram.Hegde@Sun.COM /* ############################# Functions exported ######################## */
277411600SVikram.Hegde@Sun.COM
277511600SVikram.Hegde@Sun.COM /*
277611600SVikram.Hegde@Sun.COM * setup the DVMA subsystem
277711600SVikram.Hegde@Sun.COM * this code runs only for the first IOMMU unit
277811600SVikram.Hegde@Sun.COM */
277911600SVikram.Hegde@Sun.COM void
278011600SVikram.Hegde@Sun.COM immu_dvma_setup(list_t *listp)
278111600SVikram.Hegde@Sun.COM {
278211600SVikram.Hegde@Sun.COM immu_t *immu;
278311600SVikram.Hegde@Sun.COM uint_t kval;
278411600SVikram.Hegde@Sun.COM size_t nchains;
278511600SVikram.Hegde@Sun.COM
278611600SVikram.Hegde@Sun.COM /* locks */
278711600SVikram.Hegde@Sun.COM mutex_init(&immu_domain_lock, NULL, MUTEX_DEFAULT, NULL);
278811600SVikram.Hegde@Sun.COM
278911600SVikram.Hegde@Sun.COM /* Create lists */
279011600SVikram.Hegde@Sun.COM list_create(&immu_unity_domain_list, sizeof (domain_t),
279111600SVikram.Hegde@Sun.COM offsetof(domain_t, dom_maptype_node));
279211600SVikram.Hegde@Sun.COM list_create(&immu_xlate_domain_list, sizeof (domain_t),
279311600SVikram.Hegde@Sun.COM offsetof(domain_t, dom_maptype_node));
279411600SVikram.Hegde@Sun.COM
279511600SVikram.Hegde@Sun.COM /* Setup BDF domain hash */
279611600SVikram.Hegde@Sun.COM nchains = 0xff;
279711600SVikram.Hegde@Sun.COM kval = mod_hash_iddata_gen(nchains);
279811600SVikram.Hegde@Sun.COM
279911600SVikram.Hegde@Sun.COM bdf_domain_hash = mod_hash_create_extended("BDF-DOMAIN_HASH",
280011600SVikram.Hegde@Sun.COM nchains, mod_hash_null_keydtor, mod_hash_null_valdtor,
280111600SVikram.Hegde@Sun.COM mod_hash_byid, (void *)(uintptr_t)kval, mod_hash_idkey_cmp,
280211600SVikram.Hegde@Sun.COM KM_NOSLEEP);
280311600SVikram.Hegde@Sun.COM
280411600SVikram.Hegde@Sun.COM immu = list_head(listp);
280511600SVikram.Hegde@Sun.COM for (; immu; immu = list_next(listp, immu)) {
280611600SVikram.Hegde@Sun.COM create_unity_domain(immu);
280711600SVikram.Hegde@Sun.COM did_init(immu);
280811600SVikram.Hegde@Sun.COM context_init(immu);
280911600SVikram.Hegde@Sun.COM immu->immu_dvma_setup = B_TRUE;
281011600SVikram.Hegde@Sun.COM }
281111600SVikram.Hegde@Sun.COM }
281211600SVikram.Hegde@Sun.COM
281311600SVikram.Hegde@Sun.COM /*
281411600SVikram.Hegde@Sun.COM * Start up one DVMA unit
281511600SVikram.Hegde@Sun.COM */
281611600SVikram.Hegde@Sun.COM void
281711600SVikram.Hegde@Sun.COM immu_dvma_startup(immu_t *immu)
281811600SVikram.Hegde@Sun.COM {
281911600SVikram.Hegde@Sun.COM if (immu_gfxdvma_enable == B_FALSE &&
282011600SVikram.Hegde@Sun.COM immu->immu_dvma_gfx_only == B_TRUE) {
282111600SVikram.Hegde@Sun.COM return;
282211600SVikram.Hegde@Sun.COM }
282311600SVikram.Hegde@Sun.COM
282411600SVikram.Hegde@Sun.COM /*
282511600SVikram.Hegde@Sun.COM * DVMA will start once IOMMU is "running"
282611600SVikram.Hegde@Sun.COM */
282711600SVikram.Hegde@Sun.COM immu->immu_dvma_running = B_TRUE;
282811600SVikram.Hegde@Sun.COM }
282911600SVikram.Hegde@Sun.COM
283011600SVikram.Hegde@Sun.COM /*
283111600SVikram.Hegde@Sun.COM * immu_dvma_physmem_update()
283211600SVikram.Hegde@Sun.COM * called when the installed memory on a
283311600SVikram.Hegde@Sun.COM * system increases, to expand domain DVMA
283411600SVikram.Hegde@Sun.COM * for domains with UNITY mapping
283511600SVikram.Hegde@Sun.COM */
283611600SVikram.Hegde@Sun.COM void
283711600SVikram.Hegde@Sun.COM immu_dvma_physmem_update(uint64_t addr, uint64_t size)
283811600SVikram.Hegde@Sun.COM {
283911600SVikram.Hegde@Sun.COM uint64_t start;
284011600SVikram.Hegde@Sun.COM uint64_t npages;
284111658SVikram.Hegde@Sun.COM int dcount;
2842*13050Sfrank.van.der.linden@oracle.com immu_dcookie_t dcookies[1] = {0};
284311600SVikram.Hegde@Sun.COM domain_t *domain;
284411600SVikram.Hegde@Sun.COM
284511600SVikram.Hegde@Sun.COM /*
284611600SVikram.Hegde@Sun.COM * Just walk the system-wide list of domains with
284711600SVikram.Hegde@Sun.COM * UNITY mapping. Both the list of *all* domains
284811600SVikram.Hegde@Sun.COM * and *UNITY* domains is protected by the same
284911600SVikram.Hegde@Sun.COM * single lock
285011600SVikram.Hegde@Sun.COM */
285111600SVikram.Hegde@Sun.COM mutex_enter(&immu_domain_lock);
285211600SVikram.Hegde@Sun.COM domain = list_head(&immu_unity_domain_list);
285311600SVikram.Hegde@Sun.COM for (; domain; domain = list_next(&immu_unity_domain_list, domain)) {
285412990Sfrank.van.der.linden@oracle.com /*
285512990Sfrank.van.der.linden@oracle.com * Nothing to do if the IOMMU supports passthrough.
285612990Sfrank.van.der.linden@oracle.com */
285712990Sfrank.van.der.linden@oracle.com if (IMMU_ECAP_GET_PT(domain->dom_immu->immu_regs_excap))
285812990Sfrank.van.der.linden@oracle.com continue;
285911600SVikram.Hegde@Sun.COM
286011600SVikram.Hegde@Sun.COM /* There is no vmem_arena for unity domains. Just map it */
2861*13050Sfrank.van.der.linden@oracle.com ddi_err(DER_LOG, domain->dom_dip,
2862*13050Sfrank.van.der.linden@oracle.com "iommu: unity-domain: Adding map "
286311600SVikram.Hegde@Sun.COM "[0x%" PRIx64 " - 0x%" PRIx64 "]", addr, addr + size);
286411600SVikram.Hegde@Sun.COM
286511600SVikram.Hegde@Sun.COM start = IMMU_ROUNDOWN(addr);
286611600SVikram.Hegde@Sun.COM npages = (IMMU_ROUNDUP(size) / IMMU_PAGESIZE) + 1;
286711600SVikram.Hegde@Sun.COM
286811658SVikram.Hegde@Sun.COM dcookies[0].dck_paddr = start;
286911658SVikram.Hegde@Sun.COM dcookies[0].dck_npages = npages;
287011658SVikram.Hegde@Sun.COM dcount = 1;
2871*13050Sfrank.van.der.linden@oracle.com (void) dvma_map(domain, start, npages,
287211658SVikram.Hegde@Sun.COM dcookies, dcount, NULL, IMMU_FLAGS_READ | IMMU_FLAGS_WRITE);
287311600SVikram.Hegde@Sun.COM
287411600SVikram.Hegde@Sun.COM }
287511600SVikram.Hegde@Sun.COM mutex_exit(&immu_domain_lock);
287611600SVikram.Hegde@Sun.COM }
287711600SVikram.Hegde@Sun.COM
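/*
 * Set up DVMA for a device: find the IOMMU unit that covers it,
 * redirect ISA devices under the LPC bridge and the agpgart node to
 * the devinfo node that actually issues DMA, look up or create the
 * device's domain, and finally update the root and context entries
 * for it.
 */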
287811600SVikram.Hegde@Sun.COM int
2879*13050Sfrank.van.der.linden@oracle.com immu_dvma_device_setup(dev_info_t *rdip, immu_flags_t immu_flags)
288011600SVikram.Hegde@Sun.COM {
2881*13050Sfrank.van.der.linden@oracle.com dev_info_t *ddip, *odip;
288211600SVikram.Hegde@Sun.COM immu_t *immu;
2883*13050Sfrank.van.der.linden@oracle.com domain_t *domain;
2884*13050Sfrank.van.der.linden@oracle.com
2885*13050Sfrank.van.der.linden@oracle.com odip = rdip;
288611600SVikram.Hegde@Sun.COM
288711658SVikram.Hegde@Sun.COM immu = immu_dvma_get_immu(rdip, immu_flags);
288811658SVikram.Hegde@Sun.COM if (immu == NULL) {
288911658SVikram.Hegde@Sun.COM /*
289011658SVikram.Hegde@Sun.COM * possible that there is no IOMMU unit for this device
289111658SVikram.Hegde@Sun.COM * - BIOS bugs are one example.
289211658SVikram.Hegde@Sun.COM */
2893*13050Sfrank.van.der.linden@oracle.com ddi_err(DER_WARN, rdip, "No iommu unit found for device");
289411658SVikram.Hegde@Sun.COM return (DDI_DMA_NORESOURCES);
289511658SVikram.Hegde@Sun.COM }
289611658SVikram.Hegde@Sun.COM
289711658SVikram.Hegde@Sun.COM /*
289811658SVikram.Hegde@Sun.COM * redirect isa devices attached under lpc to lpc dip
289911658SVikram.Hegde@Sun.COM */
290011658SVikram.Hegde@Sun.COM if (strcmp(ddi_node_name(ddi_get_parent(rdip)), "isa") == 0) {
290111658SVikram.Hegde@Sun.COM rdip = get_lpc_devinfo(immu, rdip, immu_flags);
290211658SVikram.Hegde@Sun.COM if (rdip == NULL) {
2903*13050Sfrank.van.der.linden@oracle.com ddi_err(DER_PANIC, rdip, "iommu redirect failed");
290411658SVikram.Hegde@Sun.COM /*NOTREACHED*/
290511658SVikram.Hegde@Sun.COM }
290611658SVikram.Hegde@Sun.COM }
290711658SVikram.Hegde@Sun.COM
290811658SVikram.Hegde@Sun.COM /* Reset immu, as redirection can change IMMU */
290911658SVikram.Hegde@Sun.COM immu = NULL;
291011658SVikram.Hegde@Sun.COM
291111658SVikram.Hegde@Sun.COM /*
291211658SVikram.Hegde@Sun.COM * for gart, redirect to the real graphic devinfo
291311658SVikram.Hegde@Sun.COM */
291411658SVikram.Hegde@Sun.COM if (strcmp(ddi_node_name(rdip), "agpgart") == 0) {
291511658SVikram.Hegde@Sun.COM rdip = get_gfx_devinfo(rdip);
291611658SVikram.Hegde@Sun.COM if (rdip == NULL) {
2917*13050Sfrank.van.der.linden@oracle.com ddi_err(DER_PANIC, rdip, "iommu redirect failed");
291811658SVikram.Hegde@Sun.COM /*NOTREACHED*/
291911658SVikram.Hegde@Sun.COM }
292011658SVikram.Hegde@Sun.COM }
292111658SVikram.Hegde@Sun.COM
292211600SVikram.Hegde@Sun.COM /*
292311600SVikram.Hegde@Sun.COM * Setup DVMA domain for the device. This does
292411600SVikram.Hegde@Sun.COM * work only the first time we do DVMA for a
292511600SVikram.Hegde@Sun.COM * device.
292611600SVikram.Hegde@Sun.COM */
292711600SVikram.Hegde@Sun.COM ddip = NULL;
292811600SVikram.Hegde@Sun.COM domain = device_domain(rdip, &ddip, immu_flags);
292911600SVikram.Hegde@Sun.COM if (domain == NULL) {
293011600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip, "Intel IOMMU setup failed for device");
293111600SVikram.Hegde@Sun.COM return (DDI_DMA_NORESOURCES);
293211600SVikram.Hegde@Sun.COM }
293311600SVikram.Hegde@Sun.COM
2934*13050Sfrank.van.der.linden@oracle.com immu = domain->dom_immu;
2935*13050Sfrank.van.der.linden@oracle.com
293611600SVikram.Hegde@Sun.COM /*
293711600SVikram.Hegde@Sun.COM * If a domain is found, we must also have a domain dip
293811600SVikram.Hegde@Sun.COM * which is the topmost ancestor dip of rdip that shares
293911600SVikram.Hegde@Sun.COM * the same domain with rdip.
294011600SVikram.Hegde@Sun.COM */
294111600SVikram.Hegde@Sun.COM if (domain->dom_did == 0 || ddip == NULL) {
294211600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip, "domain did 0(%d) or ddip NULL(%p)",
294311600SVikram.Hegde@Sun.COM domain->dom_did, ddip);
294411600SVikram.Hegde@Sun.COM return (DDI_DMA_NORESOURCES);
294511600SVikram.Hegde@Sun.COM }
294611600SVikram.Hegde@Sun.COM
2947*13050Sfrank.van.der.linden@oracle.com if (odip != rdip)
2948*13050Sfrank.van.der.linden@oracle.com set_domain(odip, ddip, domain);
294911600SVikram.Hegde@Sun.COM
295011600SVikram.Hegde@Sun.COM /*
295111600SVikram.Hegde@Sun.COM * Update the root and context entries
295211600SVikram.Hegde@Sun.COM */
295311600SVikram.Hegde@Sun.COM if (immu_context_update(immu, domain, ddip, rdip, immu_flags)
295411600SVikram.Hegde@Sun.COM != DDI_SUCCESS) {
295511600SVikram.Hegde@Sun.COM ddi_err(DER_MODE, rdip, "DVMA map: context update failed");
295611600SVikram.Hegde@Sun.COM return (DDI_DMA_NORESOURCES);
295711600SVikram.Hegde@Sun.COM }
295811600SVikram.Hegde@Sun.COM
2959*13050Sfrank.van.der.linden@oracle.com return (DDI_SUCCESS);
296011600SVikram.Hegde@Sun.COM }
296111600SVikram.Hegde@Sun.COM
296211600SVikram.Hegde@Sun.COM int
2963*13050Sfrank.van.der.linden@oracle.com immu_map_memrange(dev_info_t *rdip, memrng_t *mrng)
296411600SVikram.Hegde@Sun.COM {
2965*13050Sfrank.van.der.linden@oracle.com immu_dcookie_t dcookies[1] = {0};
2966*13050Sfrank.van.der.linden@oracle.com boolean_t pde_set;
296711600SVikram.Hegde@Sun.COM immu_t *immu;
2968*13050Sfrank.van.der.linden@oracle.com domain_t *domain;
2969*13050Sfrank.van.der.linden@oracle.com immu_inv_wait_t iw;
2970*13050Sfrank.van.der.linden@oracle.com
2971*13050Sfrank.van.der.linden@oracle.com dcookies[0].dck_paddr = mrng->mrng_start;
2972*13050Sfrank.van.der.linden@oracle.com dcookies[0].dck_npages = mrng->mrng_npages;
2973*13050Sfrank.van.der.linden@oracle.com
2974*13050Sfrank.van.der.linden@oracle.com domain = IMMU_DEVI(rdip)->imd_domain;
297511600SVikram.Hegde@Sun.COM immu = domain->dom_immu;
2976*13050Sfrank.van.der.linden@oracle.com
2977*13050Sfrank.van.der.linden@oracle.com pde_set = dvma_map(domain, mrng->mrng_start,
2978*13050Sfrank.van.der.linden@oracle.com mrng->mrng_npages, dcookies, 1, rdip,
2979*13050Sfrank.van.der.linden@oracle.com IMMU_FLAGS_READ | IMMU_FLAGS_WRITE);
2980*13050Sfrank.van.der.linden@oracle.com
2981*13050Sfrank.van.der.linden@oracle.com immu_init_inv_wait(&iw, "memrange", B_TRUE);
2982*13050Sfrank.van.der.linden@oracle.com
2983*13050Sfrank.van.der.linden@oracle.com immu_flush_iotlb_psi(immu, domain->dom_did, mrng->mrng_start,
2984*13050Sfrank.van.der.linden@oracle.com mrng->mrng_npages, pde_set == B_TRUE ?
2985*13050Sfrank.van.der.linden@oracle.com TLB_IVA_WHOLE : TLB_IVA_LEAF, &iw);
298611600SVikram.Hegde@Sun.COM
298711600SVikram.Hegde@Sun.COM return (DDI_SUCCESS);
298811600SVikram.Hegde@Sun.COM }
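/*
 * Illustrative usage sketch (editor addition, not part of the original
 * source): a hypothetical caller identity-mapping one reserved page through
 * immu_map_memrange(). The memrng_t field names mirror those used above;
 * the 64-bit field types and the helper name are assumptions made for
 * illustration. The device is assumed to already have a domain attached,
 * since immu_map_memrange() takes it from IMMU_DEVI(rdip)->imd_domain.
 */
static void
example_map_reserved_page(dev_info_t *rdip, uint64_t pa)
{
	memrng_t mrng;

	mrng.mrng_start = pa;		/* page-aligned physical address */
	mrng.mrng_npages = 1;		/* identity-map a single IMMU page */

	(void) immu_map_memrange(rdip, &mrng);
}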
298911600SVikram.Hegde@Sun.COM
299011600SVikram.Hegde@Sun.COM immu_devi_t *
299111600SVikram.Hegde@Sun.COM immu_devi_get(dev_info_t *rdip)
299211600SVikram.Hegde@Sun.COM {
299311600SVikram.Hegde@Sun.COM immu_devi_t *immu_devi;
299411658SVikram.Hegde@Sun.COM volatile uintptr_t *vptr = (uintptr_t *)&(DEVI(rdip)->devi_iommu);
299511658SVikram.Hegde@Sun.COM
299611658SVikram.Hegde@Sun.COM /* Just want atomic reads. No need for lock */
299711658SVikram.Hegde@Sun.COM immu_devi = (immu_devi_t *)(uintptr_t)atomic_or_64_nv((uint64_t *)vptr,
299811658SVikram.Hegde@Sun.COM 0);
299911600SVikram.Hegde@Sun.COM return (immu_devi);
300011600SVikram.Hegde@Sun.COM }
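/*
 * Illustrative usage sketch (editor addition, not part of the original
 * source): callers typically treat a NULL return from immu_devi_get() as
 * "no IOMMU state attached to this device yet". The helper name below is
 * hypothetical.
 */
static boolean_t
example_device_has_iommu_state(dev_info_t *rdip)
{
	immu_devi_t *immu_devi;

	/* Lock-free read; see the atomic access in immu_devi_get() above */
	immu_devi = immu_devi_get(rdip);

	return (immu_devi != NULL ? B_TRUE : B_FALSE);
}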
3001*13050Sfrank.van.der.linden@oracle.com
3002*13050Sfrank.van.der.linden@oracle.com /*ARGSUSED*/
3003*13050Sfrank.van.der.linden@oracle.com int
3004*13050Sfrank.van.der.linden@oracle.com immu_hdl_priv_ctor(void *buf, void *arg, int kmf)
3005*13050Sfrank.van.der.linden@oracle.com {
3006*13050Sfrank.van.der.linden@oracle.com immu_hdl_priv_t *ihp;
3007*13050Sfrank.van.der.linden@oracle.com
3008*13050Sfrank.van.der.linden@oracle.com ihp = buf;
3009*13050Sfrank.van.der.linden@oracle.com immu_init_inv_wait(&ihp->ihp_inv_wait, "dmahandle", B_FALSE);
3010*13050Sfrank.van.der.linden@oracle.com
3011*13050Sfrank.van.der.linden@oracle.com return (0);
3012*13050Sfrank.van.der.linden@oracle.com }
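/*
 * Illustrative sketch (editor addition, not part of the original source):
 * how a constructor like immu_hdl_priv_ctor() is typically wired into a
 * kmem cache. The immu_hdl_cache field is the one used by immu_allochdl()
 * below; the cache name, alignment, and flags shown here are assumptions
 * for illustration, not necessarily the values this driver uses elsewhere.
 */
static void
example_create_hdl_cache(immu_t *immu)
{
	immu->immu_hdl_cache = kmem_cache_create("example_immu_hdl_cache",
	    sizeof (immu_hdl_priv_t), 64, immu_hdl_priv_ctor, NULL, NULL,
	    NULL, NULL, 0);
}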
3013*13050Sfrank.van.der.linden@oracle.com
3014*13050Sfrank.van.der.linden@oracle.com /*
3015*13050Sfrank.van.der.linden@oracle.com * iommulib interface functions
3016*13050Sfrank.van.der.linden@oracle.com */
3017*13050Sfrank.van.der.linden@oracle.com static int
3018*13050Sfrank.van.der.linden@oracle.com immu_probe(iommulib_handle_t handle, dev_info_t *dip)
3019*13050Sfrank.van.der.linden@oracle.com {
3020*13050Sfrank.van.der.linden@oracle.com immu_devi_t *immu_devi;
3021*13050Sfrank.van.der.linden@oracle.com int ret;
3022*13050Sfrank.van.der.linden@oracle.com
3023*13050Sfrank.van.der.linden@oracle.com if (!immu_enable)
3024*13050Sfrank.van.der.linden@oracle.com return (DDI_FAILURE);
3025*13050Sfrank.van.der.linden@oracle.com
3026*13050Sfrank.van.der.linden@oracle.com /*
3027*13050Sfrank.van.der.linden@oracle.com * Make sure the device has all the IOMMU structures
3028*13050Sfrank.van.der.linden@oracle.com * initialized. If this device goes through an IOMMU
3029*13050Sfrank.van.der.linden@oracle.com  * unit (i.e. this probe function returns success),
3030*13050Sfrank.van.der.linden@oracle.com * this will be called at most N times, with N being
3031*13050Sfrank.van.der.linden@oracle.com * the number of IOMMUs in the system.
3032*13050Sfrank.van.der.linden@oracle.com *
3033*13050Sfrank.van.der.linden@oracle.com * After that, when iommulib_nex_open succeeds,
3034*13050Sfrank.van.der.linden@oracle.com * we can always assume that this device has all
3035*13050Sfrank.van.der.linden@oracle.com * the structures initialized. IOMMU_USED(dip) will
3036*13050Sfrank.van.der.linden@oracle.com * be true. There is no need to find the controlling
3037*13050Sfrank.van.der.linden@oracle.com * IOMMU/domain again.
3038*13050Sfrank.van.der.linden@oracle.com */
3039*13050Sfrank.van.der.linden@oracle.com ret = immu_dvma_device_setup(dip, IMMU_FLAGS_NOSLEEP);
3040*13050Sfrank.van.der.linden@oracle.com if (ret != DDI_SUCCESS)
3041*13050Sfrank.van.der.linden@oracle.com return (ret);
3042*13050Sfrank.van.der.linden@oracle.com
3043*13050Sfrank.van.der.linden@oracle.com immu_devi = IMMU_DEVI(dip);
3044*13050Sfrank.van.der.linden@oracle.com
3045*13050Sfrank.van.der.linden@oracle.com /*
3046*13050Sfrank.van.der.linden@oracle.com * For unity domains, there is no need to call in to
3047*13050Sfrank.van.der.linden@oracle.com * the IOMMU code.
3048*13050Sfrank.van.der.linden@oracle.com */
3049*13050Sfrank.van.der.linden@oracle.com if (immu_devi->imd_domain->dom_did == IMMU_UNITY_DID)
3050*13050Sfrank.van.der.linden@oracle.com return (DDI_FAILURE);
3051*13050Sfrank.van.der.linden@oracle.com
3052*13050Sfrank.van.der.linden@oracle.com if (immu_devi->imd_immu->immu_dip == iommulib_iommu_getdip(handle))
3053*13050Sfrank.van.der.linden@oracle.com return (DDI_SUCCESS);
3054*13050Sfrank.van.der.linden@oracle.com
3055*13050Sfrank.van.der.linden@oracle.com return (DDI_FAILURE);
3056*13050Sfrank.van.der.linden@oracle.com }
3057*13050Sfrank.van.der.linden@oracle.com
3058*13050Sfrank.van.der.linden@oracle.com /*ARGSUSED*/
3059*13050Sfrank.van.der.linden@oracle.com static int
3060*13050Sfrank.van.der.linden@oracle.com immu_allochdl(iommulib_handle_t handle,
3061*13050Sfrank.van.der.linden@oracle.com dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
3062*13050Sfrank.van.der.linden@oracle.com int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep)
3063*13050Sfrank.van.der.linden@oracle.com {
3064*13050Sfrank.van.der.linden@oracle.com int ret;
3065*13050Sfrank.van.der.linden@oracle.com immu_hdl_priv_t *ihp;
3066*13050Sfrank.van.der.linden@oracle.com immu_t *immu;
3067*13050Sfrank.van.der.linden@oracle.com
3068*13050Sfrank.van.der.linden@oracle.com ret = iommulib_iommu_dma_allochdl(dip, rdip, attr, waitfp,
3069*13050Sfrank.van.der.linden@oracle.com arg, dma_handlep);
3070*13050Sfrank.van.der.linden@oracle.com if (ret == DDI_SUCCESS) {
3071*13050Sfrank.van.der.linden@oracle.com immu = IMMU_DEVI(rdip)->imd_immu;
3072*13050Sfrank.van.der.linden@oracle.com
3073*13050Sfrank.van.der.linden@oracle.com ihp = kmem_cache_alloc(immu->immu_hdl_cache,
3074*13050Sfrank.van.der.linden@oracle.com waitfp == DDI_DMA_SLEEP ? KM_SLEEP : KM_NOSLEEP);
3075*13050Sfrank.van.der.linden@oracle.com if (ihp == NULL) {
3076*13050Sfrank.van.der.linden@oracle.com (void) iommulib_iommu_dma_freehdl(dip, rdip,
3077*13050Sfrank.van.der.linden@oracle.com *dma_handlep);
3078*13050Sfrank.van.der.linden@oracle.com return (DDI_DMA_NORESOURCES);
3079*13050Sfrank.van.der.linden@oracle.com }
3080*13050Sfrank.van.der.linden@oracle.com
3081*13050Sfrank.van.der.linden@oracle.com if (IMMU_DEVI(rdip)->imd_use_premap)
3082*13050Sfrank.van.der.linden@oracle.com dvma_prealloc(rdip, ihp, attr);
3083*13050Sfrank.van.der.linden@oracle.com else {
3084*13050Sfrank.van.der.linden@oracle.com ihp->ihp_npremapped = 0;
3085*13050Sfrank.van.der.linden@oracle.com ihp->ihp_predvma = 0;
3086*13050Sfrank.van.der.linden@oracle.com }
3087*13050Sfrank.van.der.linden@oracle.com ret = iommulib_iommu_dmahdl_setprivate(dip, rdip, *dma_handlep,
3088*13050Sfrank.van.der.linden@oracle.com ihp);
3089*13050Sfrank.van.der.linden@oracle.com }
3090*13050Sfrank.van.der.linden@oracle.com return (ret);
3091*13050Sfrank.van.der.linden@oracle.com }
3092*13050Sfrank.van.der.linden@oracle.com
3093*13050Sfrank.van.der.linden@oracle.com /*ARGSUSED*/
3094*13050Sfrank.van.der.linden@oracle.com static int
3095*13050Sfrank.van.der.linden@oracle.com immu_freehdl(iommulib_handle_t handle,
3096*13050Sfrank.van.der.linden@oracle.com dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
3097*13050Sfrank.van.der.linden@oracle.com {
3098*13050Sfrank.van.der.linden@oracle.com immu_hdl_priv_t *ihp;
3099*13050Sfrank.van.der.linden@oracle.com
3100*13050Sfrank.van.der.linden@oracle.com ihp = iommulib_iommu_dmahdl_getprivate(dip, rdip, dma_handle);
3101*13050Sfrank.van.der.linden@oracle.com if (ihp != NULL) {
3102*13050Sfrank.van.der.linden@oracle.com if (IMMU_DEVI(rdip)->imd_use_premap)
3103*13050Sfrank.van.der.linden@oracle.com dvma_prefree(rdip, ihp);
3104*13050Sfrank.van.der.linden@oracle.com kmem_cache_free(IMMU_DEVI(rdip)->imd_immu->immu_hdl_cache, ihp);
3105*13050Sfrank.van.der.linden@oracle.com }
3106*13050Sfrank.van.der.linden@oracle.com
3107*13050Sfrank.van.der.linden@oracle.com return (iommulib_iommu_dma_freehdl(dip, rdip, dma_handle));
3108*13050Sfrank.van.der.linden@oracle.com }
3109*13050Sfrank.van.der.linden@oracle.com
3110*13050Sfrank.van.der.linden@oracle.com
3111*13050Sfrank.van.der.linden@oracle.com /*ARGSUSED*/
3112*13050Sfrank.van.der.linden@oracle.com static int
3113*13050Sfrank.van.der.linden@oracle.com immu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
3114*13050Sfrank.van.der.linden@oracle.com dev_info_t *rdip, ddi_dma_handle_t dma_handle,
3115*13050Sfrank.van.der.linden@oracle.com struct ddi_dma_req *dma_req, ddi_dma_cookie_t *cookiep,
3116*13050Sfrank.van.der.linden@oracle.com uint_t *ccountp)
3117*13050Sfrank.van.der.linden@oracle.com {
3118*13050Sfrank.van.der.linden@oracle.com int ret;
3119*13050Sfrank.van.der.linden@oracle.com immu_hdl_priv_t *ihp;
3120*13050Sfrank.van.der.linden@oracle.com
3121*13050Sfrank.van.der.linden@oracle.com ret = iommulib_iommu_dma_bindhdl(dip, rdip, dma_handle,
3122*13050Sfrank.van.der.linden@oracle.com dma_req, cookiep, ccountp);
3123*13050Sfrank.van.der.linden@oracle.com
3124*13050Sfrank.van.der.linden@oracle.com if (ret == DDI_DMA_MAPPED) {
3125*13050Sfrank.van.der.linden@oracle.com ihp = iommulib_iommu_dmahdl_getprivate(dip, rdip, dma_handle);
3126*13050Sfrank.van.der.linden@oracle.com immu_flush_wait(IMMU_DEVI(rdip)->imd_immu, &ihp->ihp_inv_wait);
3127*13050Sfrank.van.der.linden@oracle.com }
3128*13050Sfrank.van.der.linden@oracle.com
3129*13050Sfrank.van.der.linden@oracle.com return (ret);
3130*13050Sfrank.van.der.linden@oracle.com }
3131*13050Sfrank.van.der.linden@oracle.com
3132*13050Sfrank.van.der.linden@oracle.com /*ARGSUSED*/
3133*13050Sfrank.van.der.linden@oracle.com static int
3134*13050Sfrank.van.der.linden@oracle.com immu_unbindhdl(iommulib_handle_t handle,
3135*13050Sfrank.van.der.linden@oracle.com dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
3136*13050Sfrank.van.der.linden@oracle.com {
3137*13050Sfrank.van.der.linden@oracle.com return (iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle));
3138*13050Sfrank.van.der.linden@oracle.com }
3139*13050Sfrank.van.der.linden@oracle.com
3140*13050Sfrank.van.der.linden@oracle.com /*ARGSUSED*/
3141*13050Sfrank.van.der.linden@oracle.com static int
3142*13050Sfrank.van.der.linden@oracle.com immu_sync(iommulib_handle_t handle, dev_info_t *dip,
3143*13050Sfrank.van.der.linden@oracle.com dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
3144*13050Sfrank.van.der.linden@oracle.com size_t len, uint_t cachefl)
3145*13050Sfrank.van.der.linden@oracle.com {
3146*13050Sfrank.van.der.linden@oracle.com return (iommulib_iommu_dma_sync(dip, rdip, dma_handle, off, len,
3147*13050Sfrank.van.der.linden@oracle.com cachefl));
3148*13050Sfrank.van.der.linden@oracle.com }
3149*13050Sfrank.van.der.linden@oracle.com
3150*13050Sfrank.van.der.linden@oracle.com /*ARGSUSED*/
3151*13050Sfrank.van.der.linden@oracle.com static int
3152*13050Sfrank.van.der.linden@oracle.com immu_win(iommulib_handle_t handle, dev_info_t *dip,
3153*13050Sfrank.van.der.linden@oracle.com dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
3154*13050Sfrank.van.der.linden@oracle.com off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
3155*13050Sfrank.van.der.linden@oracle.com uint_t *ccountp)
3156*13050Sfrank.van.der.linden@oracle.com {
3157*13050Sfrank.van.der.linden@oracle.com return (iommulib_iommu_dma_win(dip, rdip, dma_handle, win, offp,
3158*13050Sfrank.van.der.linden@oracle.com lenp, cookiep, ccountp));
3159*13050Sfrank.van.der.linden@oracle.com }
3160*13050Sfrank.van.der.linden@oracle.com
3161*13050Sfrank.van.der.linden@oracle.com /*ARGSUSED*/
3162*13050Sfrank.van.der.linden@oracle.com static int
3163*13050Sfrank.van.der.linden@oracle.com immu_mapobject(iommulib_handle_t handle, dev_info_t *dip,
3164*13050Sfrank.van.der.linden@oracle.com dev_info_t *rdip, ddi_dma_handle_t dma_handle,
3165*13050Sfrank.van.der.linden@oracle.com struct ddi_dma_req *dmareq, ddi_dma_obj_t *dmao)
3166*13050Sfrank.van.der.linden@oracle.com {
3167*13050Sfrank.van.der.linden@oracle.com immu_hdl_priv_t *ihp;
3168*13050Sfrank.van.der.linden@oracle.com
3169*13050Sfrank.van.der.linden@oracle.com ihp = iommulib_iommu_dmahdl_getprivate(dip, rdip, dma_handle);
3170*13050Sfrank.van.der.linden@oracle.com
3171*13050Sfrank.van.der.linden@oracle.com return (immu_map_dvmaseg(rdip, dma_handle, ihp, dmareq, dmao));
3172*13050Sfrank.van.der.linden@oracle.com }
3173*13050Sfrank.van.der.linden@oracle.com
3174*13050Sfrank.van.der.linden@oracle.com /*ARGSUSED*/
3175*13050Sfrank.van.der.linden@oracle.com static int
3176*13050Sfrank.van.der.linden@oracle.com immu_unmapobject(iommulib_handle_t handle, dev_info_t *dip,
3177*13050Sfrank.van.der.linden@oracle.com dev_info_t *rdip, ddi_dma_handle_t dma_handle, ddi_dma_obj_t *dmao)
3178*13050Sfrank.van.der.linden@oracle.com {
3179*13050Sfrank.van.der.linden@oracle.com immu_hdl_priv_t *ihp;
3180*13050Sfrank.van.der.linden@oracle.com
3181*13050Sfrank.van.der.linden@oracle.com ihp = iommulib_iommu_dmahdl_getprivate(dip, rdip, dma_handle);
3182*13050Sfrank.van.der.linden@oracle.com if (ihp->ihp_npremapped > 0)
3183*13050Sfrank.van.der.linden@oracle.com return (DDI_SUCCESS);
3184*13050Sfrank.van.der.linden@oracle.com return (immu_unmap_dvmaseg(rdip, dmao));
3185*13050Sfrank.van.der.linden@oracle.com }
3186*13050Sfrank.van.der.linden@oracle.com
3187*13050Sfrank.van.der.linden@oracle.com /*ARGSUSED*/
3188*13050Sfrank.van.der.linden@oracle.com static int
3189*13050Sfrank.van.der.linden@oracle.com immu_map(iommulib_handle_t handle, dev_info_t *dip,
3190*13050Sfrank.van.der.linden@oracle.com dev_info_t *rdip, struct ddi_dma_req *dmareq,
3191*13050Sfrank.van.der.linden@oracle.com ddi_dma_handle_t *dma_handle)
3192*13050Sfrank.van.der.linden@oracle.com {
3193*13050Sfrank.van.der.linden@oracle.com ASSERT(0);
3194*13050Sfrank.van.der.linden@oracle.com return (DDI_FAILURE);
3195*13050Sfrank.van.der.linden@oracle.com }
3196*13050Sfrank.van.der.linden@oracle.com
3197*13050Sfrank.van.der.linden@oracle.com /*ARGSUSED*/
3198*13050Sfrank.van.der.linden@oracle.com static int
3199*13050Sfrank.van.der.linden@oracle.com immu_mctl(iommulib_handle_t handle, dev_info_t *dip,
3200*13050Sfrank.van.der.linden@oracle.com dev_info_t *rdip, ddi_dma_handle_t dma_handle,
3201*13050Sfrank.van.der.linden@oracle.com enum ddi_dma_ctlops request, off_t *offp, size_t *lenp,
3202*13050Sfrank.van.der.linden@oracle.com caddr_t *objpp, uint_t cachefl)
3203*13050Sfrank.van.der.linden@oracle.com {
3204*13050Sfrank.van.der.linden@oracle.com ASSERT(0);
3205*13050Sfrank.van.der.linden@oracle.com return (DDI_FAILURE);
3206*13050Sfrank.van.der.linden@oracle.com }
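/*
 * Illustrative sketch (editor addition, not part of the original source):
 * the static functions above form an iommulib DMA ops vector that the
 * driver registers for each IOMMU unit. The ops table passed in below is
 * assumed to have been filled in per the iommulib_ops_t layout in
 * <sys/iommulib.h> with immu_probe, immu_allochdl, immu_freehdl,
 * immu_bindhdl, and the other entry points defined above; the helper name
 * is hypothetical.
 */
static int
example_register_with_iommulib(dev_info_t *immu_dip, iommulib_ops_t *ops,
    iommulib_handle_t *handlep)
{
	return (iommulib_iommu_register(immu_dip, ops, handlep));
}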