/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2018 Joyent, Inc.
 * Copyright 2021 Oxide Computer Company
 */

/*
 * segvmm - Virtual-Machine-Memory segment
 *
 * The vmm segment driver was designed for mapping regions of kernel memory
 * allocated to an HVM instance into userspace for manipulation there. It
 * draws direct lineage from the umap segment driver, but is meant for larger
 * mappings with fewer restrictions.
 *
 * seg*k*vmm, in contrast, has mappings for every VMM into kas. We use its
 * mappings here only to find the relevant PFNs in segvmm_fault_in().
 */
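
/*
 * Usage sketch (illustrative only; the names below are placeholders): a
 * consumer fills out a segvmm_crargs_t and hands it to segvmm_create(),
 * typically via as_map(). Exactly one of 'vmo' (a direct vm_object mapping)
 * or 'vmc' (a client of a guest vmspace) may be supplied, the protections
 * must include PROT_USER, and the offset must be page-aligned:
 *
 *	segvmm_crargs_t cra = {
 *		.prot = PROT_USER | PROT_READ | PROT_WRITE,
 *		.offset = gpa_off,
 *		.vmo = vmo,
 *	};
 *	error = as_map(as, uaddr, len, segvmm_create, &cra);
 *
 * where 'gpa_off' is the page-aligned offset into the object and 'vmc'
 * would be supplied in place of 'vmo' for a vmspace-backed mapping.
 */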


#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/lgrp.h>
#include <sys/mman.h>

#include <vm/hat.h>
#include <vm/hat_pte.h>
#include <vm/htable.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>

#include <sys/seg_vmm.h>

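/*
 * Segment-private data. svmd_lock protects svmd_softlockcnt and guards
 * reads of the (otherwise fixed) protections. Exactly one of svmd_vmo
 * (a direct vm_object mapping) or svmd_vmc (a client of a guest vmspace)
 * is set, depending on how the segment was created. svmd_off is the
 * page-aligned offset into that object/vmspace, and svmd_prot holds the
 * mapping protections with PROT_USER masked off (it is re-added when
 * translations are loaded at fault time).
 */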
typedef struct segvmm_data {
	krwlock_t	svmd_lock;
	vm_object_t	*svmd_vmo;
	vm_client_t	*svmd_vmc;
	uintptr_t	svmd_off;
	uchar_t		svmd_prot;
	size_t		svmd_softlockcnt;
} segvmm_data_t;


static int segvmm_dup(struct seg *, struct seg *);
static int segvmm_unmap(struct seg *, caddr_t, size_t);
static void segvmm_free(struct seg *);
static faultcode_t segvmm_fault(struct hat *, struct seg *, caddr_t, size_t,
    enum fault_type, enum seg_rw);
static faultcode_t segvmm_faulta(struct seg *, caddr_t);
static int segvmm_setprot(struct seg *, caddr_t, size_t, uint_t);
static int segvmm_checkprot(struct seg *, caddr_t, size_t, uint_t);
static int segvmm_sync(struct seg *, caddr_t, size_t, int, uint_t);
static size_t segvmm_incore(struct seg *, caddr_t, size_t, char *);
static int segvmm_lockop(struct seg *, caddr_t, size_t, int, int, ulong_t *,
    size_t);
static int segvmm_getprot(struct seg *, caddr_t, size_t, uint_t *);
static u_offset_t segvmm_getoffset(struct seg *, caddr_t);
static int segvmm_gettype(struct seg *, caddr_t);
static int segvmm_getvp(struct seg *, caddr_t, struct vnode **);
static int segvmm_advise(struct seg *, caddr_t, size_t, uint_t);
static void segvmm_dump(struct seg *);
static int segvmm_pagelock(struct seg *, caddr_t, size_t, struct page ***,
    enum lock_type, enum seg_rw);
static int segvmm_setpagesize(struct seg *, caddr_t, size_t, uint_t);
static int segvmm_getmemid(struct seg *, caddr_t, memid_t *);
static int segvmm_capable(struct seg *, segcapability_t);

static struct seg_ops segvmm_ops = {
	.dup		= segvmm_dup,
	.unmap		= segvmm_unmap,
	.free		= segvmm_free,
	.fault		= segvmm_fault,
	.faulta		= segvmm_faulta,
	.setprot	= segvmm_setprot,
	.checkprot	= segvmm_checkprot,
	.kluster	= NULL,
	.swapout	= NULL,
	.sync		= segvmm_sync,
	.incore		= segvmm_incore,
	.lockop		= segvmm_lockop,
	.getprot	= segvmm_getprot,
	.getoffset	= segvmm_getoffset,
	.gettype	= segvmm_gettype,
	.getvp		= segvmm_getvp,
	.advise		= segvmm_advise,
	.dump		= segvmm_dump,
	.pagelock	= segvmm_pagelock,
	.setpagesize	= segvmm_setpagesize,
	.getmemid	= segvmm_getmemid,
	.getpolicy	= NULL,
	.capable	= segvmm_capable,
	.inherit	= seg_inherit_notsup
};

/*
 * Unload a region from the HAT for A/D tracking.
 */
static void
segvmm_invalidate(void *arg, uintptr_t gpa, size_t sz)
{
	struct seg *seg = arg;
	segvmm_data_t *svmd = seg->s_data;

	/*
	 * Invalidations are only necessary (and configured) for vmspace
	 * mappings. Direct vm_object mappings are not involved.
	 */
	ASSERT3P(svmd->svmd_vmo, ==, NULL);

	/*
	 * The region being invalidated may overlap with all, some, or none of
	 * this segment. We are only concerned about that overlap.
	 */
	const uintptr_t start = MAX(gpa, svmd->svmd_off);
	const uintptr_t end = MIN(gpa + sz, svmd->svmd_off + seg->s_size);
	if (start >= end) {
		return;
	}
	ASSERT(start >= svmd->svmd_off && end <= svmd->svmd_off + seg->s_size);
	ASSERT(start >= gpa && end <= gpa + sz);
	const caddr_t unload_va = seg->s_base + (start - svmd->svmd_off);
	const size_t unload_sz = (end - start);
	ASSERT3U(unload_sz, <=, seg->s_size);

	hat_unload(seg->s_as->a_hat, unload_va, unload_sz, HAT_UNLOAD);
}

/*
 * Create a VMM-memory-backed segment.
 */
int
segvmm_create(struct seg **segpp, void *argsp)
{
	struct seg *seg = *segpp;
	segvmm_crargs_t *cra = argsp;
	segvmm_data_t *data;

	VERIFY((cra->vmo == NULL && cra->vmc != NULL) ||
	    (cra->vmo != NULL && cra->vmc == NULL));
	VERIFY(cra->prot & PROT_USER);
	VERIFY0(cra->offset & PAGEOFFSET);

	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	rw_init(&data->svmd_lock, NULL, RW_DEFAULT, NULL);
	data->svmd_off = cra->offset;
	data->svmd_prot = cra->prot & ~PROT_USER;

	seg->s_ops = &segvmm_ops;
	seg->s_data = data;

	if (cra->vmo != NULL) {
		data->svmd_vmo = cra->vmo;
		/* Hold the VM object for the lifetime of the segment */
		vm_object_reference(data->svmd_vmo);
	} else {
		int err;

		data->svmd_vmc = cra->vmc;
		err = vmc_set_inval_cb(data->svmd_vmc, segvmm_invalidate, seg);
		if (err != 0) {
			seg->s_ops = NULL;
			seg->s_data = NULL;
			kmem_free(data, sizeof (*data));
			return (err);
		}
	}
	return (0);
}

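/*
 * Duplicate the segment into a new address space (e.g. when the process
 * forks). A vm_object-backed segment simply takes another hold on the
 * object; a vmspace-backed segment clones the vm_client and must register
 * the invalidation callback anew for the new segment.
 */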
static int
segvmm_dup(struct seg *seg, struct seg *newseg)
{
	segvmm_data_t *svmd = seg->s_data;
	segvmm_data_t *newsvmd;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	newsvmd = kmem_zalloc(sizeof (segvmm_data_t), KM_SLEEP);
	rw_init(&newsvmd->svmd_lock, NULL, RW_DEFAULT, NULL);
	newsvmd->svmd_off = svmd->svmd_off;
	newsvmd->svmd_prot = svmd->svmd_prot;

	newseg->s_ops = seg->s_ops;
	newseg->s_data = newsvmd;

	if (svmd->svmd_vmo != NULL) {
		/* Grab another hold for the duplicate segment */
		vm_object_reference(svmd->svmd_vmo);
		newsvmd->svmd_vmo = svmd->svmd_vmo;
	} else {
		int err;

		newsvmd->svmd_vmc = vmc_clone(svmd->svmd_vmc);
		/*
		 * The cloned client does not inherit the invalidation
		 * configuration, so attempt to set it here for the new
		 * segment.
		 */
		err = vmc_set_inval_cb(newsvmd->svmd_vmc, segvmm_invalidate,
		    newseg);
		if (err != 0) {
			newseg->s_ops = NULL;
			newseg->s_data = NULL;
			kmem_free(newsvmd, sizeof (*newsvmd));
			return (err);
		}
	}

	return (0);
}

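/*
 * Unmap the segment. Only whole-segment unmaps are supported, and the
 * request is refused while any pages remain SOFTLOCKed.
 */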
static int
segvmm_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	segvmm_data_t *svmd = seg->s_data;

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

	/* Only allow unmap of entire segment */
	if (addr != seg->s_base || len != seg->s_size) {
		return (EINVAL);
	}
	if (svmd->svmd_softlockcnt != 0) {
		return (EAGAIN);
	}

	/* Unconditionally unload the entire segment range. */
	hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD_UNMAP);

	seg_free(seg);
	return (0);
}

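/*
 * Release segment-private resources: drop the vm_object hold or destroy the
 * vm_client, whichever backs this segment.
 */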
static void
segvmm_free(struct seg *seg)
{
	segvmm_data_t *svmd = seg->s_data;

	ASSERT(svmd != NULL);

	if (svmd->svmd_vmo != NULL) {
		/* Release the VM object hold this segment possessed */
		vm_object_release(svmd->svmd_vmo);
		svmd->svmd_vmo = NULL;
	} else {
		vmc_destroy(svmd->svmd_vmc);
		svmd->svmd_vmc = NULL;
	}
	rw_destroy(&svmd->svmd_lock);
	VERIFY(svmd->svmd_softlockcnt == 0);
	kmem_free(svmd, sizeof (*svmd));
	seg->s_data = NULL;
}

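/*
 * Fault in a range backed directly by a vm_object: translate each page
 * offset to a PFN and load a translation for it into the target HAT.
 */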
static int
segvmm_fault_obj(struct hat *hat, struct seg *seg, uintptr_t va, size_t len)
{
	segvmm_data_t *svmd = seg->s_data;
	const uintptr_t end = va + len;
	const int prot = svmd->svmd_prot;
	const int uprot = prot | PROT_USER;
	vm_object_t *vmo = svmd->svmd_vmo;

	ASSERT(vmo != NULL);

	va &= PAGEMASK;
	uintptr_t off = va - (uintptr_t)seg->s_base + svmd->svmd_off;
	do {
		pfn_t pfn;

		pfn = vm_object_pfn(vmo, off);
		if (pfn == PFN_INVALID) {
			return (FC_NOMAP);
		}

		/* Ignore any large-page possibilities for now */
		hat_devload(hat, (caddr_t)va, PAGESIZE, pfn, uprot, HAT_LOAD);
		va += PAGESIZE;
		off += PAGESIZE;
	} while (va < end);

	return (0);
}

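/*
 * Fault in a range backed by a guest vmspace: hold each page through the
 * vm_client, load its PFN into the target HAT, and bail out (reporting a
 * fault) if the page was unmapped from the vmspace in the meantime.
 */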
static int
segvmm_fault_space(struct hat *hat, struct seg *seg, uintptr_t va, size_t len)
{
	segvmm_data_t *svmd = seg->s_data;
	const uintptr_t end = va + len;
	const int prot = svmd->svmd_prot;
	const int uprot = prot | PROT_USER;
	vm_client_t *vmc = svmd->svmd_vmc;

	ASSERT(vmc != NULL);

	va &= PAGEMASK;
	uintptr_t off = va - (uintptr_t)seg->s_base + svmd->svmd_off;

	do {
		vm_page_t *vmp;
		pfn_t pfn;

		vmp = vmc_hold(vmc, off, prot);
		if (vmp == NULL) {
			return (FC_NOMAP);
		}

		pfn = vmp_get_pfn(vmp);
		ASSERT3U(pfn, !=, PFN_INVALID);

		/* Ignore any large-page possibilities for now */
		hat_devload(hat, (caddr_t)va, PAGESIZE, pfn, uprot, HAT_LOAD);

		if (vmp_release(vmp)) {
			/*
			 * Region was unmapped from vmspace while we were
			 * loading it into this AS. Communicate it as if it
			 * were a fault.
			 */
			hat_unload(hat, (caddr_t)va, PAGESIZE, HAT_UNLOAD);
			return (FC_NOMAP);
		}

		va += PAGESIZE;
		off += PAGESIZE;
	} while (va < end);

	return (0);
}

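/*
 * Handle a fault against the segment: protection faults always fail since
 * the protections are fixed, F_SOFTUNLOCK merely drops the softlock count,
 * and F_INVAL/F_SOFTLOCK load translations via the vm_object or vmspace
 * path, bumping the softlock count on success for F_SOFTLOCK.
 */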
/* ARGSUSED */
static faultcode_t
segvmm_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
    enum fault_type type, enum seg_rw rw)
{
	segvmm_data_t *svmd = seg->s_data;
	int err = 0;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	if (type == F_PROT) {
		/*
		 * Since protection on the segment is fixed, there is nothing
		 * to do but report an error for protection faults.
		 */
		return (FC_PROT);
	} else if (type == F_SOFTUNLOCK) {
		size_t plen = btop(len);

		rw_enter(&svmd->svmd_lock, RW_WRITER);
		VERIFY(svmd->svmd_softlockcnt >= plen);
		svmd->svmd_softlockcnt -= plen;
		rw_exit(&svmd->svmd_lock);
		return (0);
	}

	VERIFY(type == F_INVAL || type == F_SOFTLOCK);
	rw_enter(&svmd->svmd_lock, RW_WRITER);

	if (svmd->svmd_vmo != NULL) {
		err = segvmm_fault_obj(hat, seg, (uintptr_t)addr, len);
	} else {
		err = segvmm_fault_space(hat, seg, (uintptr_t)addr, len);
	}
	if (type == F_SOFTLOCK && err == 0) {
		size_t nval = svmd->svmd_softlockcnt + btop(len);

		if (svmd->svmd_softlockcnt >= nval) {
			rw_exit(&svmd->svmd_lock);
			return (FC_MAKE_ERR(EOVERFLOW));
		}
		svmd->svmd_softlockcnt = nval;
	}

	rw_exit(&svmd->svmd_lock);
	return (err);
}

/* ARGSUSED */
static faultcode_t
segvmm_faulta(struct seg *seg, caddr_t addr)
{
	/* Do nothing; an async pagefault should not load a translation. */
	return (0);
}

/* ARGSUSED */
static int
segvmm_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/* The seg_vmm driver does not yet allow protection to be changed. */
	return (EACCES);
}

/* ARGSUSED */
static int
segvmm_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	segvmm_data_t *svmd = seg->s_data;
	int error = 0;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	rw_enter(&svmd->svmd_lock, RW_READER);
	if ((svmd->svmd_prot & prot) != prot) {
		error = EACCES;
	}
	rw_exit(&svmd->svmd_lock);
	return (error);
}

/* ARGSUSED */
static int
segvmm_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
	/* Always succeed, since there is no backing store to sync */
	return (0);
}

/* ARGSUSED */
static size_t
segvmm_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	size_t sz = 0;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	len = (len + PAGEOFFSET) & PAGEMASK;
	while (len > 0) {
		*vec = 1;
		sz += PAGESIZE;
		vec++;
		len -= PAGESIZE;
	}
	return (sz);
}

/* ARGSUSED */
static int
segvmm_lockop(struct seg *seg, caddr_t addr, size_t len, int attr, int op,
    ulong_t *lockmap, size_t pos)
{
	/* Report success since kernel pages are always in memory. */
	return (0);
}

static int
segvmm_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	segvmm_data_t *svmd = seg->s_data;
	size_t pgno;
	uint_t prot;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	rw_enter(&svmd->svmd_lock, RW_READER);
	prot = svmd->svmd_prot;
	rw_exit(&svmd->svmd_lock);

	/*
	 * Reporting protection is simple since it is not tracked per-page.
	 */
	pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
	while (pgno > 0) {
		protv[--pgno] = prot;
	}
	return (0);
}

/* ARGSUSED */
static u_offset_t
segvmm_getoffset(struct seg *seg, caddr_t addr)
{
	/*
	 * To avoid leaking information about the layout of the kernel address
	 * space, always report '0' as the offset.
	 */
	return (0);
}

/* ARGSUSED */
static int
segvmm_gettype(struct seg *seg, caddr_t addr)
{
	/*
	 * Since already-existing vmm reservoir pages are being mapped into
	 * userspace, always report the segment type as shared.
	 */
	return (MAP_SHARED);
}

/* ARGSUSED */
static int
segvmm_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	*vpp = NULL;
	return (0);
}

/* ARGSUSED */
static int
segvmm_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	if (behav == MADV_PURGE) {
		/* Purge does not make sense for this mapping */
		return (EINVAL);
	}
	/* Indicate success for everything else. */
	return (0);
}

/* ARGSUSED */
static void
segvmm_dump(struct seg *seg)
{
	/*
	 * Since this is a mapping to share kernel data with userspace, nothing
	 * additional should be dumped.
	 */
}

/* ARGSUSED */
static int
segvmm_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
    enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/* ARGSUSED */
static int
segvmm_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	return (ENOTSUP);
}

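/*
 * The memid is formed from the backing vm_object pointer (NULL for
 * vmspace-backed segments) and the offset of 'addr' within the mapping.
 */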
static int
segvmm_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	segvmm_data_t *svmd = seg->s_data;

	memidp->val[0] = (uintptr_t)svmd->svmd_vmo;
	memidp->val[1] = (uintptr_t)(addr - seg->s_base) + svmd->svmd_off;
	return (0);
}

/* ARGSUSED */
static int
segvmm_capable(struct seg *seg, segcapability_t capability)
{
	/* No special capabilities */
	return (0);
}