/*      $NetBSD: xen_shm_machdep.c,v 1.5 2009/03/16 06:17:20 cegger Exp $      */

/*
 * Copyright (c) 2006 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Manuel Bouyer.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_shm_machdep.c,v 1.5 2009/03/16 06:17:20 cegger Exp $");


#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/vmem.h>
#include <sys/kernel.h>
#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <xen/hypervisor.h>
#include <xen/xen.h>
#include <xen/evtchn.h>
#include <xen/xen_shm.h>

/*
 * Helper routines for the backend drivers. These implement the functions
 * needed to map a batch of pages from foreign domains into our kernel VM
 * space, do I/O to them, and unmap them again.
 *
 * At boot time, we grab some kernel VM space that we'll use to map the
 * foreign pages. We also maintain a virtual-to-machine mapping table so we
 * can give back the appropriate address to bus_dma if requested.
 * If no more VM space is available, we return an error. The caller can then
 * register a callback which will be called when the required VM space is
 * available again.
 */
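/*
 * Sketch of the expected calling pattern (Xen3 variant), for illustration
 * only; the backend softc `sc', grant array `grefs', segment count `nsegs'
 * and retry function `backend_retry' are hypothetical:
 *
 *	vaddr_t va;
 *	grant_handle_t handle[XENSHM_MAX_PAGES_PER_REQUEST];
 *	int error;
 *
 *	error = xen_shm_map(nsegs, domid, grefs, &va, handle, 0);
 *	if (error == ENOMEM)
 *		error = xen_shm_callback(backend_retry, sc);
 *	else if (error == 0) {
 *		(do I/O on the pages mapped at va, then)
 *		xen_shm_unmap(va, nsegs, handle);
 *	}
 *
 * A retry issued from the callback itself should pass XSHM_CALLBACK in
 * flags, so that xen_shm_map() does not defer it behind the queue again.
 */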

/* pointers to our VM space */
static vaddr_t xen_shm_base_address;
static u_long xen_shm_base_address_pg;
static vaddr_t xen_shm_end_address;

/* Grab enough VM space to map an entire vbd ring. */
#ifdef XEN3
/* Xen3 Linux guests seem to eat more pages; give enough for 10 vbd rings. */
#define BLKIF_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
#define XENSHM_NPAGES (BLKIF_RING_SIZE * (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1) * 10)
#else
#define XENSHM_NPAGES (BLKIF_RING_SIZE * (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1))
#endif
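/*
 * For the usual 4 KB page size and the Xen3 block-interface limits
 * (32-entry rings, BLKIF_MAX_SEGMENTS_PER_REQUEST == 11), the Xen3 case
 * works out to 32 * 12 * 10 = 3840 pages, i.e. 15 MB of VA space; the
 * exact figure follows from the sizes in the Xen headers.
 */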

static vsize_t xen_shm_size = (XENSHM_NPAGES * PAGE_SIZE);

/* vm space management */
static vmem_t *xen_shm_arena;

/* callbacks are registered in a FIFO list. */

static SIMPLEQ_HEAD(xen_shm_callback_head, xen_shm_callback_entry)
    xen_shm_callbacks;
struct xen_shm_callback_entry {
	SIMPLEQ_ENTRY(xen_shm_callback_entry) xshmc_entries;
	int (*xshmc_callback)(void *); /* our callback */
	void *xshmc_arg; /* cookie passed to the callback */
};
/* a pool of struct xen_shm_callback_entry */
static struct pool xen_shm_callback_pool;

#ifdef DEBUG
/* for ratecheck(9) */
static struct timeval xen_shm_errintvl = { 60, 0 };  /* a minute, each */
#endif

void
xen_shm_init(void)
{
	SIMPLEQ_INIT(&xen_shm_callbacks);
	pool_init(&xen_shm_callback_pool, sizeof(struct xen_shm_callback_entry),
	    0, 0, 0, "xshmc", NULL, IPL_VM);
	/* ensure we'll always get items */
	if (pool_prime(&xen_shm_callback_pool,
	    PAGE_SIZE / sizeof(struct xen_shm_callback_entry)) != 0) {
		panic("xen_shm_init can't prime pool");
	}

	xen_shm_base_address = uvm_km_alloc(kernel_map, xen_shm_size, 0,
	    UVM_KMF_VAONLY);
	xen_shm_end_address = xen_shm_base_address + xen_shm_size;
	xen_shm_base_address_pg = xen_shm_base_address >> PAGE_SHIFT;
	if (xen_shm_base_address == 0) {
		panic("xen_shm_init no VM space");
	}
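	/*
	 * The arena hands out page numbers rather than virtual addresses:
	 * it spans the page frames of the VA range reserved above, with a
	 * quantum of one page.
	 */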
	xen_shm_arena = vmem_create("xen_shm",
	    xen_shm_base_address_pg,
	    (xen_shm_end_address >> PAGE_SHIFT) - 1 - xen_shm_base_address_pg,
	    1, NULL, NULL, NULL, 1, VM_NOSLEEP, IPL_VM);
	if (xen_shm_arena == NULL) {
		panic("xen_shm_init no arena");
	}
}

int
#ifdef XEN3
xen_shm_map(int nentries, int domid, grant_ref_t *grefp, vaddr_t *vap,
    grant_handle_t *handlep, int flags)
#else
xen_shm_map(paddr_t *ma, int nentries, int domid, vaddr_t *vap, int flags)
#endif
{
	int s, i;
	vaddr_t new_va;
	u_long new_va_pg;
#ifdef XEN3
	int err;
	gnttab_map_grant_ref_t op[XENSHM_MAX_PAGES_PER_REQUEST];
#else
	multicall_entry_t mcl[XENSHM_MAX_PAGES_PER_REQUEST];
	int remap_prot = PG_V | PG_RW | PG_U | PG_M;
#endif

#ifdef DIAGNOSTIC
	if (nentries > XENSHM_MAX_PAGES_PER_REQUEST) {
		printf("xen_shm_map: %d entries\n", nentries);
		panic("xen_shm_map");
	}
#endif
	s = splvm(); /* splvm is the lowest level blocking disk and net IRQ */
	/*
	 * If a driver is already waiting for resources, don't try to
	 * allocate yet. This avoids a flood of small requests stalling
	 * large ones.
	 */
	if (__predict_false(SIMPLEQ_FIRST(&xen_shm_callbacks) != NULL) &&
	    (flags & XSHM_CALLBACK) == 0) {
#ifdef DEBUG
		static struct timeval lasttime;
#endif
		splx(s);
#ifdef DEBUG
		if (ratecheck(&lasttime, &xen_shm_errintvl))
			printf("xen_shm_map: ENOMEM1\n");
#endif
		return ENOMEM;
	}
	/* allocate the needed virtual space */
	new_va_pg = vmem_alloc(xen_shm_arena, nentries,
	    VM_INSTANTFIT | VM_NOSLEEP);
	if (new_va_pg == 0) {
#ifdef DEBUG
		static struct timeval lasttime;
#endif
		splx(s);
#ifdef DEBUG
		if (ratecheck(&lasttime, &xen_shm_errintvl))
			printf("xen_shm_map: ENOMEM\n");
#endif
		return ENOMEM;
	}
	splx(s);

	new_va = new_va_pg << PAGE_SHIFT;
#ifdef XEN3
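	/*
	 * Build one grant-table map operation per page to be mapped,
	 * marking it read-only when the caller sets XSHM_RO.
	 */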
	for (i = 0; i < nentries; i++) {
		op[i].host_addr = new_va + i * PAGE_SIZE;
		op[i].dom = domid;
		op[i].ref = grefp[i];
		op[i].flags = GNTMAP_host_map |
		    ((flags & XSHM_RO) ? GNTMAP_readonly : 0);
	}
	err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, op, nentries);
	if (__predict_false(err))
		panic("xen_shm_map: HYPERVISOR_grant_table_op failed");
	for (i = 0; i < nentries; i++) {
		if (__predict_false(op[i].status))
			return op[i].status;
		handlep[i] = op[i].handle;
	}
#else /* !XEN3 */
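	/*
	 * Xen2: point each of our VA pages at the foreign machine page via
	 * one update_va_mapping_otherdomain per page, batched into a
	 * single multicall.
	 */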
	for (i = 0; i < nentries; i++, new_va_pg++) {
		mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
		mcl[i].args[0] = new_va_pg;
		mcl[i].args[1] = ma[i] | remap_prot;
		mcl[i].args[2] = 0;
		mcl[i].args[3] = domid;
	}
	if (HYPERVISOR_multicall(mcl, nentries) != 0)
		panic("xen_shm_map: HYPERVISOR_multicall");

	for (i = 0; i < nentries; i++) {
		if (mcl[i].args[5] != 0) {
			printf("xen_shm_map: mcl[%d] failed\n", i);
			xen_shm_unmap(new_va, ma, nentries, domid);
			return EINVAL;
		}
	}
#endif /* !XEN3 */
	*vap = new_va;
	return 0;
}

void
#ifdef XEN3
xen_shm_unmap(vaddr_t va, int nentries, grant_handle_t *handlep)
#else
xen_shm_unmap(vaddr_t va, paddr_t *pa, int nentries, int domid)
#endif
{
#ifdef XEN3
	gnttab_unmap_grant_ref_t op[XENSHM_MAX_PAGES_PER_REQUEST];
	int ret;
#else
	multicall_entry_t mcl[XENSHM_MAX_PAGES_PER_REQUEST];
#endif
	int i;
	int s;
	struct xen_shm_callback_entry *xshmc;

#ifdef DIAGNOSTIC
	if (nentries > XENSHM_MAX_PAGES_PER_REQUEST) {
		printf("xen_shm_unmap: %d entries\n", nentries);
		panic("xen_shm_unmap");
	}
#endif

#ifdef XEN3
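	/*
	 * One grant-table unmap operation per mapped page, identified by
	 * the handle that xen_shm_map() returned for it.
	 */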
	for (i = 0; i < nentries; i++) {
		op[i].host_addr = va + i * PAGE_SIZE;
		op[i].dev_bus_addr = 0;
		op[i].handle = handlep[i];
	}
	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
	    op, nentries);
	if (__predict_false(ret))
		panic("xen_shm_unmap: unmap failed");
	va = va >> PAGE_SHIFT;
#else /* !XEN3 */
	va = va >> PAGE_SHIFT;
	for (i = 0; i < nentries; i++) {
		mcl[i].op = __HYPERVISOR_update_va_mapping;
		mcl[i].args[0] = va + i;
		mcl[i].args[1] = 0;
		mcl[i].args[2] = 0;
	}
	mcl[nentries - 1].args[2] = UVMF_FLUSH_TLB;
	if (HYPERVISOR_multicall(mcl, nentries) != 0)
		panic("xen_shm_unmap");
#endif /* !XEN3 */
	s = splvm(); /* splvm is the lowest level blocking disk and net IRQ */
	vmem_free(xen_shm_arena, va, nentries);
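	/*
	 * The VA space just freed may let pending requests proceed:
	 * run the queued callbacks until one of them fails again.
	 */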
	while (__predict_false((xshmc = SIMPLEQ_FIRST(&xen_shm_callbacks))
	    != NULL)) {
		SIMPLEQ_REMOVE_HEAD(&xen_shm_callbacks, xshmc_entries);
		splx(s);
		if (xshmc->xshmc_callback(xshmc->xshmc_arg) == 0) {
			/* callback succeeded */
			s = splvm();
			pool_put(&xen_shm_callback_pool, xshmc);
		} else {
			/* callback failed, probably still out of resources */
			s = splvm();
			SIMPLEQ_INSERT_TAIL(&xen_shm_callbacks, xshmc,
			    xshmc_entries);
			break;
		}
	}
	splx(s);
}

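/*
 * Register a callback to be run from xen_shm_unmap() once some VA space
 * has been freed. The callback should return 0 if it made progress and
 * can be discarded, or nonzero to remain queued for a later retry.
 */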
int
xen_shm_callback(int (*callback)(void *), void *arg)
{
	struct xen_shm_callback_entry *xshmc;
	int s;

	s = splvm();
	xshmc = pool_get(&xen_shm_callback_pool, PR_NOWAIT);
	if (xshmc == NULL) {
		splx(s);
		return ENOMEM;
	}
	xshmc->xshmc_arg = arg;
	xshmc->xshmc_callback = callback;
	SIMPLEQ_INSERT_TAIL(&xen_shm_callbacks, xshmc, xshmc_entries);
	splx(s);
	return 0;
}