/*	$OpenBSD: xen.c,v 1.56 2016/04/28 16:40:10 mikeb Exp $	*/

/*
 * Copyright (c) 2015 Mike Belopuhov
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>

/* Xen requires locked atomic operations */
#ifndef MULTIPROCESSOR
#define _XENMPATOMICS
#define MULTIPROCESSOR
#endif
#include <sys/atomic.h>
#ifdef _XENMPATOMICS
#undef MULTIPROCESSOR
#undef _XENMPATOMICS
#endif

#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/task.h>
#include <sys/syslog.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>

#include <uvm/uvm_extern.h>

#include <machine/i82489var.h>

#include <dev/rndvar.h>

#include <dev/pv/pvvar.h>
#include <dev/pv/pvreg.h>
#include <dev/pv/xenreg.h>
#include <dev/pv/xenvar.h>

struct xen_softc *xen_sc;

int	xen_init_hypercall(struct xen_softc *);
int	xen_getfeatures(struct xen_softc *);
int	xen_init_info_page(struct xen_softc *);
int	xen_init_cbvec(struct xen_softc *);
int	xen_init_interrupts(struct xen_softc *);
int	xen_init_grant_tables(struct xen_softc *);
struct xen_gntent *
	xen_grant_table_grow(struct xen_softc *);
int	xen_grant_table_alloc(struct xen_softc *, grant_ref_t *);
void	xen_grant_table_free(struct xen_softc *, grant_ref_t);
void	xen_grant_table_enter(struct xen_softc *, grant_ref_t, paddr_t,
	    int, int);
void	xen_grant_table_remove(struct xen_softc *, grant_ref_t);
void	xen_disable_emulated_devices(struct xen_softc *);

int	xen_match(struct device *, void *, void *);
void	xen_attach(struct device *, struct device *, void *);
void	xen_deferred(struct device *);
void	xen_control(void *);
void	xen_resume(struct device *);
int	xen_activate(struct device *, int);
int	xen_probe_devices(struct xen_softc *);

int	xen_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *);
void	xen_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	xen_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
	    struct proc *, int);
int	xen_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *,
	    int);
void	xen_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	xen_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int);

int	xs_attach(struct xen_softc *);

struct cfdriver xen_cd = {
	NULL, "xen", DV_DULL
};

const struct cfattach xen_ca = {
	sizeof(struct xen_softc), xen_match, xen_attach, NULL, xen_activate
};

struct bus_dma_tag xen_bus_dma_tag = {
	NULL,
	xen_bus_dmamap_create,
	xen_bus_dmamap_destroy,
	xen_bus_dmamap_load,
	xen_bus_dmamap_load_mbuf,
	NULL,
	NULL,
	xen_bus_dmamap_unload,
	xen_bus_dmamap_sync,
	_bus_dmamem_alloc,
	NULL,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	NULL,
};

int
xen_match(struct device *parent, void *match, void *aux)
{
	struct pv_attach_args *pva = aux;
	struct pvbus_hv *hv = &pva->pva_hv[PVBUS_XEN];

	if (hv->hv_base == 0)
		return (0);

	return (1);
}

void
xen_attach(struct device *parent, struct device *self, void *aux)
{
	struct pv_attach_args *pva = (struct pv_attach_args *)aux;
	struct pvbus_hv *hv = &pva->pva_hv[PVBUS_XEN];
	struct xen_softc *sc = (struct xen_softc *)self;

	sc->sc_base = hv->hv_base;

	if (xen_init_hypercall(sc))
		return;

	/* Wire it up to the global */
	xen_sc = sc;

	if (xen_getfeatures(sc))
		return;

	if (xen_init_info_page(sc))
		return;

	xen_init_cbvec(sc);

	if (xen_init_interrupts(sc))
		return;

	if (xen_init_grant_tables(sc))
		return;

	if (xs_attach(sc))
		return;

	xen_probe_devices(sc);

	/* pvbus(4) key/value interface */
	hv->hv_kvop = xs_kvop;
	hv->hv_arg = sc;

	xen_disable_emulated_devices(sc);

	config_mountroot(self, xen_deferred);
}

void
xen_deferred(struct device *self)
{
	struct xen_softc *sc = (struct xen_softc *)self;

	if (!(sc->sc_flags & XSF_CBVEC)) {
		DPRINTF("%s: callback vector hasn't been established\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	xen_intr_enable();

	if (xs_watch(sc, "control", "shutdown", &sc->sc_ctltsk,
	    xen_control, sc))
		printf("%s: failed to setup shutdown control watch\n",
		    sc->sc_dev.dv_xname);
}

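/*
 * Handle a "control/shutdown" request: read the requested action from
 * the XenStore, acknowledge it by clearing the node and translate it
 * into the corresponding local action.
 */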
void
xen_control(void *arg)
{
	struct xen_softc *sc = arg;
	struct xs_transaction xst;
	char action[128];
	int error;

	memset(&xst, 0, sizeof(xst));
	xst.xst_id = 0;
	xst.xst_sc = sc->sc_xs;

	error = xs_getprop(sc, "control", "shutdown", action, sizeof(action));
	if (error) {
		if (error != ENOENT)
			printf("%s: failed to process control event\n",
			    sc->sc_dev.dv_xname);
		return;
	}

	if (strlen(action) == 0)
		return;

	/* Acknowledge the event */
	xs_setprop(sc, "control", "shutdown", "", 0);

	if (strcmp(action, "halt") == 0 || strcmp(action, "poweroff") == 0) {
		extern int allowpowerdown;

		if (allowpowerdown == 0)
			return;

		suspend_randomness();

		log(LOG_KERN | LOG_NOTICE, "Shutting down in response to "
		    "request from Xen host\n");
		prsignal(initprocess, SIGUSR2);
	} else if (strcmp(action, "reboot") == 0) {
		extern int allowpowerdown;

		if (allowpowerdown == 0)
			return;

		suspend_randomness();

		log(LOG_KERN | LOG_NOTICE, "Rebooting in response to request "
		    "from Xen host\n");
		prsignal(initprocess, SIGINT);
	} else if (strcmp(action, "crash") == 0) {
		panic("xen told us to do this");
	} else if (strcmp(action, "suspend") == 0) {
		/* Not implemented yet */
	} else {
		printf("%s: unknown shutdown event \"%s\"\n",
		    sc->sc_dev.dv_xname, action);
	}
}

void
xen_resume(struct device *self)
{
}

int
xen_activate(struct device *self, int act)
{
	int rv = 0;

	switch (act) {
	case DVACT_RESUME:
		xen_resume(self);
		break;
	}
	return (rv);
}

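/*
 * The CPUID leaf at sc_base + CPUID_OFFSET_XEN_HYPERCALL returns the
 * number of hypercall pages in eax and the MSR used to register them
 * in ebx.  Writing the physical address of our page to that MSR makes
 * the hypervisor fill it with hypercall trampolines.
 */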
int
xen_init_hypercall(struct xen_softc *sc)
{
	extern void *xen_hypercall_page;
	uint32_t regs[4];
	paddr_t pa;

	/* Get hypercall page configuration MSR */
	CPUID(sc->sc_base + CPUID_OFFSET_XEN_HYPERCALL,
	    regs[0], regs[1], regs[2], regs[3]);

	/* We don't support more than one hypercall page */
	if (regs[0] != 1) {
		printf(": requested %d hypercall pages\n", regs[0]);
		return (-1);
	}

	sc->sc_hc = &xen_hypercall_page;

	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_hc, &pa)) {
		printf(": hypercall page PA extraction failed\n");
		return (-1);
	}
	wrmsr(regs[1], pa);

	return (0);
}

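/*
 * Perform a hypercall with up to 5 arguments, e.g.:
 *
 *	struct xen_feature_info xfi;
 *	xen_hypercall(sc, XC_VERSION, 2, XENVER_get_features, &xfi);
 */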
int
xen_hypercall(struct xen_softc *sc, int op, int argc, ...)
{
	va_list ap;
	ulong argv[5];
	int i;

	if (argc < 0 || argc > 5)
		return (-1);
	va_start(ap, argc);
	for (i = 0; i < argc; i++)
		argv[i] = (ulong)va_arg(ap, ulong);
	va_end(ap);
	return (xen_hypercallv(sc, op, argc, argv));
}

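/*
 * Each operation has a 32-byte trampoline in the hypercall page set up
 * by xen_init_hypercall().  Pick the inline assembly variant that loads
 * the requested number of argument registers before calling into it.
 */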
int
xen_hypercallv(struct xen_softc *sc, int op, int argc, ulong *argv)
{
	ulong hcall;
	int rv = 0;

	hcall = (ulong)sc->sc_hc + op * 32;

#if defined(XEN_DEBUG) && disabled
	{
		int i;

		printf("hypercall %d", op);
		if (argc > 0) {
			printf(", args {");
			for (i = 0; i < argc; i++)
				printf(" %#lx", argv[i]);
			printf(" }\n");
		} else
			printf("\n");
	}
#endif

	switch (argc) {
	case 0: {
		HYPERCALL_RES1;
		__asm__ volatile (			\
			  HYPERCALL_LABEL		\
			: HYPERCALL_OUT1		\
			: HYPERCALL_PTR(hcall)		\
			: HYPERCALL_CLOBBER		\
		);
		HYPERCALL_RET(rv);
		break;
	}
	case 1: {
		HYPERCALL_RES1; HYPERCALL_RES2;
		HYPERCALL_ARG1(argv[0]);
		__asm__ volatile (			\
			  HYPERCALL_LABEL		\
			: HYPERCALL_OUT1 HYPERCALL_OUT2	\
			: HYPERCALL_IN1			\
			, HYPERCALL_PTR(hcall)		\
			: HYPERCALL_CLOBBER		\
		);
		HYPERCALL_RET(rv);
		break;
	}
	case 2: {
		HYPERCALL_RES1; HYPERCALL_RES2; HYPERCALL_RES3;
		HYPERCALL_ARG1(argv[0]); HYPERCALL_ARG2(argv[1]);
		__asm__ volatile (			\
			  HYPERCALL_LABEL		\
			: HYPERCALL_OUT1 HYPERCALL_OUT2	\
			  HYPERCALL_OUT3		\
			: HYPERCALL_IN1	HYPERCALL_IN2	\
			, HYPERCALL_PTR(hcall)		\
			: HYPERCALL_CLOBBER		\
		);
		HYPERCALL_RET(rv);
		break;
	}
	case 3: {
		HYPERCALL_RES1; HYPERCALL_RES2; HYPERCALL_RES3;
		HYPERCALL_RES4;
		HYPERCALL_ARG1(argv[0]); HYPERCALL_ARG2(argv[1]);
		HYPERCALL_ARG3(argv[2]);
		__asm__ volatile (			\
			  HYPERCALL_LABEL		\
			: HYPERCALL_OUT1 HYPERCALL_OUT2	\
			  HYPERCALL_OUT3 HYPERCALL_OUT4	\
			: HYPERCALL_IN1	HYPERCALL_IN2	\
			  HYPERCALL_IN3			\
			, HYPERCALL_PTR(hcall)		\
			: HYPERCALL_CLOBBER		\
		);
		HYPERCALL_RET(rv);
		break;
	}
	case 4: {
		HYPERCALL_RES1; HYPERCALL_RES2; HYPERCALL_RES3;
		HYPERCALL_RES4; HYPERCALL_RES5;
		HYPERCALL_ARG1(argv[0]); HYPERCALL_ARG2(argv[1]);
		HYPERCALL_ARG3(argv[2]); HYPERCALL_ARG4(argv[3]);
		__asm__ volatile (			\
			  HYPERCALL_LABEL		\
			: HYPERCALL_OUT1 HYPERCALL_OUT2	\
			  HYPERCALL_OUT3 HYPERCALL_OUT4	\
			  HYPERCALL_OUT5		\
			: HYPERCALL_IN1	HYPERCALL_IN2	\
			  HYPERCALL_IN3	HYPERCALL_IN4	\
			, HYPERCALL_PTR(hcall)		\
			: HYPERCALL_CLOBBER		\
		);
		HYPERCALL_RET(rv);
		break;
	}
	case 5: {
		HYPERCALL_RES1; HYPERCALL_RES2; HYPERCALL_RES3;
		HYPERCALL_RES4; HYPERCALL_RES5; HYPERCALL_RES6;
		HYPERCALL_ARG1(argv[0]); HYPERCALL_ARG2(argv[1]);
		HYPERCALL_ARG3(argv[2]); HYPERCALL_ARG4(argv[3]);
		HYPERCALL_ARG5(argv[4]);
		__asm__ volatile (			\
			  HYPERCALL_LABEL		\
			: HYPERCALL_OUT1 HYPERCALL_OUT2	\
			  HYPERCALL_OUT3 HYPERCALL_OUT4	\
			  HYPERCALL_OUT5 HYPERCALL_OUT6	\
			: HYPERCALL_IN1	HYPERCALL_IN2	\
			  HYPERCALL_IN3	HYPERCALL_IN4	\
			  HYPERCALL_IN5			\
			, HYPERCALL_PTR(hcall)		\
			: HYPERCALL_CLOBBER		\
		);
		HYPERCALL_RET(rv);
		break;
	}
	default:
		DPRINTF("%s: wrong number of arguments: %d\n", __func__, argc);
		rv = -1;
		break;
	}
	return (rv);
}

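/*
 * Fetch the first submap of the hypervisor feature bitmap; the "%b"
 * format string below decodes the individual XENFEAT_* flags.
 */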
int
xen_getfeatures(struct xen_softc *sc)
{
	struct xen_feature_info xfi;

	memset(&xfi, 0, sizeof(xfi));
	if (xen_hypercall(sc, XC_VERSION, 2, XENVER_get_features, &xfi) < 0) {
		printf(": failed to fetch features\n");
		return (-1);
	}
	sc->sc_features = xfi.submap;
#ifdef XEN_DEBUG
	printf(": features %b", sc->sc_features,
	    "\20\014DOM0\013PIRQ\012PVCLOCK\011CBVEC\010GNTFLAGS\007HMA"
	    "\006PTUPD\005PAE4G\004SUPERVISOR\003AUTOPMAP\002WDT\001WPT");
#else
	printf(": features %#x", sc->sc_features);
#endif
	return (0);
}

#ifdef XEN_DEBUG
void
xen_print_info_page(void)
{
	struct xen_softc *sc = xen_sc;
	struct shared_info *s = sc->sc_ipg;
	struct vcpu_info *v;
	int i;

	virtio_membar_sync();
	for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) {
		v = &s->vcpu_info[i];
		if (!v->evtchn_upcall_pending && !v->evtchn_upcall_mask &&
		    !v->evtchn_pending_sel && !v->time.version &&
		    !v->time.tsc_timestamp && !v->time.system_time &&
		    !v->time.tsc_to_system_mul && !v->time.tsc_shift)
			continue;
		printf("vcpu%d:\n"
		    "   upcall_pending=%02x upcall_mask=%02x pending_sel=%#lx\n"
		    "   time version=%u tsc=%llu system=%llu\n"
		    "   time mul=%u shift=%d\n",
		    i, v->evtchn_upcall_pending, v->evtchn_upcall_mask,
		    v->evtchn_pending_sel, v->time.version,
		    v->time.tsc_timestamp, v->time.system_time,
		    v->time.tsc_to_system_mul, v->time.tsc_shift);
	}
	printf("pending events: ");
	for (i = 0; i < nitems(s->evtchn_pending); i++) {
		if (s->evtchn_pending[i] == 0)
			continue;
		printf(" %d:%#lx", i, s->evtchn_pending[i]);
	}
	printf("\nmasked events: ");
	for (i = 0; i < nitems(s->evtchn_mask); i++) {
		if (s->evtchn_mask[i] == 0xffffffffffffffffULL)
			continue;
		printf(" %d:%#lx", i, s->evtchn_mask[i]);
	}
	printf("\nwc ver=%u sec=%u nsec=%u\n", s->wc_version, s->wc_sec,
	    s->wc_nsec);
	printf("arch maxpfn=%lu framelist=%lu nmi=%lu\n", s->arch.max_pfn,
	    s->arch.pfn_to_mfn_frame_list, s->arch.nmi_reason);
}
#endif	/* XEN_DEBUG */

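/*
 * Allocate a page of kernel memory and ask the hypervisor to place the
 * shared info page (vcpu state, event channel bitmaps, wall clock) at
 * its physical address.
 */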
int
xen_init_info_page(struct xen_softc *sc)
{
	struct xen_add_to_physmap xatp;
	paddr_t pa;

	sc->sc_ipg = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_ipg == NULL) {
		printf(": failed to allocate shared info page\n");
		return (-1);
	}
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_ipg, &pa)) {
		printf(": shared info page PA extraction failed\n");
		free(sc->sc_ipg, M_DEVBUF, PAGE_SIZE);
		return (-1);
	}
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = atop(pa);
	if (xen_hypercall(sc, XC_MEMORY, 2, XENMEM_add_to_physmap, &xatp)) {
		printf(": failed to register shared info page\n");
		free(sc->sc_ipg, M_DEVBUF, PAGE_SIZE);
		return (-1);
	}
	return (0);
}

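/*
 * If the hypervisor can deliver event channel upcalls through an IDT
 * vector (XENFEAT_CBVEC), register LAPIC_XEN_VECTOR as the callback.
 * Otherwise upcall delivery is retried later via the xspd(4) PCI
 * interrupt.
 */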
int
xen_init_cbvec(struct xen_softc *sc)
{
	struct xen_hvm_param xhp;

	if ((sc->sc_features & XENFEAT_CBVEC) == 0)
		return (ENOENT);

	xhp.domid = DOMID_SELF;
	xhp.index = HVM_PARAM_CALLBACK_IRQ;
	xhp.value = HVM_CALLBACK_VECTOR(LAPIC_XEN_VECTOR);
	if (xen_hypercall(sc, XC_HVM, 2, HVMOP_set_param, &xhp)) {
		/* Will retry with the xspd(4) PCI interrupt */
		return (ENOENT);
	}
	DPRINTF(", idtvec %d", LAPIC_XEN_VECTOR);

	sc->sc_flags |= XSF_CBVEC;

	return (0);
}

int
xen_init_interrupts(struct xen_softc *sc)
{
	int i;

	sc->sc_irq = LAPIC_XEN_VECTOR;

	/*
	 * Clear all pending events and mask all interrupts
	 */
	for (i = 0; i < nitems(sc->sc_ipg->evtchn_pending); i++) {
		sc->sc_ipg->evtchn_pending[i] = 0;
		sc->sc_ipg->evtchn_mask[i] = ~0UL;
	}

	SLIST_INIT(&sc->sc_intrs);

	return (0);
}

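/*
 * Newer hypervisors take the event channel sub-operation as the first
 * argument of XC_EVTCHN.  Older ones only implement XC_OEVTCHN, which
 * embeds the command in struct evtchn_op, so retry in that format if
 * the modern interface is missing (ENOXENSYS).
 */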
static int
xen_evtchn_hypercall(struct xen_softc *sc, int cmd, void *arg, size_t len)
{
	struct evtchn_op compat;
	int error;

	error = xen_hypercall(sc, XC_EVTCHN, 2, cmd, arg);
	if (error == -ENOXENSYS) {
		memset(&compat, 0, sizeof(compat));
		compat.cmd = cmd;
		memcpy(&compat.u, arg, len);
		error = xen_hypercall(sc, XC_OEVTCHN, 1, &compat);
	}
	return (error);
}

static inline struct xen_intsrc *
xen_lookup_intsrc(struct xen_softc *sc, evtchn_port_t port)
{
	struct xen_intsrc *xi;

	SLIST_FOREACH(xi, &sc->sc_intrs, xi_entry)
		if (xi->xi_port == port)
			break;
	return (xi);
}

void
xen_intr_ack(void)
{
	struct xen_softc *sc = xen_sc;
	struct shared_info *s = sc->sc_ipg;
	struct cpu_info *ci = curcpu();
	struct vcpu_info *v = &s->vcpu_info[CPU_INFO_UNIT(ci)];

	v->evtchn_upcall_pending = 0;
	virtio_membar_sync();
}

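/*
 * Event channel upcall handler.  Pending events live in a two-level
 * bitmap in the shared info page: evtchn_pending_sel selects the words
 * of evtchn_pending that may have bits set.  Clear the selector
 * atomically, then scan the selected words for pending, unmasked ports
 * and dispatch the registered handlers.
 */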
void
xen_intr(void)
{
	struct xen_softc *sc = xen_sc;
	struct xen_intsrc *xi;
	struct shared_info *s = sc->sc_ipg;
	struct cpu_info *ci = curcpu();
	struct vcpu_info *v = &s->vcpu_info[CPU_INFO_UNIT(ci)];
	ulong pending, selector;
	int port, bit, row;

	v->evtchn_upcall_pending = 0;
	selector = atomic_swap_ulong(&v->evtchn_pending_sel, 0);

	for (row = 0; selector > 0; selector >>= 1, row++) {
		if ((selector & 1) == 0)
			continue;
		pending = sc->sc_ipg->evtchn_pending[row] &
		    ~(sc->sc_ipg->evtchn_mask[row]);
		for (bit = 0; pending > 0; pending >>= 1, bit++) {
			if ((pending & 1) == 0)
				continue;
			sc->sc_ipg->evtchn_pending[row] &= ~(1UL << bit);
			virtio_membar_producer();
			port = (row * LONG_BIT) + bit;
			if ((xi = xen_lookup_intsrc(sc, port)) == NULL) {
				printf("%s: unhandled interrupt on port %u\n",
				    sc->sc_dev.dv_xname, port);
				continue;
			}
			xi->xi_evcnt.ec_count++;
			if (xi->xi_handler)
				xi->xi_handler(xi->xi_arg);
		}
	}
}

void
xen_intr_signal(xen_intr_handle_t xih)
{
	struct xen_softc *sc = xen_sc;
	struct xen_intsrc *xi;
	struct evtchn_send es;

	if ((xi = xen_lookup_intsrc(sc, (evtchn_port_t)xih)) != NULL) {
		es.port = xi->xi_port;
		xen_evtchn_hypercall(sc, EVTCHNOP_send, &es, sizeof(es));
	}
}

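/*
 * Bind a handler to an event channel.  A port of 0 asks the hypervisor
 * to allocate a fresh inter-domain port (EVTCHNOP_alloc_unbound); a
 * non-zero port binds to an existing channel that we did not open and
 * thus must not close on disestablish.  New ports start masked until
 * xen_intr_enable() or xen_intr_unmask() is called.
 */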
int
xen_intr_establish(evtchn_port_t port, xen_intr_handle_t *xih, int domain,
    void (*handler)(void *), void *arg, char *name)
{
	struct xen_softc *sc = xen_sc;
	struct xen_intsrc *xi;
	struct evtchn_alloc_unbound eau;
#ifdef notyet
	struct evtchn_bind_vcpu ebv;
#endif
#if defined(XEN_DEBUG) && disabled
	struct evtchn_status es;
#endif

	if (port && xen_lookup_intsrc(sc, port)) {
		DPRINTF("%s: interrupt handler has already been established "
		    "for port %u\n", sc->sc_dev.dv_xname, port);
		return (-1);
	}

	xi = malloc(sizeof(*xi), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (xi == NULL)
		return (-1);

	xi->xi_handler = handler;
	xi->xi_arg = arg;
	xi->xi_port = (evtchn_port_t)*xih;

	if (port == 0) {
		/* We're being asked to allocate a new event port */
		memset(&eau, 0, sizeof(eau));
		eau.dom = DOMID_SELF;
		eau.remote_dom = domain;
		if (xen_evtchn_hypercall(sc, EVTCHNOP_alloc_unbound, &eau,
		    sizeof(eau)) != 0) {
			DPRINTF("%s: failed to allocate new event port\n",
			    sc->sc_dev.dv_xname);
			free(xi, M_DEVBUF, sizeof(*xi));
			return (-1);
		}
		*xih = xi->xi_port = eau.port;
	} else {
		*xih = xi->xi_port = port;
		/*
		 * The Event Channel API didn't open this port, so it is not
		 * responsible for closing it automatically on unbind.
		 */
		xi->xi_noclose = 1;
	}

#ifdef notyet
	/* Bind interrupt to VCPU#0 */
	memset(&ebv, 0, sizeof(ebv));
	ebv.port = xi->xi_port;
	ebv.vcpu = 0;
	if (xen_evtchn_hypercall(sc, EVTCHNOP_bind_vcpu, &ebv, sizeof(ebv))) {
		printf("%s: failed to bind interrupt on port %u to vcpu%d\n",
		    sc->sc_dev.dv_xname, ebv.port, ebv.vcpu);
	}
#endif

	evcount_attach(&xi->xi_evcnt, name, &sc->sc_irq);

	SLIST_INSERT_HEAD(&sc->sc_intrs, xi, xi_entry);

	/* Mask the event port */
	setbit((char *)&sc->sc_ipg->evtchn_mask[0], xi->xi_port);

#if defined(XEN_DEBUG) && disabled
	memset(&es, 0, sizeof(es));
	es.dom = DOMID_SELF;
	es.port = xi->xi_port;
	if (xen_evtchn_hypercall(sc, EVTCHNOP_status, &es, sizeof(es))) {
		printf("%s: failed to obtain status for port %d\n",
		    sc->sc_dev.dv_xname, es.port);
	}
	printf("%s: port %u bound to vcpu%u", sc->sc_dev.dv_xname,
	    es.port, es.vcpu);
	if (es.status == EVTCHNSTAT_interdomain)
		printf(": domain %d port %u\n", es.u.interdomain.dom,
		    es.u.interdomain.port);
	else if (es.status == EVTCHNSTAT_unbound)
		printf(": domain %d\n", es.u.unbound.dom);
	else if (es.status == EVTCHNSTAT_pirq)
		printf(": pirq %u\n", es.u.pirq);
	else if (es.status == EVTCHNSTAT_virq)
		printf(": virq %u\n", es.u.virq);
	else
		printf("\n");
#endif

	return (0);
}

int
xen_intr_disestablish(xen_intr_handle_t xih)
{
	struct xen_softc *sc = xen_sc;
	evtchn_port_t port = (evtchn_port_t)xih;
	struct evtchn_close ec;
	struct xen_intsrc *xi;

	if ((xi = xen_lookup_intsrc(sc, port)) == NULL)
		return (-1);

	evcount_detach(&xi->xi_evcnt);

	SLIST_REMOVE(&sc->sc_intrs, xi, xen_intsrc, xi_entry);

	setbit((char *)&sc->sc_ipg->evtchn_mask[0], xi->xi_port);
	clrbit((char *)&sc->sc_ipg->evtchn_pending[0], xi->xi_port);
	virtio_membar_sync();

	if (!xi->xi_noclose) {
		ec.port = xi->xi_port;
		if (xen_evtchn_hypercall(sc, EVTCHNOP_close, &ec, sizeof(ec))) {
			DPRINTF("%s: failed to close event port %u\n",
			    sc->sc_dev.dv_xname, xi->xi_port);
		}
	}

	free(xi, M_DEVBUF, sizeof(*xi));
	return (0);
}

void
xen_intr_enable(void)
{
	struct xen_softc *sc = xen_sc;
	struct xen_intsrc *xi;
	struct evtchn_unmask eu;

	SLIST_FOREACH(xi, &sc->sc_intrs, xi_entry) {
		if (!xi->xi_masked) {
			eu.port = xi->xi_port;
			if (xen_evtchn_hypercall(sc, EVTCHNOP_unmask, &eu,
			    sizeof(eu)))
				printf("%s: unmasking port %u failed\n",
				    sc->sc_dev.dv_xname, xi->xi_port);
			virtio_membar_sync();
			if (isset((char *)&sc->sc_ipg->evtchn_mask[0],
			    xi->xi_port))
				printf("%s: port %u is still masked\n",
				    sc->sc_dev.dv_xname, xi->xi_port);
		}
	}
}

void
xen_intr_mask(xen_intr_handle_t xih)
{
	struct xen_softc *sc = xen_sc;
	evtchn_port_t port = (evtchn_port_t)xih;
	struct xen_intsrc *xi;

	if ((xi = xen_lookup_intsrc(sc, port)) != NULL) {
		xi->xi_masked = 1;
		setbit((char *)&sc->sc_ipg->evtchn_mask[0], xi->xi_port);
		virtio_membar_sync();
	}
}

int
xen_intr_unmask(xen_intr_handle_t xih)
{
	struct xen_softc *sc = xen_sc;
	evtchn_port_t port = (evtchn_port_t)xih;
	struct xen_intsrc *xi;
	struct evtchn_unmask eu;

	if ((xi = xen_lookup_intsrc(sc, port)) != NULL) {
		xi->xi_masked = 0;
		if (!isset((char *)&sc->sc_ipg->evtchn_mask[0], xi->xi_port))
			return (0);
		eu.port = xi->xi_port;
		return (xen_evtchn_hypercall(sc, EVTCHNOP_unmask, &eu,
		    sizeof(eu)));
	}
	return (0);
}

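/*
 * Grant tables: each frame mapped from the hypervisor holds
 * GNTTAB_NEPG grant entries through which our pages are shared with
 * backend domains.  Query how many frames we may use, allocate the
 * bookkeeping array and map the first frame.
 */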
int
xen_init_grant_tables(struct xen_softc *sc)
{
	struct gnttab_query_size gqs;

	gqs.dom = DOMID_SELF;
	if (xen_hypercall(sc, XC_GNTTAB, 3, GNTTABOP_query_size, &gqs, 1)) {
		printf(": failed the query for grant table pages\n");
		return (-1);
	}
	if (gqs.nr_frames == 0 || gqs.nr_frames > gqs.max_nr_frames) {
		printf(": invalid number of grant table pages: %u/%u\n",
		    gqs.nr_frames, gqs.max_nr_frames);
		return (-1);
	}

	sc->sc_gntmax = gqs.max_nr_frames;

	sc->sc_gnt = mallocarray(sc->sc_gntmax + 1, sizeof(struct xen_gntent),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (sc->sc_gnt == NULL) {
		printf(": failed to allocate grant table lookup table\n");
		return (-1);
	}

	mtx_init(&sc->sc_gntmtx, IPL_NET);

	if (xen_grant_table_grow(sc) == NULL) {
		free(sc->sc_gnt, M_DEVBUF, (sc->sc_gntmax + 1) *
		    sizeof(struct xen_gntent));
		return (-1);
	}

	printf(", %u grant table frames", sc->sc_gntmax);

	xen_bus_dma_tag._cookie = sc;

	return (0);
}

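/*
 * Map the next grant table frame: XENMAPSPACE_grant_table interprets
 * the index as a frame number within the table and installs the frame
 * at the provided physical address.
 */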
struct xen_gntent *
xen_grant_table_grow(struct xen_softc *sc)
{
	struct xen_add_to_physmap xatp;
	struct xen_gntent *ge;
	paddr_t pa;

	if (sc->sc_gntcnt == sc->sc_gntmax) {
		printf("%s: grant table frame allotment limit reached\n",
		    sc->sc_dev.dv_xname);
		return (NULL);
	}

	mtx_enter(&sc->sc_gntmtx);

	/*
	 * ge points into the sc_gnt array and is never allocated
	 * separately, so it must not be passed to free(9) on failure.
	 */
	ge = &sc->sc_gnt[sc->sc_gntcnt];
	ge->ge_table = km_alloc(PAGE_SIZE, &kv_any, &kp_zero, &kd_nowait);
	if (ge->ge_table == NULL) {
		mtx_leave(&sc->sc_gntmtx);
		return (NULL);
	}
	if (!pmap_extract(pmap_kernel(), (vaddr_t)ge->ge_table, &pa)) {
		printf("%s: grant table page PA extraction failed\n",
		    sc->sc_dev.dv_xname);
		km_free(ge->ge_table, PAGE_SIZE, &kv_any, &kp_zero);
		ge->ge_table = NULL;
		mtx_leave(&sc->sc_gntmtx);
		return (NULL);
	}
	xatp.domid = DOMID_SELF;
	xatp.idx = sc->sc_gntcnt;
	xatp.space = XENMAPSPACE_grant_table;
	xatp.gpfn = atop(pa);
	if (xen_hypercall(sc, XC_MEMORY, 2, XENMEM_add_to_physmap, &xatp)) {
		printf("%s: failed to add a grant table page\n",
		    sc->sc_dev.dv_xname);
		km_free(ge->ge_table, PAGE_SIZE, &kv_any, &kp_zero);
		ge->ge_table = NULL;
		mtx_leave(&sc->sc_gntmtx);
		return (NULL);
	}
	ge->ge_start = sc->sc_gntcnt * GNTTAB_NEPG;
	/* First page has 8 reserved entries */
	ge->ge_reserved = ge->ge_start == 0 ? GNTTAB_NR_RESERVED_ENTRIES : 0;
	ge->ge_free = GNTTAB_NEPG - ge->ge_reserved;
	ge->ge_next = ge->ge_reserved;
	mtx_init(&ge->ge_mtx, IPL_NET);

	sc->sc_gntcnt++;
	mtx_leave(&sc->sc_gntmtx);

	return (ge);
}

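/*
 * Claim a free grant table entry.  Prefer the most recently grown
 * frame, then scan all frames, growing the table as a last resort.
 * ge_next is a rotor that spreads allocations within a frame.
 */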
int
xen_grant_table_alloc(struct xen_softc *sc, grant_ref_t *ref)
{
	struct xen_gntent *ge;
	int i;

	/* Start with a previously allocated table page */
	ge = &sc->sc_gnt[sc->sc_gntcnt - 1];
	if (ge->ge_free > 0) {
		mtx_enter(&ge->ge_mtx);
		if (ge->ge_free > 0)
			goto search;
		mtx_leave(&ge->ge_mtx);
	}

	/* Try other existing table pages */
	for (i = 0; i < sc->sc_gntcnt; i++) {
		ge = &sc->sc_gnt[i];
		if (ge->ge_free == 0)
			continue;
		mtx_enter(&ge->ge_mtx);
		if (ge->ge_free > 0)
			goto search;
		mtx_leave(&ge->ge_mtx);
	}

 alloc:
	/* Allocate a new table page */
	if ((ge = xen_grant_table_grow(sc)) == NULL)
		return (-1);

	mtx_enter(&ge->ge_mtx);
	if (ge->ge_free == 0) {
		/* We were not fast enough... */
		mtx_leave(&ge->ge_mtx);
		goto alloc;
	}

 search:
	for (i = ge->ge_next;
	     /* Math works here because GNTTAB_NEPG is a power of 2 */
	     i != ((ge->ge_next + GNTTAB_NEPG - 1) & (GNTTAB_NEPG - 1));
	     i++) {
		if (i == GNTTAB_NEPG)
			i = 0;
		if (ge->ge_reserved && i < ge->ge_reserved)
			continue;
		if (ge->ge_table[i].flags != GTF_invalid &&
		    ge->ge_table[i].frame != 0)
			continue;
		*ref = ge->ge_start + i;
		/* XXX Mark as taken */
		ge->ge_table[i].frame = 0xffffffff;
		if ((ge->ge_next = i + 1) == GNTTAB_NEPG)
			ge->ge_next = ge->ge_reserved;
		ge->ge_free--;
		mtx_leave(&ge->ge_mtx);
		return (0);
	}
	mtx_leave(&ge->ge_mtx);

	panic("page full, sc %p gnt %p (%d) ge %p", sc, sc->sc_gnt,
	    sc->sc_gntcnt, ge);
	return (-1);
}

void
xen_grant_table_free(struct xen_softc *sc, grant_ref_t ref)
{
	struct xen_gntent *ge;

#ifdef XEN_DEBUG
	if (ref >= sc->sc_gntcnt * GNTTAB_NEPG)
		panic("unmanaged ref %u sc %p gnt %p (%d)", ref, sc,
		    sc->sc_gnt, sc->sc_gntcnt);
#endif
	ge = &sc->sc_gnt[ref / GNTTAB_NEPG];
	mtx_enter(&ge->ge_mtx);
#ifdef XEN_DEBUG
	if (ref < ge->ge_start || ref >= ge->ge_start + GNTTAB_NEPG) {
		mtx_leave(&ge->ge_mtx);
		panic("out of bounds ref %u ge %p start %u sc %p gnt %p",
		    ref, ge, ge->ge_start, sc, sc->sc_gnt);
	}
#endif
	ref -= ge->ge_start;
	if (ge->ge_table[ref].flags != GTF_invalid) {
		mtx_leave(&ge->ge_mtx);
#ifdef XEN_DEBUG
		panic("ref %u is still in use, sc %p gnt %p", ref +
		    ge->ge_start, sc, sc->sc_gnt);
#else
		printf("%s: reference %u is still in use\n",
		    sc->sc_dev.dv_xname, ref + ge->ge_start);
		return;
#endif
	}
	ge->ge_table[ref].frame = 0;
	ge->ge_next = ref;
	ge->ge_free++;
	mtx_leave(&ge->ge_mtx);
}

void
xen_grant_table_enter(struct xen_softc *sc, grant_ref_t ref, paddr_t pa,
    int domain, int flags)
{
	struct xen_gntent *ge;

#ifdef XEN_DEBUG
	if (ref >= sc->sc_gntcnt * GNTTAB_NEPG)
		panic("unmanaged ref %u sc %p gnt %p (%d)", ref, sc,
		    sc->sc_gnt, sc->sc_gntcnt);
#endif
	ge = &sc->sc_gnt[ref / GNTTAB_NEPG];
#ifdef XEN_DEBUG
	if (ref < ge->ge_start || ref >= ge->ge_start + GNTTAB_NEPG) {
		panic("out of bounds ref %u ge %p start %u sc %p gnt %p",
		    ref, ge, ge->ge_start, sc, sc->sc_gnt);
	}
#endif
	ref -= ge->ge_start;
	ge->ge_table[ref].frame = atop(pa);
	ge->ge_table[ref].domid = domain;
	virtio_membar_sync();
	ge->ge_table[ref].flags = GTF_permit_access | flags;
	virtio_membar_sync();
}

void
xen_grant_table_remove(struct xen_softc *sc, grant_ref_t ref)
{
	struct xen_gntent *ge;
	uint32_t flags, *ptr;
	int loop;

#ifdef XEN_DEBUG
	if (ref >= sc->sc_gntcnt * GNTTAB_NEPG)
		panic("unmanaged ref %u sc %p gnt %p (%d)", ref, sc,
		    sc->sc_gnt, sc->sc_gntcnt);
#endif
	ge = &sc->sc_gnt[ref / GNTTAB_NEPG];
#ifdef XEN_DEBUG
	if (ref < ge->ge_start || ref >= ge->ge_start + GNTTAB_NEPG) {
		panic("out of bounds ref %u ge %p start %u sc %p gnt %p",
		    ref, ge, ge->ge_start, sc, sc->sc_gnt);
	}
#endif
	ref -= ge->ge_start;
	/* Invalidate the grant reference */
	virtio_membar_sync();
	ptr = (uint32_t *)&ge->ge_table[ref];
	flags = (ge->ge_table[ref].flags & ~(GTF_reading|GTF_writing)) |
	    (ge->ge_table[ref].domid << 16);
	loop = 0;
	while (atomic_cas_uint(ptr, flags, GTF_invalid) != flags) {
		if (loop++ > 10000000) {
			printf("%s: grant table reference %u is held "
			    "by domain %d\n", sc->sc_dev.dv_xname, ref +
			    ge->ge_start, ge->ge_table[ref].domid);
			return;
		}
		CPU_BUSY_CYCLE();
	}
	ge->ge_table[ref].frame = 0xffffffff;
}

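/*
 * bus_dma(9) glue: a Xen dma map carries an array of struct xen_gntmap,
 * one (grant ref, paddr) pair per segment.  Grant references are
 * claimed when the map is created; loading the map publishes each
 * segment's physical page through its reference and substitutes the
 * reference for the segment address, which is what the shared ring
 * protocols expect.
 */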
int
xen_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct xen_softc *sc = t->_cookie;
	struct xen_gntmap *gm;
	int i, error;

	if (maxsegsz < PAGE_SIZE)
		return (EINVAL);

	/* Allocate a dma map structure */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);
	/* Allocate an array of grant table pa<->ref maps */
	gm = mallocarray(nsegments, sizeof(struct xen_gntmap), M_DEVBUF,
	    M_ZERO | ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK));
	if (gm == NULL) {
		_bus_dmamap_destroy(t, *dmamp);
		*dmamp = NULL;
		return (ENOMEM);
	}
	/* Wire it to the dma map */
	(*dmamp)->_dm_cookie = gm;
	/* Claim references from the grant table */
	for (i = 0; i < (*dmamp)->_dm_segcnt; i++) {
		if (xen_grant_table_alloc(sc, &gm[i].gm_ref)) {
			xen_bus_dmamap_destroy(t, *dmamp);
			*dmamp = NULL;
			return (ENOBUFS);
		}
	}
	return (0);
}

void
xen_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct xen_softc *sc = t->_cookie;
	struct xen_gntmap *gm;
	int i;

	gm = map->_dm_cookie;
	for (i = 0; i < map->_dm_segcnt; i++) {
		if (gm[i].gm_ref == 0)
			continue;
		xen_grant_table_free(sc, gm[i].gm_ref);
	}
	free(gm, M_DEVBUF, map->_dm_segcnt * sizeof(struct xen_gntmap));
	_bus_dmamap_destroy(t, map);
}

int
xen_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct xen_softc *sc = t->_cookie;
	struct xen_gntmap *gm = map->_dm_cookie;
	int i, domain, error;

	domain = flags >> 16;
	flags &= 0xffff;
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error)
		return (error);
	for (i = 0; i < map->dm_nsegs; i++) {
		xen_grant_table_enter(sc, gm[i].gm_ref, map->dm_segs[i].ds_addr,
		    domain, flags & BUS_DMA_WRITE ? GTF_readonly : 0);
		gm[i].gm_paddr = map->dm_segs[i].ds_addr;
		map->dm_segs[i].ds_addr = gm[i].gm_ref;
	}
	return (0);
}

int
xen_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct xen_softc *sc = t->_cookie;
	struct xen_gntmap *gm = map->_dm_cookie;
	int i, domain, error;

	domain = flags >> 16;
	flags &= 0xffff;
	error = _bus_dmamap_load_mbuf(t, map, m0, flags);
	if (error)
		return (error);
	for (i = 0; i < map->dm_nsegs; i++) {
		xen_grant_table_enter(sc, gm[i].gm_ref, map->dm_segs[i].ds_addr,
		    domain, flags & BUS_DMA_WRITE ? GTF_readonly : 0);
		gm[i].gm_paddr = map->dm_segs[i].ds_addr;
		map->dm_segs[i].ds_addr = gm[i].gm_ref;
	}
	return (0);
}

void
xen_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct xen_softc *sc = t->_cookie;
	struct xen_gntmap *gm = map->_dm_cookie;
	int i;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (gm[i].gm_paddr == 0)
			continue;
		xen_grant_table_remove(sc, gm[i].gm_ref);
		map->dm_segs[i].ds_addr = gm[i].gm_paddr;
		gm[i].gm_paddr = 0;
	}
	_bus_dmamap_unload(t, map);
}

void
xen_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
    bus_size_t size, int op)
{
	if ((op == (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) ||
	    (op == (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)))
		virtio_membar_sync();
}

/* Parse a non-negative decimal integer; fails on any non-digit input */
static int
atoi(char *cp, int *res)
{
	*res = 0;
	do {
		if (*cp < '0' || *cp > '9')
			return (-1);
		*res *= 10;
		*res += *cp - '0';
	} while (*(++cp) != '\0');
	return (0);
}

static int
xen_attach_print(void *aux, const char *name)
{
	struct xen_attach_args *xa = aux;

	if (name)
		printf("\"%s\" at %s: %s", xa->xa_name, name, xa->xa_node);

	return (UNCONF);
}

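/*
 * Enumerate the "device" subtree of the XenStore and attach a child
 * for every node found there, passing the node path and backend domain
 * id down via struct xen_attach_args.
 */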
int
xen_probe_devices(struct xen_softc *sc)
{
	struct xen_attach_args xa;
	struct xs_transaction xst;
	struct iovec *iovp1 = NULL, *iovp2 = NULL;
	int i, j, error = 0, iov1_cnt = 0, iov2_cnt = 0;
	char domid[16];
	char path[256];

	memset(&xst, 0, sizeof(xst));
	xst.xst_id = 0;
	xst.xst_sc = sc->sc_xs;
	xst.xst_flags |= XST_POLL;

	if ((error = xs_cmd(&xst, XS_LIST, "device", &iovp1, &iov1_cnt)) != 0)
		return (error);

	for (i = 0; i < iov1_cnt; i++) {
		if (strcmp("suspend", (char *)iovp1[i].iov_base) == 0)
			continue;
		snprintf(path, sizeof(path), "device/%s",
		    (char *)iovp1[i].iov_base);
		if ((error = xs_cmd(&xst, XS_LIST, path, &iovp2,
		    &iov2_cnt)) != 0) {
			xs_resfree(&xst, iovp1, iov1_cnt);
			return (error);
		}
		for (j = 0; j < iov2_cnt; j++) {
			xa.xa_parent = sc;
			xa.xa_dmat = &xen_bus_dma_tag;
			strlcpy(xa.xa_name, (char *)iovp1[i].iov_base,
			    sizeof(xa.xa_name));
			snprintf(xa.xa_node, sizeof(xa.xa_node), "device/%s/%s",
			    (char *)iovp1[i].iov_base,
			    (char *)iovp2[j].iov_base);
			if (xs_getprop(sc, xa.xa_node, "backend-id", domid,
			    sizeof(domid)) ||
			    xs_getprop(sc, xa.xa_node, "backend", xa.xa_backend,
			    sizeof(xa.xa_backend))) {
				printf("%s: failed to identify \"backend\" "
				    "for \"%s\"\n", sc->sc_dev.dv_xname,
				    xa.xa_node);
			} else if (atoi(domid, &xa.xa_domid)) {
				printf("%s: non-numeric backend domain id "
				    "\"%s\" for \"%s\"\n", sc->sc_dev.dv_xname,
				    domid, xa.xa_node);
			}
			config_found((struct device *)sc, &xa,
			    xen_attach_print);
		}
		xs_resfree(&xst, iovp2, iov2_cnt);
	}

	return (error);
}

#include <machine/pio.h>

#define	XMI_PORT		0x10
#define	XMI_MAGIC		0x49d2
#define	XMI_UNPLUG_IDE		0x01
#define	XMI_UNPLUG_NIC		0x02
#define	XMI_UNPLUG_IDESEC	0x04

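/*
 * Once the PV drivers take over, QEMU's emulated IDE disks and NICs
 * backed by the same resources must be unplugged: reading XMI_MAGIC
 * from the XMI I/O port confirms the device model supports the unplug
 * protocol, and writing the unplug mask asks it to disconnect them.
 */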
void
xen_disable_emulated_devices(struct xen_softc *sc)
{
#if defined(__i386__) || defined(__amd64__)
	ushort unplug = 0;

	if (inw(XMI_PORT) != XMI_MAGIC) {
		printf("%s: failed to disable emulated devices\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_flags & XSF_UNPLUG_IDE)
		unplug |= XMI_UNPLUG_IDE;
	if (sc->sc_flags & XSF_UNPLUG_IDESEC)
		unplug |= XMI_UNPLUG_IDESEC;
	if (sc->sc_flags & XSF_UNPLUG_NIC)
		unplug |= XMI_UNPLUG_NIC;
	if (unplug)
		outw(XMI_PORT, unplug);
#endif	/* __i386__ || __amd64__ */
}
1340