/* $NetBSD: btvmei.c,v 1.37 2023/12/05 15:41:34 thorpej Exp $ */

/*
 * Copyright (c) 1999
 *	Matthias Drochner.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: btvmei.c,v 1.37 2023/12/05 15:41:34 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/vmem.h>

#include <sys/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <dev/pci/btvmeireg.h>
#include <dev/pci/btvmeivar.h>

static int b3_617_match(device_t, cfdata_t, void *);
static void b3_617_attach(device_t, device_t, void *);
#ifdef notyet
static int b3_617_detach(device_t);
#endif
void b3_617_slaveconfig(device_t, struct vme_attach_args *);

static void b3_617_vmeintr(struct b3_617_softc *, unsigned char);

/*
 * mapping resources, needed for deallocation
 */
struct b3_617_vmeresc {
	bus_space_handle_t handle;
	bus_size_t len;
	int firstpage, maplen;
};

CFATTACH_DECL_NEW(btvmei, sizeof(struct b3_617_softc),
    b3_617_match, b3_617_attach, NULL, NULL);

static int
b3_617_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if ((PCI_VENDOR(pa->pa_id) != PCI_VENDOR_BIT3)
	    || (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BIT3_PCIVME617))
		return (0);
	return (1);
}

static void
b3_617_attach(device_t parent, device_t self, void *aux)
{
	struct b3_617_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;

	pci_intr_handle_t ih;
	const char *intrstr;
	struct vmebus_attach_args vaa;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	sc->sc_pc = pc;
	sc->sc_dmat = pa->pa_dmat;

	pci_aprint_devinfo_fancy(pa, "VME bus adapter", "BIT3 PCI-VME 617", 1);

	/*
	 * Map CSR and mapping table spaces.
	 * Don't map VME window; parts are mapped as needed to
	 * save kernel virtual memory space
	 */
	if (pci_mapreg_map(pa, 0x14,
			   PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
			   0, &sc->csrt, &sc->csrh, NULL, NULL) &&
	    pci_mapreg_map(pa, 0x10,
			   PCI_MAPREG_TYPE_IO,
			   0, &sc->csrt, &sc->csrh, NULL, NULL)) {
		aprint_error_dev(self, "can't map CSR space\n");
		return;
	}

	if (pci_mapreg_map(pa, 0x18,
			   PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
			   0, &sc->mapt, &sc->maph, NULL, NULL)) {
		aprint_error_dev(self, "can't map map space\n");
		return;
	}

	if (pci_mapreg_info(pc, pa->pa_tag, 0x1c,
			    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
			    &sc->vmepbase, 0, 0)) {
		aprint_error_dev(self, "can't get VME range\n");
		return;
	}
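	/*
	 * Only the PCI base address of the VME window (BAR 0x1c) is
	 * recorded here; the window itself stays unmapped and
	 * b3_617_map_vme() maps the required sub-ranges on demand.
	 */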
	sc->sc_vmet = pa->pa_memt; /* XXX needed for VME mappings */

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	/*
	 * Use a low interrupt level (the lowest?).
	 * We will raise before calling a subdevice's handler.
	 */
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_BIO, b3_617_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	if (b3_617_init(sc))
		return;

	/*
	 * set up all the tags for use by VME devices
	 */
	sc->sc_vct.cookie = self;
	sc->sc_vct.vct_probe = b3_617_vme_probe;
	sc->sc_vct.vct_map = b3_617_map_vme;
	sc->sc_vct.vct_unmap = b3_617_unmap_vme;
	sc->sc_vct.vct_int_map = b3_617_map_vmeint;
	sc->sc_vct.vct_int_establish = b3_617_establish_vmeint;
	sc->sc_vct.vct_int_disestablish = b3_617_disestablish_vmeint;
	sc->sc_vct.vct_dmamap_create = b3_617_dmamap_create;
	sc->sc_vct.vct_dmamap_destroy = b3_617_dmamap_destroy;
	sc->sc_vct.vct_dmamem_alloc = b3_617_dmamem_alloc;
	sc->sc_vct.vct_dmamem_free = b3_617_dmamem_free;

	vaa.va_vct = &(sc->sc_vct);
	vaa.va_bdt = pa->pa_dmat;
	vaa.va_slaveconfig = b3_617_slaveconfig;

	sc->csrwindow.offset = -1;
	sc->dmawindow24.offset = -1;
	sc->dmawindow32.offset = -1;
	config_found(self, &vaa, 0, CFARGS_NONE);
}

#ifdef notyet
static int
b3_617_detach(device_t dev)
{
	struct b3_617_softc *sc = device_private(dev);

	b3_617_halt(sc);

	if (sc->sc_ih)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);

	bus_space_unmap(sc->sc_bc, sc->csrbase, 32);
	bus_space_unmap(sc->sc_bc, sc->mapbase, 64*1024);

	return(0);
}
#endif

void
b3_617_slaveconfig(device_t dev, struct vme_attach_args *va)
{
	struct b3_617_softc *sc = device_private(dev);
	vme_chipset_tag_t vmect;
	int i, res;
	const char *name = 0; /* XXX gcc! */

	vmect = &sc->sc_vct;
	if (!va)
		goto freeit;

#ifdef DIAGNOSTIC
	if (vmect != va->va_vct)
		panic("pcivme_slaveconfig: chipset tag?");
#endif

	for (i = 0; i < va->numcfranges; i++) {
		res = vme_space_alloc(vmect, va->r[i].offset,
				      va->r[i].size, va->r[i].am);
		if (res)
			panic("%s: can't alloc slave window %x/%x/%x",
			       device_xname(dev), va->r[i].offset,
			       va->r[i].size, va->r[i].am);

		switch (va->r[i].am & VME_AM_ADRSIZEMASK) {
			/* structure assignments! */
		    case VME_AM_A16:
			sc->csrwindow = va->r[i];
			name = "VME CSR";
			break;
		    case VME_AM_A24:
			sc->dmawindow24 = va->r[i];
			name = "A24 DMA";
			break;
		    case VME_AM_A32:
			sc->dmawindow32 = va->r[i];
			name = "A32 DMA";
			break;
		}
		printf("%s: %s window: %x-%x\n", device_xname(dev),
		       name, va->r[i].offset,
		       va->r[i].offset + va->r[i].size - 1);
	}
	return;

freeit:
	if (sc->csrwindow.offset != -1)
		vme_space_free(vmect, sc->csrwindow.offset,
			       sc->csrwindow.size, sc->csrwindow.am);
	if (sc->dmawindow32.offset != -1)
		vme_space_free(vmect, sc->dmawindow32.offset,
			       sc->dmawindow32.size, sc->dmawindow32.am);
	if (sc->dmawindow24.offset != -1)
		vme_space_free(vmect, sc->dmawindow24.offset,
			       sc->dmawindow24.size, sc->dmawindow24.am);
}

int
b3_617_reset(struct b3_617_softc *sc)
{
	unsigned char status;

	/* reset sequence, ch 5.2 */
	status = read_csr_byte(sc, LOC_STATUS);
	if (status & LSR_NO_CONNECT) {
		printf("%s: not connected\n", device_xname(sc->sc_dev));
		return (-1);
	}
	status = read_csr_byte(sc, REM_STATUS); /* discard */
	write_csr_byte(sc, LOC_CMD1, LC1_CLR_ERROR);
	status = read_csr_byte(sc, LOC_STATUS);
	if (status & LSR_CERROR_MASK) {
		char sbuf[sizeof(BIT3_LSR_BITS) + 64];

		snprintb(sbuf, sizeof(sbuf), BIT3_LSR_BITS, status);
		printf("%s: interface error, lsr=%s\n", device_xname(sc->sc_dev),
		       sbuf);
		return (-1);
	}
	return (0);
}

int
b3_617_init(struct b3_617_softc *sc)
{
	unsigned int i;

	if (b3_617_reset(sc))
		return (-1);

	/* all maps invalid */
	for (i = MR_PCI_VME; i < MR_PCI_VME + MR_PCI_VME_SIZE; i += 4)
		write_mapmem(sc, i, MR_RAM_INVALID);
	for (i = MR_VME_PCI; i < MR_VME_PCI + MR_VME_PCI_SIZE; i += 4)
		write_mapmem(sc, i, MR_RAM_INVALID);
	for (i = MR_DMA_PCI; i < MR_DMA_PCI + MR_DMA_PCI_SIZE; i += 4)
		write_mapmem(sc, i, MR_RAM_INVALID);

	/*
	 * set up scatter page allocation control
	 */
	sc->vme_arena = vmem_create("pcivme",
				    MR_PCI_VME,		/* base */
				    MR_PCI_VME_SIZE,	/* size */
				    4,			/* quantum */
				    NULL,		/* allocfn */
				    NULL,		/* releasefn */
				    NULL,		/* source */
				    0,			/* qcache_max */
				    VM_SLEEP,
				    IPL_NONE);
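	/*
	 * The arena hands out offsets into the PCI-to-VME mapping RAM;
	 * with a quantum of 4, each allocated unit is one 32-bit mapping
	 * register covering one VME_PAGESIZE page (see b3_617_map_vme()).
	 */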
#if 0
	sc->vme_arena = vmem_create("vmepci",
				    MR_VME_PCI,		/* base */
				    MR_VME_PCI_SIZE,	/* size */
				    4,			/* quantum */
				    NULL,		/* allocfn */
				    NULL,		/* releasefn */
				    NULL,		/* source */
				    0,			/* qcache_max */
				    VM_SLEEP,
				    IPL_NONE);

	sc->dma_arena = vmem_create("dmapci",
				    MR_DMA_PCI,		/* base */
				    MR_DMA_PCI_SIZE,	/* size */
				    XXX,		/* quantum */
				    NULL,		/* allocfn */
				    NULL,		/* releasefn */
				    NULL,		/* source */
				    0,			/* qcache_max */
				    VM_SLEEP,
				    IPL_VM);
#endif

	/*
	 * init int handler queue,
	 * enable interrupts if PCI interrupt available
	 */
	TAILQ_INIT(&(sc->intrhdls));
	sc->strayintrs = 0;

	if (sc->sc_ih)
		write_csr_byte(sc, LOC_INT_CTRL, LIC_INT_ENABLE);
	/* no error ints */
	write_csr_byte(sc, REM_CMD2, 0); /* enables VME IRQ */

	return (0);
}

#ifdef notyet /* for detach */
void
b3_617_halt(struct b3_617_softc *sc)
{
	/*
	 * because detach code checks for existence of children,
	 * all resources (mappings, VME IRQs, DMA requests)
	 * should be deallocated at this point
	 */

	/* disable IRQ */
	write_csr_byte(sc, LOC_INT_CTRL, 0);
}
#endif

static void
b3_617_vmeintr(struct b3_617_softc *sc, unsigned char lstat)
{
	int level;

	for (level = 7; level >= 1; level--) {
		unsigned char vector;
		struct b3_617_vmeintrhand *ih;
		int found;

		if (!(lstat & (1 << level)))
			continue;

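		/*
		 * Selecting the level in REM_CMD1 and reading REM_IACK
		 * presumably makes the remote card run the VME interrupt
		 * acknowledge cycle for that level and return the
		 * interrupter's status/ID vector (cf. the ch. 5.5.3
		 * reference in b3_617_intr()).
		 */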
		write_csr_byte(sc, REM_CMD1, level);
		vector = read_csr_byte(sc, REM_IACK);

		found = 0;

		for (ih = sc->intrhdls.tqh_first; ih;
		     ih = ih->ih_next.tqe_next) {
			if ((ih->ih_level == level) &&
			    ((ih->ih_vector == -1) ||
			     (ih->ih_vector == vector))) {
				int s, res;
				/*
				 * We should raise the interrupt level
				 * to ih->ih_prior here. How to do this
				 * machine-independently?
				 * To be safe, raise to the maximum.
				 */
				s = splhigh();
				found |= (res = (*(ih->ih_fun))(ih->ih_arg));
				splx(s);
				if (res)
					ih->ih_count++;
				if (res == 1)
					break;
			}
		}
		if (!found)
			sc->strayintrs++;
	}
}

#define sc ((struct b3_617_softc*)vsc)

int
b3_617_map_vme(void *vsc, vme_addr_t vmeaddr, vme_size_t len,
    vme_am_t am, vme_datasize_t datasizes, vme_swap_t swap,
    bus_space_tag_t *tag, bus_space_handle_t *handle, vme_mapresc_t *resc)
{
	vme_addr_t vmebase, vmeend, va;
	unsigned long maplen, i;
	vmem_addr_t first;
	u_int32_t mapreg;
	bus_addr_t pcibase;
	int res;
	struct b3_617_vmeresc *r;

	/* first mapped address */
	vmebase = vmeaddr & ~(VME_PAGESIZE - 1);
	/* base of last mapped page */
	vmeend = (vmeaddr + len - 1) & ~(VME_PAGESIZE - 1);
	/* bytes in scatter table required */
	maplen = ((vmeend - vmebase) / VME_PAGESIZE + 1) * 4;
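	/*
	 * One 4-byte mapping register per VME page: e.g. a small request
	 * that straddles a page boundary still consumes two entries
	 * (8 bytes) of mapping RAM.
	 */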

	if (vmem_alloc(sc->vme_arena, maplen, VM_BESTFIT | VM_NOSLEEP, &first))
		return (ENOMEM);

	/*
	 * set up adapter mapping registers
	 */
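	/*
	 * Each register combines the address modifier, the swap mode and
	 * the MR_FC_RRAM function code; the per-page VME address is OR'd
	 * in below, one entry per page.
	 */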
	mapreg = (am << MR_AMOD_SHIFT) | MR_FC_RRAM | swap;

	for (i = first, va = vmebase;
	     i < first + maplen;
	     i += 4, va += VME_PAGESIZE) {
		write_mapmem(sc, i, mapreg | va);
#ifdef BIT3DEBUG
		printf("mapreg@%lx=%x\n", i, read_mapmem(sc, i));
#endif
	}

#ifdef DIAGNOSTIC
	if (va != vmeend + VME_PAGESIZE)
		panic("b3_617_map_pci_vme: botch");
#endif
	/*
	 * map needed range in PCI space
	 */
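	/*
	 * The VME window BAR exposes the mapped pages back to back, in the
	 * same order as the mapping registers, so the PCI address is the
	 * window base plus the entry index times the page size plus the
	 * offset within the first page.
	 */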
	pcibase = sc->vmepbase + (first - MR_PCI_VME) / 4 * VME_PAGESIZE
	    + (vmeaddr & (VME_PAGESIZE - 1));

	if ((res = bus_space_map(sc->sc_vmet, pcibase, len, 0, handle))) {
		for (i = first; i < first + maplen; i += 4)
			write_mapmem(sc, i, MR_RAM_INVALID);
		vmem_free(sc->vme_arena, first, maplen);
		return (res);
	}

	*tag = sc->sc_vmet;

	/*
	 * save all data needed for later unmapping
	 */
	r = kmem_alloc(sizeof(*r), KM_SLEEP);
	r->handle = *handle;
	r->len = len;
	r->firstpage = first;
	r->maplen = maplen;
	*resc = r;
	return (0);
}

void
b3_617_unmap_vme(void *vsc, vme_mapresc_t resc)
{
	unsigned long i;
	struct b3_617_vmeresc *r = resc;

	/* unmap PCI window */
	bus_space_unmap(sc->sc_vmet, r->handle, r->len);

	for (i = r->firstpage; i < r->firstpage + r->maplen; i += 4)
		write_mapmem(sc, i, MR_RAM_INVALID);

	vmem_free(sc->vme_arena, r->firstpage, r->maplen);
	kmem_free(r, sizeof(*r));
}

int
b3_617_vme_probe(void *vsc, vme_addr_t addr, vme_size_t len, vme_am_t am,
    vme_datasize_t datasize,
    int (*callback)(void *, bus_space_tag_t, bus_space_handle_t), void *cbarg)
{
	bus_space_tag_t tag;
	bus_space_handle_t handle;
	vme_mapresc_t resc;
	int res, i;
	volatile u_int32_t dummy;
	int status;

	res = b3_617_map_vme(vsc, addr, len, am, 0, 0,
			     &tag, &handle, &resc);
	if (res)
		return (res);

	if (read_csr_byte(sc, LOC_STATUS) & LSR_ERROR_MASK) {
		printf("b3_617_vme_badaddr: error bit not clean - resetting\n");
		write_csr_byte(sc, LOC_CMD1, LC1_CLR_ERROR);
	}

	if (callback)
		res = (*callback)(cbarg, tag, handle);
	else {
		for (i = 0; i < len;) {
			switch (datasize) {
			    case VME_D8:
				dummy = bus_space_read_1(tag, handle, i);
				(void)dummy;
				i++;
				break;
			    case VME_D16:
				dummy = bus_space_read_2(tag, handle, i);
				(void)dummy;
				i += 2;
				break;
			    case VME_D32:
				dummy = bus_space_read_4(tag, handle, i);
				(void)dummy;
				i += 4;
				break;
			    default:
				panic("b3_617_vme_probe: invalid datasize %x",
				      datasize);
			}
		}
	}

	if ((status = read_csr_byte(sc, LOC_STATUS)) & LSR_ERROR_MASK) {
#ifdef BIT3DEBUG
		printf("b3_617_vme_badaddr: caught error %x\n", status);
#endif
		write_csr_byte(sc, LOC_CMD1, LC1_CLR_ERROR);
		res = EIO;
	}

	b3_617_unmap_vme(vsc, resc);
	return (res);
}

int
b3_617_map_vmeint(void *vsc, int level, int vector, vme_intr_handle_t *handlep)
{
	if (!sc->sc_ih) {
		printf("%s: b3_617_map_vmeint: no IRQ\n",
		       device_xname(sc->sc_dev));
		return (ENXIO);
	}
	/*
	 * We should check whether the interface can pass this interrupt
	 * level at all, but we don't know much about the jumper setting.
	 */
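	/*
	 * Encode level and vector into the opaque handle (level in the
	 * upper byte, vector in the lower byte); b3_617_establish_vmeint()
	 * decodes it the same way.
	 */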
	*handlep = (void *)(long)((level << 8) | vector); /* XXX */
	return (0);
}

void *
b3_617_establish_vmeint(void *vsc, vme_intr_handle_t handle, int prior,
    int (*func)(void *), void *arg)
{
	struct b3_617_vmeintrhand *ih;
	long lv;
	int s;

	ih = kmem_alloc(sizeof *ih, KM_SLEEP);

	lv = (long)handle; /* XXX */

	ih->ih_fun = func;
	ih->ih_arg = arg;
	ih->ih_level = lv >> 8;
	ih->ih_vector = lv & 0xff;
	ih->ih_prior = prior;
	ih->ih_count = 0;

	s = splhigh();
	TAILQ_INSERT_TAIL(&(sc->intrhdls), ih, ih_next);
	splx(s);

	return (ih);
}

void
b3_617_disestablish_vmeint(void *vsc, void *cookie)
{
	struct b3_617_vmeintrhand *ih = cookie;
	int s;

	if (!ih) {
		printf("b3_617_unmap_vmeint: NULL arg\n");
		return;
	}

	s = splhigh();
	TAILQ_REMOVE(&(sc->intrhdls), ih, ih_next);
	splx(s);

	kmem_free(ih, sizeof(*ih));
}

int
b3_617_intr(void *vsc)
{
	int handled = 0;

	/* follows ch. 5.5.5 (reordered for speed) */
	while (read_csr_byte(sc, LOC_INT_CTRL) & LIC_INT_PENDING) {
		unsigned char lstat;

		handled = 1;

		/* no error interrupts! */

		lstat = read_csr_byte(sc, LDMA_CMD);
		if ((lstat & LDC_DMA_DONE) && (lstat & LDC_DMA_INT_ENABLE)) {
			/* DMA done indicator flag */
			write_csr_byte(sc, LDMA_CMD, lstat & (~LDC_DMA_DONE));
#if 0
			b3_617_cntlrdma_done(sc);
#endif
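			/*
			 * Controller DMA is not supported yet (see the
			 * EINVAL stubs below), so only the DONE flag is
			 * acknowledged here.
			 */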
			continue;
		}

		lstat = read_csr_byte(sc, LOC_INT_STATUS);
		if (lstat & LIS_CINT_MASK) {
			/* VME backplane interrupt, ch. 5.5.3 */
			b3_617_vmeintr(sc, lstat);
		}

		/* for now, ignore "mailbox interrupts" */

		lstat = read_csr_byte(sc, LOC_STATUS);
		if (lstat & LSR_PR_STATUS) {
			/* PR interrupt received from REMOTE  */
			write_csr_byte(sc, LOC_CMD1, LC1_CLR_PR_INT);
			continue;
		}

		lstat = read_csr_byte(sc, REM_STATUS);
		if (lstat & RSR_PT_STATUS) {
			/* PT interrupt is set */
			write_csr_byte(sc, REM_CMD1, RC1_CLR_PT_INT);
			continue;
		}
	}
	return (handled);
}

int
b3_617_dmamap_create(void *vsc, vme_size_t len, vme_am_t am,
    vme_datasize_t datasize, vme_swap_t swap, int nsegs, vme_size_t segsz,
    vme_addr_t bound, int flags, bus_dmamap_t *mapp)
{
	return (EINVAL);
}

void
b3_617_dmamap_destroy(void *vsc, bus_dmamap_t map)
{
}

int
b3_617_dmamem_alloc(void *vsc, vme_size_t len, vme_am_t am,
    vme_datasize_t datasizes, vme_swap_t swap, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	return (EINVAL);
}

void
b3_617_dmamem_free(void *vsc, bus_dma_segment_t *segs, int nsegs)
{
}

#undef sc