/*	$NetBSD: btvmei.c,v 1.29 2012/10/27 17:18:28 chs Exp $	*/

/*
 * Copyright (c) 1999
 *	Matthias Drochner.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: btvmei.c,v 1.29 2012/10/27 17:18:28 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <sys/bus.h>
#include <sys/extent.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <dev/pci/btvmeireg.h>
#include <dev/pci/btvmeivar.h>

static int b3_617_match(device_t, cfdata_t, void *);
static void b3_617_attach(device_t, device_t, void *);
#ifdef notyet
static int b3_617_detach(device_t);
#endif
void b3_617_slaveconfig(device_t, struct vme_attach_args *);

static void b3_617_vmeintr(struct b3_617_softc *, unsigned char);
/*
 * mapping resources, needed for deallocation
 */
struct b3_617_vmeresc {
	bus_space_handle_t handle;
	bus_size_t len;
	int firstpage, maplen;
};

CFATTACH_DECL_NEW(btvmei, sizeof(struct b3_617_softc),
    b3_617_match, b3_617_attach, NULL, NULL);

static int
b3_617_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if ((PCI_VENDOR(pa->pa_id) != PCI_VENDOR_BIT3)
	    || (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BIT3_PCIVME617))
		return (0);
	return (1);
}

static void
b3_617_attach(device_t parent, device_t self, void *aux)
{
	struct b3_617_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;

	pci_intr_handle_t ih;
	const char *intrstr;
	struct vmebus_attach_args vaa;

	sc->sc_dev = self;
	sc->sc_pc = pc;
	sc->sc_dmat = pa->pa_dmat;

	pci_aprint_devinfo_fancy(pa, "VME bus adapter", "BIT3 PCI-VME 617", 1);

	/*
	 * Map CSR and mapping table spaces.
	 * Don't map VME window; parts are mapped as needed to
	 * save kernel virtual memory space
	 */
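	/*
	 * The CSRs are reachable both through the memory BAR at 0x14 and
	 * the I/O BAR at 0x10; the memory mapping is preferred and I/O
	 * space is used only as a fallback if that fails.
	 */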
	if (pci_mapreg_map(pa, 0x14,
			   PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
			   0, &sc->csrt, &sc->csrh, NULL, NULL) &&
	    pci_mapreg_map(pa, 0x10,
			   PCI_MAPREG_TYPE_IO,
			   0, &sc->csrt, &sc->csrh, NULL, NULL)) {
		aprint_error_dev(self, "can't map CSR space\n");
		return;
	}

	if (pci_mapreg_map(pa, 0x18,
			   PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
			   0, &sc->mapt, &sc->maph, NULL, NULL)) {
		aprint_error_dev(self, "can't map map space\n");
		return;
	}

	if (pci_mapreg_info(pc, pa->pa_tag, 0x1c,
			    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
			    &sc->vmepbase, 0, 0)) {
		aprint_error_dev(self, "can't get VME range\n");
		return;
	}
	sc->sc_vmet = pa->pa_memt; /* XXX needed for VME mappings */

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	/*
	 * Use a low interrupt level (the lowest?).
	 * We will raise before calling a subdevice's handler.
	 */
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_BIO, b3_617_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	if (b3_617_init(sc))
		return;

	/*
	 * set up all the tags for use by VME devices
	 */
	sc->sc_vct.cookie = self;
	sc->sc_vct.vct_probe = b3_617_vme_probe;
	sc->sc_vct.vct_map = b3_617_map_vme;
	sc->sc_vct.vct_unmap = b3_617_unmap_vme;
	sc->sc_vct.vct_int_map = b3_617_map_vmeint;
	sc->sc_vct.vct_int_establish = b3_617_establish_vmeint;
	sc->sc_vct.vct_int_disestablish = b3_617_disestablish_vmeint;
	sc->sc_vct.vct_dmamap_create = b3_617_dmamap_create;
	sc->sc_vct.vct_dmamap_destroy = b3_617_dmamap_destroy;
	sc->sc_vct.vct_dmamem_alloc = b3_617_dmamem_alloc;
	sc->sc_vct.vct_dmamem_free = b3_617_dmamem_free;

	vaa.va_vct = &(sc->sc_vct);
	vaa.va_bdt = pa->pa_dmat;
	vaa.va_slaveconfig = b3_617_slaveconfig;

	sc->csrwindow.offset = -1;
	sc->dmawindow24.offset = -1;
	sc->dmawindow32.offset = -1;
	config_found(self, &vaa, 0);
}

#ifdef notyet
static int
b3_617_detach(device_t dev)
{
	struct b3_617_softc *sc = device_private(dev);

	b3_617_halt(sc);

	if (sc->sc_ih)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);

	bus_space_unmap(sc->sc_bc, sc->csrbase, 32);
	bus_space_unmap(sc->sc_bc, sc->mapbase, 64*1024);

	return(0);
}
#endif

void
b3_617_slaveconfig(device_t dev, struct vme_attach_args *va)
{
	struct b3_617_softc *sc = device_private(dev);
	vme_chipset_tag_t vmect;
	int i, res;
	const char *name = 0; /* XXX gcc! */

	vmect = &sc->sc_vct;
	if (!va)
		goto freeit;

#ifdef DIAGNOSTIC
	if (vmect != va->va_vct)
		panic("pcivme_slaveconfig: chipset tag?");
#endif

	for (i = 0; i < va->numcfranges; i++) {
		res = vme_space_alloc(vmect, va->r[i].offset,
				      va->r[i].size, va->r[i].am);
		if (res)
			panic("%s: can't alloc slave window %x/%x/%x",
			       device_xname(dev), va->r[i].offset,
			       va->r[i].size, va->r[i].am);

		switch (va->r[i].am & VME_AM_ADRSIZEMASK) {
			/* structure assignments! */
		    case VME_AM_A16:
			sc->csrwindow = va->r[i];
			name = "VME CSR";
			break;
		    case VME_AM_A24:
			sc->dmawindow24 = va->r[i];
			name = "A24 DMA";
			break;
		    case VME_AM_A32:
			sc->dmawindow32 = va->r[i];
			name = "A32 DMA";
			break;
		}
		printf("%s: %s window: %x-%x\n", device_xname(dev),
		       name, va->r[i].offset,
		       va->r[i].offset + va->r[i].size - 1);
	}
	return;

freeit:
	if (sc->csrwindow.offset != -1)
		vme_space_free(vmect, sc->csrwindow.offset,
			       sc->csrwindow.size, sc->csrwindow.am);
	if (sc->dmawindow32.offset != -1)
		vme_space_free(vmect, sc->dmawindow32.offset,
			       sc->dmawindow32.size, sc->dmawindow32.am);
	if (sc->dmawindow24.offset != -1)
		vme_space_free(vmect, sc->dmawindow24.offset,
			       sc->dmawindow24.size, sc->dmawindow24.am);
}

int
b3_617_reset(struct b3_617_softc *sc)
{
	unsigned char status;

	/* reset sequence, ch 5.2 */
	status = read_csr_byte(sc, LOC_STATUS);
	if (status & LSR_NO_CONNECT) {
		printf("%s: not connected\n", device_xname(sc->sc_dev));
		return (-1);
	}
	status = read_csr_byte(sc, REM_STATUS); /* discard */
	write_csr_byte(sc, LOC_CMD1, LC1_CLR_ERROR);
	status = read_csr_byte(sc, LOC_STATUS);
	if (status & LSR_CERROR_MASK) {
		char sbuf[sizeof(BIT3_LSR_BITS) + 64];

		snprintb(sbuf, sizeof(sbuf), BIT3_LSR_BITS, status);
		printf("%s: interface error, lsr=%s\n", device_xname(sc->sc_dev),
		       sbuf);
		return (-1);
	}
	return (0);
}

int
b3_617_init(struct b3_617_softc *sc)
{
	unsigned int i;

	if (b3_617_reset(sc))
		return (-1);

	/* all maps invalid */
	for (i = MR_PCI_VME; i < MR_PCI_VME + MR_PCI_VME_SIZE; i += 4)
		write_mapmem(sc, i, MR_RAM_INVALID);
	for (i = MR_VME_PCI; i < MR_VME_PCI + MR_VME_PCI_SIZE; i += 4)
		write_mapmem(sc, i, MR_RAM_INVALID);
	for (i = MR_DMA_PCI; i < MR_DMA_PCI + MR_DMA_PCI_SIZE; i += 4)
		write_mapmem(sc, i, MR_RAM_INVALID);

	/*
	 * set up scatter page allocation control
	 */
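	/*
	 * The extent manages byte offsets into the PCI-to-VME scatter
	 * table; b3_617_map_vme() allocates one 4-byte map entry per
	 * mapped VME page from it.
	 */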
	sc->vmeext = extent_create("pcivme", MR_PCI_VME,
				   MR_PCI_VME + MR_PCI_VME_SIZE - 1,
				   sc->vmemap, sizeof(sc->vmemap),
				   EX_NOCOALESCE);
#if 0
	sc->pciext = extent_create("vmepci", MR_VME_PCI,
				   MR_VME_PCI + MR_VME_PCI_SIZE - 1,
				   sc->pcimap, sizeof(sc->pcimap),
				   EX_NOCOALESCE);
	sc->dmaext = extent_create("dmapci", MR_DMA_PCI,
				   MR_DMA_PCI + MR_DMA_PCI_SIZE - 1,
				   sc->dmamap, sizeof(sc->dmamap),
				   EX_NOCOALESCE);
#endif

	/*
	 * init int handler queue,
	 * enable interrupts if PCI interrupt available
	 */
	TAILQ_INIT(&(sc->intrhdls));
	sc->strayintrs = 0;

	if (sc->sc_ih)
		write_csr_byte(sc, LOC_INT_CTRL, LIC_INT_ENABLE);
	/* no error ints */
	write_csr_byte(sc, REM_CMD2, 0); /* enables VME IRQ */

	return (0);
}

#ifdef notyet /* for detach */
void
b3_617_halt(struct b3_617_softc *sc)
{
	/*
	 * because detach code checks for existence of children,
	 * all resources (mappings, VME IRQs, DMA requests)
	 * should be deallocated at this point
	 */

	/* disable IRQ */
	write_csr_byte(sc, LOC_INT_CTRL, 0);
}
#endif

static void
b3_617_vmeintr(struct b3_617_softc *sc, unsigned char lstat)
{
	int level;

	for (level = 7; level >= 1; level--) {
		unsigned char vector;
		struct b3_617_vmeintrhand *ih;
		int found;

		if (!(lstat & (1 << level)))
			continue;

		write_csr_byte(sc, REM_CMD1, level);
		vector = read_csr_byte(sc, REM_IACK);

		found = 0;

		for (ih = sc->intrhdls.tqh_first; ih;
		     ih = ih->ih_next.tqe_next) {
			if ((ih->ih_level == level) &&
			    ((ih->ih_vector == -1) ||
			     (ih->ih_vector == vector))) {
				int s, res;
				/*
				 * We should raise the interrupt level
				 * to ih->ih_prior here. How to do this
				 * machine-independently?
				 * To be safe, raise to the maximum.
				 */
				s = splhigh();
				found |= (res = (*(ih->ih_fun))(ih->ih_arg));
				splx(s);
				if (res)
					ih->ih_count++;
				if (res == 1)
					break;
			}
		}
		if (!found)
			sc->strayintrs++;
	}
}

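/*
 * The bus-specific entry points below take the softc as an opaque
 * "void *vsc" cookie; this macro hides the cast for brevity.
 */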
#define sc ((struct b3_617_softc*)vsc)

int
b3_617_map_vme(void *vsc, vme_addr_t vmeaddr, vme_size_t len,
    vme_am_t am, vme_datasize_t datasizes, vme_swap_t swap,
    bus_space_tag_t *tag, bus_space_handle_t *handle, vme_mapresc_t *resc)
{
	vme_addr_t vmebase, vmeend, va;
	unsigned long maplen, first, i;
	u_int32_t mapreg;
	bus_addr_t pcibase;
	int res;
	struct b3_617_vmeresc *r;

	/* first mapped address */
	vmebase = vmeaddr & ~(VME_PAGESIZE - 1);
	/* base of last mapped page */
	vmeend = (vmeaddr + len - 1) & ~(VME_PAGESIZE - 1);
	/* bytes in scatter table required */
	maplen = ((vmeend - vmebase) / VME_PAGESIZE + 1) * 4;
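	/*
	 * For example, assuming a 4 KiB VME_PAGESIZE, a request for
	 * 0x1800 bytes at VME address 0x2800 touches the pages at
	 * 0x2000 and 0x3000 and thus needs two 4-byte scatter-table
	 * entries (maplen == 8).
	 */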

	if (extent_alloc(sc->vmeext, maplen, 4, 0, EX_FAST, &first))
		return (ENOMEM);

	/*
	 * set up adapter mapping registers
	 */
	mapreg = (am << MR_AMOD_SHIFT) | MR_FC_RRAM | swap;

	for (i = first, va = vmebase;
	     i < first + maplen;
	     i += 4, va += VME_PAGESIZE) {
		write_mapmem(sc, i, mapreg | va);
#ifdef BIT3DEBUG
		printf("mapreg@%lx=%x\n", i, read_mapmem(sc, i));
#endif
	}

#ifdef DIAGNOSTIC
	if (va != vmeend + VME_PAGESIZE)
		panic("b3_617_map_pci_vme: botch");
#endif
	/*
	 * map needed range in PCI space
	 */
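	/*
	 * The offset into the VME aperture is the allocated scatter-table
	 * index (relative to MR_PCI_VME, one 4-byte entry per page) scaled
	 * to pages, plus the requested address's offset within its page.
	 */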
	pcibase = sc->vmepbase + (first - MR_PCI_VME) / 4 * VME_PAGESIZE
	    + (vmeaddr & (VME_PAGESIZE - 1));

	if ((res = bus_space_map(sc->sc_vmet, pcibase, len, 0, handle))) {
		for (i = first; i < first + maplen; i += 4)
			write_mapmem(sc, i, MR_RAM_INVALID);
		extent_free(sc->vmeext, first, maplen, 0);
		return (res);
	}

	*tag = sc->sc_vmet;

	/*
	 * save all data needed for later unmapping
	 */
	r = malloc(sizeof(*r), M_DEVBUF, M_NOWAIT);
	if (r == NULL) {
		/* roll back the PCI mapping and the scatter-table entries */
		bus_space_unmap(sc->sc_vmet, *handle, len);
		for (i = first; i < first + maplen; i += 4)
			write_mapmem(sc, i, MR_RAM_INVALID);
		extent_free(sc->vmeext, first, maplen, 0);
		return (ENOMEM);
	}
	r->handle = *handle;
	r->len = len;
	r->firstpage = first;
	r->maplen = maplen;
	*resc = r;
	return (0);
}

void
b3_617_unmap_vme(void *vsc, vme_mapresc_t resc)
{
	unsigned long i;
	struct b3_617_vmeresc *r = resc;

	/* unmap PCI window */
	bus_space_unmap(sc->sc_vmet, r->handle, r->len);

	for (i = r->firstpage; i < r->firstpage + r->maplen; i += 4)
		write_mapmem(sc, i, MR_RAM_INVALID);

	extent_free(sc->vmeext, r->firstpage, r->maplen, 0);

	free(r, M_DEVBUF);
}

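/*
 * Probe a VME address range by mapping it temporarily, performing dummy
 * reads of the requested width (or running the caller's callback), and
 * then checking the adapter's local status register for a captured
 * bus error.
 */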
int
b3_617_vme_probe(void *vsc, vme_addr_t addr, vme_size_t len, vme_am_t am,
    vme_datasize_t datasize,
    int (*callback)(void *, bus_space_tag_t, bus_space_handle_t), void *cbarg)
{
	bus_space_tag_t tag;
	bus_space_handle_t handle;
	vme_mapresc_t resc;
	int res, i;
	volatile u_int32_t dummy;
	int status;

	res = b3_617_map_vme(vsc, addr, len, am, 0, 0,
			     &tag, &handle, &resc);
	if (res)
		return (res);

	if (read_csr_byte(sc, LOC_STATUS) & LSR_ERROR_MASK) {
		printf("b3_617_vme_probe: error bit not clear - resetting\n");
		write_csr_byte(sc, LOC_CMD1, LC1_CLR_ERROR);
	}

	if (callback)
		res = (*callback)(cbarg, tag, handle);
	else {
		for (i = 0; i < len;) {
			switch (datasize) {
			    case VME_D8:
				dummy = bus_space_read_1(tag, handle, i);
				i++;
				break;
			    case VME_D16:
				dummy = bus_space_read_2(tag, handle, i);
				i += 2;
				break;
			    case VME_D32:
				dummy = bus_space_read_4(tag, handle, i);
				i += 4;
				break;
			    default:
				panic("b3_617_vme_probe: invalid datasize %x",
				      datasize);
			}
		}
	}

	if ((status = read_csr_byte(sc, LOC_STATUS)) & LSR_ERROR_MASK) {
#ifdef BIT3DEBUG
		printf("b3_617_vme_probe: caught error %x\n", status);
#endif
		write_csr_byte(sc, LOC_CMD1, LC1_CLR_ERROR);
		res = EIO;
	}

	b3_617_unmap_vme(vsc, resc);
	return (res);
}

int
b3_617_map_vmeint(void *vsc, int level, int vector, vme_intr_handle_t *handlep)
{
	if (!sc->sc_ih) {
		printf("%s: b3_617_map_vmeint: no IRQ\n",
		       device_xname(sc->sc_dev));
		return (ENXIO);
	}
	/*
	 * We should check whether the interface can pass this interrupt
	 * level at all, but we don't know much about the jumper setting.
	 */
	*handlep = (void *)(long)((level << 8) | vector); /* XXX */
	return (0);
}

void *
b3_617_establish_vmeint(void *vsc, vme_intr_handle_t handle, int prior,
    int (*func)(void *), void *arg)
{
	struct b3_617_vmeintrhand *ih;
	long lv;
	int s;

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("b3_617_establish_vmeint: can't malloc handler info");

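	/* the handle packs (level << 8) | vector, see b3_617_map_vmeint() */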
	lv = (long)handle; /* XXX */

	ih->ih_fun = func;
	ih->ih_arg = arg;
	ih->ih_level = lv >> 8;
	ih->ih_vector = lv & 0xff;
	ih->ih_prior = prior;
	ih->ih_count = 0;

	s = splhigh();
	TAILQ_INSERT_TAIL(&(sc->intrhdls), ih, ih_next);
	splx(s);

	return (ih);
}

void
b3_617_disestablish_vmeint(void *vsc, void *cookie)
{
	struct b3_617_vmeintrhand *ih = cookie;
	int s;

	if (!ih) {
		printf("b3_617_disestablish_vmeint: NULL arg\n");
		return;
	}

	s = splhigh();
	TAILQ_REMOVE(&(sc->intrhdls), ih, ih_next);
	splx(s);

	free(ih, M_DEVBUF);
}

int
b3_617_intr(void *vsc)
{
	int handled = 0;

	/* follows ch. 5.5.5 (reordered for speed) */
	while (read_csr_byte(sc, LOC_INT_CTRL) & LIC_INT_PENDING) {
		unsigned char lstat;

		handled = 1;

		/* no error interrupts! */

		lstat = read_csr_byte(sc, LDMA_CMD);
		if ((lstat & LDC_DMA_DONE) && (lstat & LDC_DMA_INT_ENABLE)) {
			/* DMA done indicator flag */
			write_csr_byte(sc, LDMA_CMD, lstat & (~LDC_DMA_DONE));
#if 0
			b3_617_cntlrdma_done(sc);
#endif
			continue;
		}

		lstat = read_csr_byte(sc, LOC_INT_STATUS);
		if (lstat & LIS_CINT_MASK) {
			/* VME backplane interrupt, ch. 5.5.3 */
			b3_617_vmeintr(sc, lstat);
		}

		/* for now, ignore "mailbox interrupts" */

		lstat = read_csr_byte(sc, LOC_STATUS);
		if (lstat & LSR_PR_STATUS) {
			/* PR interrupt received from REMOTE  */
			write_csr_byte(sc, LOC_CMD1, LC1_CLR_PR_INT);
			continue;
		}

		lstat = read_csr_byte(sc, REM_STATUS);
		if (lstat & RSR_PT_STATUS) {
			/* PT interrupt is set */
			write_csr_byte(sc, REM_CMD1, RC1_CLR_PT_INT);
			continue;
		}
	}
	return (handled);
}

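/*
 * Controller DMA is not implemented; the dmamap/dmamem entry points
 * below are stubs that simply fail.
 */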
int
b3_617_dmamap_create(void *vsc, vme_size_t len, vme_am_t am,
    vme_datasize_t datasize, vme_swap_t swap, int nsegs, vme_size_t segsz,
    vme_addr_t bound, int flags, bus_dmamap_t *mapp)
{
	return (EINVAL);
}

void
b3_617_dmamap_destroy(void *vsc, bus_dmamap_t map)
{
}

int
b3_617_dmamem_alloc(void *vsc, vme_size_t len, vme_am_t am,
    vme_datasize_t datasizes, vme_swap_t swap, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	return (EINVAL);
}

void
b3_617_dmamem_free(void *vsc, bus_dma_segment_t *segs, int nsegs)
{
}

#undef sc