/*	$NetBSD: mvmebus.c,v 1.24 2021/08/07 16:19:13 thorpej Exp $	*/

/*-
 * Copyright (c) 2000, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Steve C. Woodford.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mvmebus.c,v 1.24 2021/08/07 16:19:13 thorpej Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/kcore.h>

#include <sys/cpu.h>
#include <sys/bus.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <dev/mvme/mvmebus.h>

#ifdef DIAGNOSTIC
int	mvmebus_dummy_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *);
void	mvmebus_dummy_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	mvmebus_dummy_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);
void	mvmebus_dummy_dmamem_free(bus_dma_tag_t, bus_dma_segment_t *, int);
#endif

#ifdef DEBUG
static const char *mvmebus_mod_string(vme_addr_t, vme_size_t,
	    vme_am_t, vme_datasize_t);
#endif

static void mvmebus_offboard_ram(struct mvmebus_softc *);
static int mvmebus_dmamap_load_common(struct mvmebus_softc *, bus_dmamap_t);

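/*
 * Capability flags corresponding to each of the eight VMEbus address
 * modifier mode codes.  The table is ordered so that it can be indexed
 * by the low three bits of the AM code (this is how MVMEBUS_AM2CAP() is
 * assumed to use it; see mvmebus.h).
 */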
vme_am_t	_mvmebus_am_cap[] = {
	MVMEBUS_AM_CAP_BLKD64 | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_DATA   | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_PROG   | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_BLK    | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_BLKD64 | MVMEBUS_AM_CAP_SUPER,
	MVMEBUS_AM_CAP_DATA   | MVMEBUS_AM_CAP_SUPER,
	MVMEBUS_AM_CAP_PROG   | MVMEBUS_AM_CAP_SUPER,
	MVMEBUS_AM_CAP_BLK    | MVMEBUS_AM_CAP_SUPER
};

const char *mvmebus_irq_name[] = {
	"vmeirq0", "vmeirq1", "vmeirq2", "vmeirq3",
	"vmeirq4", "vmeirq5", "vmeirq6", "vmeirq7"
};

extern phys_ram_seg_t mem_clusters[];
extern int mem_cluster_cnt;


static void
mvmebus_offboard_ram(struct mvmebus_softc *sc)
{
	struct mvmebus_range *svr, *mvr;
	vme_addr_t start, end, size;
	int i;

	/*
	 * If we have any offboard RAM (i.e. a VMEbus RAM board) then
	 * we need to record its details since it's effectively another
	 * VMEbus slave image as far as we're concerned.
	 * The chip-specific backend will have reserved sc->sc_slaves[0]
	 * for exactly this purpose.
	 */
	svr = sc->sc_slaves;
	if (mem_cluster_cnt < 2) {
		svr->vr_am = MVMEBUS_AM_DISABLED;
		return;
	}

	start = mem_clusters[1].start;
	size = mem_clusters[1].size - 1;
	end = start + size;

	/*
	 * Figure out which VMEbus master image the RAM is
	 * visible through. This will tell us the address
	 * modifier and datasizes it uses, as well as allowing
	 * us to calculate its `real' VMEbus address.
	 *
	 * XXX FIXME: This is broken if the RAM is mapped through
	 * a translated address space. For example, on mvme167 it's
	 * perfectly legal to set up the following A32 mapping:
	 *
	 *  vr_locaddr  == 0x80000000
	 *  vr_vmestart == 0x10000000
	 *  vr_vmeend   == 0x10ffffff
	 *
	 * In this case, RAM at VMEbus address 0x10800000 will appear at local
	 * address 0x80800000, but we need to set the slave vr_vmestart to
	 * 0x10800000.
	 */
	for (i = 0, mvr = sc->sc_masters; i < sc->sc_nmasters; i++, mvr++) {
		vme_addr_t vstart = mvr->vr_locstart + mvr->vr_vmestart;

		if (start >= vstart &&
		    end <= vstart + (mvr->vr_vmeend - mvr->vr_vmestart))
			break;
	}
	if (i == sc->sc_nmasters) {
		svr->vr_am = MVMEBUS_AM_DISABLED;
#ifdef DEBUG
		printf("%s: No VMEbus master mapping for offboard RAM!\n",
		    device_xname(sc->sc_dev));
#endif
		return;
	}

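	/*
	 * Describe the offboard RAM in slave range #0, borrowing the
	 * address modifier, datasize and mask of the master image it
	 * is visible through.
	 */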
	svr->vr_locstart = start;
	svr->vr_vmestart = start & mvr->vr_mask;
	svr->vr_vmeend = svr->vr_vmestart + size;
	svr->vr_datasize = mvr->vr_datasize;
	svr->vr_mask = mvr->vr_mask;
	svr->vr_am = mvr->vr_am & VME_AM_ADRSIZEMASK;
	svr->vr_am |= MVMEBUS_AM_CAP_DATA  | MVMEBUS_AM_CAP_PROG |
		      MVMEBUS_AM_CAP_SUPER | MVMEBUS_AM_CAP_USER;
}

void
mvmebus_attach(struct mvmebus_softc *sc)
{
	struct vmebus_attach_args vaa;
	int i;

	/* Zap the IRQ reference counts */
	for (i = 0; i < 8; i++)
		sc->sc_irqref[i] = 0;

	/* If there's offboard RAM, get its VMEbus slave attributes */
	mvmebus_offboard_ram(sc);

#ifdef DEBUG
	for (i = 0; i < sc->sc_nmasters; i++) {
		struct mvmebus_range *vr = &sc->sc_masters[i];
		if (vr->vr_am == MVMEBUS_AM_DISABLED) {
			printf("%s: Master#%d: disabled\n",
			    device_xname(sc->sc_dev), i);
			continue;
		}
		printf("%s: Master#%d: 0x%08lx -> %s\n",
		    device_xname(sc->sc_dev), i,
		    vr->vr_locstart + (vr->vr_vmestart & vr->vr_mask),
		    mvmebus_mod_string(vr->vr_vmestart,
			(vr->vr_vmeend - vr->vr_vmestart) + 1,
			vr->vr_am, vr->vr_datasize));
	}

	for (i = 0; i < sc->sc_nslaves; i++) {
		struct mvmebus_range *vr = &sc->sc_slaves[i];
		if (vr->vr_am == MVMEBUS_AM_DISABLED) {
			printf("%s:  Slave#%d: disabled\n",
			    device_xname(sc->sc_dev), i);
			continue;
		}
		printf("%s:  Slave#%d: 0x%08lx -> %s\n",
		    device_xname(sc->sc_dev), i, vr->vr_locstart,
		    mvmebus_mod_string(vr->vr_vmestart,
			(vr->vr_vmeend - vr->vr_vmestart) + 1,
			vr->vr_am, vr->vr_datasize));
	}
#endif

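	/*
	 * Fill in the MI vme(4) chipset tag and the VMEbus bus_dma tag;
	 * both are handed to child devices below via config_found().
	 */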
	sc->sc_vct.cookie = sc;
	sc->sc_vct.vct_probe = mvmebus_probe;
	sc->sc_vct.vct_map = mvmebus_map;
	sc->sc_vct.vct_unmap = mvmebus_unmap;
	sc->sc_vct.vct_int_map = mvmebus_intmap;
	sc->sc_vct.vct_int_evcnt = mvmebus_intr_evcnt;
	sc->sc_vct.vct_int_establish = mvmebus_intr_establish;
	sc->sc_vct.vct_int_disestablish = mvmebus_intr_disestablish;
	sc->sc_vct.vct_dmamap_create = mvmebus_dmamap_create;
	sc->sc_vct.vct_dmamap_destroy = mvmebus_dmamap_destroy;
	sc->sc_vct.vct_dmamem_alloc = mvmebus_dmamem_alloc;
	sc->sc_vct.vct_dmamem_free = mvmebus_dmamem_free;

	sc->sc_mvmedmat._cookie = sc;
	sc->sc_mvmedmat._dmamap_load = mvmebus_dmamap_load;
	sc->sc_mvmedmat._dmamap_load_mbuf = mvmebus_dmamap_load_mbuf;
	sc->sc_mvmedmat._dmamap_load_uio = mvmebus_dmamap_load_uio;
	sc->sc_mvmedmat._dmamap_load_raw = mvmebus_dmamap_load_raw;
	sc->sc_mvmedmat._dmamap_unload = mvmebus_dmamap_unload;
	sc->sc_mvmedmat._dmamap_sync = mvmebus_dmamap_sync;
	sc->sc_mvmedmat._dmamem_map = mvmebus_dmamem_map;
	sc->sc_mvmedmat._dmamem_unmap = mvmebus_dmamem_unmap;
	sc->sc_mvmedmat._dmamem_mmap = mvmebus_dmamem_mmap;

#ifdef DIAGNOSTIC
	sc->sc_mvmedmat._dmamap_create = mvmebus_dummy_dmamap_create;
	sc->sc_mvmedmat._dmamap_destroy = mvmebus_dummy_dmamap_destroy;
	sc->sc_mvmedmat._dmamem_alloc = mvmebus_dummy_dmamem_alloc;
	sc->sc_mvmedmat._dmamem_free = mvmebus_dummy_dmamem_free;
#else
	sc->sc_mvmedmat._dmamap_create = NULL;
	sc->sc_mvmedmat._dmamap_destroy = NULL;
	sc->sc_mvmedmat._dmamem_alloc = NULL;
	sc->sc_mvmedmat._dmamem_free = NULL;
#endif

	vaa.va_vct = &sc->sc_vct;
	vaa.va_bdt = &sc->sc_mvmedmat;
	vaa.va_slaveconfig = NULL;

	config_found(sc->sc_dev, &vaa, 0, CFARGS_NONE);
}

int
mvmebus_map(void *vsc, vme_addr_t vmeaddr, vme_size_t len, vme_am_t am,
    vme_datasize_t datasize, vme_swap_t swap, bus_space_tag_t *tag,
    bus_space_handle_t *handle, vme_mapresc_t *resc)
{
	struct mvmebus_softc *sc;
	struct mvmebus_mapresc *mr;
	struct mvmebus_range *vr;
	vme_addr_t end;
	vme_am_t cap, as;
	paddr_t paddr;
	int rv, i;

	sc = vsc;
	end = (vmeaddr + len) - 1;
	paddr = 0;
	vr = sc->sc_masters;
	cap = MVMEBUS_AM2CAP(am);
	as = am & VME_AM_ADRSIZEMASK;

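	/*
	 * Find an enabled master image whose capabilities, address space,
	 * datasize and VMEbus range cover the requested mapping.
	 */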
	for (i = 0; i < sc->sc_nmasters && paddr == 0; i++, vr++) {
		if (vr->vr_am == MVMEBUS_AM_DISABLED)
			continue;

		if (cap == (vr->vr_am & cap) &&
		    as == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    datasize <= vr->vr_datasize &&
		    vmeaddr >= vr->vr_vmestart && end < vr->vr_vmeend)
			paddr = vr->vr_locstart + (vmeaddr & vr->vr_mask);
	}
	if (paddr == 0)
		return (ENOMEM);

	rv = bus_space_map(sc->sc_bust, paddr, len, 0, handle);
	if (rv != 0)
		return (rv);

	/* Allocate space for the resource tag */
	mr = malloc(sizeof(*mr), M_DEVBUF, M_WAITOK);

	/* Record the range's details */
	mr->mr_am = am;
	mr->mr_datasize = datasize;
	mr->mr_addr = vmeaddr;
	mr->mr_size = len;
	mr->mr_handle = *handle;
	mr->mr_range = i;

	*tag = sc->sc_bust;
	*resc = (vme_mapresc_t *) mr;

	return (0);
}

/* ARGSUSED */
void
mvmebus_unmap(void *vsc, vme_mapresc_t resc)
{
	struct mvmebus_softc *sc = vsc;
	struct mvmebus_mapresc *mr = (struct mvmebus_mapresc *) resc;

	bus_space_unmap(sc->sc_bust, mr->mr_handle, mr->mr_size);

	free(mr, M_DEVBUF);
}

int
mvmebus_probe(void *vsc, vme_addr_t vmeaddr, vme_size_t len, vme_am_t am,
    vme_datasize_t datasize,
    int (*callback)(void *, bus_space_tag_t, bus_space_handle_t), void *arg)
{
	bus_space_tag_t tag;
	bus_space_handle_t handle;
	vme_mapresc_t resc;
	vme_size_t offs;
	int rv;

	/* Get a temporary mapping to the VMEbus range */
	rv = mvmebus_map(vsc, vmeaddr, len, am, datasize, 0,
	    &tag, &handle, &resc);
	if (rv)
		return (rv);

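	/*
	 * If the caller supplied a callback, let it do the probing.
	 * Otherwise simply peek the whole region using the requested
	 * datasize; a bus error shows up as a non-zero result.
	 */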
	if (callback)
		rv = (*callback) (arg, tag, handle);
	else
		for (offs = 0; offs < len && rv == 0;) {
			switch (datasize) {
			case VME_D8:
				rv = bus_space_peek_1(tag, handle, offs, NULL);
				offs += 1;
				break;

			case VME_D16:
				rv = bus_space_peek_2(tag, handle, offs, NULL);
				offs += 2;
				break;

			case VME_D32:
				rv = bus_space_peek_4(tag, handle, offs, NULL);
				offs += 4;
				break;
			}
		}

	mvmebus_unmap(vsc, resc);

	return (rv);
}

/* ARGSUSED */
int
mvmebus_intmap(void *vsc, int level, int vector, vme_intr_handle_t *handlep)
{

	if (level < 1 || level > 7 || vector < 0x80 || vector > 0xff)
		return (EINVAL);

	/* This is rather gross */
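	/*
	 * Pack the IRQ level into bits 8-10 and the vector into bits 0-7
	 * of the opaque handle; mvmebus_intr_establish() and
	 * mvmebus_intr_disestablish() unpack it the same way.
	 */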
	*handlep = (void *) (int) ((level << 8) | vector);
	return (0);
}

/* ARGSUSED */
const struct evcnt *
mvmebus_intr_evcnt(void *vsc, vme_intr_handle_t handle)
{
	struct mvmebus_softc *sc = vsc;

	return (&sc->sc_evcnt[(((int) handle) >> 8) - 1]);
}

void *
mvmebus_intr_establish(void *vsc, vme_intr_handle_t handle, int prior,
    int (*func)(void *), void *arg)
{
	struct mvmebus_softc *sc;
	int level, vector, first;

	sc = vsc;

	/* Extract the interrupt's level and vector */
	level = ((int) handle) >> 8;
	vector = ((int) handle) & 0xff;

#ifdef DIAGNOSTIC
	if (vector < 0 || vector > 0xff) {
		printf("%s: Illegal vector offset: 0x%x\n",
		    device_xname(sc->sc_dev), vector);
		panic("mvmebus_intr_establish");
	}
	if (level < 1 || level > 7) {
		printf("%s: Illegal interrupt level: %d\n",
		    device_xname(sc->sc_dev), level);
		panic("mvmebus_intr_establish");
	}
#endif

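	/* Note whether this is the first handler hooked to this IRQ level */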
	first = (sc->sc_irqref[level]++ == 0);

	(*sc->sc_intr_establish)(sc->sc_chip, prior, level, vector, first,
	    func, arg, &sc->sc_evcnt[level - 1]);

	return ((void *) handle);
}

void
mvmebus_intr_disestablish(void *vsc, vme_intr_handle_t handle)
{
	struct mvmebus_softc *sc;
	int level, vector, last;

	sc = vsc;

	/* Extract the interrupt's level and vector */
	level = ((int) handle) >> 8;
	vector = ((int) handle) & 0xff;

#ifdef DIAGNOSTIC
	if (vector < 0 || vector > 0xff) {
		printf("%s: Illegal vector offset: 0x%x\n",
		    device_xname(sc->sc_dev), vector);
		panic("mvmebus_intr_disestablish");
	}
	if (level < 1 || level > 7) {
		printf("%s: Illegal interrupt level: %d\n",
		    device_xname(sc->sc_dev), level);
		panic("mvmebus_intr_disestablish");
	}
	if (sc->sc_irqref[level] == 0) {
		printf("%s: VMEirq#%d: Reference count already zero!\n",
		    device_xname(sc->sc_dev), level);
		panic("mvmebus_intr_disestablish");
	}
#endif

	last = (--(sc->sc_irqref[level]) == 0);

	(*sc->sc_intr_disestablish)(sc->sc_chip, level, vector, last,
	    &sc->sc_evcnt[level - 1]);
}

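/*
 * Under DIAGNOSTIC, these stubs are wired into the VMEbus DMA tag so that
 * a driver calling bus_dmamap_create()/bus_dmamem_alloc() directly, rather
 * than going through the vme(4) front-ends, is caught immediately.
 */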
#ifdef DIAGNOSTIC
/* ARGSUSED */
int
mvmebus_dummy_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegs,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{

	panic("Must use vme_dmamap_create() in place of bus_dmamap_create()");
	return (0);	/* Shut up the compiler */
}

/* ARGSUSED */
void
mvmebus_dummy_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	panic("Must use vme_dmamap_destroy() in place of bus_dmamap_destroy()");
}
#endif

/* ARGSUSED */
int
mvmebus_dmamap_create(
	void *vsc,
	vme_size_t len,
	vme_am_t am,
	vme_datasize_t datasize,
	vme_swap_t swap,
	int nsegs,
	vme_size_t segsz,
	vme_addr_t bound,
	int flags,
	bus_dmamap_t *mapp)
{
	struct mvmebus_softc *sc = vsc;
	struct mvmebus_dmamap *vmap;
	struct mvmebus_range *vr;
	vme_am_t cap, as;
	int i, rv;

	cap = MVMEBUS_AM2CAP(am);
	as = am & VME_AM_ADRSIZEMASK;

	/*
	 * Verify that we even stand a chance of satisfying
	 * the VMEbus address space and datasize requested.
	 */
	for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
		if (vr->vr_am == MVMEBUS_AM_DISABLED)
			continue;

		if (as == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    cap == (vr->vr_am & cap) && datasize <= vr->vr_datasize &&
		    len <= (vr->vr_vmeend - vr->vr_vmestart))
			break;
	}

	if (i == sc->sc_nslaves)
		return (EINVAL);

	if ((vmap = malloc(sizeof(*vmap), M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);


	rv = bus_dmamap_create(sc->sc_dmat, len, nsegs, segsz,
	    bound, flags, mapp);
	if (rv != 0) {
		free(vmap, M_DMAMAP);
		return (rv);
	}

	vmap->vm_am = am;
	vmap->vm_datasize = datasize;
	vmap->vm_swap = swap;
	vmap->vm_slave = vr;

	(*mapp)->_dm_cookie = vmap;

	return (0);
}

void
mvmebus_dmamap_destroy(void *vsc, bus_dmamap_t map)
{
	struct mvmebus_softc *sc = vsc;

	free(map->_dm_cookie, M_DMAMAP);
	bus_dmamap_destroy(sc->sc_dmat, map);
}

static int
mvmebus_dmamap_load_common(struct mvmebus_softc *sc, bus_dmamap_t map)
{
	struct mvmebus_dmamap *vmap = map->_dm_cookie;
	struct mvmebus_range *vr = vmap->vm_slave;
	bus_dma_segment_t *ds;
	vme_am_t cap, am;
	int i;

	cap = MVMEBUS_AM2CAP(vmap->vm_am);
	am = vmap->vm_am & VME_AM_ADRSIZEMASK;

	/*
	 * Traverse the list of segments which make up this map, and
	 * convert the CPU-relative addresses therein to VMEbus addresses.
	 */
	for (ds = &map->dm_segs[0]; ds < &map->dm_segs[map->dm_nsegs]; ds++) {
		/*
		 * First, see if this map's slave image can access the
		 * segment, otherwise we have to waste time scanning all
		 * the slave images.
		 */
		vr = vmap->vm_slave;
		if (am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    cap == (vr->vr_am & cap) &&
		    vmap->vm_datasize <= vr->vr_datasize &&
		    ds->_ds_cpuaddr >= vr->vr_locstart &&
		    ds->ds_len <= (vr->vr_vmeend - vr->vr_vmestart))
			goto found;

		for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
			if (vr->vr_am == MVMEBUS_AM_DISABLED)
				continue;

			/*
			 * Filter out any slave images which don't have the
			 * same VMEbus address modifier and datasize as
			 * this DMA map, and those which don't cover the
			 * physical address region containing the segment.
			 */
			if (vr != vmap->vm_slave &&
			    am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
			    cap == (vr->vr_am & cap) &&
			    vmap->vm_datasize <= vr->vr_datasize &&
			    ds->_ds_cpuaddr >= vr->vr_locstart &&
			    ds->ds_len <= (vr->vr_vmeend - vr->vr_vmestart))
				break;
		}

		/*
		 * Did we find an applicable slave image which covers this
		 * segment?
		 */
		if (i == sc->sc_nslaves) {
			/*
			 * XXX TODO:
			 *
			 * Bounce this segment via a bounce buffer allocated
			 * from this DMA map.
			 */
			printf("mvmebus_dmamap_load_common: bounce needed!\n");
			return (EINVAL);
		}

found:
		/*
		 * Generate the VMEbus address of this segment
		 */
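		/*
		 * For example (illustrative values): with vr_locstart ==
		 * 0x00000000 and vr_vmestart == 0x10000000, a segment at
		 * CPU address 0x00800000 is given the VMEbus address
		 * 0x10800000.
		 */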
		ds->ds_addr = (ds->_ds_cpuaddr - vr->vr_locstart) +
		    vr->vr_vmestart;
	}

	return (0);
}

int
mvmebus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	rv = bus_dmamap_load(sc->sc_dmat, map, buf, buflen, p, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

int
mvmebus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *chain, int flags)
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	rv = bus_dmamap_load_mbuf(sc->sc_dmat, map, chain, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

int
mvmebus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	rv = bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

int
mvmebus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	/*
	 * mvmebus_dmamem_alloc() will ensure that the physical memory
	 * backing these segments is 100% accessible in at least one
	 * of the board's VMEbus slave images.
	 */
	rv = bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

void
mvmebus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct mvmebus_softc *sc = t->_cookie;

	/* XXX Deal with bounce buffers */

	bus_dmamap_unload(sc->sc_dmat, map);
}

void
mvmebus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct mvmebus_softc *sc = t->_cookie;

	/* XXX Bounce buffers */

	bus_dmamap_sync(sc->sc_dmat, map, offset, len, ops);
}

#ifdef DIAGNOSTIC
/* ARGSUSED */
int
mvmebus_dummy_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t align,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	panic("Must use vme_dmamem_alloc() in place of bus_dmamem_alloc()");
}

/* ARGSUSED */
void
mvmebus_dummy_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	panic("Must use vme_dmamem_free() in place of bus_dmamem_free()");
}
#endif

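/*
 * Sketch of the expected driver-side sequence for DMA-able memory (via the
 * vme(4) front-ends named in the DIAGNOSTIC panics above, which dispatch
 * to the handlers installed in mvmebus_attach()):
 *
 *   1. vme_dmamem_alloc()  -> mvmebus_dmamem_alloc()   (CPU-relative segs)
 *   2. vme_dmamap_create() -> mvmebus_dmamap_create()  (same am/datasize)
 *   3. bus_dmamap_load_raw() on the new map            (VMEbus-relative segs)
 */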
/* ARGSUSED */
int
mvmebus_dmamem_alloc(void *vsc, vme_size_t len, vme_am_t am,
    vme_datasize_t datasize, vme_swap_t swap, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	extern paddr_t avail_start;
	struct mvmebus_softc *sc = vsc;
	struct mvmebus_range *vr;
	bus_addr_t low, high;
	bus_size_t bound;
	vme_am_t cap;
	int i;

	cap = MVMEBUS_AM2CAP(am);
	am &= VME_AM_ADRSIZEMASK;

	/*
	 * Find a slave mapping in the requested VMEbus address space.
	 */
	for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
		if (vr->vr_am == MVMEBUS_AM_DISABLED)
			continue;

		if (i == 0 && (flags & BUS_DMA_ONBOARD_RAM) != 0)
			continue;

		if (am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    cap == (vr->vr_am & cap) && datasize <= vr->vr_datasize &&
		    len <= (vr->vr_vmeend - vr->vr_vmestart))
			break;
	}
	if (i == sc->sc_nslaves)
		return (EINVAL);

	/*
	 * Set up the constraints so we can allocate physical memory which
	 * is visible in the requested address space
	 */
	low = uimax(vr->vr_locstart, avail_start);
	high = vr->vr_locstart + (vr->vr_vmeend - vr->vr_vmestart) + 1;
	bound = (bus_size_t) vr->vr_mask + 1;

	/*
	 * Allocate physical memory.
	 *
	 * Note: This fills in the segments with CPU-relative physical
	 * addresses. A further call to bus_dmamap_load_raw() (with a
	 * DMA map which specifies the same VMEbus address space and
	 * constraints as the call to here) must be made. The segments
	 * of the DMA map will then contain VMEbus-relative physical
	 * addresses of the memory allocated here.
	 */
	return _bus_dmamem_alloc_common(sc->sc_dmat, low, high,
	    len, 0, bound, segs, nsegs, rsegs, flags);
}

void
mvmebus_dmamem_free(void *vsc, bus_dma_segment_t *segs, int nsegs)
{
	struct mvmebus_softc *sc = vsc;

	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
}

int
mvmebus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	struct mvmebus_softc *sc = t->_cookie;

	return bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags);
}

void
mvmebus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{
	struct mvmebus_softc *sc = t->_cookie;

	bus_dmamem_unmap(sc->sc_dmat, kva, size);
}

paddr_t
mvmebus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t offset, int prot, int flags)
{
	struct mvmebus_softc *sc = t->_cookie;

	return bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, offset, prot, flags);
}

#ifdef DEBUG
static const char *
mvmebus_mod_string(vme_addr_t addr, vme_size_t len, vme_am_t am,
    vme_datasize_t ds)
{
	static const char *mode[] = {"BLT64)", "DATA)", "PROG)", "BLT32)"};
	static const char *dsiz[] = {"(", "(D8,", "(D16,", "(D16-D8,",
	"(D32,", "(D32,D8,", "(D32-D16,", "(D32-D8,"};
	static const char *adrfmt[] = { "A32:%08x-%08x ", "USR:%08x-%08x ",
	    "A16:%04x-%04x ", "A24:%06x-%06x " };
	static char mstring[40];

	snprintf(mstring, sizeof(mstring),
	    adrfmt[(am & VME_AM_ADRSIZEMASK) >> VME_AM_ADRSIZESHIFT],
	    addr, addr + len - 1);
	strlcat(mstring, dsiz[ds & 0x7], sizeof(mstring));

	if (MVMEBUS_AM_HAS_CAP(am)) {
		if (am & MVMEBUS_AM_CAP_DATA)
			strlcat(mstring, "D", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_PROG)
			strlcat(mstring, "P", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_USER)
			strlcat(mstring, "U", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_SUPER)
			strlcat(mstring, "S", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_BLK)
			strlcat(mstring, "B", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_BLKD64)
			strlcat(mstring, "6", sizeof(mstring));
		strlcat(mstring, ")", sizeof(mstring));
	} else {
		strlcat(mstring, ((am & VME_AM_PRIVMASK) == VME_AM_USER) ?
		    "USER," : "SUPER,", sizeof(mstring));
		strlcat(mstring, mode[am & VME_AM_MODEMASK], sizeof(mstring));
	}

	return (mstring);
}
#endif
825