xref: /netbsd-src/sys/arch/x68k/x68k/bus.c (revision 6b664a713479c31d4f17b38b42182a5d5fa21802)
1 /*	$NetBSD: bus.c,v 1.39 2024/01/07 07:58:35 isaki Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * bus_space(9) and bus_dma(9) implementation for NetBSD/x68k.
34  * These are default implementations; some buses may use their own.
35  */
36 
37 #include "opt_m68k_arch.h"
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: bus.c,v 1.39 2024/01/07 07:58:35 isaki Exp $");
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/kernel.h>
47 #include <sys/conf.h>
48 #include <sys/device.h>
49 #include <sys/proc.h>
50 
51 #include <uvm/uvm_extern.h>
52 
53 #include <m68k/cacheops.h>
54 #include <machine/bus.h>
55 
56 #include <dev/bus_dma/bus_dmamem_common.h>
57 
58 #if defined(M68040) || defined(M68060)
59 static inline void dmasync_flush(bus_addr_t, bus_size_t);
60 static inline void dmasync_inval(bus_addr_t, bus_size_t);
61 #endif
62 
int
x68k_bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend,
    bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags,
    bus_addr_t *bpap, bus_space_handle_t *bshp)
{
	/*
	 * bus_space_alloc() is not implemented on x68k: every caller
	 * gets EINVAL and no handle is ever produced (which is why
	 * x68k_bus_space_free() below can simply panic).
	 */
	return (EINVAL);
}
70 
void
x68k_bus_space_free(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{
	/*
	 * Since x68k_bus_space_alloc() above always fails, there is no
	 * allocated region that could legitimately be freed; reaching
	 * this function indicates a caller bug.
	 */
	panic("bus_space_free: shouldn't be here");
}
76 
77 
78 extern paddr_t avail_end;
79 
80 /*
81  * Common function for DMA map creation.  May be called by bus-specific
82  * DMA map creation functions.
83  */
84 int
x68k_bus_dmamap_create(bus_dma_tag_t t,bus_size_t size,int nsegments,bus_size_t maxsegsz,bus_size_t boundary,int flags,bus_dmamap_t * dmamp)85 x68k_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
86     bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
87 {
88 	struct x68k_bus_dmamap *map;
89 	void *mapstore;
90 	size_t mapsize;
91 
92 	/*
93 	 * Allocate and initialize the DMA map.  The end of the map
94 	 * is a variable-sized array of segments, so we allocate enough
95 	 * room for them in one shot.
96 	 *
97 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
98 	 * of ALLOCNOW notifies others that we've reserved these resources,
99 	 * and they are not to be freed.
100 	 *
101 	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
102 	 * the (nsegments - 1).
103 	 */
104 	mapsize = sizeof(struct x68k_bus_dmamap) +
105 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
106 	if ((mapstore = malloc(mapsize, M_DMAMAP,
107 	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
108 		return (ENOMEM);
109 
110 	memset(mapstore, 0, mapsize);
111 	map = (struct x68k_bus_dmamap *)mapstore;
112 	map->x68k_dm_size = size;
113 	map->x68k_dm_segcnt = nsegments;
114 	map->x68k_dm_maxmaxsegsz = maxsegsz;
115 	map->x68k_dm_boundary = boundary;
116 	map->x68k_dm_bounce_thresh = t->_bounce_thresh;
117 	map->x68k_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
118 	map->dm_maxsegsz = maxsegsz;
119 	map->dm_mapsize = 0;		/* no valid mappings */
120 	map->dm_nsegs = 0;
121 
122 	*dmamp = map;
123 	return (0);
124 }
125 
126 /*
127  * Common function for DMA map destruction.  May be called by bus-specific
128  * DMA map destruction functions.
129  */
void
x68k_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * The map and its trailing segment array were allocated as a
	 * single malloc(M_DMAMAP) chunk in x68k_bus_dmamap_create(),
	 * so a single free releases everything.
	 */
	free(map, M_DMAMAP);
}
136 
137 /*
138  * Common function for loading a DMA map with a linear buffer.  May
139  * be called by bus-specific DMA map load functions.
140  */
141 int
x68k_bus_dmamap_load(bus_dma_tag_t t,bus_dmamap_t map,void * buf,bus_size_t buflen,struct proc * p,int flags)142 x68k_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
143     bus_size_t buflen, struct proc *p, int flags)
144 {
145 	paddr_t lastaddr;
146 	int seg, error;
147 
148 	/*
149 	 * Make sure that on error condition we return "no valid mappings".
150 	 */
151 	map->dm_mapsize = 0;
152 	map->dm_nsegs = 0;
153 	KASSERT(map->dm_maxsegsz <= map->x68k_dm_maxmaxsegsz);
154 
155 	if (buflen > map->x68k_dm_size)
156 		return (EINVAL);
157 
158 	seg = 0;
159 	error = x68k_bus_dmamap_load_buffer(map, buf, buflen, p, flags,
160 	    &lastaddr, &seg, 1);
161 	if (error == 0) {
162 		map->dm_mapsize = buflen;
163 		map->dm_nsegs = seg + 1;
164 	}
165 	return (error);
166 }
167 
168 /*
169  * Like x68k_bus_dmamap_load(), but for mbufs.
170  */
171 int
x68k_bus_dmamap_load_mbuf(bus_dma_tag_t t,bus_dmamap_t map,struct mbuf * m0,int flags)172 x68k_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
173     int flags)
174 {
175 	paddr_t lastaddr;
176 	int seg, error, first;
177 	struct mbuf *m;
178 
179 	/*
180 	 * Make sure that on error condition we return "no valid mappings."
181 	 */
182 	map->dm_mapsize = 0;
183 	map->dm_nsegs = 0;
184 	KASSERT(map->dm_maxsegsz <= map->x68k_dm_maxmaxsegsz);
185 
186 #ifdef DIAGNOSTIC
187 	if ((m0->m_flags & M_PKTHDR) == 0)
188 		panic("x68k_bus_dmamap_load_mbuf: no packet header");
189 #endif
190 
191 	if (m0->m_pkthdr.len > map->x68k_dm_size)
192 		return (EINVAL);
193 
194 	first = 1;
195 	seg = 0;
196 	error = 0;
197 	for (m = m0; m != NULL && error == 0; m = m->m_next) {
198 		if (m->m_len == 0)
199 			continue;
200 		error = x68k_bus_dmamap_load_buffer(map, m->m_data, m->m_len,
201 		    NULL, flags, &lastaddr, &seg, first);
202 		first = 0;
203 	}
204 	if (error == 0) {
205 		map->dm_mapsize = m0->m_pkthdr.len;
206 		map->dm_nsegs = seg + 1;
207 	}
208 	return (error);
209 }
210 
211 /*
212  * Like x68k_bus_dmamap_load(), but for uios.
213  */
/*
 * Like x68k_bus_dmamap_load(), but for uios.
 *
 * NOTE: the real implementation is compiled out (#if 0) and this
 * function currently always panics.  The dead code below is kept as a
 * reference implementation for whoever enables it.
 */
int
x68k_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
#if 0
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->x68k_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_lwp ? uio->uio_lwp->l_proc : NULL;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = x68k_bus_dmamap_load_buffer(map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
#else
	panic ("x68k_bus_dmamap_load_uio: not implemented");
#endif
}
270 
271 /*
272  * Like x68k_bus_dmamap_load(), but for raw memory allocated with
273  * bus_dmamem_alloc().
274  */
/*
 * Like x68k_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 *
 * Not implemented on x68k; any call is a bug in the calling driver.
 */
int
x68k_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("x68k_bus_dmamap_load_raw: not implemented");
}
282 
283 /*
284  * Common function for unloading a DMA map.  May be called by
285  * bus-specific DMA map unload functions.
286  */
void
x68k_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.  dm_maxsegsz is restored to the creation-time
	 * maximum in case a caller lowered it for this mapping.
	 */
	map->dm_maxsegsz = map->x68k_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}
299 
300 #if defined(M68040) || defined(M68060)
301 static inline void
dmasync_flush(bus_addr_t addr,bus_size_t len)302 dmasync_flush(bus_addr_t addr, bus_size_t len)
303 {
304 	bus_addr_t end = addr+len;
305 
306 	if (len <= 1024) {
307 		addr = addr & ~0xF;
308 
309 		do {
310 			DCFL(addr);
311 			addr += 16;
312 		} while (addr < end);
313 	} else {
314 		addr = m68k_trunc_page(addr);
315 
316 		do {
317 			DCFP(addr);
318 			addr += PAGE_SIZE;
319 		} while (addr < end);
320 	}
321 }
322 
323 static inline void
dmasync_inval(bus_addr_t addr,bus_size_t len)324 dmasync_inval(bus_addr_t addr, bus_size_t len)
325 {
326 	bus_addr_t end = addr+len;
327 
328 	if (len <= 1024) {
329 		addr = addr & ~0xF;
330 
331 		do {
332 			DCFL(addr);
333 			ICPL(addr);
334 			addr += 16;
335 		} while (addr < end);
336 	} else {
337 		addr = m68k_trunc_page(addr);
338 
339 		do {
340 			DCPL(addr);
341 			ICPP(addr);
342 			addr += PAGE_SIZE;
343 		} while (addr < end);
344 	}
345 }
346 #endif
347 
348 /*
349  * Common function for DMA map synchronization.  May be called
350  * by bus-specific DMA map synchronization functions.
351  */
352 void
x68k_bus_dmamap_sync(bus_dma_tag_t t,bus_dmamap_t map,bus_addr_t offset,bus_size_t len,int ops)353 x68k_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
354     bus_size_t len, int ops)
355 {
356 #if defined(M68040) || defined(M68060)
357 	bus_dma_segment_t *ds = map->dm_segs;
358 	bus_addr_t seg;
359 	int i;
360 
361 	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTWRITE)) == 0)
362 		return;
363 #if defined(M68020) || defined(M68030)
364 	if (mmutype != MMU_68040) {
365 		if ((ops & BUS_DMASYNC_POSTWRITE) == 0)
366 			return;	/* no copyback cache */
367 		ICIA();		/* no per-page/per-line control */
368 		DCIA();
369 		return;
370 	}
371 #endif
372 	if (offset >= map->dm_mapsize)
373 		return;	/* driver bug; warn it? */
374 	if (offset+len > map->dm_mapsize)
375 		len = map->dm_mapsize; /* driver bug; warn it? */
376 
377 	i = 0;
378 	while (ds[i].ds_len <= offset) {
379 		offset -= ds[i++].ds_len;
380 		continue;
381 	}
382 	while (len > 0) {
383 		seg = ds[i].ds_len - offset;
384 		if (seg > len)
385 			seg = len;
386 		if (mmutype == MMU_68040 && (ops & BUS_DMASYNC_PREWRITE))
387 			dmasync_flush(ds[i].ds_addr+offset, seg);
388 		if (ops & BUS_DMASYNC_POSTREAD)
389 			dmasync_inval(ds[i].ds_addr+offset, seg);
390 		offset = 0;
391 		len -= seg;
392 		i++;
393 	}
394 #else  /* no 040/060 */
395 	if ((ops & BUS_DMASYNC_POSTWRITE)) {
396 		ICIA();		/* no per-page/per-line control */
397 		DCIA();
398 	}
399 #endif
400 }
401 
402 /*
403  * Common function for DMA-safe memory allocation.  May be called
404  * by bus-specific DMA memory allocation functions.
405  */
int
x68k_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	/*
	 * Thin wrapper: allocate anywhere in physical memory from 0 up
	 * to the last whole page below avail_end.
	 */
	return (x68k_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}
415 
416 /*
417  * Common function for freeing DMA-safe memory.  May be called by
418  * bus-specific DMA memory free functions.
419  */
void
x68k_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	/* Delegates entirely to the MI bus_dma helper. */
	_bus_dmamem_free_common(t, segs, nsegs);
}
426 
427 /*
428  * Common function for mapping DMA-safe memory.  May be called by
429  * bus-specific DMA memory map functions.
430  */
int
x68k_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{

	/*
	 * XXX BUS_DMA_COHERENT is not honored: the final argument
	 * (extra pmap flags) to the MI helper is hard-coded to 0.
	 */
	return (_bus_dmamem_map_common(t, segs, nsegs, size, kvap, flags, 0));
}
439 
440 /*
441  * Common function for unmapping DMA-safe memory.  May be called by
442  * bus-specific DMA memory unmapping functions.
443  */
void
x68k_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/* Delegates entirely to the MI bus_dma helper. */
	_bus_dmamem_unmap_common(t, kva, size);
}
450 
451 /*
452  * Common function for mmap(2)'ing DMA-safe memory.  May be called by
453  * bus-specific DMA mmap(2)'ing functions.
454  */
455 paddr_t
x68k_bus_dmamem_mmap(bus_dma_tag_t t,bus_dma_segment_t * segs,int nsegs,off_t off,int prot,int flags)456 x68k_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
457     off_t off, int prot, int flags)
458 {
459 	bus_addr_t rv;
460 
461 	rv = _bus_dmamem_mmap_common(t, segs, nsegs, off, prot, flags);
462 	if (rv == (bus_addr_t)-1)
463 		return (-1);
464 
465 	return (m68k_btop((char *)rv));
466 }
467 
468 
469 /**********************************************************************
470  * DMA utility functions
471  **********************************************************************/
472 
473 /*
474  * Utility function to load a linear buffer.  lastaddrp holds state
475  * between invocations (for multiple-buffer loads).  segp contains
476  * the starting segment on entrance, and the ending segment on exit.
477  * first indicates if this is the first invocation of this function.
478  */
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 *
 * Walks [buf, buf+buflen) a page (or partial page) at a time,
 * translating each chunk to a physical address and either extending
 * the current DMA segment or starting a new one.  Returns 0 on
 * success, EINVAL if any chunk lies at or above the tag's bounce
 * threshold, or EFBIG if the buffer needs more than x68k_dm_segcnt
 * segments.
 */
int
x68k_bus_dmamap_load_buffer(bus_dmamap_t map, void *buf, bus_size_t buflen,
     struct proc *p, int flags, paddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	/* User buffers are translated via the owning process's pmap. */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	/* Mask that extracts the boundary-aligned "window" of an address. */
	bmask  = ~(map->x68k_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 * NOTE(review): the return value is discarded; a failed
		 * lookup would leave curaddr stale — callers presumably
		 * guarantee the range is mapped.  Confirm.
		 */
		(void) pmap_extract(pmap, vaddr, &curaddr);

		/*
		 * If we're beyond the bounce threshold, notify
		 * the caller.
		 */
		if (map->x68k_dm_bounce_thresh != 0 &&
		    curaddr >= map->x68k_dm_bounce_thresh)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 * At most one page per iteration, less if the buffer
		 * ends or the chunk starts mid-page.
		 */
		sgsize = PAGE_SIZE - m68k_page_offset(vaddr);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->x68k_dm_boundary > 0) {
			baddr = (curaddr + map->x68k_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			/*
			 * Coalesce only when physically contiguous, the
			 * merged length fits dm_maxsegsz, and both ends
			 * stay within the same boundary window.
			 */
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->x68k_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				/* Out of segments: bail; buflen>0 → EFBIG. */
				if (++seg >= map->x68k_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
566 
567 /*
568  * Allocate physical memory from the given physical address range.
569  * Called by DMA-safe memory allocation methods.
570  */
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
x68k_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, paddr_t low, paddr_t high)
{

	/* Delegates entirely to the MI bus_dma range allocator. */
	return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary,
					       segs, nsegs, rsegs, flags,
					       low, high));
}
581