/*	$NetBSD: bus.c,v 1.69 2023/12/07 16:56:09 thorpej Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_m68k_arch.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus.c,v 1.69 2023/12/07 16:56:09 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/extent.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/vmem_impl.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <m68k/cacheops.h>
#define	_ATARI_BUS_DMA_PRIVATE
#include <sys/bus.h>
/*
 * Vmem arena to manage all memory space, including I/O ranges.  Allocate
 * static storage for 16 regions' worth of boundary tags initially.
 *
 * This means that the fixed static storage is only used for registering
 * the found memory regions and the bus-mapping of the console.
 */
#define	IOMEM_BTAG_COUNT	VMEM_EST_BTCOUNT(1, 16)
static struct vmem iomem_arena_store;
static struct vmem_btag iomem_btag_store[IOMEM_BTAG_COUNT];
static vmem_t *iomem_arena;

static int	_bus_dmamap_load_buffer(bus_dma_tag_t tag, bus_dmamap_t,
		    void *, bus_size_t, struct vmspace *, int, paddr_t *,
		    int *, int);
static int	bus_mem_add_mapping(bus_space_tag_t t, bus_addr_t bpa,
		    bus_size_t size, int flags, bus_space_handle_t *bsph);

extern paddr_t avail_end;
/*
 * We need these for the early memory allocator.  The idea is this:
 * allocate VA-space through ptextra (atari_init.c:start_c()).  When
 * the VA and size of this space are known, call bootm_init().
 * Until the VM system is up, bus_mem_add_mapping() allocates its virtual
 * addresses from this arena.
 *
 * This allows the console code to use the bus_space interface at a
 * very early stage of the system configuration.
 */
static pt_entry_t	*bootm_ptep;
static vaddr_t		bootm_start;
static vaddr_t		bootm_end;	/* inclusive */
#define	BOOTM_BTAG_COUNT	VMEM_EST_BTCOUNT(1, 32)
static struct vmem bootm_arena_store;
static struct vmem_btag bootm_btag_store[BOOTM_BTAG_COUNT];
static vmem_t *bootm_arena;

static vaddr_t	bootm_alloc(paddr_t pa, u_long size, int flags);
static int	bootm_free(vaddr_t va, u_long size);
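
/*
 * Illustrative sketch (not part of this file): how early console code
 * ends up in the bootm path.  The tag and register constants below are
 * hypothetical; the point is only that bus_space_map() already works
 * before kernel_map exists, because bus_mem_add_mapping() falls back
 * to bootm_alloc() for its virtual addresses.
 *
 *	bus_space_handle_t ioh;
 *
 *	// Runs from early console init, after bootm_init():
 *	if (bus_space_map(iot, EXAMPLE_CON_REGS, EXAMPLE_CON_SIZE,
 *	    0, &ioh) != 0)
 *		return;		// no VA left in the bootm arena
 *	// ... register access via bus_space_read_N()/bus_space_write_N()
 */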

void
bootm_init(vaddr_t va, void *ptep, vsize_t size)
{

	bootm_start = va;
	bootm_end = va + size - 1;
	bootm_ptep = (pt_entry_t *)ptep;

	bootm_arena = vmem_init(&bootm_arena_store,
	    "bootmem",			/* name */
	    0,				/* addr */
	    0,				/* size */
	    PAGE_SIZE,			/* quantum */
	    NULL,			/* importfn */
	    NULL,			/* releasefn */
	    NULL,			/* source */
	    0,				/* qcache_max */
	    VM_NOSLEEP | VM_PRIVTAGS,
	    IPL_NONE);

	vmem_add_bts(bootm_arena, bootm_btag_store, BOOTM_BTAG_COUNT);
	vmem_add(bootm_arena, va, size, VM_NOSLEEP);
}

vaddr_t
bootm_alloc(paddr_t pa, u_long size, int flags)
{
	pt_entry_t *pg, *epg;
	pt_entry_t pg_proto;
	vmem_addr_t rva;
	vaddr_t va;

	if (vmem_alloc(bootm_arena, size, VM_NOSLEEP, &rva) != 0) {
		printf("bootm_alloc fails! Not enough fixed boundary tags?\n");
		printf("Requested extent: pa=%lx, size=%lx\n",
		    (u_long)pa, size);
		return 0;
	}

	pg = &bootm_ptep[btoc(rva - bootm_start)];
	epg = &pg[btoc(size)];
	va = rva;
	pg_proto = pa | PG_RW | PG_V;
	if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0)
		pg_proto |= PG_CI;
	while (pg < epg) {
		*pg++ = pg_proto;
		pg_proto += PAGE_SIZE;
#if defined(M68040) || defined(M68060)
		if (mmutype == MMU_68040) {
			DCFP(pa);
			pa += PAGE_SIZE;
		}
#endif
		TBIS(va);
		va += PAGE_SIZE;
	}
	return rva;
}

int
bootm_free(vaddr_t va, u_long size)
{

	if ((va < bootm_start) || ((va + size - 1) > bootm_end))
		return 0;	/* Not for us! */
	vmem_free(bootm_arena, va, size);
	return 1;
}

void
atari_bus_space_arena_init(paddr_t startpa, paddr_t endpa)
{
	vmem_size_t size;

	/*
	 * Initialize the I/O mem vmem arena.
	 *
	 * Note: we don't have to check the return value, since an
	 * arena backed by preallocated static boundary-tag storage
	 * can never fail to be created.
	 *
	 * N.B. The iomem arena manages _all_ physical addresses
	 * on the machine.  When the amount of RAM is found, all
	 * extents of RAM are allocated from the arena.
	 */

	iomem_arena = vmem_init(&iomem_arena_store,
	    "iomem",			/* name */
	    0,				/* addr */
	    0,				/* size */
	    1,				/* quantum */
	    NULL,			/* importfn */
	    NULL,			/* releasefn */
	    NULL,			/* source */
	    0,				/* qcache_max */
	    VM_NOSLEEP | VM_PRIVTAGS,
	    IPL_NONE);

	vmem_add_bts(iomem_arena, iomem_btag_store, IOMEM_BTAG_COUNT);

	/*
	 * XXX kern/57748: if the range spans the entire address space,
	 * endpa - startpa + 1 wraps to 0; back off by one byte rather
	 * than passing a zero size to vmem_add().
	 */
	size = (vmem_size_t)(endpa - startpa) + 1;
	if (size == 0) {
		size--;
	}
	vmem_add(iomem_arena, startpa, size, VM_NOSLEEP);
}

int
atari_bus_space_alloc_physmem(paddr_t startpa, paddr_t endpa)
{

	return vmem_xalloc_addr(iomem_arena, startpa, endpa - startpa,
	    VM_NOSLEEP);
}

int
bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *mhp)
{
	int error;

	/*
	 * Before we go any further, let's make sure that this
	 * region is available.
	 */
	error = vmem_xalloc_addr(iomem_arena, bpa + t->base, size,
	    VM_NOSLEEP);
	if (error != 0)
		return error;

	error = bus_mem_add_mapping(t, bpa, size, flags, mhp);
	if (error != 0) {
		vmem_xfree(iomem_arena, bpa + t->base, size);
	}
	return error;
}

int
bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend,
    bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags,
    bus_addr_t *bpap, bus_space_handle_t *bshp)
{
	vmem_addr_t bpa;
	int error;

	/*
	 * Do the requested allocation.
	 */
	error = vmem_xalloc(iomem_arena, size,
	    alignment,			/* align */
	    0,				/* phase */
	    boundary,			/* boundary */
	    rstart + t->base,		/* minaddr */
	    rend + t->base,		/* maxaddr */
	    VM_BESTFIT | VM_NOSLEEP,
	    &bpa);
	if (error != 0)
		return error;

	/*
	 * Map the bus physical address to a kernel virtual address.
	 */
	error = bus_mem_add_mapping(t, bpa, size, flags, bshp);
	if (error != 0) {
		vmem_xfree(iomem_arena, bpa, size);
	}

	*bpap = bpa;

	return error;
}

static int
bus_mem_add_mapping(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	vaddr_t va;
	paddr_t pa, endpa;

	pa = m68k_trunc_page(bpa + t->base);
	endpa = m68k_round_page((bpa + t->base + size) - 1);

#ifdef DIAGNOSTIC
	if (endpa <= pa)
		panic("%s: overflow", __func__);
#endif

	if (kernel_map == NULL) {
		/*
		 * The VM system is not yet operational; allocate from
		 * a special pool.
		 */
		va = bootm_alloc(pa, endpa - pa, flags);
		if (va == 0)
			return ENOMEM;
		*bshp = va + (bpa & PGOFSET);
		return 0;
	}

	va = uvm_km_alloc(kernel_map, endpa - pa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0)
		return ENOMEM;

	*bshp = va + (bpa & PGOFSET);

	for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pt_entry_t *ptep, npte;

		pmap_enter(pmap_kernel(), (vaddr_t)va, pa,
		    VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE);

		ptep = kvtopte(va);
		npte = *ptep & ~PG_CMASK;

		if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0)
			npte |= PG_CI;
		else if (mmutype == MMU_68040)
			npte |= PG_CCB;

		*ptep = npte;
	}
	pmap_update(pmap_kernel());
	TBIAS();
	return 0;
}

void
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t va, endva;
	paddr_t bpa;

	va = m68k_trunc_page(bsh);
	endva = m68k_round_page(((char *)bsh + size) - 1);
#ifdef DIAGNOSTIC
	if (endva < va)
		panic("%s: overflow", __func__);
#endif

	(void)pmap_extract(pmap_kernel(), va, &bpa);
	bpa += ((paddr_t)bsh & PGOFSET);

	/*
	 * Free the kernel virtual mapping.
	 */
	if (!bootm_free(va, endva - va)) {
		pmap_remove(pmap_kernel(), va, endva);
		pmap_update(pmap_kernel());
		uvm_km_free(kernel_map, va, endva - va, UVM_KMF_VAONLY);
	}

	/*
	 * Mark the region as free in the iomem arena.
	 */
	vmem_xfree(iomem_arena, bpa, size);
}

/*
 * Get a new handle for a subregion of an already-mapped area of bus space.
 */
int
bus_space_subregion(bus_space_tag_t t, bus_space_handle_t memh,
    bus_size_t off, bus_size_t sz, bus_space_handle_t *mhp)
{

	*mhp = memh + off;
	return 0;
}
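
/*
 * Illustrative sketch (hypothetical driver code): a typical use of
 * bus_space_subregion() is to hand a sub-unit a handle covering only
 * its register bank within a larger mapping.  The names ioh,
 * UNIT_REG_OFF and UNIT_REG_SIZE are made up for the example.
 *
 *	bus_space_handle_t unit_ioh;
 *
 *	if (bus_space_subregion(t, ioh, UNIT_REG_OFF, UNIT_REG_SIZE,
 *	    &unit_ioh) == 0) {
 *		// unit_ioh can now be used with bus_space_read_N()
 *		// etc., but must NOT be passed to bus_space_unmap().
 *	}
 */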

paddr_t
bus_space_mmap(bus_space_tag_t t, bus_addr_t addr, off_t off, int prot,
    int flags)
{

	/*
	 * "addr" is the base address of the device we're mapping.
	 * "off" is the offset into that device.
	 *
	 * Note we are called for each "page" in the device that
	 * the upper layers want to map.
	 */
	return m68k_btop(addr + off);
}
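
/*
 * Illustrative sketch (hypothetical driver code): a character device's
 * d_mmap entry point usually just range-checks each page offset and
 * forwards it here.  The driver, softc and its fields are made up.
 *
 *	paddr_t
 *	foo_mmap(dev_t dev, off_t off, int prot)
 *	{
 *		struct foo_softc *sc =
 *		    device_lookup_private(&foo_cd, minor(dev));
 *
 *		if (off < 0 || off >= sc->sc_regsize)
 *			return -1;
 *		return bus_space_mmap(sc->sc_bst, sc->sc_regaddr, off,
 *		    prot, 0);
 *	}
 */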

static size_t
_bus_dmamap_mapsize(int const nsegments)
{

	KASSERT(nsegments > 0);
	return sizeof(struct atari_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct atari_bus_dmamap *map;
	void *mapstore;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
	    (flags & BUS_DMA_NOWAIT) != 0 ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return ENOMEM;

	map = (struct atari_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return 0;
}
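
/*
 * Illustrative sketch (hypothetical driver code): the usual lifecycle
 * of a DMA map built on the functions in this file.  The softc, sizes
 * and error handling are made up; the entry points are the standard
 * bus_dma(9) ones.
 *
 *	bus_dmamap_t dmam;
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MAXBSIZE, 1, MAXBSIZE,
 *	    0, BUS_DMA_NOWAIT, &dmam);
 *	...
 *	error = bus_dmamap_load(sc->sc_dmat, dmam, buf, len, NULL,
 *	    BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->sc_dmat, dmam, 0, len, BUS_DMASYNC_PREWRITE);
 *	// ... start the DMA transfer and wait for completion ...
 *	bus_dmamap_sync(sc->sc_dmat, dmam, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, dmam);
 *	bus_dmamap_destroy(sc->sc_dmat, dmam);
 */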

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return EINVAL;

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("%s: no packet header", __func__);
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("%s: not implemented", __func__);
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
#if defined(M68040) || defined(M68060)
	bus_addr_t p, e, ps, pe;
	bus_size_t seglen;
	bus_dma_segment_t *seg;
	int i;
#endif

#if defined(M68020) || defined(M68030)
#if defined(M68040) || defined(M68060)
	if (cputype == CPU_68020 || cputype == CPU_68030)
#endif
		/* assume no L2 physical cache */
		return;
#endif

#if defined(M68040) || defined(M68060)
	/* If the whole DMA map is uncached, do nothing. */
	if ((map->_dm_flags & BUS_DMA_COHERENT) != 0)
		return;

	/* Short-circuit for unsupported `ops' */
	if ((ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) == 0)
		return;

	/*
	 * Flush/purge the cache.
	 */
	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		seg = &map->dm_segs[i];
		if (seg->ds_len <= offset) {
			/* Segment irrelevant - before requested offset */
			offset -= seg->ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		seglen = seg->ds_len - offset;
		if (seglen > len)
			seglen = len;

		ps = seg->ds_addr + offset;
		pe = ps + seglen;

		if ((ops & BUS_DMASYNC_PREWRITE) != 0) {
			p = ps & ~CACHELINE_MASK;
			e = (pe + CACHELINE_MASK) & ~CACHELINE_MASK;

			/* flush a cacheline at a time until 128-byte aligned */
			while ((p < e) && (p & (CACHELINE_SIZE * 8 - 1)) != 0) {
				DCFL(p);
				p += CACHELINE_SIZE;
			}

			/* flush cachelines, 128 bytes (8 lines) at a time,
			   until page aligned */
			while ((p < e) && (p & PAGE_MASK) != 0) {
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
			}

			/* flush whole pages */
			while (p + PAGE_SIZE <= e) {
				DCFP(p);
				p += PAGE_SIZE;
			}

			/* flush cachelines, 128 bytes at a time */
			while (p + CACHELINE_SIZE * 8 <= e) {
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
				DCFL(p);
				p += CACHELINE_SIZE;
			}

			/* flush any remaining cachelines */
			while (p < e) {
				DCFL(p);
				p += CACHELINE_SIZE;
			}
		}

		/*
		 * Normally, the `PREREAD' flag instructs us to purge the
		 * cache for the specified offset and length.  However, if
		 * the offset/length is not aligned to a cacheline boundary,
		 * we may end up purging some legitimate data from the
		 * start/end of the cache.  In such a case, *flush* the
		 * cachelines at the start and end of the required region.
		 */
		else if ((ops & BUS_DMASYNC_PREREAD) != 0) {
			/* flush cacheline on start boundary */
			if ((ps & CACHELINE_MASK) != 0) {
				DCFL(ps & ~CACHELINE_MASK);
			}

			p = (ps + CACHELINE_MASK) & ~CACHELINE_MASK;
			e = pe & ~CACHELINE_MASK;

			/* purge a cacheline at a time until 128-byte aligned */
			while ((p < e) && (p & (CACHELINE_SIZE * 8 - 1)) != 0) {
				DCPL(p);
				p += CACHELINE_SIZE;
			}

			/* purge cachelines, 128 bytes (8 lines) at a time,
			   until page aligned */
			while ((p < e) && (p & PAGE_MASK) != 0) {
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
			}

			/* purge whole pages */
			while (p + PAGE_SIZE <= e) {
				DCPP(p);
				p += PAGE_SIZE;
			}

			/* purge cachelines, 128 bytes at a time */
			while (p + CACHELINE_SIZE * 8 <= e) {
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
				DCPL(p);
				p += CACHELINE_SIZE;
			}

			/* purge any remaining cachelines */
			while (p < e) {
				DCPL(p);
				p += CACHELINE_SIZE;
			}

			/* flush cacheline on end boundary */
			if (p < pe) {
				DCFL(p);
			}
		}
		offset = 0;
		len -= seglen;
	}
#endif /* defined(M68040) || defined(M68060) */
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	return bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, trunc_page(avail_end));
}
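
/*
 * Illustrative sketch (hypothetical driver code): allocating a
 * DMA-safe buffer and wiring it into a map.  The sizes and softc are
 * made up; the calls are the standard bus_dma(9) entry points.  Note
 * that since _bus_dmamap_load_raw() is unimplemented here, the buffer
 * must be mapped into KVA and loaded via bus_dmamap_load().
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
 *		    &kva, BUS_DMA_NOWAIT);
 *	// ... bus_dmamap_load(sc->sc_dmat, dmam, kva, size, NULL, ...)
 *	// Tear down in reverse order: unload, unmap, then free.
 */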

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr, offset;
	struct pglist mlist;
	int curseg;

	offset = t->_displacement;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr - offset);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr, offset;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	offset = t->_displacement;

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("%s: size botch", __func__);
			pmap_enter(pmap_kernel(), va, addr - offset,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((vaddr_t)kva & PGOFSET)
		panic("%s", __func__);
#endif

	size = round_page(size);

	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
	int i, offset;

	offset = t->_displacement;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if ((off & PGOFSET) != 0)
			panic("%s: offset unaligned", __func__);
		if ((segs[i].ds_addr & PGOFSET) != 0)
			panic("%s: segment unaligned", __func__);
		if ((segs[i].ds_len & PGOFSET) != 0)
			panic("%s: segment size not multiple of page size",
			    __func__);
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return m68k_btop((char *)segs[i].ds_addr - offset + off);
	}

	/* Page not found. */
	return -1;
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, offset, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	offset = t->_displacement;

	pmap = vm_map_pmap(&vm->vm_map);

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		(void)pmap_extract(pmap, vaddr, &curaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((vaddr_t)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr + offset;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr + offset;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return EFBIG;		/* XXX better return value here? */
	return 0;
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	bus_addr_t offset;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	offset = t->_displacement;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error != 0)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_addr = lastaddr + offset;
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("%s", __func__);
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr + offset;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}