Lines Matching +full:no +full:- +full:map
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2012-2015 Ian Lepore
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
32 * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
159 bus_dmamap_t map, void *buf, bus_size_t buflen, int flags);
160 static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
170 #define dmat_alignment(dmat) ((dmat)->alignment)
172 #define dmat_boundary(dmat) ((dmat)->boundary)
173 #define dmat_flags(dmat) ((dmat)->flags)
174 #define dmat_highaddr(dmat) ((dmat)->highaddr)
175 #define dmat_lowaddr(dmat) ((dmat)->lowaddr)
176 #define dmat_lockfunc(dmat) ((dmat)->lockfunc)
177 #define dmat_lockfuncarg(dmat) ((dmat)->lockfuncarg)
178 #define dmat_maxsegsz(dmat) ((dmat)->maxsegsz)
179 #define dmat_nsegments(dmat) ((dmat)->nsegments)
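The dmat_*() accessors above hide the machine-dependent layout of the tag from code that only needs the values. As an illustration only (not taken from this file), a helper written purely against these accessors might clamp a segment to the tag's boundary and maximum segment size:

/* Hypothetical helper, illustration only: how much of a segment starting
 * at curaddr fits without crossing the tag's boundary or exceeding its
 * maximum segment size. */
static bus_size_t
segment_room(bus_dma_tag_t dmat, bus_addr_t curaddr)
{
	bus_size_t sgsize;

	sgsize = dmat_maxsegsz(dmat);
	if (dmat_boundary(dmat) != 0)
		sgsize = MIN(sgsize, dmat_boundary(dmat) -
		    (curaddr & (dmat_boundary(dmat) - 1)));
	return (sgsize);
}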
211 * operation is not guaranteed for write-back caches in busdma_init()
237 * but there's no RAM in that zone, then we avoid allocating resources to bounce
242 * same value on 32-bit architectures) as their lowaddr constraint, and we can't
269 return (dmat->flags & BUS_DMA_EXCL_BOUNCE); in exclusion_bounce()
279 return (!vm_addr_align_ok(addr, dmat->alignment)); in alignment_bounce()
294 cacheline_bounce(bus_dmamap_t map, bus_addr_t addr, bus_size_t size) in cacheline_bounce() argument
297 if (map->flags & (DMAMAP_DMAMEM_ALLOC | DMAMAP_COHERENT | DMAMAP_MBUF)) in cacheline_bounce()
305 * This is used to quick-check whether we need to do the more expensive work of
306 * checking the DMA page-by-page looking for alignment and exclusion bounces.
309 * matter because we only look at the low-order bits, which are the same in both
317 might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr, in might_bounce() argument
321 KASSERT(map->flags & DMAMAP_DMAMEM_ALLOC || in might_bounce()
322 dmat->alignment <= PAGE_SIZE, in might_bounce()
325 __func__, dmat->alignment)); in might_bounce()
327 return (!(map->flags & DMAMAP_DMAMEM_ALLOC) && in might_bounce()
328 ((dmat->flags & BUS_DMA_EXCL_BOUNCE) || in might_bounce()
330 cacheline_bounce(map, addr, size))); in might_bounce()
341 must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr, in must_bounce() argument
345 if (cacheline_bounce(map, paddr, size)) in must_bounce()
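Taken together, the helpers above implement a two-level test: might_bounce() is the cheap whole-buffer gate run once per load, while must_bounce() is the per-page check run only when that gate fires. A hedged restatement of the gate (not the verbatim source, but built from the helpers and flags shown above) reads:

/* Hedged restatement, illustration only: a load can skip the expensive
 * per-page must_bounce() walk only when none of the three bounce reasons
 * can apply to the buffer as a whole. */
static bool
load_may_need_bouncing(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_addr_t addr, bus_size_t size)
{
	/* Buffers from bus_dmamem_alloc() already satisfy every constraint. */
	if (map->flags & DMAMAP_DMAMEM_ALLOC)
		return (false);
	return (exclusion_bounce(dmat) || alignment_bounce(dmat, addr) ||
	    cacheline_bounce(map, addr, size));
}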
376 ("dma tag alignment %lu, must be non-zero power of 2", alignment)); in bus_dma_tag_create()
382 /* Filters are no longer supported. */ in bus_dma_tag_create()
394 newtag->alignment = alignment; in bus_dma_tag_create()
395 newtag->boundary = boundary; in bus_dma_tag_create()
396 newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); in bus_dma_tag_create()
397 newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + in bus_dma_tag_create()
398 (PAGE_SIZE - 1); in bus_dma_tag_create()
399 newtag->maxsize = maxsize; in bus_dma_tag_create()
400 newtag->nsegments = nsegments; in bus_dma_tag_create()
401 newtag->maxsegsz = maxsegsz; in bus_dma_tag_create()
402 newtag->flags = flags; in bus_dma_tag_create()
403 newtag->map_count = 0; in bus_dma_tag_create()
405 newtag->lockfunc = lockfunc; in bus_dma_tag_create()
406 newtag->lockfuncarg = lockfuncarg; in bus_dma_tag_create()
408 newtag->lockfunc = _busdma_dflt_lock; in bus_dma_tag_create()
409 newtag->lockfuncarg = NULL; in bus_dma_tag_create()
414 newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); in bus_dma_tag_create()
415 newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); in bus_dma_tag_create()
416 newtag->alignment = MAX(parent->alignment, newtag->alignment); in bus_dma_tag_create()
417 newtag->flags |= parent->flags & BUS_DMA_COULD_BOUNCE; in bus_dma_tag_create()
418 newtag->flags |= parent->flags & BUS_DMA_COHERENT; in bus_dma_tag_create()
419 if (newtag->boundary == 0) in bus_dma_tag_create()
420 newtag->boundary = parent->boundary; in bus_dma_tag_create()
421 else if (parent->boundary != 0) in bus_dma_tag_create()
422 newtag->boundary = MIN(parent->boundary, in bus_dma_tag_create()
423 newtag->boundary); in bus_dma_tag_create()
426 if (exclusion_bounce_check(newtag->lowaddr, newtag->highaddr)) in bus_dma_tag_create()
427 newtag->flags |= BUS_DMA_EXCL_BOUNCE; in bus_dma_tag_create()
429 newtag->flags |= BUS_DMA_ALIGN_BOUNCE; in bus_dma_tag_create()
432 * Any request can auto-bounce due to cacheline alignment, in addition in bus_dma_tag_create()
449 bz = newtag->bounce_zone; in bus_dma_tag_create()
451 if (ptoa(bz->total_bpages) < maxsize) { in bus_dma_tag_create()
454 pages = atop(maxsize) - bz->total_bpages; in bus_dma_tag_create()
461 newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; in bus_dma_tag_create()
463 newtag->bounce_zone = NULL; in bus_dma_tag_create()
472 __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); in bus_dma_tag_create()
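A hedged usage sketch of a typical driver call into the tag-creation code above; "dev" and "sc->rx_dtag" are hypothetical driver names, the argument order is that of bus_dma_tag_create(9), and the filter arguments are NULL since filters are no longer supported (see the assertion above):

error = bus_dma_tag_create(
    bus_get_dma_tag(dev),	/* parent: inherit the bus constraints */
    sizeof(uint32_t), 0,	/* alignment, boundary */
    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr: device reaches only 4GB */
    BUS_SPACE_MAXADDR,		/* highaddr */
    NULL, NULL,			/* filter, filterarg: no longer supported */
    MCLBYTES, 1, MCLBYTES,	/* maxsize, nsegments, maxsegsz */
    0,				/* flags */
    NULL, NULL,			/* lockfunc, lockfuncarg: default locking */
    &sc->rx_dtag);		/* resulting tag (hypothetical field) */
if (error != 0)
	return (error);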
483 t->alignment = dmat->alignment; in bus_dma_template_clone()
484 t->boundary = dmat->boundary; in bus_dma_template_clone()
485 t->lowaddr = dmat->lowaddr; in bus_dma_template_clone()
486 t->highaddr = dmat->highaddr; in bus_dma_template_clone()
487 t->maxsize = dmat->maxsize; in bus_dma_template_clone()
488 t->nsegments = dmat->nsegments; in bus_dma_template_clone()
489 t->maxsegsize = dmat->maxsegsz; in bus_dma_template_clone()
490 t->flags = dmat->flags; in bus_dma_template_clone()
491 t->lockfunc = dmat->lockfunc; in bus_dma_template_clone()
492 t->lockfuncarg = dmat->lockfuncarg; in bus_dma_template_clone()
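A hedged sketch of the template path that the clone routine above serves: copy an existing tag's parameters, adjust a couple, and mint a new tag from the result. The identifiers "olddtag" and "newdtag" are hypothetical; the calls are from the bus_dma(9) template interface.

bus_dma_template_t t;
int error;

bus_dma_template_clone(&t, olddtag);	/* copy every field of olddtag */
t.maxsize = PAGE_SIZE;			/* shrink the per-mapping limit */
t.nsegments = 1;			/* require one contiguous segment */
error = bus_dma_template_tag(&t, &newdtag);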
508 if (dmat->map_count != 0) { in bus_dma_tag_destroy()
526 if (dmat->bounce_zone == NULL) in allocate_bz_and_pages()
529 bz = dmat->bounce_zone; in allocate_bz_and_pages()
530 /* Initialize the new map */ in allocate_bz_and_pages()
531 STAILQ_INIT(&(mapp->bpages)); in allocate_bz_and_pages()
534 * Attempt to add pages to our pool on a per-instance basis up to a sane in allocate_bz_and_pages()
536 * alignment and boundary constraints, it could still auto-bounce due to in allocate_bz_and_pages()
539 if (dmat->flags & BUS_DMA_COULD_BOUNCE) in allocate_bz_and_pages()
542 maxpages = 2 * bz->map_count; in allocate_bz_and_pages()
543 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || in allocate_bz_and_pages()
544 (bz->map_count > 0 && bz->total_bpages < maxpages)) { in allocate_bz_and_pages()
547 pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1; in allocate_bz_and_pages()
548 pages = MIN(maxpages - bz->total_bpages, pages); in allocate_bz_and_pages()
553 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) in allocate_bz_and_pages()
554 dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; in allocate_bz_and_pages()
556 bz->map_count++; in allocate_bz_and_pages()
564 bus_dmamap_t map; in allocate_map() local
567 * Allocate the map. The map structure ends with an embedded in allocate_map()
568 * variable-sized array of sync_list structures. Following that in allocate_map()
571 KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS, in allocate_map()
573 dmat->nsegments, MAX_DMA_SEGMENTS)); in allocate_map()
574 segsize = sizeof(struct bus_dma_segment) * dmat->nsegments; in allocate_map()
575 mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments; in allocate_map()
576 map = malloc(mapsize + segsize, M_BUSDMA, mflags | M_ZERO); in allocate_map()
577 if (map == NULL) { in allocate_map()
581 map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize); in allocate_map()
582 STAILQ_INIT(&map->bpages); in allocate_map()
583 return (map); in allocate_map()
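Illustration (not from the source) of the single allocation built in allocate_map() above, assuming nsegments == N:

/*
 *   map ----> +------------------------+
 *             | struct bus_dmamap      |  fixed header
 *             +------------------------+
 *             | struct sync_list [N]   |  embedded variable-sized array
 *  mapsize -> +------------------------+
 *             | bus_dma_segment_t [N]  |  map->segments points here
 *             +------------------------+
 */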
593 bus_dmamap_t map; in bus_dmamap_create() local
596 *mapp = map = allocate_map(dmat, M_NOWAIT); in bus_dmamap_create()
597 if (map == NULL) { in bus_dmamap_create()
606 * happen can't be known until mapping time, but we need to pre-allocate in bus_dmamap_create()
609 error = allocate_bz_and_pages(dmat, map); in bus_dmamap_create()
611 free(map, M_BUSDMA); in bus_dmamap_create()
615 if (map->flags & DMAMAP_COHERENT) in bus_dmamap_create()
618 dmat->map_count++; in bus_dmamap_create()
628 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) in bus_dmamap_destroy() argument
631 if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { in bus_dmamap_destroy()
636 if (dmat->bounce_zone) in bus_dmamap_destroy()
637 dmat->bounce_zone->map_count--; in bus_dmamap_destroy()
638 if (map->flags & DMAMAP_COHERENT) in bus_dmamap_destroy()
641 free(map, M_BUSDMA); in bus_dmamap_destroy()
642 dmat->map_count--; in bus_dmamap_destroy()
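A hedged usage sketch of the map lifecycle serviced by the create/destroy code above. "struct hypo_softc" and its fields are hypothetical driver state and error handling is abbreviated; the calls are the standard bus_dma(9) ones.

static void
hypo_xfer_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	/* On success, program the device with segs[0..nseg-1]. */
}

static int
hypo_start_xfer(struct hypo_softc *sc, void *buf, bus_size_t buflen)
{
	int error;

	error = bus_dmamap_create(sc->buf_dtag, 0, &sc->buf_map);
	if (error != 0)
		return (error);
	error = bus_dmamap_load(sc->buf_dtag, sc->buf_map, buf, buflen,
	    hypo_xfer_cb, sc, BUS_DMA_NOWAIT);
	if (error != 0 && error != EINPROGRESS) {
		bus_dmamap_destroy(sc->buf_dtag, sc->buf_map);
		return (error);
	}
	return (0);
}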
658 bus_dmamap_t map; in bus_dmamem_alloc() local
669 *mapp = map = allocate_map(dmat, mflags); in bus_dmamem_alloc()
670 if (map == NULL) { in bus_dmamem_alloc()
672 __func__, dmat, dmat->flags, ENOMEM); in bus_dmamem_alloc()
675 map->flags = DMAMAP_DMAMEM_ALLOC; in bus_dmamem_alloc()
677 /* For coherent memory, set the map flag that disables sync ops. */ in bus_dmamem_alloc()
679 map->flags |= DMAMAP_COHERENT; in bus_dmamem_alloc()
687 ((dmat->flags & BUS_DMA_COHERENT) == 0)) { in bus_dmamem_alloc()
700 bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); in bus_dmamem_alloc()
704 * - It's small enough to be in the allocator (bufzone not NULL). in bus_dmamem_alloc()
705 * - The alignment constraint isn't larger than the allocation size in bus_dmamem_alloc()
707 * - There's no need to handle lowaddr/highaddr exclusion zones. in bus_dmamem_alloc()
708 * else allocate non-contiguous pages if... in bus_dmamem_alloc()
709 * - The page count that could get allocated doesn't exceed in bus_dmamem_alloc()
712 * - The alignment constraint isn't larger than a page boundary. in bus_dmamem_alloc()
713 * - There are no boundary-crossing constraints. in bus_dmamem_alloc()
717 if (bufzone != NULL && dmat->alignment <= bufzone->size && in bus_dmamem_alloc()
719 *vaddr = uma_zalloc(bufzone->umazone, mflags); in bus_dmamem_alloc()
720 } else if (dmat->nsegments >= in bus_dmamem_alloc()
721 howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) && in bus_dmamem_alloc()
722 dmat->alignment <= PAGE_SIZE && in bus_dmamem_alloc()
723 (dmat->boundary % PAGE_SIZE) == 0) { in bus_dmamem_alloc()
724 *vaddr = kmem_alloc_attr(dmat->maxsize, mflags, 0, in bus_dmamem_alloc()
725 dmat->lowaddr, memattr); in bus_dmamem_alloc()
727 *vaddr = kmem_alloc_contig(dmat->maxsize, mflags, 0, in bus_dmamem_alloc()
728 dmat->lowaddr, dmat->alignment, dmat->boundary, memattr); in bus_dmamem_alloc()
732 __func__, dmat, dmat->flags, ENOMEM); in bus_dmamem_alloc()
733 free(map, M_BUSDMA); in bus_dmamem_alloc()
737 if (map->flags & DMAMAP_COHERENT) in bus_dmamem_alloc()
741 dmat->map_count++; in bus_dmamem_alloc()
744 __func__, dmat, dmat->flags, 0); in bus_dmamem_alloc()
750 * its associated map.
753 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) in bus_dmamem_free() argument
758 if ((map->flags & DMAMAP_COHERENT) && in bus_dmamem_free()
759 ((dmat->flags & BUS_DMA_COHERENT) == 0)) in bus_dmamem_free()
764 bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); in bus_dmamem_free()
766 if (bufzone != NULL && dmat->alignment <= bufzone->size && in bus_dmamem_free()
768 uma_zfree(bufzone->umazone, vaddr); in bus_dmamem_free()
770 kmem_free(vaddr, dmat->maxsize); in bus_dmamem_free()
772 dmat->map_count--; in bus_dmamem_free()
773 if (map->flags & DMAMAP_COHERENT) in bus_dmamem_free()
777 free(map, M_BUSDMA); in bus_dmamem_free()
778 CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); in bus_dmamem_free()
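A hedged usage sketch of the DMA-memory path above: a descriptor ring allocated coherent and zeroed together with its map, later released with bus_dmamem_free(). The softc fields are hypothetical.

static int
hypo_alloc_ring(struct hypo_softc *sc)
{
	int error;

	error = bus_dmamem_alloc(sc->ring_dtag, &sc->ring_va,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->ring_map);
	if (error != 0)
		return (error);
	/* Loading sc->ring_map later yields the device-visible address. */
	return (0);
}

static void
hypo_free_ring(struct hypo_softc *sc)
{
	bus_dmamem_free(sc->ring_dtag, sc->ring_va, sc->ring_map);
}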
782 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, in _bus_dmamap_count_phys() argument
788 if (map->pagesneeded == 0) { in _bus_dmamap_count_phys()
790 " map= %p, pagesneeded= %d", in _bus_dmamap_count_phys()
791 dmat->lowaddr, dmat->boundary, dmat->alignment, in _bus_dmamap_count_phys()
792 map, map->pagesneeded); in _bus_dmamap_count_phys()
800 if (must_bounce(dmat, map, curaddr, sgsize) != 0) { in _bus_dmamap_count_phys()
802 PAGE_SIZE - (curaddr & PAGE_MASK)); in _bus_dmamap_count_phys()
803 map->pagesneeded++; in _bus_dmamap_count_phys()
806 buflen -= sgsize; in _bus_dmamap_count_phys()
808 CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); in _bus_dmamap_count_phys()
813 _bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap, bus_dmamap_t map, in _bus_dmamap_count_pages() argument
821 if (map->pagesneeded == 0) { in _bus_dmamap_count_pages()
823 " map= %p, pagesneeded= %d", in _bus_dmamap_count_pages()
824 dmat->lowaddr, dmat->boundary, dmat->alignment, in _bus_dmamap_count_pages()
825 map, map->pagesneeded); in _bus_dmamap_count_pages()
834 sg_len = MIN(vendaddr - vaddr, in _bus_dmamap_count_pages()
835 (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK))); in _bus_dmamap_count_pages()
840 if (must_bounce(dmat, map, paddr, sg_len) != 0) in _bus_dmamap_count_pages()
841 map->pagesneeded++; in _bus_dmamap_count_pages()
844 CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); in _bus_dmamap_count_pages()
853 _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, in _bus_dmamap_load_phys() argument
863 segs = map->segments; in _bus_dmamap_load_phys()
870 if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) { in _bus_dmamap_load_phys()
871 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); in _bus_dmamap_load_phys()
872 if (map->pagesneeded != 0) { in _bus_dmamap_load_phys()
876 error = _bus_dmamap_reserve_pages(dmat, map, flags); in _bus_dmamap_load_phys()
882 sl = map->slist + map->sync_count - 1; in _bus_dmamap_load_phys()
887 if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, in _bus_dmamap_load_phys()
889 sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); in _bus_dmamap_load_phys()
890 curaddr = add_bounce_page(dmat, map, 0, curaddr, in _bus_dmamap_load_phys()
892 } else if ((dmat->flags & BUS_DMA_COHERENT) == 0) { in _bus_dmamap_load_phys()
893 if (map->sync_count > 0) in _bus_dmamap_load_phys()
894 sl_end = sl->paddr + sl->datacount; in _bus_dmamap_load_phys()
896 if (map->sync_count == 0 || curaddr != sl_end) { in _bus_dmamap_load_phys()
897 if (++map->sync_count > dmat->nsegments) in _bus_dmamap_load_phys()
900 sl->vaddr = 0; in _bus_dmamap_load_phys()
901 sl->paddr = curaddr; in _bus_dmamap_load_phys()
902 sl->datacount = sgsize; in _bus_dmamap_load_phys()
903 sl->pages = PHYS_TO_VM_PAGE(curaddr); in _bus_dmamap_load_phys()
904 KASSERT(sl->pages != NULL, in _bus_dmamap_load_phys()
908 sl->datacount += sgsize; in _bus_dmamap_load_phys()
910 if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs, in _bus_dmamap_load_phys()
914 buflen -= sgsize; in _bus_dmamap_load_phys()
921 bus_dmamap_unload(dmat, map); in _bus_dmamap_load_phys()
928 _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, in _bus_dmamap_load_ma() argument
933 return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags, in _bus_dmamap_load_ma()
942 _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, in _bus_dmamap_load_buffer() argument
955 if (map->flags & DMAMAP_COHERENT) in _bus_dmamap_load_buffer()
957 if (map->flags & DMAMAP_DMAMEM_ALLOC) in _bus_dmamap_load_buffer()
962 segs = map->segments; in _bus_dmamap_load_buffer()
968 map->flags |= DMAMAP_MBUF; in _bus_dmamap_load_buffer()
971 if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) { in _bus_dmamap_load_buffer()
972 _bus_dmamap_count_pages(dmat, pmap, map, buf, buflen, flags); in _bus_dmamap_load_buffer()
973 if (map->pagesneeded != 0) { in _bus_dmamap_load_buffer()
977 error = _bus_dmamap_reserve_pages(dmat, map, flags); in _bus_dmamap_load_buffer()
983 sl = map->slist + map->sync_count - 1; in _bus_dmamap_load_buffer()
1001 sgsize = MIN(buflen, PAGE_SIZE - (curaddr & PAGE_MASK)); in _bus_dmamap_load_buffer()
1003 if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, in _bus_dmamap_load_buffer()
1005 curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, in _bus_dmamap_load_buffer()
1007 } else if ((dmat->flags & BUS_DMA_COHERENT) == 0) { in _bus_dmamap_load_buffer()
1008 if (map->sync_count > 0) { in _bus_dmamap_load_buffer()
1009 sl_pend = sl->paddr + sl->datacount; in _bus_dmamap_load_buffer()
1010 sl_vend = sl->vaddr + sl->datacount; in _bus_dmamap_load_buffer()
1013 if (map->sync_count == 0 || in _bus_dmamap_load_buffer()
1016 if (++map->sync_count > dmat->nsegments) in _bus_dmamap_load_buffer()
1019 sl->vaddr = kvaddr; in _bus_dmamap_load_buffer()
1020 sl->paddr = curaddr; in _bus_dmamap_load_buffer()
1022 sl->pages = NULL; in _bus_dmamap_load_buffer()
1024 sl->pages = PHYS_TO_VM_PAGE(curaddr); in _bus_dmamap_load_buffer()
1025 KASSERT(sl->pages != NULL, in _bus_dmamap_load_buffer()
1030 sl->datacount = sgsize; in _bus_dmamap_load_buffer()
1032 sl->datacount += sgsize; in _bus_dmamap_load_buffer()
1034 if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs, in _bus_dmamap_load_buffer()
1038 buflen -= MIN(sgsize, buflen); /* avoid underflow */ in _bus_dmamap_load_buffer()
1046 bus_dmamap_unload(dmat, map); in _bus_dmamap_load_buffer()
1053 _bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, in _bus_dmamap_waitok() argument
1057 map->mem = *mem; in _bus_dmamap_waitok()
1058 map->dmat = dmat; in _bus_dmamap_waitok()
1059 map->callback = callback; in _bus_dmamap_waitok()
1060 map->callback_arg = callback_arg; in _bus_dmamap_waitok()
1064 _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, in _bus_dmamap_complete() argument
1069 segs = map->segments; in _bus_dmamap_complete()
1074 * Release the mapping held by map.
1077 bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) in bus_dmamap_unload() argument
1081 if ((bz = dmat->bounce_zone) != NULL) { in bus_dmamap_unload()
1082 free_bounce_pages(dmat, map); in bus_dmamap_unload()
1084 if (map->pagesreserved != 0) { in bus_dmamap_unload()
1086 bz->free_bpages += map->pagesreserved; in bus_dmamap_unload()
1087 bz->reserved_bpages -= map->pagesreserved; in bus_dmamap_unload()
1089 map->pagesreserved = 0; in bus_dmamap_unload()
1091 map->pagesneeded = 0; in bus_dmamap_unload()
1093 map->sync_count = 0; in bus_dmamap_unload()
1094 map->flags &= ~DMAMAP_MBUF; in bus_dmamap_unload()
1124 offset = sl->paddr & PAGE_MASK; in dma_dcache_sync()
1125 m = sl->pages; in dma_dcache_sync()
1126 size = sl->datacount; in dma_dcache_sync()
1127 pa = sl->paddr; in dma_dcache_sync()
1129 for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) { in dma_dcache_sync()
1131 if (sl->vaddr == 0) { in dma_dcache_sync()
1132 len = min(PAGE_SIZE - offset, size); in dma_dcache_sync()
1139 len = sl->datacount; in dma_dcache_sync()
1140 va = sl->vaddr; in dma_dcache_sync()
1151 * will be no cpu writes to the beginning of that line in dma_dcache_sync()
1176 bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) in bus_dmamap_sync() argument
1187 * the same vm map, especially on a POST operation. It's not clear that in bus_dmamap_sync()
1189 * we're able to test direct userland dma, panic on a map mismatch. in bus_dmamap_sync()
1191 if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { in bus_dmamap_sync()
1193 "performing bounce", __func__, dmat, dmat->flags, op); in bus_dmamap_sync()
1202 datavaddr = bpage->datavaddr; in bus_dmamap_sync()
1205 bpage->datapage); in bus_dmamap_sync()
1206 datavaddr = tempvaddr | bpage->dataoffs; in bus_dmamap_sync()
1208 bcopy((void *)datavaddr, (void *)bpage->vaddr, in bus_dmamap_sync()
1209 bpage->datacount); in bus_dmamap_sync()
1212 if ((dmat->flags & BUS_DMA_COHERENT) == 0) in bus_dmamap_sync()
1213 dcache_wb_poc(bpage->vaddr, in bus_dmamap_sync()
1214 bpage->busaddr, bpage->datacount); in bus_dmamap_sync()
1217 dmat->bounce_zone->total_bounced++; in bus_dmamap_sync()
1225 * done due to PREWRITE also being set there will be no dirty in bus_dmamap_sync()
1229 * evicted from L1 before we invalidated it, re-dirtying the L2. in bus_dmamap_sync()
1232 bpage = STAILQ_FIRST(&map->bpages); in bus_dmamap_sync()
1234 if ((dmat->flags & BUS_DMA_COHERENT) == 0) in bus_dmamap_sync()
1235 dcache_inv_poc_dma(bpage->vaddr, in bus_dmamap_sync()
1236 bpage->busaddr, bpage->datacount); in bus_dmamap_sync()
1242 * Re-invalidate the caches on a POSTREAD, even though they were in bus_dmamap_sync()
1252 if ((dmat->flags & BUS_DMA_COHERENT) == 0) in bus_dmamap_sync()
1253 dcache_inv_poc(bpage->vaddr, in bus_dmamap_sync()
1254 bpage->busaddr, bpage->datacount); in bus_dmamap_sync()
1256 datavaddr = bpage->datavaddr; in bus_dmamap_sync()
1259 bpage->datapage); in bus_dmamap_sync()
1260 datavaddr = tempvaddr | bpage->dataoffs; in bus_dmamap_sync()
1262 bcopy((void *)bpage->vaddr, (void *)datavaddr, in bus_dmamap_sync()
1263 bpage->datacount); in bus_dmamap_sync()
1268 dmat->bounce_zone->total_bounced++; in bus_dmamap_sync()
1273 * For COHERENT memory no cache maintenance is necessary, but ensure all in bus_dmamap_sync()
1274 * writes have reached memory for the PREWRITE case. No action is in bus_dmamap_sync()
1280 if (map->flags & DMAMAP_COHERENT) { in bus_dmamap_sync()
1283 if ((dmat->flags & BUS_DMA_COHERENT) == 0) in bus_dmamap_sync()
1290 * Cache maintenance for normal (non-COHERENT non-bounce) buffers. All in bus_dmamap_sync()
1293 * that the sequence is inner-to-outer for PREREAD invalidation and in bus_dmamap_sync()
1294 * outer-to-inner for POSTREAD invalidation is not a mistake. in bus_dmamap_sync()
1296 if (map->sync_count != 0) { in bus_dmamap_sync()
1297 sl = &map->slist[0]; in bus_dmamap_sync()
1298 end = &map->slist[map->sync_count]; in bus_dmamap_sync()
1300 "performing sync", __func__, dmat, dmat->flags, op); in bus_dmamap_sync()