Lines matching full:map (cross-reference hits from the busdma code; the number at the start of each hit is its line number in the source file, and the trailing "in func()" note names the enclosing function)
159 bus_dmamap_t map, void *buf, bus_size_t buflen, int flags);
160 static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
294 cacheline_bounce(bus_dmamap_t map, bus_addr_t addr, bus_size_t size) in cacheline_bounce() argument
297 if (map->flags & (DMAMAP_DMAMEM_ALLOC | DMAMAP_COHERENT | DMAMAP_MBUF)) in cacheline_bounce()
317 might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr, in might_bounce() argument
321 KASSERT(map->flags & DMAMAP_DMAMEM_ALLOC || in might_bounce()
327 return (!(map->flags & DMAMAP_DMAMEM_ALLOC) && in might_bounce()
330 cacheline_bounce(map, addr, size))); in might_bounce()
341 must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr, in must_bounce() argument
345 if (cacheline_bounce(map, paddr, size)) in must_bounce()
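The hits above are the layered bounce-decision predicates. cacheline_bounce() asks whether a buffer's start or end splits a cache line, skipping the check entirely for maps flagged as dmamem-allocated, coherent, or mbuf-backed; might_bounce() is the cheap pre-filter run once per load; must_bounce() (not sketched) adds the per-page checks on top. A minimal self-contained sketch of that layering; the types, CACHE_LINE_SIZE, and the F_* flag names are simplified stand-ins invented for the example, and the tag's excluded-address-range checks are omitted:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define CACHE_LINE_SIZE 64u
#define F_DMAMEM        0x1u    /* memory came from the DMA allocator */
#define F_COHERENT      0x2u    /* cache-coherent; syncs are no-ops */
#define F_MBUF          0x4u    /* mbufs get cache-safe alignment */

struct dmamap { unsigned flags; };

/* Would [addr, addr + size) share a cache line with unrelated data? */
static bool
cacheline_bounce(const struct dmamap *m, uintptr_t addr, size_t size)
{
        if (m->flags & (F_DMAMEM | F_COHERENT | F_MBUF))
                return (false);         /* alignment already guaranteed */
        return ((addr & (CACHE_LINE_SIZE - 1)) != 0 ||
            ((addr + size) & (CACHE_LINE_SIZE - 1)) != 0);
}

/* Cheap pre-filter run once per load, before walking the buffer. */
static bool
might_bounce(const struct dmamap *m, uintptr_t addr, size_t size)
{
        return (!(m->flags & F_DMAMEM) && cacheline_bounce(m, addr, size));
}

The point of the layering is cost: might_bounce() lets the loaders skip the page-by-page must_bounce() walk entirely for maps that can never bounce.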
530 /* Initialize the new map */ in allocate_bz_and_pages()
564 bus_dmamap_t map; in allocate_map() local
567 * Allocate the map. The map structure ends with an embedded in allocate_map()
575 mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments; in allocate_map()
576 map = malloc(mapsize + segsize, M_BUSDMA, mflags | M_ZERO); in allocate_map()
577 if (map == NULL) { in allocate_map()
581 map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize); in allocate_map()
582 STAILQ_INIT(&map->bpages); in allocate_map()
583 return (map); in allocate_map()
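allocate_map() (hits 564-583 above) uses a single malloc to hold the map header, a sync_list array sized to dmat->nsegments, and the bus_dma_segment_t array, then aims map->segments just past the first two. A sketch of that one-allocation layout under simplified, hypothetical struct definitions:

#include <stdint.h>
#include <stdlib.h>

struct sync_list { uintptr_t paddr; size_t datacount; };
struct segment   { uintptr_t addr;  size_t len; };

struct dmamap {
        struct segment *segments;       /* points into the same block */
        int nsegments;
        struct sync_list slist[];       /* trailing flexible array */
};

static struct dmamap *
allocate_map(int nsegments)
{
        struct dmamap *m;
        size_t mapsize, segsize;

        /* Header plus one sync_list entry per possible segment... */
        mapsize = sizeof(*m) + sizeof(struct sync_list) * nsegments;
        /* ...then the segment array, all from a single allocation. */
        segsize = sizeof(struct segment) * nsegments;
        if ((m = calloc(1, mapsize + segsize)) == NULL)
                return (NULL);
        m->segments = (struct segment *)((uintptr_t)m + mapsize);
        m->nsegments = nsegments;
        return (m);
}

One free() then releases everything, which is why the destroy and free paths in the hits below can drop the whole map with a single free(map, M_BUSDMA).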
593 bus_dmamap_t map; in bus_dmamap_create() local
596 *mapp = map = allocate_map(dmat, M_NOWAIT); in bus_dmamap_create()
597 if (map == NULL) { in bus_dmamap_create()
609 error = allocate_bz_and_pages(dmat, map); in bus_dmamap_create()
611 free(map, M_BUSDMA); in bus_dmamap_create()
615 if (map->flags & DMAMAP_COHERENT) in bus_dmamap_create()
628 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) in bus_dmamap_destroy() argument
631 if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { in bus_dmamap_destroy()
638 if (map->flags & DMAMAP_COHERENT) in bus_dmamap_destroy()
641 free(map, M_BUSDMA); in bus_dmamap_destroy()
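bus_dmamap_create() allocates the map and then its bounce zone and pages, freeing the map again if that second step fails; bus_dmamap_destroy() returns EBUSY rather than destroy a map that still has bounce pages queued or a nonzero sync_count, i.e. one that is still loaded. A sketch of that create/destroy discipline; the struct fields and the allocate_bounce_resources() stub are invented for the example:

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

struct dmamap {
        int sync_count;         /* nonzero while buffers are loaded */
        bool bpages_queued;     /* bounce pages still on the map */
};

/* Stub standing in for allocate_bz_and_pages(); assume it can fail. */
static int
allocate_bounce_resources(struct dmamap *m)
{
        (void)m;
        return (0);
}

static int
dmamap_create(struct dmamap **mapp)
{
        struct dmamap *m;
        int error;

        *mapp = m = calloc(1, sizeof(*m));
        if (m == NULL)
                return (ENOMEM);
        error = allocate_bounce_resources(m);
        if (error != 0) {
                free(m);        /* unwind the partial create */
                *mapp = NULL;
        }
        return (error);
}

static int
dmamap_destroy(struct dmamap *m)
{
        /* Destroying a loaded map is a caller bug: report EBUSY. */
        if (m->bpages_queued || m->sync_count != 0)
                return (EBUSY);
        free(m);
        return (0);
}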
658 bus_dmamap_t map; in bus_dmamem_alloc() local
669 *mapp = map = allocate_map(dmat, mflags); in bus_dmamem_alloc()
670 if (map == NULL) { in bus_dmamem_alloc()
675 map->flags = DMAMAP_DMAMEM_ALLOC; in bus_dmamem_alloc()
677 /* For coherent memory, set the map flag that disables sync ops. */ in bus_dmamem_alloc()
679 map->flags |= DMAMAP_COHERENT; in bus_dmamem_alloc()
733 free(map, M_BUSDMA); in bus_dmamem_alloc()
737 if (map->flags & DMAMAP_COHERENT) in bus_dmamem_alloc()
750 * its associated map.
753 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) in bus_dmamem_free() argument
758 if ((map->flags & DMAMAP_COHERENT) && in bus_dmamem_free()
773 if (map->flags & DMAMAP_COHERENT) in bus_dmamem_free()
777 free(map, M_BUSDMA); in bus_dmamem_free()
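bus_dmamem_alloc() (hits 658-737) hands back both the memory and a map pre-flagged DMAMAP_DMAMEM_ALLOC, adding DMAMAP_COHERENT when the memory is coherent so later sync operations become no-ops; bus_dmamem_free() consults the same flags on release. A sketch of the flag handling, reduced to plain malloc-backed memory:

#include <errno.h>
#include <stdlib.h>

#define DMAMAP_DMAMEM_ALLOC 0x1u    /* memory came from dmamem_alloc */
#define DMAMAP_COHERENT     0x2u    /* cache sync ops become no-ops */

struct dmamap { unsigned flags; };

static int
dmamem_alloc(size_t size, int want_coherent, void **vaddrp,
    struct dmamap **mapp)
{
        struct dmamap *m;
        void *va;

        if ((m = calloc(1, sizeof(*m))) == NULL)
                return (ENOMEM);
        if ((va = malloc(size)) == NULL) {
                free(m);
                return (ENOMEM);
        }
        /* Flag the map so bounce checks and cache syncs short-circuit. */
        m->flags = DMAMAP_DMAMEM_ALLOC;
        if (want_coherent)
                m->flags |= DMAMAP_COHERENT;
        *vaddrp = va;
        *mapp = m;
        return (0);
}

static void
dmamem_free(void *vaddr, struct dmamap *m)
{
        /* The real code picks the matching backend by the map's flags. */
        free(vaddr);
        free(m);
}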
782 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, in _bus_dmamap_count_phys() argument
788 if (map->pagesneeded == 0) { in _bus_dmamap_count_phys()
790 " map= %p, pagesneeded= %d", in _bus_dmamap_count_phys()
792 map, map->pagesneeded); in _bus_dmamap_count_phys()
800 if (must_bounce(dmat, map, curaddr, sgsize) != 0) { in _bus_dmamap_count_phys()
803 map->pagesneeded++; in _bus_dmamap_count_phys()
808 CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); in _bus_dmamap_count_phys()
813 _bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap, bus_dmamap_t map, in _bus_dmamap_count_pages() argument
821 if (map->pagesneeded == 0) { in _bus_dmamap_count_pages()
823 " map= %p, pagesneeded= %d", in _bus_dmamap_count_pages()
825 map, map->pagesneeded); in _bus_dmamap_count_pages()
840 if (must_bounce(dmat, map, paddr, sg_len) != 0) in _bus_dmamap_count_pages()
841 map->pagesneeded++; in _bus_dmamap_count_pages()
844 CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded); in _bus_dmamap_count_pages()
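_bus_dmamap_count_phys() and _bus_dmamap_count_pages() are a first pass over the buffer: walk it in page-clipped chunks and bump map->pagesneeded for each chunk that must_bounce() flags, guarded by pagesneeded == 0 so an already-counted map is not counted twice. The count drives the later bounce-page reservation. A sketch of the physical variant; the toy must_bounce() rule here is a placeholder for the real address and cache-line checks:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

struct dmamap { int pagesneeded; };

/* Hypothetical stand-in for must_bounce(). */
static bool
must_bounce(uintptr_t paddr, size_t size)
{
        (void)size;
        return ((paddr & (PAGE_SIZE - 1)) != 0);   /* toy rule */
}

/* First pass: count page-sized chunks that will need bounce pages. */
static void
count_phys(struct dmamap *m, uintptr_t buf, size_t buflen)
{
        uintptr_t curaddr;
        size_t sgsize;

        if (m->pagesneeded != 0)
                return;                 /* already counted */
        for (curaddr = buf; buflen != 0; ) {
                /* Clip each chunk to the end of its page. */
                sgsize = PAGE_SIZE - (curaddr & (PAGE_SIZE - 1));
                if (sgsize > buflen)
                        sgsize = buflen;
                if (must_bounce(curaddr, sgsize))
                        m->pagesneeded++;
                curaddr += sgsize;
                buflen -= sgsize;
        }
}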
853 _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, in _bus_dmamap_load_phys() argument
863 segs = map->segments; in _bus_dmamap_load_phys()
870 if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) { in _bus_dmamap_load_phys()
871 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); in _bus_dmamap_load_phys()
872 if (map->pagesneeded != 0) { in _bus_dmamap_load_phys()
876 error = _bus_dmamap_reserve_pages(dmat, map, flags); in _bus_dmamap_load_phys()
882 sl = map->slist + map->sync_count - 1; in _bus_dmamap_load_phys()
887 if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, in _bus_dmamap_load_phys()
890 curaddr = add_bounce_page(dmat, map, 0, curaddr, in _bus_dmamap_load_phys()
893 if (map->sync_count > 0) in _bus_dmamap_load_phys()
896 if (map->sync_count == 0 || curaddr != sl_end) { in _bus_dmamap_load_phys()
897 if (++map->sync_count > dmat->nsegments) in _bus_dmamap_load_phys()
910 if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs, in _bus_dmamap_load_phys()
921 bus_dmamap_unload(dmat, map); in _bus_dmamap_load_phys()
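The _bus_dmamap_load_phys() hits show the core load loop: when might_bounce() says bouncing is possible, count and reserve pages up front; then, per chunk, either substitute a bounce-page address via add_bounce_page() or record the chunk on the sync list, coalescing with the previous entry when the physical addresses abut (the sl_end comparison); each chunk then goes through _bus_dmamap_addsegs(), and any failure unwinds with bus_dmamap_unload(). A sketch of just the sync-list coalescing step, with a fixed-size list standing in for the real dmat->nsegments bound:

#include <stddef.h>
#include <stdint.h>

struct sync_list { uintptr_t paddr; size_t datacount; };

struct dmamap {
        struct sync_list slist[32];     /* real bound: dmat->nsegments */
        int sync_count;
};

/*
 * Record a run for later cache maintenance, extending the previous
 * entry when the new chunk starts exactly where it ended.
 */
static int
sync_list_append(struct dmamap *m, uintptr_t curaddr, size_t sgsize)
{
        struct sync_list *sl = NULL;
        uintptr_t sl_end = 0;

        if (m->sync_count > 0) {
                sl = &m->slist[m->sync_count - 1];
                sl_end = sl->paddr + sl->datacount;
        }
        if (m->sync_count == 0 || curaddr != sl_end) {
                if (m->sync_count == 32)
                        return (-1);    /* too many discontiguous runs */
                sl = &m->slist[m->sync_count++];
                sl->paddr = curaddr;
                sl->datacount = sgsize;
        } else {
                sl->datacount += sgsize;        /* coalesce */
        }
        return (0);
}

Coalescing matters because the sync list shares the nsegments bound: physically contiguous pages collapse into one entry instead of one entry per page.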
928 _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, in _bus_dmamap_load_ma() argument
933 return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags, in _bus_dmamap_load_ma()
942 _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, in _bus_dmamap_load_buffer() argument
955 if (map->flags & DMAMAP_COHERENT) in _bus_dmamap_load_buffer()
957 if (map->flags & DMAMAP_DMAMEM_ALLOC) in _bus_dmamap_load_buffer()
962 segs = map->segments; in _bus_dmamap_load_buffer()
968 map->flags |= DMAMAP_MBUF; in _bus_dmamap_load_buffer()
971 if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) { in _bus_dmamap_load_buffer()
972 _bus_dmamap_count_pages(dmat, pmap, map, buf, buflen, flags); in _bus_dmamap_load_buffer()
973 if (map->pagesneeded != 0) { in _bus_dmamap_load_buffer()
977 error = _bus_dmamap_reserve_pages(dmat, map, flags); in _bus_dmamap_load_buffer()
983 sl = map->slist + map->sync_count - 1; in _bus_dmamap_load_buffer()
1003 if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr, in _bus_dmamap_load_buffer()
1005 curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, in _bus_dmamap_load_buffer()
1008 if (map->sync_count > 0) { in _bus_dmamap_load_buffer()
1013 if (map->sync_count == 0 || in _bus_dmamap_load_buffer()
1016 if (++map->sync_count > dmat->nsegments) in _bus_dmamap_load_buffer()
1034 if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs, in _bus_dmamap_load_buffer()
1046 bus_dmamap_unload(dmat, map); in _bus_dmamap_load_buffer()
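_bus_dmamap_load_buffer() is the virtual-address variant of the same loop: it walks the buffer with the given pmap, resolves each VA to a physical address, carries the kernel VA alongside so a bounced chunk can be copied, and sets DMAMAP_MBUF for mbuf loads (which feeds back into cacheline_bounce() above). A sketch of the VA-walk skeleton; va_to_pa() is a hypothetical stand-in for the pmap lookup:

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

/* Hypothetical stand-in for extracting a physical address via the pmap. */
static uintptr_t
va_to_pa(uintptr_t va)
{
        return (va);            /* identity-mapped for the sketch */
}

/* Walk a VA range in page-clipped chunks, as the loader does. */
static void
walk_buffer(void *buf, size_t buflen,
    void (*emit)(uintptr_t kva, uintptr_t pa, size_t len))
{
        uintptr_t vaddr = (uintptr_t)buf;
        size_t sgsize;

        while (buflen > 0) {
                /* Clip to the page boundary so each chunk is contiguous. */
                sgsize = PAGE_SIZE - (vaddr & (PAGE_SIZE - 1));
                if (sgsize > buflen)
                        sgsize = buflen;
                /* The kernel VA rides along so bounce copies can use it. */
                emit(vaddr, va_to_pa(vaddr), sgsize);
                vaddr += sgsize;
                buflen -= sgsize;
        }
}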
1053 _bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, in _bus_dmamap_waitok() argument
1057 map->mem = *mem; in _bus_dmamap_waitok()
1058 map->dmat = dmat; in _bus_dmamap_waitok()
1059 map->callback = callback; in _bus_dmamap_waitok()
1060 map->callback_arg = callback_arg; in _bus_dmamap_waitok()
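_bus_dmamap_waitok() just parks the memory descriptor, owning tag, and callback on the map, so a BUS_DMA_WAITOK load that ran out of bounce pages can be finished later when pages are freed. A sketch of that deferred-callback stash, with types invented for the example:

#include <stddef.h>

struct memdesc { void *buf; size_t len; };

typedef void dmamap_callback_t(void *arg, int error);

struct dmamap {
        struct memdesc mem;             /* what to reload later */
        dmamap_callback_t *callback;    /* whom to tell when done */
        void *callback_arg;
};

/* Record everything needed to finish the load once pages free up. */
static void
dmamap_waitok(struct dmamap *m, struct memdesc *mem,
    dmamap_callback_t *callback, void *callback_arg)
{
        m->mem = *mem;
        m->callback = callback;
        m->callback_arg = callback_arg;
}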
1064 _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, in _bus_dmamap_complete() argument
1069 segs = map->segments; in _bus_dmamap_complete()
1074 * Release the mapping held by map.
1077 bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) in bus_dmamap_unload() argument
1082 free_bounce_pages(dmat, map); in bus_dmamap_unload()
1084 if (map->pagesreserved != 0) { in bus_dmamap_unload()
1086 bz->free_bpages += map->pagesreserved; in bus_dmamap_unload()
1087 bz->reserved_bpages -= map->pagesreserved; in bus_dmamap_unload()
1089 map->pagesreserved = 0; in bus_dmamap_unload()
1091 map->pagesneeded = 0; in bus_dmamap_unload()
1093 map->sync_count = 0; in bus_dmamap_unload()
1094 map->flags &= ~DMAMAP_MBUF; in bus_dmamap_unload()
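bus_dmamap_unload() is bookkeeping only: free the map's bounce pages, credit any still-reserved pages back to the bounce zone (free_bpages up, reserved_bpages down), and clear the per-load state (pagesreserved, pagesneeded, sync_count, DMAMAP_MBUF) so the map can be reloaded. A sketch of the accounting against a simplified bounce zone:

#define F_MBUF 0x4u

struct bounce_zone { int free_bpages, reserved_bpages; };

struct dmamap {
        struct bounce_zone *bz;
        int pagesreserved, pagesneeded, sync_count;
        unsigned flags;
};

static void
dmamap_unload(struct dmamap *m)
{
        /* Return reserved-but-unused pages to the zone. */
        if (m->pagesreserved != 0) {
                m->bz->free_bpages += m->pagesreserved;
                m->bz->reserved_bpages -= m->pagesreserved;
                m->pagesreserved = 0;
        }
        /* Reset per-load state so the map can be loaded again. */
        m->pagesneeded = 0;
        m->sync_count = 0;
        m->flags &= ~F_MBUF;
}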
1176 bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) in bus_dmamap_sync() argument
1187 * the same vm map, especially on a POST operation. It's not clear that in bus_dmamap_sync()
1189 * we're able to test direct userland dma, panic on a map mismatch. in bus_dmamap_sync()
1191 if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { in bus_dmamap_sync()
1232 bpage = STAILQ_FIRST(&map->bpages); in bus_dmamap_sync()
1280 if (map->flags & DMAMAP_COHERENT) { in bus_dmamap_sync()
1296 if (map->sync_count != 0) { in bus_dmamap_sync()
1297 sl = &map->slist[0]; in bus_dmamap_sync()
1298 end = &map->slist[map->sync_count]; in bus_dmamap_sync()
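bus_dmamap_sync() does little for DMAMAP_COHERENT maps beyond, at most, a memory barrier; otherwise it first services the bounce pages on map->bpages (copying to or from the original buffers per the sync op) and then walks the sync list built at load time, applying cache maintenance to each recorded run from slist[0] to slist[sync_count]. A sketch of the sync-list walk; cache_op() is a hypothetical stand-in for the ARM writeback/invalidate primitives:

#include <stddef.h>
#include <stdint.h>

#define DMAMAP_COHERENT 0x2u

struct sync_list { uintptr_t paddr; size_t datacount; };

struct dmamap {
        unsigned flags;
        int sync_count;
        struct sync_list slist[32];
};

/* Hypothetical cache-maintenance primitive (writeback/invalidate/both). */
static void
cache_op(uintptr_t paddr, size_t len, int op)
{
        (void)paddr; (void)len; (void)op;
}

static void
dmamap_sync(struct dmamap *m, int op)
{
        struct sync_list *sl, *end;

        if (m->flags & DMAMAP_COHERENT)
                return;         /* coherent: at most a barrier, omitted */
        if (m->sync_count != 0) {
                /* Apply the op over every run recorded at load time. */
                sl = &m->slist[0];
                end = &m->slist[m->sync_count];
                for (; sl != end; sl++)
                        cache_op(sl->paddr, sl->datacount, op);
        }
}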