/*	$NetBSD: dvma.c,v 1.13 1999/07/08 18:11:01 thorpej Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Gordon W. Ross and Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * DVMA (Direct Virtual Memory Access - like DMA)
 *
 * In the Sun3 architecture, memory cycles initiated by secondary bus
 * masters (DVMA devices) passed through the same MMU that governed CPU
 * accesses.  All DVMA devices were wired in such a way that an offset
 * was added to the addresses they issued, causing them to access virtual
 * memory starting at address 0x0FF00000 - the offset.  The task of
 * enabling a DVMA device to access main memory only involved creating a
 * valid mapping in the MMU that translated these high addresses into the
 * appropriate physical addresses.
 *
 * The Sun3x presents a challenge to programming DVMA because the MMU is no
 * longer shared by both secondary bus masters and the CPU.  The MC68030's
 * built-in MMU serves only to manage virtual memory accesses initiated by
 * the CPU.  Secondary bus master accesses pass through a different MMU,
 * aptly named the 'I/O Mapper'.  To make every device driver that uses
 * DVMA understand that these two address spaces are disconnected would
 * require a tremendous amount of code re-writing.  To avoid this, we will
 * ensure that the I/O Mapper and the MC68030 MMU are programmed together,
 * so that DVMA mappings are consistent in both the CPU virtual address
 * space and the secondary bus master address space - creating an
 * environment just like the Sun3 system.
 *
 * The maximum address space that any DVMA device in the Sun3x architecture
 * is capable of addressing is 24 bits wide (16 Megabytes).  We can alias
 * all of the mappings that exist in the I/O Mapper by duplicating them in
 * a specially reserved section of the CPU's virtual address space, 16
 * Megabytes in size.  Whenever a DVMA buffer is allocated, the allocation
 * code will enter a mapping into both the MC68030 MMU page tables and the
 * I/O Mapper.
 *
 * The address returned by the allocation routine is a virtual address that
 * the requesting driver must use to access the buffer.  It is up to the
 * device driver to convert this virtual address into the appropriate slave
 * address that its device should issue to access the buffer.  (There will
 * be routines that assist the driver in doing so.)  A sketch of the
 * typical call sequence follows the includes below.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/map.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <uvm/uvm_extern.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/dvma.h>
#include <machine/pmap.h>

#include <sun3/sun3/machdep.h>

#include <sun3/sun3x/enable.h>
#include <sun3/sun3x/iommu.h>
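
/*
 * Illustrative sketch only (not part of the original file): the typical
 * life cycle of a DVMA transfer buffer, as described in the comment at
 * the top.  Only dvma_mapin(), dvma_kvtopa() and dvma_mapout() are real
 * entry points here; the function name and the device-programming step
 * are hypothetical, so the sketch is kept under "#ifdef notdef".
 */
#ifdef notdef
static void
example_dvma_transfer(buf, len)
	void *buf;	/* wired kernel buffer (>= VM_MIN_KERNEL_ADDRESS) */
	int len;
{
	void *dva;
	u_long busaddr;

	/* Double-map the buffer into the 16 MB DVMA window. */
	dva = dvma_mapin(buf, len, 1);	/* canwait != 0, so never NULL */

	/*
	 * Convert the CPU virtual address into the slave address an
	 * on-board master must issue.  (A VME master would pass its own
	 * bus type and get an address under the VME slave mask instead.)
	 */
	busaddr = dvma_kvtopa(dva, BUS_OBIO);

	/* ... hand busaddr to the device and run the transfer ... */

	/* Release the DVMA space once the transfer is complete. */
	dvma_mapout(dva, len);
}
#endif	/* notdef */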

/*
 * Use a resource map to manage DVMA scratch-memory pages.
 * Note: SunOS says the last three pages are reserved (PROM?)
 * Note: need a separate map (sub-map?) for the last 1MB for
 * use by the VME slave interface.
 */

/* Number of slots in dvmamap. */
int dvma_max_segs = btoc(DVMA_MAP_SIZE);
struct map *dvmamap;

void
dvma_init()
{

	/*
	 * Create the resource map for DVMA pages.
	 */
	dvmamap = malloc((sizeof(struct map) * dvma_max_segs),
	    M_DEVBUF, M_WAITOK);

	rminit(dvmamap, btoc(DVMA_MAP_AVAIL), btoc(DVMA_MAP_BASE),
	    "dvmamap", dvma_max_segs);

	/*
	 * Enable DVMA in the System Enable register.
	 * Note: This is only necessary for VME slave accesses.
	 *       On-board devices are always capable of DVMA.
	 */
	*enable_reg |= ENA_SDVMA;
}


/*
 * Given a DVMA address, return the physical address that
 * would be used by some OTHER bus-master besides the CPU.
 * (Examples: on-board ie/le, VME xy board).
 */
u_long
dvma_kvtopa(kva, bustype)
	void *kva;
	int bustype;
{
	u_long addr, mask;

	addr = (u_long)kva;
	if ((addr & DVMA_MAP_BASE) != DVMA_MAP_BASE)
		panic("dvma_kvtopa: bad dvma addr=0x%lx", addr);

	switch (bustype) {
	case BUS_OBIO:
	case BUS_OBMEM:
		mask = DVMA_OBIO_SLAVE_MASK;
		break;
	default:	/* VME bus device. */
		mask = DVMA_VME_SLAVE_MASK;
		break;
	}

	return (addr & mask);
}
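
/*
 * Illustrative sketch only (not part of the original file): because the
 * I/O Mapper and the MC68030 MMU are kept consistent, the slave address
 * returned by dvma_kvtopa() is simply the DVMA kernel virtual address
 * with its high bits masked off.  The function name below is
 * hypothetical, so the sketch is kept under "#ifdef notdef".
 */
#ifdef notdef
static void
example_check_obio_addr(dvma_buf)
	void *dvma_buf;	/* address previously returned by dvma_mapin() */
{
	u_long va, obio_addr;

	va = (u_long)dvma_buf;
	obio_addr = dvma_kvtopa(dvma_buf, BUS_OBIO);

	/* For an on-board master, this equals masking the VA by hand. */
	if (obio_addr != (va & DVMA_OBIO_SLAVE_MASK))
		panic("example_check_obio_addr");
}
#endif	/* notdef */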

/*
 * Map a range [va, va+len] of wired kernel virtual addresses
 * to a kernel address in DVMA space.
 */
void *
dvma_mapin(kmem_va, len, canwait)
	void *kmem_va;
	int len, canwait;
{
	void *dvma_addr;
	vm_offset_t kva, tva;
	register int npf, s;
	paddr_t pa;
	long off, pn;
	boolean_t rv;

	kva = (u_long)kmem_va;
#ifdef	DIAGNOSTIC
	/*
	 * Addresses below VM_MIN_KERNEL_ADDRESS are not part of the kernel
	 * map and should not participate in DVMA.
	 */
	if (kva < VM_MIN_KERNEL_ADDRESS)
		panic("dvma_mapin: bad kva");
#endif

	/*
	 * Calculate the offset of the data buffer from a page boundary.
	 */
	off = (int)kva & PGOFSET;
	kva -= off;	/* Truncate starting address to nearest page. */
	len = round_page(len + off); /* Round the buffer length to pages. */
	npf = btoc(len); /* Determine the number of pages to be mapped. */

	s = splimp();
	for (;;) {
		/*
		 * Try to allocate DVMA space of the appropriate size
		 * in which to do a transfer.
		 */
		pn = rmalloc(dvmamap, npf);

		if (pn != 0)
			break;
		if (canwait) {
			(void)tsleep(dvmamap, PRIBIO+1, "physio", 0);
			continue;
		}
		splx(s);
		return NULL;
	}
	splx(s);

	/*
	 * Tva is the starting page to which the data buffer will be double
	 * mapped.  Dvma_addr is the starting address of the buffer within
	 * that page and is the return value of the function.
	 */
	tva = ctob(pn);
	dvma_addr = (void *)(tva + off);

	for (; npf--; kva += NBPG, tva += NBPG) {
		/*
		 * Retrieve the physical address of each page in the buffer
		 * and enter mappings into the I/O MMU so they may be seen
		 * by external bus masters and into the special DVMA space
		 * in the MC68030 MMU so they may be seen by the CPU.
		 */
		rv = pmap_extract(pmap_kernel(), kva, &pa);
#ifdef	DEBUG
		if (rv == FALSE)
			panic("dvma_mapin: null page frame");
#endif	/* DEBUG */

		iommu_enter((tva & IOMMU_VA_MASK), pa);
		pmap_enter(pmap_kernel(), tva, pa | PMAP_NC,
		    VM_PROT_READ|VM_PROT_WRITE, 1, 0);
	}

	return (dvma_addr);
}

/*
 * Remove the double mapping of the buffer at `dvma_addr' (length `len')
 * in DVMA space.
 *
 * TODO - This function might be the perfect place to handle the
 *        synchronization between the DVMA cache and central RAM
 *        on the 3/470.
 */
void
dvma_mapout(dvma_addr, len)
	void *dvma_addr;
	int len;
{
	u_long kva;
	int s, off;

	kva = (u_long)dvma_addr;
	off = (int)kva & PGOFSET;
	kva -= off;
	len = round_page(len + off);

	iommu_remove((kva & IOMMU_VA_MASK), len);

	/*
	 * XXX - don't call pmap_remove() with DVMA space yet.
	 * XXX   It cannot (currently) handle the removal
	 * XXX   of address ranges which do not participate in the
	 * XXX   PV system by virtue of their _virtual_ addresses.
	 * XXX   DVMA is one of these special address spaces.
	 */
#ifdef	DVMA_ON_PVLIST
	pmap_remove(pmap_kernel(), kva, kva + len);
#endif	/* DVMA_ON_PVLIST */

	s = splimp();
	rmfree(dvmamap, btoc(len), btoc(kva));
	wakeup(dvmamap);
	splx(s);
}

/*
 * Allocate actual memory pages in DVMA space.
 * (For sun3 compatibility - the ie driver.)
 */
void *
dvma_malloc(bytes)
	size_t bytes;
{
	void *new_mem, *dvma_mem;
	vm_size_t new_size;

	if (!bytes)
		return NULL;
	new_size = m68k_round_page(bytes);
	new_mem = (void *)uvm_km_alloc(kernel_map, new_size);
	if (!new_mem)
		return NULL;
	dvma_mem = dvma_mapin(new_mem, new_size, 1);
	return (dvma_mem);
}

/*
 * Free pages from dvma_malloc()
 */
void
dvma_free(addr, size)
	void *addr;
	size_t size;
{
	vm_size_t sz = m68k_round_page(size);

	dvma_mapout(addr, sz);
	/* XXX: need the kmem address to free it...
	   Oh well, we never call this anyway. */
}
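
/*
 * Illustrative sketch only (not part of the original file): how a
 * sun3-compatible on-board driver (such as `ie') might use dvma_malloc()
 * to obtain a buffer that both the CPU and its device can address.  The
 * function name is hypothetical, so the sketch is kept under
 * "#ifdef notdef".  Note that dvma_free() cannot yet return the
 * underlying kernel pages (see above), so such buffers are effectively
 * permanent.
 */
#ifdef notdef
static void *
example_alloc_shared(len, busaddrp)
	size_t len;
	u_long *busaddrp;	/* out: address the device must issue */
{
	void *va;

	/* Allocate wired pages and double-map them into DVMA space. */
	va = dvma_malloc(len);
	if (va == NULL)
		return (NULL);

	/* An on-board master addresses the same memory at this address. */
	*busaddrp = dvma_kvtopa(va, BUS_OBIO);

	return (va);
}
#endif	/* notdef */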