/*	$OpenBSD: if_le_ioasic.c,v 1.17 2014/12/22 02:28:52 tedu Exp $	*/
/*	$NetBSD: if_le_ioasic.c,v 1.18 2001/11/13 06:26:10 lukem Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on DEC IOCTL ASIC.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>

struct le_ioasic_softc {
	struct am7990_softc sc_am7990;	/* glue to MI code */
	struct lereg1 *sc_r1;		/* LANCE registers */
	/* XXX must match with le_softc of if_levar.h XXX */

	bus_dma_tag_t sc_dmat;		/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dmamap */
};

int le_ioasic_match(struct device *, void *, void *);
void le_ioasic_attach(struct device *, struct device *, void *);

struct cfattach le_ioasic_ca = {
	sizeof(struct le_softc), le_ioasic_match, le_ioasic_attach
};

void le_ioasic_copytobuf_gap2(struct lance_softc *, void *, int, int);
void le_ioasic_copyfrombuf_gap2(struct lance_softc *, void *, int, int);
void le_ioasic_copytobuf_gap16(struct lance_softc *, void *, int, int);
void le_ioasic_copyfrombuf_gap16(struct lance_softc *, void *, int, int);
void le_ioasic_zerobuf_gap16(struct lance_softc *, int, int);

int
le_ioasic_match(struct device *parent, void *match, void *aux)
{
	struct ioasicdev_attach_args *d = aux;

	if (strncmp("PMAD-BA ", d->iada_modname, TC_ROM_LLEN) != 0)
		return 0;

	return 1;
}

/* IOASIC LANCE DMA needs 128KB boundary aligned 128KB chunk */
#define	LE_IOASIC_MEMSIZE	(128*1024)
#define	LE_IOASIC_MEMALIGN	(128*1024)
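/*
 * Illustrative note: because the alignment passed to bus_dmamem_alloc()
 * below equals the size, the single 128KB segment can never cross a
 * 128KB boundary.  E.g. a segment at 0x20000-0x3ffff satisfies both
 * constraints, whereas 0x30000-0x4ffff would straddle a boundary.
 */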

void
le_ioasic_attach(struct device *parent, struct device *self, void *aux)
{
	struct le_ioasic_softc *sc = (void *)self;
	struct ioasicdev_attach_args *d = aux;
	struct lance_softc *le = &sc->sc_am7990.lsc;
	bus_space_tag_t ioasic_bst;
	bus_space_handle_t ioasic_bsh;
	bus_dma_tag_t dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	u_int32_t ssr;
	int rseg;
	caddr_t le_iomem;

	ioasic_bst = ((struct ioasic_softc *)parent)->sc_bst;
	ioasic_bsh = ((struct ioasic_softc *)parent)->sc_bsh;
	dmat = sc->sc_dmat = ((struct ioasic_softc *)parent)->sc_dmat;
	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("can't allocate DMA area for LANCE\n");
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		printf("can't map DMA area for LANCE\n");
		bus_dmamem_free(dmat, &seg, rseg);
		return;
	}
	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		printf("can't create DMA map\n");
		goto bad;
	}
	if (bus_dmamap_load(dmat, sc->sc_dmamap,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("can't load DMA map\n");
		goto bad;
	}
	/*
	 * Bind 128KB buffer with IOASIC DMA.
	 */
	tca = IOASIC_DMA_ADDR(sc->sc_dmamap->dm_segs[0].ds_addr);
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca);
	ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);
	ssr |= IOASIC_CSR_DMAEN_LANCE;
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);

	sc->sc_r1 = (struct lereg1 *)
	    TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);
	le->sc_copytodesc = le_ioasic_copytobuf_gap2;
	le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	le->sc_copytobuf = le_ioasic_copytobuf_gap16;
	le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	le->sc_zerobuf = le_ioasic_zerobuf_gap16;

	dec_le_common_attach(&sc->sc_am7990,
	    (u_char *)((struct ioasic_softc *)parent)->sc_base
	    + IOASIC_SLOT_2_START);

	ioasic_intr_establish(parent, d->iada_cookie, IPL_NET,
	    am7990_intr, sc, self->dv_xname);
	return;

 bad:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
	bus_dmamem_free(dmat, &seg, rseg);
}

/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code doesn't worry about
 * doing an extra byte.
 */
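/*
 * Sketch of the gap2 layout (host byte offsets), derived from the
 * comment above:
 *
 *	offset:    0   1   2   3   4   5   6   7  ...
 *	contents: d0  d1  pad pad d2  d3  pad pad ...
 *
 * LANCE buffer byte `boff' thus lives in the u_int16_t at host word
 * index `boff' (each 2-byte data word is 4 host bytes, i.e. 2 words,
 * apart), which is why the loops below advance bptr by 2 per 2 bytes
 * copied.
 */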

void
le_ioasic_copytobuf_gap2(struct lance_softc *sc, void *fromv,
    int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	volatile u_int16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (u_int16_t)*from;
}

void
le_ioasic_copyfrombuf_gap2(struct lance_softc *sc, void *tov,
    int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	volatile u_int16_t *bptr;
	u_int16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */
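/*
 * Sketch of the gap16 layout (host byte offsets), derived from the
 * comment above:
 *
 *	host bytes  0-15:	LANCE buffer bytes  0-15
 *	host bytes 16-31:	pad
 *	host bytes 32-47:	LANCE buffer bytes 16-31
 *	...
 *
 * Hence `bptr = buf + ((boff << 1) & ~0x1f)' in the functions below:
 * doubling the LANCE offset and rounding down to a 32-byte chunk
 * locates the chunk, `boff & 0xf' is the offset into its data half,
 * and each 16 data bytes copied advance bptr by 32.
 */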

void
le_ioasic_copytobuf_gap16(struct lance_softc *sc, void *fromv,
    int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
		switch ((u_long)from & (sizeof(u_int32_t) - 1)) {
		case 2:
			/* Ethernet headers make this the dominant case. */
			do {
				u_int32_t *dst = (u_int32_t *)bptr;
				u_int16_t t0;
				u_int32_t t1, t2, t3, t4;

				/* read from odd-16-bit-aligned, cached src */
				t0 = *(u_int16_t *)from;
				t1 = *(u_int32_t *)(from + 2);
				t2 = *(u_int32_t *)(from + 6);
				t3 = *(u_int32_t *)(from + 10);
				t4 = *(u_int16_t *)(from + 14);

				/* DMA buffer is uncached on mips */
				dst[0] = t0 | (t1 << 16);
				dst[1] = (t1 >> 16) | (t2 << 16);
				dst[2] = (t2 >> 16) | (t3 << 16);
				dst[3] = (t3 >> 16) | (t4 << 16);

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;

		case 0:
			do {
				u_int32_t *src = (u_int32_t *)from;
				u_int32_t *dst = (u_int32_t *)bptr;
				u_int32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;

		default:
			/* Does odd-aligned case ever happen? */
			do {
				bcopy(from, bptr, 16);
				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;
		}
	/* At most 15 bytes remain; they fit within a single data area. */
	if (len)
		bcopy(from, bptr, len);
}

void
le_ioasic_copyfrombuf_gap16(struct lance_softc *sc, void *tov,
    int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so the source of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr + boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
		switch ((u_long)to & (sizeof(u_int32_t) - 1)) {
		case 2:
			/*
			 * `to' is aligned to an odd 16-bit boundary.  Ethernet
			 * headers make this the dominant case (98% or more).
			 */
			do {
				u_int32_t *src = (u_int32_t *)bptr;
				u_int32_t t0, t1, t2, t3;

				/* read from uncached aligned DMA buf */
				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

				/* write to odd-16-bit-word aligned dst */
				*(u_int16_t *)(to + 0) = (u_short)t0;
				*(u_int32_t *)(to + 2) = (t0 >> 16) | (t1 << 16);
				*(u_int32_t *)(to + 6) = (t1 >> 16) | (t2 << 16);
				*(u_int32_t *)(to + 10) = (t2 >> 16) | (t3 << 16);
				*(u_int16_t *)(to + 14) = (t3 >> 16);
				bptr += 32;
				to += 16;
				len -= 16;
			} while (len > 16);
			break;
		case 0:
			/* 32-bit aligned copy.  Rare. */
			do {
				u_int32_t *src = (u_int32_t *)bptr;
				u_int32_t *dst = (u_int32_t *)to;
				u_int32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;

		/* XXX Does odd-byte-aligned case ever happen? */
		default:
			do {
				bcopy(bptr, to, 16);
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;
		}
	/* At most 16 bytes remain; they fit within a single data area. */
	if (len)
		bcopy(bptr, to, len);
}

void
le_ioasic_zerobuf_gap16(struct lance_softc *sc, int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t bptr;
	int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	/*
	 * The first pass clears to the end of the current data area;
	 * later passes clear whole 16-byte data areas.
	 */
	while (len > 0) {
		bzero(bptr + boff, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}