/*	$OpenBSD: if_le_ioasic.c,v 1.16 2013/09/24 20:11:05 miod Exp $	*/
/*	$NetBSD: if_le_ioasic.c,v 1.18 2001/11/13 06:26:10 lukem Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on DEC IOASIC.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>

struct le_ioasic_softc {
	struct am7990_softc sc_am7990;	/* glue to MI code */
	struct lereg1 *sc_r1;		/* LANCE registers */
	/* XXX must match with le_softc of if_levar.h XXX */

	bus_dma_tag_t sc_dmat;		/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dmamap */
};

int le_ioasic_match(struct device *, void *, void *);
void le_ioasic_attach(struct device *, struct device *, void *);

struct cfattach le_ioasic_ca = {
	sizeof(struct le_softc), le_ioasic_match, le_ioasic_attach
};

void le_ioasic_copytobuf_gap2(struct lance_softc *, void *, int, int);
void le_ioasic_copyfrombuf_gap2(struct lance_softc *, void *, int, int);
void le_ioasic_copytobuf_gap16(struct lance_softc *, void *, int, int);
void le_ioasic_copyfrombuf_gap16(struct lance_softc *, void *, int, int);
void le_ioasic_zerobuf_gap16(struct lance_softc *, int, int);

int
le_ioasic_match(struct device *parent, void *match, void *aux)
{
	struct ioasicdev_attach_args *d = aux;

	if (strncmp("PMAD-BA ", d->iada_modname, TC_ROM_LLEN) != 0)
		return 0;

	return 1;
}

/* IOASIC LANCE DMA needs a 128KB chunk aligned to a 128KB boundary. */
#define	LE_IOASIC_MEMSIZE	(128*1024)
#define	LE_IOASIC_MEMALIGN	(128*1024)

void
le_ioasic_attach(struct device *parent, struct device *self, void *aux)
{
	struct le_ioasic_softc *sc = (void *)self;
	struct ioasicdev_attach_args *d = aux;
	struct lance_softc *le = &sc->sc_am7990.lsc;
	bus_space_tag_t ioasic_bst;
	bus_space_handle_t ioasic_bsh;
	bus_dma_tag_t dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	u_int32_t ssr;
	int rseg;
	caddr_t le_iomem;
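	/* Inherit bus space handles and the DMA tag from the IOASIC parent. */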
	ioasic_bst = ((struct ioasic_softc *)parent)->sc_bst;
	ioasic_bsh = ((struct ioasic_softc *)parent)->sc_bsh;
	dmat = sc->sc_dmat = ((struct ioasic_softc *)parent)->sc_dmat;

	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("can't allocate DMA area for LANCE\n");
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		printf("can't map DMA area for LANCE\n");
		bus_dmamem_free(dmat, &seg, rseg);
		return;
	}

	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		printf("can't create DMA map\n");
		goto bad;
	}
	if (bus_dmamap_load(dmat, sc->sc_dmamap,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("can't load DMA map\n");
		goto bad;
	}

	/*
	 * Bind the 128KB buffer to the IOASIC LANCE DMA engine.
	 */
	tca = IOASIC_DMA_ADDR(sc->sc_dmamap->dm_segs[0].ds_addr);
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca);
	ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);
	ssr |= IOASIC_CSR_DMAEN_LANCE;
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);

	sc->sc_r1 = (struct lereg1 *)
	    TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);
	le->sc_copytodesc = le_ioasic_copytobuf_gap2;
	le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	le->sc_copytobuf = le_ioasic_copytobuf_gap16;
	le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	le->sc_zerobuf = le_ioasic_zerobuf_gap16;

	dec_le_common_attach(&sc->sc_am7990,
	    (u_char *)((struct ioasic_softc *)parent)->sc_base
	    + IOASIC_SLOT_2_START);

	ioasic_intr_establish(parent, d->iada_cookie, IPL_NET,
	    am7990_intr, sc, self->dv_xname);
	return;

bad:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
	bus_dmamem_free(dmat, &seg, rseg);
}

/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code doesn't worry about
 * doing an extra byte.
 */
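/*
 * Illustration of the gap2 layout: the 16-bit word holding LANCE data
 * bytes (2n, 2n+1) sits at chip buffer bytes (4n, 4n+1), followed by
 * two bytes of pad:
 *
 *	LANCE data:	d0 d1 d2 d3 d4 d5 ...
 *	chip buffer:	d0 d1 -- -- d2 d3 -- -- d4 d5 -- -- ...
 *
 * This is why the functions below treat buf as a u_int16_t array
 * indexed by boff (two chip bytes per data byte) and step bptr by 2
 * (four chip bytes) per 16-bit data word.
 */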
void
le_ioasic_copytobuf_gap2(struct lance_softc *sc, void *fromv,
    int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	volatile u_int16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (u_int16_t)*from;
}

void
le_ioasic_copyfrombuf_gap2(struct lance_softc *sc, void *tov,
    int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	volatile u_int16_t *bptr;
	u_int16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */
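/*
 * Illustration of the gap16 addressing shared by the three functions
 * below: data chunk n (LANCE offsets 16n..16n+15) occupies chip buffer
 * bytes 32n..32n+15, with bytes 32n+16..32n+31 as pad.  Hence
 *
 *	bptr = buf + ((boff << 1) & ~0x1f);	-- start of chunk boff/16
 *	boff &= 0xf;				-- offset within the chunk
 *
 * and each bptr += 32 advances to the next 16 data bytes.
 */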
void
le_ioasic_copytobuf_gap16(struct lance_softc *sc, void *fromv,
    int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so the destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
		switch ((u_long)from & (sizeof(u_int32_t) - 1)) {
		case 2:
			/* Ethernet headers make this the dominant case. */
			do {
				u_int32_t *dst = (u_int32_t *)bptr;
				u_int16_t t0;
				u_int32_t t1, t2, t3, t4;

				/* read from odd-16-bit-aligned, cached src */
				t0 = *(u_int16_t *)from;
				t1 = *(u_int32_t *)(from + 2);
				t2 = *(u_int32_t *)(from + 6);
				t3 = *(u_int32_t *)(from + 10);
				t4 = *(u_int16_t *)(from + 14);

				/* DMA buffer is uncached on mips */
				dst[0] = t0 | (t1 << 16);
				dst[1] = (t1 >> 16) | (t2 << 16);
				dst[2] = (t2 >> 16) | (t3 << 16);
				dst[3] = (t3 >> 16) | (t4 << 16);

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;

		case 0:
			do {
				u_int32_t *src = (u_int32_t *)from;
				u_int32_t *dst = (u_int32_t *)bptr;
				u_int32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;

		default:
			/* Does the odd-aligned case ever happen? */
			do {
				bcopy(from, bptr, 16);
				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;
		}
	if (len)
		bcopy(from, bptr, len);
}

void
le_ioasic_copyfrombuf_gap16(struct lance_softc *sc, void *tov,
    int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so the source of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr + boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
		switch ((u_long)to & (sizeof(u_int32_t) - 1)) {
		case 2:
			/*
			 * to is aligned to an odd 16-bit boundary.  Ethernet
			 * headers make this the dominant case (98% or more).
			 */
			do {
				u_int32_t *src = (u_int32_t *)bptr;
				u_int32_t t0, t1, t2, t3;

				/* read from uncached aligned DMA buf */
				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

				/* write to odd-16-bit-word aligned dst */
				*(u_int16_t *)(to + 0) = (u_short)t0;
				*(u_int32_t *)(to + 2) = (t0 >> 16) | (t1 << 16);
				*(u_int32_t *)(to + 6) = (t1 >> 16) | (t2 << 16);
				*(u_int32_t *)(to + 10) = (t2 >> 16) | (t3 << 16);
				*(u_int16_t *)(to + 14) = (t3 >> 16);
				bptr += 32;
				to += 16;
				len -= 16;
			} while (len > 16);
			break;
		case 0:
			/* 32-bit aligned copy.  Rare. */
			do {
				u_int32_t *src = (u_int32_t *)bptr;
				u_int32_t *dst = (u_int32_t *)to;
				u_int32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;

		default:
			/* XXX Does the odd-byte-aligned case ever happen? */
			do {
				bcopy(bptr, to, 16);
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;
		}
	if (len)
		bcopy(bptr, to, len);
}

void
le_ioasic_zerobuf_gap16(struct lance_softc *sc, int boff, int len)
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t bptr;
	int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		bzero(bptr + boff, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}