/*	$NetBSD: if_le_ioasic.c,v 1.9 1997/08/26 02:38:47 jonathan Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on DEC IOCTL ASIC.
 */

#include <sys/cdefs.h>			/* RCS ID & macro defns */
__KERNEL_RCSID(0, "$NetBSD: if_le_ioasic.c,v 1.9 1997/08/26 02:38:47 jonathan Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicvar.h>

extern caddr_t le_iomem;

int	le_ioasic_match __P((struct device *, struct cfdata *, void *));
void	le_ioasic_attach __P((struct device *, struct device *, void *));

hide void le_ioasic_copytobuf_gap2 __P((struct am7990_softc *, void *,
	    int, int));
hide void le_ioasic_copyfrombuf_gap2 __P((struct am7990_softc *, void *,
	    int, int));

hide void le_ioasic_copytobuf_gap16 __P((struct am7990_softc *, void *,
	    int, int));
hide void le_ioasic_copyfrombuf_gap16 __P((struct am7990_softc *, void *,
	    int, int));
hide void le_ioasic_zerobuf_gap16 __P((struct am7990_softc *, int, int));

struct cfattach le_ioasic_ca = {
	sizeof(struct le_softc), le_ioasic_match, le_ioasic_attach
};

int
le_ioasic_match(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct ioasicdev_attach_args *d = aux;

	if (!ioasic_submatch(match, aux))
		return (0);
	if (strncmp("lance", d->iada_modname, TC_ROM_LLEN))
		return (0);

	return (1);
}

void
le_ioasic_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct ioasicdev_attach_args *d = aux;
	register struct le_softc *lesc = (void *)self;
	register struct am7990_softc *sc = &lesc->sc_am7990;

	lesc->sc_r1 = (struct lereg1 *)
	    TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	sc->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);

	sc->sc_copytodesc = le_ioasic_copytobuf_gap2;
	sc->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	sc->sc_copytobuf = le_ioasic_copytobuf_gap16;
	sc->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	sc->sc_zerobuf = le_ioasic_zerobuf_gap16;

	ioasic_lance_dma_setup(le_iomem);	/* XXX more thought */

	dec_le_common_attach(sc, ioasic_lance_ether_address());

	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_NET,
	    am7990_intr, sc);
}

/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code doesn't worry about
 * doing an extra byte.
 */

void
le_ioasic_copytobuf_gap2(sc, fromv, boff, len)
	struct am7990_softc *sc;
	void *fromv;
	int boff;
	register int len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t from = fromv;
	register volatile u_int16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (u_int16_t)*from;
}

void
le_ioasic_copyfrombuf_gap2(sc, tov, boff, len)
	struct am7990_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t to = tov;
	register volatile u_int16_t *bptr;
	register u_int16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_int16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */

void
le_ioasic_copytobuf_gap16(sc, fromv, boff, len)
	struct am7990_softc *sc;
	void *fromv;
	int boff;
	register int len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t from = fromv;
	register caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		register int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
		switch ((u_long)from & (sizeof(u_int32_t) - 1)) {
		case 2:
			/* Ethernet headers make this the dominant case. */
			do {
				register u_int32_t *dst = (u_int32_t *)bptr;
				register u_int16_t t0;
				register u_int32_t t1, t2, t3, t4;

				/* read from odd-16-bit-aligned, cached src */
				t0 = *(u_int16_t *)from;
				t1 = *(u_int32_t *)(from + 2);
				t2 = *(u_int32_t *)(from + 6);
				t3 = *(u_int32_t *)(from + 10);
				t4 = *(u_int16_t *)(from + 14);

				/* DMA buffer is uncached on mips */
				dst[0] = t0 | (t1 << 16);
				dst[1] = (t1 >> 16) | (t2 << 16);
				dst[2] = (t2 >> 16) | (t3 << 16);
				dst[3] = (t3 >> 16) | (t4 << 16);

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;

		case 0:
			do {
				register u_int32_t *src = (u_int32_t *)from;
				register u_int32_t *dst = (u_int32_t *)bptr;
				register u_int32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;

		default:
			/* Does odd-aligned case ever happen? */
			do {
				bcopy(from, bptr, 16);
				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;
		}
	if (len)
		bcopy(from, bptr, len);
}

void
le_ioasic_copyfrombuf_gap16(sc, tov, boff, len)
	struct am7990_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t to = tov;
	register caddr_t bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/* Dispose of boff so the source of subsequent copies is 16-byte aligned. */
	if (boff) {
		register int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr + boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
		switch ((u_long)to & (sizeof(u_int32_t) - 1)) {
		case 2:
			/*
			 * to is aligned to an odd 16-bit boundary.  Ethernet
			 * headers make this the dominant case (98% or more).
			 */
			do {
				register u_int32_t *src = (u_int32_t *)bptr;
				register u_int32_t t0, t1, t2, t3;

				/* read from uncached aligned DMA buf */
				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

				/* write to odd-16-bit-word aligned dst */
				*(u_int16_t *)(to + 0) = (u_short)t0;
				*(u_int32_t *)(to + 2) = (t0 >> 16) | (t1 << 16);
				*(u_int32_t *)(to + 6) = (t1 >> 16) | (t2 << 16);
				*(u_int32_t *)(to + 10) = (t2 >> 16) | (t3 << 16);
				*(u_int16_t *)(to + 14) = (t3 >> 16);
				bptr += 32;
				to += 16;
				len -= 16;
			} while (len > 16);
			break;
		case 0:
			/* 32-bit aligned copy.  Rare. */
			do {
				register u_int32_t *src = (u_int32_t *)bptr;
				register u_int32_t *dst = (u_int32_t *)to;
				register u_int32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;

		/* XXX Does odd-byte-aligned case ever happen? */
		default:
			do {
				bcopy(bptr, to, 16);
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;
		}
	if (len)
		bcopy(bptr, to, len);
}

void
le_ioasic_zerobuf_gap16(sc, boff, len)
	struct am7990_softc *sc;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t bptr;
	register int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		bzero(bptr + boff, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}
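
/*
 * Illustrative sketch (not part of the driver build): each gap16 routine
 * above locates LANCE buffer offset `boff' in the IOASIC DMA area with
 *
 *	bptr = buf + ((boff << 1) & ~0x1f);	start of the 32-byte chunk
 *	boff &= 0xf;				offset within its 16 data bytes
 *
 * because every 16 bytes of data are followed by 16 bytes of pad.  The
 * standalone, userland-style example below (guarded out of the build)
 * just prints that mapping for a few offsets; gap16_offset() is a
 * hypothetical helper that exists only for illustration.
 */
#if 0
#include <stdio.h>

/* Map a LANCE buffer byte offset to its byte offset in the gap16 DMA area. */
static unsigned long
gap16_offset(unsigned long boff)
{
	return ((boff << 1) & ~0x1fUL) + (boff & 0xf);
}

int
main(void)
{
	unsigned long boff;

	/* Offsets 0-15 land in the first chunk, 16-31 in the second, ... */
	for (boff = 0; boff < 48; boff += 8)
		printf("boff %2lu -> DMA offset %3lu\n",
		    boff, gap16_offset(boff));
	return 0;
}
#endif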