/*	$NetBSD: if_le_ioasic.c,v 1.35 2022/05/29 10:43:46 rin Exp $	*/

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * LANCE on DEC IOCTL ASIC.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_le_ioasic.c,v 1.35 2022/05/29 10:43:46 rin Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/ic/lancereg.h>
#include <dev/ic/lancevar.h>
#include <dev/ic/am7990reg.h>
#include <dev/ic/am7990var.h>

#include <dev/tc/if_levar.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicreg.h>
#include <dev/tc/ioasicvar.h>

struct le_ioasic_softc {
	struct am7990_softc sc_am7990;	/* glue to MI code */
	struct lereg1 *sc_r1;		/* LANCE registers */
	/* XXX must match with le_softc of if_levar.h XXX */

	bus_dma_tag_t sc_dmat;		/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dmamap */
};

static int  le_ioasic_match(device_t, cfdata_t, void *);
static void le_ioasic_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(le_ioasic, sizeof(struct le_ioasic_softc),
    le_ioasic_match, le_ioasic_attach, NULL, NULL);

static void le_ioasic_copytobuf_gap2(struct lance_softc *, void *, int, int);
static void le_ioasic_copyfrombuf_gap2(struct lance_softc *, void *, int, int);
static void le_ioasic_copytobuf_gap16(struct lance_softc *, void *, int, int);
static void le_ioasic_copyfrombuf_gap16(struct lance_softc *, void *,
    int, int);
static void le_ioasic_zerobuf_gap16(struct lance_softc *, int, int);

static int
le_ioasic_match(device_t parent, cfdata_t cf, void *aux)
{
	struct ioasicdev_attach_args *d = aux;

	if (strncmp("PMAD-BA ", d->iada_modname, TC_ROM_LLEN) != 0)
		return 0;

	return 1;
}

/* IOASIC LANCE DMA needs a 128KB chunk aligned on a 128KB boundary */
#define	LE_IOASIC_MEMSIZE	(128 * 1024)
#define	LE_IOASIC_MEMALIGN	(128 * 1024)
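/*
 * Attach glue: set up the 128KB DMA area with bus_dma
 * (bus_dmamem_alloc -> bus_dmamem_map -> bus_dmamap_create ->
 * bus_dmamap_load), point the IOASIC LANCE DMA engine at it, and
 * hand the chip to the MI am7990 code.  The bad_* error labels
 * below unwind those steps in reverse order.
 */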
static void
le_ioasic_attach(device_t parent, device_t self, void *aux)
{
	struct le_ioasic_softc *sc = device_private(self);
	struct ioasicdev_attach_args *d = aux;
	struct lance_softc *le = &sc->sc_am7990.lsc;
	struct ioasic_softc *iosc = device_private(parent);
	bus_space_tag_t ioasic_bst;
	bus_space_handle_t ioasic_bsh;
	bus_dma_tag_t dmat;
	bus_dma_segment_t seg;
	tc_addr_t tca;
	uint32_t ssr;
	int rseg;
	void *le_iomem;

	le->sc_dev = self;
	ioasic_bst = iosc->sc_bst;
	ioasic_bsh = iosc->sc_bsh;
	dmat = sc->sc_dmat = iosc->sc_dmat;

	/*
	 * Allocate a DMA area for the chip.
	 */
	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error(": can't allocate DMA area for LANCE\n");
		return;
	}
	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
	    &le_iomem, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) {
		aprint_error(": can't map DMA area for LANCE\n");
		goto bad_free;
	}

	/*
	 * Create and load the DMA map for the DMA area.
	 */
	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		aprint_error(": can't create DMA map\n");
		goto bad_unmap;
	}
	if (bus_dmamap_load(dmat, sc->sc_dmamap,
	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error(": can't load DMA map\n");
		goto bad_destroy;
	}

	/*
	 * Bind the 128KB buffer to the IOASIC LANCE DMA engine.
	 */
	tca = IOASIC_DMA_ADDR(sc->sc_dmamap->dm_segs[0].ds_addr);
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca);
	ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);
	ssr |= IOASIC_CSR_DMAEN_LANCE;
	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);

	sc->sc_r1 = (struct lereg1 *)
	    TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
	le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);
	le->sc_copytodesc = le_ioasic_copytobuf_gap2;
	le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
	le->sc_copytobuf = le_ioasic_copytobuf_gap16;
	le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
	le->sc_zerobuf = le_ioasic_zerobuf_gap16;

	dec_le_common_attach(&sc->sc_am7990,
	    (uint8_t *)iosc->sc_base + IOASIC_SLOT_2_START);

	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_NET,
	    am7990_intr, sc);
	return;

 bad_destroy:
	bus_dmamap_destroy(dmat, sc->sc_dmamap);
 bad_unmap:
	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
 bad_free:
	bus_dmamem_free(dmat, &seg, rseg);
}

/*
 * Special memory access functions needed by ioasic-attached LANCE
 * chips.
 */

/*
 * gap2: two bytes of data followed by two bytes of pad.
 *
 * Buffers must be 4-byte aligned.  The code doesn't worry about
 * doing an extra byte.
 */
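/*
 * Worked example of the gap2 addressing used below: viewing the
 * buffer as uint16_t words, LANCE data bytes 2k and 2k+1 occupy
 * word 2k, and word 2k+1 is pad:
 *
 *	word:	  0      1      2      3
 *	bytes:	d0 d1  -- --  d2 d3  -- --	(dN = data, -- = pad)
 *
 * Hence data byte `boff' sits in word (boff & ~1): in the low byte
 * when boff is even, in the high byte when odd.  That is why an odd
 * boff is handled with a read-modify-write of word (boff - 1), and
 * why the loops advance bptr by 2 words per 2 data bytes.
 */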
static void
le_ioasic_copytobuf_gap2(struct lance_softc *sc, void *fromv, int boff, int len)
{
	volatile void *buf = sc->sc_mem;
	uint8_t *from = fromv;
	volatile uint16_t *bptr;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile uint16_t *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile uint16_t *)buf) + boff;
	while (len > 1) {
		*bptr = (from[1] << 8) | (from[0] & 0xff);
		bptr += 2;
		from += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (uint16_t)*from;
}

static void
le_ioasic_copyfrombuf_gap2(struct lance_softc *sc, void *tov, int boff, int len)
{
	volatile void *buf = sc->sc_mem;
	uint8_t *to = tov;
	volatile uint16_t *bptr;
	uint16_t tmp;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile uint16_t *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile uint16_t *)buf) + boff;
	while (len > 1) {
		tmp = *bptr;
		*to++ = tmp & 0xff;
		*to++ = (tmp >> 8) & 0xff;
		bptr += 2;
		len -= 2;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

/*
 * gap16: 16 bytes of data followed by 16 bytes of pad.
 *
 * Buffers must be 32-byte aligned.
 */

static void
le_ioasic_copytobuf_gap16(struct lance_softc *sc, void *fromv, int boff,
    int len)
{
	uint8_t *buf = sc->sc_mem;
	uint8_t *from = fromv;
	uint8_t *bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = uimin(len, 16 - boff);
		memcpy(bptr + boff, from, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of copies is now 16-byte aligned. */
	if (len >= 16)
		switch ((u_long)from & (sizeof(uint32_t) - 1)) {
		case 2:
			/* Ethernet headers make this the dominant case. */
			do {
				uint32_t *dst = (uint32_t *)bptr;
				uint16_t t0;
				uint32_t t1, t2, t3, t4;

				/* read from odd-16-bit-aligned, cached src */
				t0 = *(uint16_t *)(from + 0);
				t1 = *(uint32_t *)(from + 2);
				t2 = *(uint32_t *)(from + 6);
				t3 = *(uint32_t *)(from + 10);
				t4 = *(uint16_t *)(from + 14);

				/* DMA buffer is uncached on mips */
				dst[0] = t0 | (t1 << 16);
				dst[1] = (t1 >> 16) | (t2 << 16);
				dst[2] = (t2 >> 16) | (t3 << 16);
				dst[3] = (t3 >> 16) | (t4 << 16);

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;

		case 0:
			do {
				uint32_t *src = (uint32_t *)from;
				uint32_t *dst = (uint32_t *)bptr;
				uint32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;

		default:
			/* Does odd-aligned case ever happen? */
			do {
				memcpy(bptr, from, 16);
				from += 16;
				bptr += 32;
				len -= 16;
			} while (len >= 16);
			break;
		}
	if (len)
		memcpy(bptr, from, len);
}
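/*
 * Worked example of the gap16 addressing shared by the functions
 * above and below: LANCE data byte `boff' lives at buffer offset
 * 32 * (boff >> 4) + (boff & 0xf), which is what
 * `buf + ((boff << 1) & ~0x1f)' plus `boff & 0xf' computes.  E.g.
 * boff = 20 selects the second 32-byte chunk (buffer offset 32),
 * byte 4 within it; after each 16 data bytes, `bptr += 32' skips
 * over the 16 pad bytes to the next chunk.
 */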
static void
le_ioasic_copyfrombuf_gap16(struct lance_softc *sc, void *tov, int boff,
    int len)
{
	uint8_t *buf = sc->sc_mem;
	uint8_t *to = tov;
	uint8_t *bptr;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so source of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = uimin(len, 16 - boff);
		memcpy(to, bptr + boff, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	if (len >= 16)
		switch ((u_long)to & (sizeof(uint32_t) - 1)) {
		case 2:
			/*
			 * to is aligned to an odd 16-bit boundary.  Ethernet
			 * headers make this the dominant case (98% or more).
			 */
			do {
				uint32_t *src = (uint32_t *)bptr;
				uint32_t t0, t1, t2, t3;

				/* read from uncached aligned DMA buf */
				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

				/* write to odd-16-bit-word aligned dst */
				*(uint16_t *)(to + 0) = (uint16_t)t0;
				*(uint32_t *)(to + 2) = (t0 >> 16) | (t1 << 16);
				*(uint32_t *)(to + 6) = (t1 >> 16) | (t2 << 16);
				*(uint32_t *)(to + 10) = (t2 >> 16) | (t3 << 16);
				*(uint16_t *)(to + 14) = (t3 >> 16);
				bptr += 32;
				to += 16;
				len -= 16;
			} while (len > 16);
			break;

		case 0:
			/* 32-bit aligned copy.  Rare. */
			do {
				uint32_t *src = (uint32_t *)bptr;
				uint32_t *dst = (uint32_t *)to;
				uint32_t t0, t1, t2, t3;

				t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
				dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;

		default:
			/* XXX Does odd-byte-aligned case ever happen? */
			do {
				memcpy(to, bptr, 16);
				to += 16;
				bptr += 32;
				len -= 16;
			} while (len > 16);
			break;
		}
	if (len)
		memcpy(to, bptr, len);
}

static void
le_ioasic_zerobuf_gap16(struct lance_softc *sc, int boff, int len)
{
	uint8_t *buf = sc->sc_mem;
	uint8_t *bptr;
	int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = uimin(len, 16 - boff);
	while (len > 0) {
		memset(bptr + boff, 0, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = uimin(len, 16);
	}
}
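/*
 * Example trace of le_ioasic_zerobuf_gap16() for boff = 5, len = 40:
 * the first pass zeroes uimin(40, 16 - 5) = 11 bytes at bptr + 5,
 * the second zeroes 16 bytes at the start of the next 32-byte chunk,
 * and the third zeroes the remaining 13 bytes (11 + 16 + 13 = 40).
 */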