xref: /openbsd-src/sys/dev/tc/if_le_ioasic.c (revision db3296cf5c1dd9058ceecc3a29fe4aaa0bd26000)
1 /*	$OpenBSD: if_le_ioasic.c,v 1.11 2002/05/02 22:56:06 miod Exp $	*/
2 /*	$NetBSD: if_le_ioasic.c,v 1.18 2001/11/13 06:26:10 lukem Exp $	*/
3 
4 /*
5  * Copyright (c) 1996 Carnegie-Mellon University.
6  * All rights reserved.
7  *
8  * Author: Chris G. Demetriou
9  *
10  * Permission to use, copy, modify and distribute this software and
11  * its documentation is hereby granted, provided that both the copyright
12  * notice and this permission notice appear in all copies of the
13  * software, derivative works or modified versions, and any portions
14  * thereof, and that both notices appear in supporting documentation.
15  *
16  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
17  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
18  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
19  *
20  * Carnegie Mellon requests users of this software to return to
21  *
22  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
23  *  School of Computer Science
24  *  Carnegie Mellon University
25  *  Pittsburgh PA 15213-3890
26  *
27  * any improvements or extensions that they make and grant Carnegie the
28  * rights to redistribute these changes.
29  */
30 
31 /*
32  * LANCE on DEC IOCTL ASIC.
33  */
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/mbuf.h>
38 #include <sys/syslog.h>
39 #include <sys/socket.h>
40 #include <sys/device.h>
41 
42 #include <net/if.h>
43 #include <net/if_media.h>
44 
45 #ifdef INET
46 #include <netinet/in.h>
47 #include <netinet/if_ether.h>
48 #endif
49 
50 #include <dev/ic/am7990reg.h>
51 #include <dev/ic/am7990var.h>
52 
53 #include <dev/tc/if_levar.h>
54 #include <dev/tc/tcvar.h>
55 #include <dev/tc/ioasicreg.h>
56 #include <dev/tc/ioasicvar.h>
57 
/*
 * Software state for one IOASIC-attached LANCE.  The first two members
 * must mirror struct le_softc in if_levar.h (see XXX below) so shared
 * code can treat either layout alike; the bus_dma members that follow
 * are IOASIC-specific and are set up in le_ioasic_attach().
 */
struct le_ioasic_softc {
	struct	am7990_softc sc_am7990;	/* glue to MI code */
	struct	lereg1 *sc_r1;		/* LANCE registers */
	/* XXX must match with le_softc of if_levar.h XXX */

	bus_dma_tag_t sc_dmat;		/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dmamap */
};
66 
67 int  le_ioasic_match(struct device *, void *, void *);
68 void le_ioasic_attach(struct device *, struct device *, void *);
69 
70 struct cfattach le_ioasic_ca = {
71 	sizeof(struct le_softc), le_ioasic_match, le_ioasic_attach
72 };
73 
74 void le_ioasic_copytobuf_gap2(struct am7990_softc *, void *,
75 	    int, int);
76 void le_ioasic_copyfrombuf_gap2(struct am7990_softc *, void *,
77 	    int, int);
78 void le_ioasic_copytobuf_gap16(struct am7990_softc *, void *,
79 	    int, int);
80 void le_ioasic_copyfrombuf_gap16(struct am7990_softc *, void *,
81 	    int, int);
82 void le_ioasic_zerobuf_gap16(struct am7990_softc *, int, int);
83 
84 int
85 le_ioasic_match(parent, match, aux)
86 	struct device *parent;
87 	void *match, *aux;
88 {
89 	struct ioasicdev_attach_args *d = aux;
90 
91 	if (strncmp("PMAD-BA ", d->iada_modname, TC_ROM_LLEN) != 0)
92 		return 0;
93 
94 	return 1;
95 }
96 
97 /* IOASIC LANCE DMA needs 128KB boundary aligned 128KB chunk */
98 #define	LE_IOASIC_MEMSIZE	(128*1024)
99 #define	LE_IOASIC_MEMALIGN	(128*1024)
100 
101 void
102 le_ioasic_attach(parent, self, aux)
103 	struct device *parent, *self;
104 	void *aux;
105 {
106 	struct le_ioasic_softc *sc = (void *)self;
107 	struct ioasicdev_attach_args *d = aux;
108 	struct am7990_softc *le = &sc->sc_am7990;
109 	bus_space_tag_t ioasic_bst;
110 	bus_space_handle_t ioasic_bsh;
111 	bus_dma_tag_t dmat;
112 	bus_dma_segment_t seg;
113 	tc_addr_t tca;
114 	u_int32_t ssr;
115 	int rseg;
116 	caddr_t le_iomem;
117 
118 	ioasic_bst = ((struct ioasic_softc *)parent)->sc_bst;
119 	ioasic_bsh = ((struct ioasic_softc *)parent)->sc_bsh;
120 	dmat = sc->sc_dmat = ((struct ioasic_softc *)parent)->sc_dmat;
121 	/*
122 	 * Allocate a DMA area for the chip.
123 	 */
124 	if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN,
125 	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
126 		printf("can't allocate DMA area for LANCE\n");
127 		return;
128 	}
129 	if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE,
130 	    &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
131 		printf("can't map DMA area for LANCE\n");
132 		bus_dmamem_free(dmat, &seg, rseg);
133 		return;
134 	}
135 	/*
136 	 * Create and load the DMA map for the DMA area.
137 	 */
138 	if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1,
139 	    LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
140 		printf("can't create DMA map\n");
141 		goto bad;
142 	}
143 	if (bus_dmamap_load(dmat, sc->sc_dmamap,
144 	    le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) {
145 		printf("can't load DMA map\n");
146 		goto bad;
147 	}
148 	/*
149 	 * Bind 128KB buffer with IOASIC DMA.
150 	 */
151 	tca = IOASIC_DMA_ADDR(sc->sc_dmamap->dm_segs[0].ds_addr);
152 	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca);
153 	ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR);
154 	ssr |= IOASIC_CSR_DMAEN_LANCE;
155 	bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr);
156 
157 	sc->sc_r1 = (struct lereg1 *)
158 		TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr));
159 	le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem);
160 	le->sc_copytodesc = le_ioasic_copytobuf_gap2;
161 	le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2;
162 	le->sc_copytobuf = le_ioasic_copytobuf_gap16;
163 	le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16;
164 	le->sc_zerobuf = le_ioasic_zerobuf_gap16;
165 
166 	dec_le_common_attach(&sc->sc_am7990,
167 	    (u_char *)((struct ioasic_softc *)parent)->sc_base
168 	        + IOASIC_SLOT_2_START);
169 
170 	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_NET,
171 	    am7990_intr, sc);
172 	return;
173 
174  bad:
175 	bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE);
176 	bus_dmamem_free(dmat, &seg, rseg);
177 }
178 
179 /*
180  * Special memory access functions needed by ioasic-attached LANCE
181  * chips.
182  */
183 
184 /*
185  * gap2: two bytes of data followed by two bytes of pad.
186  *
187  * Buffers must be 4-byte aligned.  The code doesn't worry about
188  * doing an extra byte.
189  */
190 
191 void
192 le_ioasic_copytobuf_gap2(sc, fromv, boff, len)
193 	struct am7990_softc *sc;
194 	void *fromv;
195 	int boff;
196 	int len;
197 {
198 	volatile caddr_t buf = sc->sc_mem;
199 	caddr_t from = fromv;
200 	volatile u_int16_t *bptr;
201 
202 	if (boff & 0x1) {
203 		/* handle unaligned first byte */
204 		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
205 		*bptr = (*from++ << 8) | (*bptr & 0xff);
206 		bptr += 2;
207 		len--;
208 	} else
209 		bptr = ((volatile u_int16_t *)buf) + boff;
210 	while (len > 1) {
211 		*bptr = (from[1] << 8) | (from[0] & 0xff);
212 		bptr += 2;
213 		from += 2;
214 		len -= 2;
215 	}
216 	if (len == 1)
217 		*bptr = (u_int16_t)*from;
218 }
219 
220 void
221 le_ioasic_copyfrombuf_gap2(sc, tov, boff, len)
222 	struct am7990_softc *sc;
223 	void *tov;
224 	int boff, len;
225 {
226 	volatile caddr_t buf = sc->sc_mem;
227 	caddr_t to = tov;
228 	volatile u_int16_t *bptr;
229 	u_int16_t tmp;
230 
231 	if (boff & 0x1) {
232 		/* handle unaligned first byte */
233 		bptr = ((volatile u_int16_t *)buf) + (boff - 1);
234 		*to++ = (*bptr >> 8) & 0xff;
235 		bptr += 2;
236 		len--;
237 	} else
238 		bptr = ((volatile u_int16_t *)buf) + boff;
239 	while (len > 1) {
240 		tmp = *bptr;
241 		*to++ = tmp & 0xff;
242 		*to++ = (tmp >> 8) & 0xff;
243 		bptr += 2;
244 		len -= 2;
245 	}
246 	if (len == 1)
247 		*to = *bptr & 0xff;
248 }
249 
250 /*
251  * gap16: 16 bytes of data followed by 16 bytes of pad.
252  *
253  * Buffers must be 32-byte aligned.
254  */
255 
/*
 * Copy len bytes from fromv into the gap16 data-buffer area at data
 * offset boff.  Each 32-byte chunk of the buffer holds 16 data bytes
 * followed by 16 bytes of pad, so data offset boff maps to buffer
 * offset (boff << 1) rounded down to a 32-byte chunk.  The copy is
 * specialized on the source alignment since the destination is an
 * uncached DMA buffer where wide, single-store accesses matter.
 */
void
le_ioasic_copytobuf_gap16(sc, fromv, boff, len)
	struct am7990_softc *sc;
	void *fromv;
	int boff;
	int len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t from = fromv;
	caddr_t bptr;

	/* Start of the 32-byte chunk containing data offset boff. */
	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/*
	 * Dispose of boff so destination of subsequent copies is
	 * 16-byte aligned.
	 */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		len -= xfer;
	}

	/* Destination of  copies is now 16-byte aligned. */
	if (len >= 16)
		switch ((u_long)from & (sizeof(u_int32_t) -1)) {
		case 2:
			/*  Ethernet headers make this the dominant case. */
		do {
			u_int32_t *dst = (u_int32_t*)bptr;
			u_int16_t t0;
			u_int32_t t1,  t2, t3, t4;

			/* read from odd-16-bit-aligned, cached src */
			t0 = *(u_int16_t*)from;
			t1 = *(u_int32_t*)(from+2);
			t2 = *(u_int32_t*)(from+6);
			t3 = *(u_int32_t*)(from+10);
			t4 = *(u_int16_t*)(from+14);

			/* DMA buffer is uncached on mips */
			dst[0] =         t0 |  (t1 << 16);
			dst[1] = (t1 >> 16) |  (t2 << 16);
			dst[2] = (t2 >> 16) |  (t3 << 16);
			dst[3] = (t3 >> 16) |  (t4 << 16);

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

		case 0:
		/* 32-bit-aligned source: straight word-at-a-time copy. */
		do {
			u_int32_t *src = (u_int32_t*)from;
			u_int32_t *dst = (u_int32_t*)bptr;
			u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;

			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;

		default:
		/* Does odd-aligned case ever happen? */
		do {
			bcopy(from, bptr, 16);
			from += 16;
			bptr += 32;
			len -= 16;
		} while (len >= 16);
		break;
	}
	/* Partial trailing chunk (< 16 bytes). */
	if (len)
		bcopy(from, bptr, len);
}
340 
/*
 * Copy len bytes out of the gap16 data-buffer area, starting at data
 * offset boff, into tov.  Buffer layout as in copytobuf_gap16: each
 * 32-byte chunk carries 16 data bytes followed by 16 pad bytes.  The
 * copy is specialized on the destination alignment.
 */
void
le_ioasic_copyfrombuf_gap16(sc, tov, boff, len)
	struct am7990_softc *sc;
	void *tov;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	caddr_t to = tov;
	caddr_t bptr;

	/* Start of the 32-byte chunk containing data offset boff. */
	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;

	/* Dispose of boff. source of copy is subsequently 16-byte aligned. */
	if (boff) {
		int xfer;
		xfer = min(len, 16 - boff);
		bcopy(bptr+boff, to, xfer);
		to += xfer;
		bptr += 32;
		len -= xfer;
	}
	/*
	 * Note the loops below run while len > 16 (unlike the >= 16 in
	 * copytobuf_gap16): a final full 16-byte chunk may be left over,
	 * which the trailing bcopy() below then finishes.  Both forms
	 * copy all the data.
	 */
	if (len >= 16)
	switch ((u_long)to & (sizeof(u_int32_t) -1)) {
	case 2:
		/*
		 * to is aligned to an odd 16-bit boundary.  Ethernet headers
		 * make this the dominant case (98% or more).
		 */
		do {
			u_int32_t *src = (u_int32_t*)bptr;
			u_int32_t t0, t1, t2, t3;

			/* read from uncached aligned DMA buf */
			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];

			/* write to odd-16-bit-word aligned dst */
			*(u_int16_t *) (to+0)  = (u_short)  t0;
			*(u_int32_t *) (to+2)  = (t0 >> 16) |  (t1 << 16);
			*(u_int32_t *) (to+6)  = (t1 >> 16) |  (t2 << 16);
			*(u_int32_t *) (to+10) = (t2 >> 16) |  (t3 << 16);
			*(u_int16_t *) (to+14) = (t3 >> 16);
			bptr += 32;
			to += 16;
			len -= 16;
		} while (len > 16);
		break;
	case 0:
		/* 32-bit aligned aligned copy. Rare. */
		do {
			u_int32_t *src = (u_int32_t*)bptr;
			u_int32_t *dst = (u_int32_t*)to;
			u_int32_t t0, t1, t2, t3;

			t0 = src[0]; t1 = src[1]; t2 = src[2]; t3 = src[3];
			dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len  > 16);
		break;

	/* XXX Does odd-byte-aligned case ever happen? */
	default:
		do {
			bcopy(bptr, to, 16);
			to += 16;
			bptr += 32;
			len -= 16;
		} while (len  > 16);
		break;
	}
	/* Remainder: up to one full 16-byte chunk plus any partial tail. */
	if (len)
		bcopy(bptr, to, len);
}
416 
417 void
418 le_ioasic_zerobuf_gap16(sc, boff, len)
419 	struct am7990_softc *sc;
420 	int boff, len;
421 {
422 	volatile caddr_t buf = sc->sc_mem;
423 	caddr_t bptr;
424 	int xfer;
425 
426 	bptr = buf + ((boff << 1) & ~0x1f);
427 	boff &= 0xf;
428 	xfer = min(len, 16 - boff);
429 	while (len > 0) {
430 		bzero(bptr + boff, xfer);
431 		bptr += 32;
432 		boff = 0;
433 		len -= xfer;
434 		xfer = min(len, 16);
435 	}
436 }
437