xref: /openbsd-src/sys/arch/octeon/dev/amdcf.c (revision 0b7734b3d77bb9b21afec6f4621cae6c805dbd45)
1 /*	$OpenBSD: amdcf.c,v 1.2 2016/01/20 17:23:58 stefan Exp $	*/
2 
3 /*
4  * Copyright (c) 2007, Juniper Networks, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the author nor the names of any co-contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
26  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 /*
33  * Copyright (c) 2009 Sam Leffler, Errno Consulting
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  *
45  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
46  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
47  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
48  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
49  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
50  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
51  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
52  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
53  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
54  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
55  */
56 
57 /*
58  * Copyright (c) 2015 Paul Irofti.
59  *
60  * Permission to use, copy, modify, and distribute this software for any
61  * purpose with or without fee is hereby granted, provided that the above
62  * copyright notice and this permission notice appear in all copies.
63  *
64  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
65  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
66  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
67  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
68  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
69  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
70  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
71  */
72 
73 
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/kernel.h>
77 #include <sys/conf.h>
78 #include <sys/file.h>
79 #include <sys/stat.h>
80 #include <sys/ioctl.h>
81 #include <sys/mutex.h>
82 #include <sys/buf.h>
83 #include <sys/uio.h>
84 #include <sys/malloc.h>
85 #include <sys/device.h>
86 #include <sys/disklabel.h>
87 #include <sys/disk.h>
88 #include <sys/syslog.h>
89 #include <sys/proc.h>
90 #include <sys/vnode.h>
91 #include <sys/dkio.h>
92 
93 #include <machine/intr.h>
94 #include <machine/bus.h>
95 #include <machine/autoconf.h>
96 
97 #include <octeon/dev/iobusvar.h>
98 #include <machine/octeonreg.h>
99 #include <machine/octeonvar.h>
100 
101 
/* CFI query mode: write CFI_QRY_CMD_DATA to this address to enter query mode. */
#define CFI_QRY_CMD_ADDR	0x55
#define CFI_QRY_CMD_DATA	0x98

/* Query-table offsets of typical/maximum program and erase timeouts (log2 us/ms). */
#define CFI_QRY_TTO_WRITE	0x1f
#define CFI_QRY_TTO_ERASE	0x21
#define CFI_QRY_MTO_WRITE	0x23
#define CFI_QRY_MTO_ERASE	0x25

/* Query-table offsets of device size (log2 bytes) and erase-region info. */
#define CFI_QRY_SIZE		0x27
#define	CFI_QRY_NREGIONS	0x2c
#define CFI_QRY_REGION0 	0x31
#define CFI_QRY_REGION(x)	(CFI_QRY_REGION0 + (x) * 4)

/* Basic command set: return to read-array mode. */
#define CFI_BCS_READ_ARRAY	0xff

#define CFI_DISK_SECSIZE	512
#define CFI_DISK_MAXIOSIZE	65536

/* Size of the bus_space mapping covering the flash array. */
#define AMDCF_MAP_SIZE		0x02000000

/* AMD/Fujitsu command set opcodes. */
#define CFI_AMD_BLOCK_ERASE	0x30
#define CFI_AMD_UNLOCK		0xaa
#define CFI_AMD_UNLOCK_ACK	0x55
#define CFI_AMD_PROGRAM		0xa0
#define CFI_AMD_RESET		0xf0

/* Magic unlock-cycle addresses for the AMD command set. */
#define AMD_ADDR_START		0x555
#define AMD_ADDR_ACK		0x2aa

/* Writes below this offset are refused to protect the bootloader. */
#define BOOTLOADER_ADDR		0xa0000

/* One CFI erase region: a run of equally-sized erase blocks. */
struct cfi_region {
	u_int r_blocks;		/* number of blocks in this region */
	u_int r_blksz;		/* size of each block, in bytes */
};
137 
/* Per-device state for one AMD/Fujitsu CFI flash disk. */
struct amdcf_softc {
	/* General disk infos */
	struct device sc_dev;
	struct disk sc_dk;
	struct bufq sc_bufq;
	struct buf *sc_bp;	/* buffer currently being transferred */

	int sc_flags;
#define AMDCF_LOADED	0x10	/* disklabel has been read */

	struct iobus_attach_args *sc_io;
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;

	size_t sc_size;		/* Disk size in bytes */
	u_int sc_regions;	/* Erase regions. */
	struct cfi_region *sc_region;	/* Array of region info. */

	u_int sc_width;		/* device width in bytes */
	u_int sc_shift;		/* address shift between consecutive bytes */
	u_int sc_mask;		/* byte-lane mask used to build commands */

	/* Timeouts read from the CFI query table (see amdcf_attach). */
	u_int sc_erase_timeout;
	u_int sc_erase_max_timeout;
	u_int sc_write_timeout;
	u_int sc_write_max_timeout;
	u_int sc_rstcmd;	/* command that returns chip to read-array mode */

	/* Shadow buffer for the erase block currently being rewritten. */
	u_char *sc_wrbuf;
	u_int sc_wrbufsz;	/* size of sc_wrbuf (one erase block) */
	u_int sc_wrofs;		/* flash offset of the shadowed block */
	u_int sc_writing;	/* nonzero while a block write is in progress */
};
171 
int	amdcf_match(struct device *, void *, void *);
void	amdcf_attach(struct device *, struct device *, void *);
int	amdcf_detach(struct device *, int);

/* Autoconf glue. */
struct cfattach amdcf_ca = {
	sizeof(struct amdcf_softc), amdcf_match, amdcf_attach, amdcf_detach
};

struct cfdriver amdcf_cd = {
	NULL, "amdcf", DV_DISK
};

/* Generate the block/character device entry point prototypes. */
cdev_decl(amdcf);
bdev_decl(amdcf);

/* Look up a softc by unit number; disk_lookup() takes a device reference. */
#define amdcflookup(unit) (struct amdcf_softc *)disk_lookup(&amdcf_cd, (unit))
int amdcfgetdisklabel(dev_t, struct amdcf_softc *, struct disklabel *, int);

/* I/O queue processing. */
void amdcfstart(void *);
void _amdcfstart(struct amdcf_softc *, struct buf *);
void amdcfdone(void *);

void amdcf_disk_read(struct amdcf_softc *, struct buf *, off_t);
void amdcf_disk_write(struct amdcf_softc *, struct buf *, off_t);

/* Block-level write machinery (erase block shadowing). */
int cfi_block_start(struct amdcf_softc *, u_int);
int cfi_write_block(struct amdcf_softc *);
int cfi_erase_block(struct amdcf_softc *, u_int);
int cfi_block_finish(struct amdcf_softc *);

/* Low-level CFI command helpers. */
void cfi_array_write(struct amdcf_softc *sc, u_int, u_int, u_int);
void cfi_amd_write(struct amdcf_softc *, u_int, u_int, u_int);

uint8_t cfi_read_qry(struct amdcf_softc *, uint64_t);
uint8_t cfi_read(struct amdcf_softc *, bus_size_t, bus_size_t);
void cfi_write(struct amdcf_softc *, bus_size_t, bus_size_t, uint8_t);
int cfi_wait_ready(struct amdcf_softc *, u_int, u_int, u_int);
int cfi_make_cmd(uint8_t, u_int);
210 
211 int
212 amdcf_match(struct device *parent, void *match, void *aux)
213 {
214 	struct mainbus_attach_args *maa = aux;
215 	struct cfdata *cf = match;
216 
217 	if (strcmp(maa->maa_name, cf->cf_driver->cd_name) != 0)
218 		return 0;
219 
220 	/* Only for DSR machines */
221 	if (octeon_boot_info->board_type != BOARD_TYPE_DSR_500)
222 		return 0;
223 
224 	return 1;
225 }
226 
227 void
228 amdcf_attach(struct device *parent, struct device *self, void *aux)
229 {
230 	struct amdcf_softc *sc = (void *)self;
231 	u_int blksz, blocks, r;
232 
233 	sc->sc_io = aux;
234 	sc->sc_iot = sc->sc_io->aa_bust;
235 
236 	if (bus_space_map(sc->sc_iot, OCTEON_AMDCF_BASE, AMDCF_MAP_SIZE, 0,
237 	    &sc->sc_ioh)) {
238 		printf(": can't map registers");
239 	}
240 
241 	/* should be detected in the generic driver */
242 	sc->sc_width = 1;
243 	sc->sc_shift = 2;
244 	sc->sc_mask = 0x000000ff;
245 	sc->sc_rstcmd = CFI_AMD_RESET;
246 
247 	/* Initialize the Query Database from the CF */
248 	cfi_array_write(sc, 0, 0, sc->sc_rstcmd);
249 	cfi_write(sc, 0, CFI_QRY_CMD_ADDR, CFI_QRY_CMD_DATA);
250 
251 	/* Get time-out values for erase and write. */
252 	sc->sc_write_timeout = 1 << cfi_read(sc, 0, CFI_QRY_TTO_WRITE);
253 	sc->sc_erase_timeout = 1 << cfi_read(sc, 0, CFI_QRY_TTO_ERASE);
254 	sc->sc_write_max_timeout = 1 << cfi_read(sc, 0, CFI_QRY_MTO_WRITE);
255 	sc->sc_erase_max_timeout = 1 << cfi_read(sc, 0, CFI_QRY_MTO_ERASE);
256 
257 	/* Get the device size. */
258 	sc->sc_size = 1U << cfi_read(sc, 0, CFI_QRY_SIZE);
259 	printf(": AMD/Fujitsu %zu bytes\n", sc->sc_size);
260 
261 	/* Get erase regions. */
262 	sc->sc_regions = cfi_read(sc, 0, CFI_QRY_NREGIONS);
263 	sc->sc_region = malloc(sc->sc_regions *
264 	    sizeof(struct cfi_region), M_TEMP, M_WAITOK | M_ZERO);
265 
266 	for (r = 0; r < sc->sc_regions; r++) {
267 		blocks = cfi_read(sc, 0, CFI_QRY_REGION(r)) |
268 		    (cfi_read(sc, 0, CFI_QRY_REGION(r) + 1) << 8);
269 		sc->sc_region[r].r_blocks = blocks + 1;
270 
271 		blksz = cfi_read(sc, 0, CFI_QRY_REGION(r) + 2) |
272 		    (cfi_read(sc, 0, CFI_QRY_REGION(r) + 3) << 8);
273 		sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
274 		    blksz * 256;
275 	}
276 
277 	/* Reset the device to the default state */
278 	cfi_array_write(sc, 0, 0, sc->sc_rstcmd);
279 
280 	/*
281 	 * Initialize disk structures.
282 	 */
283 	sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
284 	bufq_init(&sc->sc_bufq, BUFQ_DEFAULT);
285 
286 	/* Attach disk. */
287 	disk_attach(&sc->sc_dev, &sc->sc_dk);
288 
289 }
290 
/*
 * Autoconf detach: drain and destroy the buffer queue, revoke open
 * vnodes referencing the disk, and detach the disk structures.
 */
int
amdcf_detach(struct device *self, int flags)
{
	struct amdcf_softc *sc = (struct amdcf_softc *)self;

	/* Complete any queued buffers before tearing the queue down. */
	bufq_drain(&sc->sc_bufq);

	/* Invalidate open vnodes referencing this unit. */
	disk_gone(amdcfopen, self->dv_unit);

	/* Detach disk. */
	bufq_destroy(&sc->sc_bufq);
	disk_detach(&sc->sc_dk);

	return 0;
}
306 
307 
/*
 * Open the device: look up the unit, read the disklabel on the first
 * open, and record the partition in the disk's open mask.
 * Returns 0 on success or an errno; the device reference taken by
 * amdcflookup() is dropped on every path.
 */
int
amdcfopen(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct amdcf_softc *sc;
	int unit, part;
	int error;

	unit = DISKUNIT(dev);
	sc = amdcflookup(unit);		/* takes a device reference */
	if (sc == NULL)
		return ENXIO;

	/*
	 * If this is the first open of this device, add a reference
	 * to the adapter.
	 */
	if ((error = disk_lock(&sc->sc_dk)) != 0)
		goto out1;

	if (sc->sc_dk.dk_openmask != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens.
		 */
		if ((sc->sc_flags & AMDCF_LOADED) == 0) {
			error = EIO;
			goto out;
		}
	} else {
		if ((sc->sc_flags & AMDCF_LOADED) == 0) {
			/*
			 * NOTE(review): AMDCF_LOADED is set before the
			 * label read; a failed read leaves it set.
			 */
			sc->sc_flags |= AMDCF_LOADED;

			/* Load the partition info if not already loaded. */
			if (amdcfgetdisklabel(dev, sc,
			    sc->sc_dk.dk_label, 0) == EIO) {
				error = EIO;
				goto out;
			}
		}
	}

	part = DISKPART(dev);

	/* Mark the partition open; fails if it doesn't exist. */
	if ((error = disk_openpart(&sc->sc_dk, part, fmt, 1)) != 0)
		goto out;

	disk_unlock(&sc->sc_dk);
	device_unref(&sc->sc_dev);
	return 0;

out:
	disk_unlock(&sc->sc_dk);
out1:
	device_unref(&sc->sc_dev);
	return error;
}
364 
365 /*
366  * Load the label information on the named device
367  */
368 int
369 amdcfgetdisklabel(dev_t dev, struct amdcf_softc *sc, struct disklabel *lp,
370     int spoofonly)
371 {
372 	memset(lp, 0, sizeof(struct disklabel));
373 
374 	lp->d_secsize = DEV_BSIZE;
375 	lp->d_nsectors = 1;	/* bogus */
376 	lp->d_ntracks = 1;	/* bogus */
377 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
378 	lp->d_ncylinders = sc->sc_size / lp->d_secpercyl;
379 
380 	strlcpy(lp->d_typename, "amdcf device", sizeof(lp->d_typename));
381 	lp->d_type = DTYPE_SCSI;	/* bogus type, can be anything */
382 	strlcpy(lp->d_packname, "CFI Disk", sizeof(lp->d_packname));
383 	DL_SETDSIZE(lp, sc->sc_size / DEV_BSIZE);
384 	lp->d_flags = 0;
385 	lp->d_version = 1;
386 
387 	lp->d_magic = DISKMAGIC;
388 	lp->d_magic2 = DISKMAGIC;
389 	lp->d_checksum = dkcksum(lp);
390 
391 	/* Call the generic disklabel extraction routine */
392 	return readdisklabel(DISKLABELDEV(dev), amdcfstrategy, lp, spoofonly);
393 }
394 
395 int
396 amdcfclose(dev_t dev, int flag, int fmt, struct proc *p)
397 {
398 	struct amdcf_softc *sc;
399 	int part = DISKPART(dev);
400 
401 	sc = amdcflookup(DISKUNIT(dev));
402 	if (sc == NULL)
403 		return ENXIO;
404 
405 	disk_lock_nointr(&sc->sc_dk);
406 
407 	disk_closepart(&sc->sc_dk, part, fmt);
408 
409 	disk_unlock(&sc->sc_dk);
410 
411 	device_unref(&sc->sc_dev);
412 	return 0;
413 }
414 
/* Raw character-device read: hand off to physio via our strategy. */
int
amdcfread(dev_t dev, struct uio *uio, int flags)
{
	return (physio(amdcfstrategy, dev, B_READ, minphys, uio));
}
420 
/*
 * Raw character-device write.  Writes are compiled out by default;
 * without AMDCF_DISK_WRITE_ENABLE this silently succeeds as a no-op.
 */
int
amdcfwrite(dev_t dev, struct uio *uio, int flags)
{
#ifdef AMDCF_DISK_WRITE_ENABLE
	return (physio(amdcfstrategy, dev, B_WRITE, minphys, uio));
#else
	return 0;
#endif
}
430 
/*
 * Block I/O entry point: validate the request against the disklabel
 * and queue it; transfers are processed synchronously by amdcfstart().
 */
void
amdcfstrategy(struct buf *bp)
{
	struct amdcf_softc *sc;
	int s;

	sc = amdcflookup(DISKUNIT(bp->b_dev));
	if (sc == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	/* If device invalidated (e.g. media change, door open), error. */
	if ((sc->sc_flags & AMDCF_LOADED) == 0) {
		bp->b_error = EIO;
		goto bad;
	}

	/* Validate the request. */
	if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1)
		goto done;

	/* Check that the number of sectors can fit in a byte. */
	if ((bp->b_bcount / sc->sc_dk.dk_label->d_secsize) >= (1 << NBBY)) {
		bp->b_error = EINVAL;
		goto bad;
	}

	/* Queue transfer on drive, activate drive and controller if idle. */
	bufq_queue(&sc->sc_bufq, bp);
	s = splbio();
	amdcfstart(sc);
	splx(s);
	device_unref(&sc->sc_dev);
	return;

 bad:
	bp->b_flags |= B_ERROR;
	bp->b_resid = bp->b_bcount;
 done:
	/* Complete the buffer here; sc may be NULL on the ENXIO path. */
	s = splbio();
	biodone(bp);
	splx(s);
	if (sc != NULL)
		device_unref(&sc->sc_dev);
}
476 
/*
 * Ioctl entry point: disklabel get/set/reload operations only;
 * everything else returns ENOTTY.
 */
int
amdcfioctl(dev_t dev, u_long xfer, caddr_t addr, int flag, struct proc *p)
{
	struct amdcf_softc *sc;
	struct disklabel *lp;
	int error = 0;

	sc = amdcflookup(DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if ((sc->sc_flags & AMDCF_LOADED) == 0) {
		error = EIO;
		goto exit;
	}

	switch (xfer) {
	case DIOCRLDINFO:
		/* Rebuild the label from scratch and install it. */
		lp = malloc(sizeof(*lp), M_TEMP, M_WAITOK);
		amdcfgetdisklabel(dev, sc, lp, 0);
		bcopy(lp, sc->sc_dk.dk_label, sizeof(*lp));
		/* NOTE(review): size 0 is legal but sizeof(*lp) preferred. */
		free(lp, M_TEMP, 0);
		goto exit;

	case DIOCGPDINFO:
		/* Spoofed (default) label, without reading the media. */
		amdcfgetdisklabel(dev, sc, (struct disklabel *)addr, 1);
		goto exit;

	case DIOCGDINFO:
		*(struct disklabel *)addr = *(sc->sc_dk.dk_label);
		goto exit;

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sc->sc_dk.dk_label->d_partitions[DISKPART(dev)];
		goto exit;

	case DIOCWDINFO:
	case DIOCSDINFO:
		/* Setting the label requires the device open for writing. */
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			goto exit;
		}

		if ((error = disk_lock(&sc->sc_dk)) != 0)
			goto exit;

		error = setdisklabel(sc->sc_dk.dk_label,
		    (struct disklabel *)addr, sc->sc_dk.dk_openmask);
		if (error == 0) {
			/* DIOCWDINFO additionally writes it to the media. */
			if (xfer == DIOCWDINFO)
				error = writedisklabel(DISKLABELDEV(dev),
				    amdcfstrategy, sc->sc_dk.dk_label);
		}

		disk_unlock(&sc->sc_dk);
		goto exit;

#ifdef notyet
	case DIOCWFORMAT:
		if ((flag & FWRITE) == 0)
			return EBADF;
		{
		struct format_op *fop;
		struct iovec aiov;
		struct uio auio;

		fop = (struct format_op *)addr;
		aiov.iov_base = fop->df_buf;
		aiov.iov_len = fop->df_count;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = fop->df_count;
		auio.uio_segflg = UIO_USERSPACE;
		auio.uio_offset =
			fop->df_startblk * sc->sc_dk.dk_label->d_secsize;
		auio.uio_procp = p;
		error = physio(scformat, dev, B_WRITE, minphys, &auio);
		fop->df_count -= auio.uio_resid;
		fop->df_reg[0] = scc->sc_status;
		fop->df_reg[1] = scc->sc_error;
		goto exit;
		}
#endif

	default:
		error = ENOTTY;
		goto exit;
	}

#ifdef DIAGNOSTIC
	panic("amdcfioctl: impossible");
#endif

 exit:
	device_unref(&sc->sc_dev);
	return error;
}
576 
/*
 * Dump core after a system crash.  Not supported on this device.
 */
int
amdcfdump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
	return ENXIO;
}
585 
/*
 * Return the size of the given partition in DEV_BSIZE blocks,
 * or -1 on error.  Temporarily opens the partition if it is not
 * already open.
 */
daddr_t
amdcfsize(dev_t dev)
{
	struct amdcf_softc *sc;
	struct disklabel *lp;
	int part, omask;
	daddr_t size;

	sc = amdcflookup(DISKUNIT(dev));
	if (sc == NULL)
		return (-1);

	part = DISKPART(dev);
	omask = sc->sc_dk.dk_openmask & (1 << part);

	/* Open the partition ourselves if nobody has it open. */
	if (omask == 0 && amdcfopen(dev, 0, S_IFBLK, NULL) != 0) {
		size = -1;
		goto exit;
	}

	lp = sc->sc_dk.dk_label;
	size = DL_SECTOBLK(lp, DL_GETPSIZE(&lp->d_partitions[part]));
	/* Undo our temporary open. */
	if (omask == 0 && amdcfclose(dev, 0, S_IFBLK, NULL) != 0)
		size = -1;

 exit:
	device_unref(&sc->sc_dev);
	return size;
}
615 
616 
617 /*
618  * Queue a drive for I/O.
619  */
620 void
621 amdcfstart(void *arg)
622 {
623 	struct amdcf_softc *sc = arg;
624 	struct buf *bp;
625 
626 	while ((bp = bufq_dequeue(&sc->sc_bufq)) != NULL) {
627 		/* Transfer this buffer now. */
628 		_amdcfstart(sc, bp);
629 	}
630 }
631 
632 void
633 _amdcfstart(struct amdcf_softc *sc, struct buf *bp)
634 {
635 	off_t off;
636 	struct partition *p;
637 
638 	sc->sc_bp = bp;
639 
640 	/* Fetch buffer's read/write offset */
641 	p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
642 	off = DL_GETPOFFSET(p) * sc->sc_dk.dk_label->d_secsize +
643 	    (u_int64_t)bp->b_blkno * DEV_BSIZE;
644 	if (off > sc->sc_size) {
645 		bp->b_flags |= B_ERROR;
646 		bp->b_error = EIO;
647 		return;
648 	}
649 
650 	/* Instrumentation. */
651 	disk_busy(&sc->sc_dk);
652 
653 	if (bp->b_flags & B_READ)
654 		amdcf_disk_read(sc, bp, off);
655 #ifdef AMDCF_DISK_WRITE_ENABLE
656 	else
657 		amdcf_disk_write(sc, bp, off);
658 #endif
659 
660 	amdcfdone(sc);
661 }
662 
663 void
664 amdcfdone(void *arg)
665 {
666 	struct amdcf_softc *sc = arg;
667 	struct buf *bp = sc->sc_bp;
668 
669 	if (bp->b_error == 0)
670 		bp->b_resid = 0;
671 	else
672 		bp->b_flags |= B_ERROR;
673 
674 	disk_unbusy(&sc->sc_dk, (bp->b_bcount - bp->b_resid),
675 	    (bp->b_flags & B_READ));
676 	biodone(bp);
677 }
678 
679 void
680 amdcf_disk_read(struct amdcf_softc *sc, struct buf *bp, off_t off)
681 {
682 	long resid;
683 
684 	if (sc->sc_writing) {
685 		bp->b_error = cfi_block_finish(sc);
686 		if (bp->b_error) {
687 			bp->b_flags |= B_ERROR;
688 			return;
689 		}
690 	}
691 
692 	resid = bp->b_bcount;
693 	uint8_t *dp = (uint8_t *)bp->b_data;
694 	while (resid > 0 && off < sc->sc_size) {
695 		*dp++ = cfi_read(sc, off, 0);
696 		off += 1, resid -= 1;
697 	}
698 	bp->b_resid = resid;
699 }
700 
701 void
702 amdcf_disk_write(struct amdcf_softc *sc, struct buf *bp, off_t off)
703 {
704 	long resid;
705 	u_int top;
706 
707 	resid = bp->b_bcount;
708 	while (resid > 0) {
709 		/*
710 		 * Finish the current block if we're about to write
711 		 * to a different block.
712 		 */
713 		if (sc->sc_writing) {
714 			top = sc->sc_wrofs + sc->sc_wrbufsz;
715 			if (off < sc->sc_wrofs || off >= top)
716 				cfi_block_finish(sc);
717 		}
718 
719 		/* Start writing to a (new) block if applicable. */
720 		if (!sc->sc_writing) {
721 			bp->b_error = cfi_block_start(sc, off);
722 			if (bp->b_error) {
723 				bp->b_flags |= B_ERROR;
724 				return;
725 			}
726 		}
727 
728 		top = sc->sc_wrofs + sc->sc_wrbufsz;
729 		bcopy(bp->b_data,
730 		    sc->sc_wrbuf + off - sc->sc_wrofs,
731 		    MIN(top - off, resid));
732 		resid -= MIN(top - off, resid);
733 	}
734 	bp->b_resid = resid;
735 }
736 
737 /*
738  * Begin writing into a new block/sector.  We read the sector into
739  * memory and keep updating that, until we move into another sector
740  * or the process stops writing. At that time we write the whole
741  * sector to flash (see cfi_block_finish).
742  */
743 int
744 cfi_block_start(struct amdcf_softc *sc, u_int ofs)
745 {
746 	u_int rofs, rsz;
747 	int r;
748 	uint8_t *ptr;
749 
750 	rofs = 0;
751 	for (r = 0; r < sc->sc_regions; r++) {
752 		rsz = sc->sc_region[r].r_blocks * sc->sc_region[r].r_blksz;
753 		if (ofs < rofs + rsz)
754 			break;
755 		rofs += rsz;
756 	}
757 	if (r == sc->sc_regions)
758 		return (EFAULT);
759 
760 	sc->sc_wrbufsz = sc->sc_region[r].r_blksz;
761 	sc->sc_wrbuf = malloc(sc->sc_wrbufsz, M_TEMP, M_WAITOK);
762 	sc->sc_wrofs = ofs - (ofs - rofs) % sc->sc_wrbufsz;
763 
764 	ptr = sc->sc_wrbuf;
765 	/* Read the block from flash for byte-serving. */
766 	for (r = 0; r < sc->sc_wrbufsz; r++)
767 		*(ptr)++ = cfi_read(sc, sc->sc_wrofs + r, 0);
768 
769 	sc->sc_writing = 1;
770 	return (0);
771 }
772 
/*
 * Finish updating the current block/sector by writing the compound
 * set of changes to the flash.
 */
int
cfi_block_finish(struct amdcf_softc *sc)
{
	int error;

	/* Flush the shadow buffer to flash. */
	error = cfi_write_block(sc);
	/* free() must see the old sc_wrbufsz; clear state afterwards. */
	free(sc->sc_wrbuf, M_TEMP, sc->sc_wrbufsz);
	sc->sc_wrbuf = NULL;
	sc->sc_wrbufsz = 0;
	sc->sc_wrofs = 0;
	sc->sc_writing = 0;
	return (error);
}
790 
/*
 * Write the shadow buffer back to flash: erase the block, then
 * program it byte by byte using the AMD command sequence.
 * Returns 0 on success or an errno; always returns the chip to
 * read-array mode before returning.
 */
int
cfi_write_block(struct amdcf_softc *sc)
{
	uint8_t *ptr;
	int error, i, s;

	/* Sanity check: shadow buffer must lie inside the array. */
	if (sc->sc_wrofs > sc->sc_size)
		panic("CFI: write offset (%x) bigger "
		    "than cfi array size (%zu)\n",
		    sc->sc_wrofs, sc->sc_size);

	/*
	 * Refuse writes below the bootloader region.
	 * NOTE(review): the second condition is redundant given the
	 * first (wrofs + wrbufsz >= wrofs); confirm the intended
	 * protection window.
	 */
	if ((sc->sc_wrofs < BOOTLOADER_ADDR) ||
	    ((sc->sc_wrofs + sc->sc_wrbufsz) < BOOTLOADER_ADDR))
		return EOPNOTSUPP;

	/* Flash requires erase-before-write. */
	error = cfi_erase_block(sc, sc->sc_wrofs);
	if (error)
		goto out;

	/* Write the block. */
	ptr = sc->sc_wrbuf;

	for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {

		/*
		 * Make sure the command to start a write and the
		 * actual write happens back-to-back without any
		 * excessive delays.
		 */
		s = splbio();

		cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
			    CFI_AMD_PROGRAM);
		/* Raw data do not use cfi_array_write */
		cfi_write(sc, sc->sc_wrofs + i, 0, *(ptr)++);

		splx(s);

		/* Poll until the program operation completes. */
		error = cfi_wait_ready(sc, sc->sc_wrofs + i,
		    sc->sc_write_timeout, sc->sc_write_max_timeout);
		if (error)
			goto out;
	}

out:
	/* Return the chip to read-array mode. */
	cfi_array_write(sc, sc->sc_wrofs, 0, sc->sc_rstcmd);
	return error;
}
839 
840 int
841 cfi_erase_block(struct amdcf_softc *sc, u_int offset)
842 {
843 	int error = 0;
844 
845 	if (offset > sc->sc_size)
846 		panic("CFI: erase offset (%x) bigger "
847 		    "than cfi array size (%zu)\n",
848 		    sc->sc_wrofs, sc->sc_size);
849 
850 	/* Erase the block. */
851 	cfi_amd_write(sc, offset, 0, CFI_AMD_BLOCK_ERASE);
852 
853 	error = cfi_wait_ready(sc, offset, sc->sc_erase_timeout,
854 	    sc->sc_erase_max_timeout);
855 
856 	return error;
857 }
858 
859 
860 
861 int
862 cfi_wait_ready(struct amdcf_softc *sc, u_int ofs, u_int timeout, u_int count)
863 {
864 	int done, error;
865 	u_int st0 = 0, st = 0;
866 
867 	done = 0;
868 	error = 0;
869 
870 	if (!timeout)
871 		timeout = 100;  /* Default to 100 uS */
872 	if (!count)
873 		count = 100;    /* Max timeout is 10 mS */
874 
875 	while (!done && !error && count) {
876 		DELAY(timeout);
877 
878 		count--;
879 
880 		/*
881 		 * read sc->sc_width bytes, and check for toggle bit.
882 		 */
883 		st0 = cfi_read(sc, ofs, 0);
884 		st = cfi_read(sc, ofs, 0);
885 		done = ((st & cfi_make_cmd(0x40, sc->sc_mask)) ==
886 		    (st0 & cfi_make_cmd(0x40, sc->sc_mask))) ? 1 : 0;
887 
888 		break;
889 	}
890 	if (!done && !error)
891 		error = ETIMEDOUT;
892 	if (error)
893 		printf("\nerror=%d (st 0x%x st0 0x%x) at offset=%x\n",
894 		    error, st, st0, ofs);
895 	return error;
896 }
897 
898 /*
899  * cfi_array_write
900  * fill "bus width" word with value of var data by array mask sc->sc_mask
901  */
902 void
903 cfi_array_write(struct amdcf_softc *sc, u_int ofs, u_int addr, u_int data)
904 {
905 	data &= 0xff;
906 	cfi_write(sc, ofs, addr, cfi_make_cmd(data, sc->sc_mask));
907 }
908 
/*
 * Issue an AMD command: the fixed two-cycle unlock sequence
 * (0xaa @ 0x555, 0x55 @ 0x2aa) followed by the command itself.
 * The three writes must occur in exactly this order.
 */
void
cfi_amd_write(struct amdcf_softc *sc, u_int ofs, u_int addr, u_int data)
{
	cfi_array_write(sc, ofs, AMD_ADDR_START, CFI_AMD_UNLOCK);
	cfi_array_write(sc, ofs, AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
	cfi_array_write(sc, ofs, addr, data);
}
916 
917 
918 
/*
 * The following routines assume width=1 and shift=2 as that is
 * the case on the Octeon DSR machines.
 * If this assumption fails a new detection routine should be written
 * and called during attach.
 */
/* Read one byte at base plus the shifted CFI offset. */
uint8_t
cfi_read(struct amdcf_softc *sc, bus_size_t base, bus_size_t offset)
{
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    base | (offset * sc->sc_shift));
}
931 
/* Write one byte at base plus the shifted CFI offset. */
void
cfi_write(struct amdcf_softc *sc, bus_size_t base, bus_size_t offset,
    uint8_t val)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
	    base | (offset * sc->sc_shift), val);
}
939 
940 int
941 cfi_make_cmd(uint8_t cmd, u_int mask)
942 {
943 	int i;
944 	u_int data = 0;
945 
946 	for (i = 0; i < sizeof(int); i ++) {
947 		if (mask & (0xff << (i*8)))
948 			data |= cmd << (i*8);
949 	}
950 
951 	return data;
952 }
953