/*	$OpenBSD: amdcf.c,v 1.7 2021/01/30 14:59:13 visa Exp $	*/

/*
 * Copyright (c) 2007, Juniper Networks, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2015 Paul Irofti.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/dkio.h>

#include <machine/intr.h>
#include <machine/bus.h>
#include <machine/autoconf.h>

#include <octeon/dev/iobusvar.h>
#include <machine/octeonreg.h>
#include <machine/octeonvar.h>


#define CFI_QRY_CMD_ADDR	0x55
#define CFI_QRY_CMD_DATA	0x98

#define CFI_QRY_TTO_WRITE	0x1f
#define CFI_QRY_TTO_ERASE	0x21
#define CFI_QRY_MTO_WRITE	0x23
#define CFI_QRY_MTO_ERASE	0x25

#define CFI_QRY_SIZE		0x27
#define CFI_QRY_NREGIONS	0x2c
#define CFI_QRY_REGION0		0x31
#define CFI_QRY_REGION(x)	(CFI_QRY_REGION0 + (x) * 4)

#define CFI_BCS_READ_ARRAY	0xff

#define CFI_DISK_SECSIZE	512
#define CFI_DISK_MAXIOSIZE	65536

#define AMDCF_MAP_SIZE		0x02000000

#define CFI_AMD_BLOCK_ERASE	0x30
#define CFI_AMD_UNLOCK		0xaa
#define CFI_AMD_UNLOCK_ACK	0x55
#define CFI_AMD_PROGRAM		0xa0
#define CFI_AMD_RESET		0xf0

#define AMD_ADDR_START		0x555
#define AMD_ADDR_ACK		0x2aa

#define BOOTLOADER_ADDR		0xa0000

struct cfi_region {
	u_int	r_blocks;
	u_int	r_blksz;
};

struct amdcf_softc {
	/* General disk infos */
	struct device sc_dev;
	struct disk sc_dk;
	struct bufq sc_bufq;
	struct buf *sc_bp;

	int sc_flags;
#define AMDCF_LOADED	0x10

	struct iobus_attach_args *sc_io;
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;

	size_t sc_size;			/* Disk size in bytes */
	u_int sc_regions;		/* Erase regions. */
	struct cfi_region *sc_region;	/* Array of region info. */
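	/*
	 * Each erase region advertises r_blocks erase blocks of r_blksz
	 * bytes; laid end to end the regions cover the whole array, which
	 * is how cfi_block_start() locates the block that contains a given
	 * byte offset.
	 */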

	u_int sc_width;
	u_int sc_shift;
	u_int sc_mask;

	u_int sc_erase_timeout;
	u_int sc_erase_max_timeout;
	u_int sc_write_timeout;
	u_int sc_write_max_timeout;
	u_int sc_rstcmd;

	u_char *sc_wrbuf;
	u_int sc_wrbufsz;
	u_int sc_wrofs;
	u_int sc_writing;
};

int	amdcf_match(struct device *, void *, void *);
void	amdcf_attach(struct device *, struct device *, void *);
int	amdcf_detach(struct device *, int);

struct cfattach amdcf_ca = {
	sizeof(struct amdcf_softc), amdcf_match, amdcf_attach, amdcf_detach
};

struct cfdriver amdcf_cd = {
	NULL, "amdcf", DV_DISK
};

cdev_decl(amdcf);
bdev_decl(amdcf);

#define amdcflookup(unit) (struct amdcf_softc *)disk_lookup(&amdcf_cd, (unit))
int	amdcfgetdisklabel(dev_t, struct amdcf_softc *, struct disklabel *, int);

void	amdcfstart(void *);
void	_amdcfstart(struct amdcf_softc *, struct buf *);
void	amdcfdone(void *);

void	amdcf_disk_read(struct amdcf_softc *, struct buf *, off_t);
void	amdcf_disk_write(struct amdcf_softc *, struct buf *, off_t);

int	cfi_block_start(struct amdcf_softc *, u_int);
int	cfi_write_block(struct amdcf_softc *);
int	cfi_erase_block(struct amdcf_softc *, u_int);
int	cfi_block_finish(struct amdcf_softc *);

void	cfi_array_write(struct amdcf_softc *sc, u_int, u_int, u_int);
void	cfi_amd_write(struct amdcf_softc *, u_int, u_int, u_int);

uint8_t	cfi_read_qry(struct amdcf_softc *, uint64_t);
uint8_t	cfi_read(struct amdcf_softc *, bus_size_t, bus_size_t);
void	cfi_write(struct amdcf_softc *, bus_size_t, bus_size_t, uint8_t);
int	cfi_wait_ready(struct amdcf_softc *, u_int, u_int, u_int);
int	cfi_make_cmd(uint8_t, u_int);

int
amdcf_match(struct device *parent, void *match, void *aux)
{
	struct mainbus_attach_args *maa = aux;
	struct cfdata *cf = match;

	if (strcmp(maa->maa_name, cf->cf_driver->cd_name) != 0)
		return 0;

	/* Only for DSR machines */
	if (octeon_board != BOARD_DLINK_DSR_500)
		return 0;

	return 1;
}

void
amdcf_attach(struct device *parent, struct device *self, void *aux)
{
	struct amdcf_softc *sc = (void *)self;
	u_int blksz, blocks, r;

	sc->sc_io = aux;
	sc->sc_iot = sc->sc_io->aa_bust;

	if (bus_space_map(sc->sc_iot, OCTEON_AMDCF_BASE, AMDCF_MAP_SIZE, 0,
	    &sc->sc_ioh)) {
		printf(": can't map registers");
	}

	/* should be detected in the generic driver */
	sc->sc_width = 1;
	sc->sc_shift = 2;
	sc->sc_mask = 0x000000ff;
	sc->sc_rstcmd = CFI_AMD_RESET;

	/* Initialize the Query Database from the CF */
	cfi_array_write(sc, 0, 0, sc->sc_rstcmd);
	cfi_write(sc, 0, CFI_QRY_CMD_ADDR, CFI_QRY_CMD_DATA);

	/* Get time-out values for erase and write. */
	sc->sc_write_timeout = 1 << cfi_read(sc, 0, CFI_QRY_TTO_WRITE);
	sc->sc_erase_timeout = 1 << cfi_read(sc, 0, CFI_QRY_TTO_ERASE);
	sc->sc_write_max_timeout = 1 << cfi_read(sc, 0, CFI_QRY_MTO_WRITE);
	sc->sc_erase_max_timeout = 1 << cfi_read(sc, 0, CFI_QRY_MTO_ERASE);

	/* Get the device size. */
	sc->sc_size = 1U << cfi_read(sc, 0, CFI_QRY_SIZE);
	printf(": AMD/Fujitsu %zu bytes\n", sc->sc_size);

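	/*
	 * The CFI query fields read above are log2-encoded: for example,
	 * a CFI_QRY_SIZE byte of 0x19 means a 1 << 25 = 32 MB device.
	 * The write and erase values are later used as polling delays by
	 * cfi_wait_ready().
	 */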
	/* Get erase regions. */
	sc->sc_regions = cfi_read(sc, 0, CFI_QRY_NREGIONS);
	sc->sc_region = malloc(sc->sc_regions *
	    sizeof(struct cfi_region), M_TEMP, M_WAITOK | M_ZERO);

	for (r = 0; r < sc->sc_regions; r++) {
		blocks = cfi_read(sc, 0, CFI_QRY_REGION(r)) |
		    (cfi_read(sc, 0, CFI_QRY_REGION(r) + 1) << 8);
		sc->sc_region[r].r_blocks = blocks + 1;

		blksz = cfi_read(sc, 0, CFI_QRY_REGION(r) + 2) |
		    (cfi_read(sc, 0, CFI_QRY_REGION(r) + 3) << 8);
		sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
		    blksz * 256;
	}

	/* Reset the device to the default state */
	cfi_array_write(sc, 0, 0, sc->sc_rstcmd);

	/*
	 * Initialize disk structures.
	 */
	sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
	bufq_init(&sc->sc_bufq, BUFQ_DEFAULT);

	/* Attach disk. */
	disk_attach(&sc->sc_dev, &sc->sc_dk);

}

int
amdcf_detach(struct device *self, int flags)
{
	struct amdcf_softc *sc = (struct amdcf_softc *)self;

	bufq_drain(&sc->sc_bufq);

	disk_gone(amdcfopen, self->dv_unit);

	/* Detach disk. */
	bufq_destroy(&sc->sc_bufq);
	disk_detach(&sc->sc_dk);

	return 0;
}


int
amdcfopen(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct amdcf_softc *sc;
	int unit, part;
	int error;

	unit = DISKUNIT(dev);
	sc = amdcflookup(unit);
	if (sc == NULL)
		return ENXIO;

	/*
	 * If this is the first open of this device, add a reference
	 * to the adapter.
	 */
	if ((error = disk_lock(&sc->sc_dk)) != 0)
		goto out1;

	if (sc->sc_dk.dk_openmask != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens.
		 */
		if ((sc->sc_flags & AMDCF_LOADED) == 0) {
			error = EIO;
			goto out;
		}
	} else {
		if ((sc->sc_flags & AMDCF_LOADED) == 0) {
			sc->sc_flags |= AMDCF_LOADED;

			/* Load the partition info if not already loaded. */
			if (amdcfgetdisklabel(dev, sc,
			    sc->sc_dk.dk_label, 0) == EIO) {
				error = EIO;
				goto out;
			}
		}
	}

	part = DISKPART(dev);

	if ((error = disk_openpart(&sc->sc_dk, part, fmt, 1)) != 0)
		goto out;

	disk_unlock(&sc->sc_dk);
	device_unref(&sc->sc_dev);
	return 0;

out:
	disk_unlock(&sc->sc_dk);
out1:
	device_unref(&sc->sc_dev);
	return error;
}

/*
 * Load the label information on the named device
 */
int
amdcfgetdisklabel(dev_t dev, struct amdcf_softc *sc, struct disklabel *lp,
    int spoofonly)
{
	memset(lp, 0, sizeof(struct disklabel));

	lp->d_secsize = DEV_BSIZE;
	lp->d_nsectors = 1;		/* bogus */
	lp->d_ntracks = 1;		/* bogus */
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
	lp->d_ncylinders = sc->sc_size / lp->d_secpercyl;

	strlcpy(lp->d_typename, "amdcf device", sizeof(lp->d_typename));
	lp->d_type = DTYPE_SCSI;	/* bogus type, can be anything */
	strlcpy(lp->d_packname, "CFI Disk", sizeof(lp->d_packname));
	DL_SETDSIZE(lp, sc->sc_size / DEV_BSIZE);
	lp->d_flags = 0;
	lp->d_version = 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);

	/* Call the generic disklabel extraction routine */
	return readdisklabel(DISKLABELDEV(dev), amdcfstrategy, lp, spoofonly);
}

int
amdcfclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct amdcf_softc *sc;
	int part = DISKPART(dev);

	sc = amdcflookup(DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	disk_lock_nointr(&sc->sc_dk);

	disk_closepart(&sc->sc_dk, part, fmt);

	disk_unlock(&sc->sc_dk);

	device_unref(&sc->sc_dev);
	return 0;
}

int
amdcfread(dev_t dev, struct uio *uio, int flags)
{
	return (physio(amdcfstrategy, dev, B_READ, minphys, uio));
}

int
amdcfwrite(dev_t dev, struct uio *uio, int flags)
{
#ifdef AMDCF_DISK_WRITE_ENABLE
	return (physio(amdcfstrategy, dev, B_WRITE, minphys, uio));
#else
	return 0;
#endif
}

void
amdcfstrategy(struct buf *bp)
{
	struct amdcf_softc *sc;
	int s;

	sc = amdcflookup(DISKUNIT(bp->b_dev));
	if (sc == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	/* If device invalidated (e.g. media change, door open), error. */
	if ((sc->sc_flags & AMDCF_LOADED) == 0) {
		bp->b_error = EIO;
		goto bad;
	}

	/* Validate the request. */
	if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1)
		goto done;

	/* Check that the number of sectors can fit in a byte. */
	if ((bp->b_bcount / sc->sc_dk.dk_label->d_secsize) >= (1 << NBBY)) {
		bp->b_error = EINVAL;
		goto bad;
	}

	/* Queue transfer on drive, activate drive and controller if idle. */
	bufq_queue(&sc->sc_bufq, bp);
	s = splbio();
	amdcfstart(sc);
	splx(s);
	device_unref(&sc->sc_dev);
	return;

bad:
	bp->b_flags |= B_ERROR;
	bp->b_resid = bp->b_bcount;
done:
	s = splbio();
	biodone(bp);
	splx(s);
	if (sc != NULL)
		device_unref(&sc->sc_dev);
}

int
amdcfioctl(dev_t dev, u_long xfer, caddr_t addr, int flag, struct proc *p)
{
	struct amdcf_softc *sc;
	struct disklabel *lp;
	int error = 0;

	sc = amdcflookup(DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if ((sc->sc_flags & AMDCF_LOADED) == 0) {
		error = EIO;
		goto exit;
	}

	switch (xfer) {
	case DIOCRLDINFO:
		lp = malloc(sizeof(*lp), M_TEMP, M_WAITOK);
		amdcfgetdisklabel(dev, sc, lp, 0);
		bcopy(lp, sc->sc_dk.dk_label, sizeof(*lp));
		free(lp, M_TEMP, sizeof(*lp));
		goto exit;

	case DIOCGPDINFO:
		amdcfgetdisklabel(dev, sc, (struct disklabel *)addr, 1);
		goto exit;

	case DIOCGDINFO:
		*(struct disklabel *)addr = *(sc->sc_dk.dk_label);
		goto exit;

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sc->sc_dk.dk_label->d_partitions[DISKPART(dev)];
		goto exit;

	case DIOCWDINFO:
	case DIOCSDINFO:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			goto exit;
		}

		if ((error = disk_lock(&sc->sc_dk)) != 0)
			goto exit;

		error = setdisklabel(sc->sc_dk.dk_label,
		    (struct disklabel *)addr, sc->sc_dk.dk_openmask);
		if (error == 0) {
			if (xfer == DIOCWDINFO)
				error = writedisklabel(DISKLABELDEV(dev),
				    amdcfstrategy, sc->sc_dk.dk_label);
		}

		disk_unlock(&sc->sc_dk);
		goto exit;

	default:
		error = ENOTTY;
		goto exit;
	}

#ifdef DIAGNOSTIC
	panic("amdcfioctl: impossible");
#endif

exit:
	device_unref(&sc->sc_dev);
	return error;
}

/*
 * Dump core after a system crash.
 */
int
amdcfdump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
	return ENXIO;
}

daddr_t
amdcfsize(dev_t dev)
{
	struct amdcf_softc *sc;
	struct disklabel *lp;
	int part, omask;
	daddr_t size;

	sc = amdcflookup(DISKUNIT(dev));
	if (sc == NULL)
		return (-1);

	part = DISKPART(dev);
	omask = sc->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && amdcfopen(dev, 0, S_IFBLK, NULL) != 0) {
		size = -1;
		goto exit;
	}

	lp = sc->sc_dk.dk_label;
	size = DL_SECTOBLK(lp, DL_GETPSIZE(&lp->d_partitions[part]));
	if (omask == 0 && amdcfclose(dev, 0, S_IFBLK, NULL) != 0)
		size = -1;

exit:
	device_unref(&sc->sc_dev);
	return size;
}


/*
 * Queue a drive for I/O.
 */
void
amdcfstart(void *arg)
{
	struct amdcf_softc *sc = arg;
	struct buf *bp;

	while ((bp = bufq_dequeue(&sc->sc_bufq)) != NULL) {
		/* Transfer this buffer now. */
		_amdcfstart(sc, bp);
	}
}

void
_amdcfstart(struct amdcf_softc *sc, struct buf *bp)
{
	off_t off;
	struct partition *p;

	sc->sc_bp = bp;

	/* Fetch buffer's read/write offset */
	p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
	off = DL_GETPOFFSET(p) * sc->sc_dk.dk_label->d_secsize +
	    (u_int64_t)bp->b_blkno * DEV_BSIZE;
	if (off > sc->sc_size) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		return;
	}

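	/*
	 * Worked example: with 512-byte sectors, block 32 of a partition
	 * that starts at sector 64 maps to the flash byte offset
	 * 64 * 512 + 32 * 512 = 49152.
	 */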
	/* Instrumentation. */
	disk_busy(&sc->sc_dk);

	if (bp->b_flags & B_READ)
		amdcf_disk_read(sc, bp, off);
#ifdef AMDCF_DISK_WRITE_ENABLE
	else
		amdcf_disk_write(sc, bp, off);
#endif

	amdcfdone(sc);
}

void
amdcfdone(void *arg)
{
	struct amdcf_softc *sc = arg;
	struct buf *bp = sc->sc_bp;

	if (bp->b_error == 0)
		bp->b_resid = 0;
	else
		bp->b_flags |= B_ERROR;

	disk_unbusy(&sc->sc_dk, (bp->b_bcount - bp->b_resid),
	    bp->b_blkno, (bp->b_flags & B_READ));
	biodone(bp);
}

void
amdcf_disk_read(struct amdcf_softc *sc, struct buf *bp, off_t off)
{
	long resid;

	if (sc->sc_writing) {
		bp->b_error = cfi_block_finish(sc);
		if (bp->b_error) {
			bp->b_flags |= B_ERROR;
			return;
		}
	}

	resid = bp->b_bcount;
	uint8_t *dp = (uint8_t *)bp->b_data;
	while (resid > 0 && off < sc->sc_size) {
		*dp++ = cfi_read(sc, off, 0);
		off += 1, resid -= 1;
	}
	bp->b_resid = resid;
}

void
amdcf_disk_write(struct amdcf_softc *sc, struct buf *bp, off_t off)
{
	long resid;
	u_int top;

	resid = bp->b_bcount;
	while (resid > 0) {
		/*
		 * Finish the current block if we're about to write
		 * to a different block.
		 */
		if (sc->sc_writing) {
			top = sc->sc_wrofs + sc->sc_wrbufsz;
			if (off < sc->sc_wrofs || off >= top)
				cfi_block_finish(sc);
		}

		/* Start writing to a (new) block if applicable. */
		if (!sc->sc_writing) {
			bp->b_error = cfi_block_start(sc, off);
			if (bp->b_error) {
				bp->b_flags |= B_ERROR;
				return;
			}
		}

		top = sc->sc_wrofs + sc->sc_wrbufsz;
		bcopy(bp->b_data,
		    sc->sc_wrbuf + off - sc->sc_wrofs,
		    MIN(top - off, resid));
		resid -= MIN(top - off, resid);
	}
	bp->b_resid = resid;
}

/*
 * Begin writing into a new block/sector.  We read the sector into
 * memory and keep updating that, until we move into another sector
 * or the process stops writing.  At that time we write the whole
 * sector to flash (see cfi_block_finish).
 */
int
cfi_block_start(struct amdcf_softc *sc, u_int ofs)
{
	u_int rofs, rsz;
	int r;
	uint8_t *ptr;

	rofs = 0;
	for (r = 0; r < sc->sc_regions; r++) {
		rsz = sc->sc_region[r].r_blocks * sc->sc_region[r].r_blksz;
		if (ofs < rofs + rsz)
			break;
		rofs += rsz;
	}
	if (r == sc->sc_regions)
		return (EFAULT);

	sc->sc_wrbufsz = sc->sc_region[r].r_blksz;
	sc->sc_wrbuf = malloc(sc->sc_wrbufsz, M_TEMP, M_WAITOK);
	sc->sc_wrofs = ofs - (ofs - rofs) % sc->sc_wrbufsz;

	ptr = sc->sc_wrbuf;
	/* Read the block from flash for byte-serving. */
	for (r = 0; r < sc->sc_wrbufsz; r++)
		*(ptr)++ = cfi_read(sc, sc->sc_wrofs + r, 0);

	sc->sc_writing = 1;
	return (0);
}

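/*
 * Example of the read-modify-write scheme above: a write to byte offset
 * 0x12345 in a region of 64KB blocks loads the whole block at
 * sc_wrofs = 0x10000 into sc_wrbuf.  The data stays buffered until a
 * write lands outside that block or a read comes in, at which point
 * cfi_block_finish() erases and reprograms the block.
 */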
/*
 * Finish updating the current block/sector by writing the compound
 * set of changes to the flash.
 */
int
cfi_block_finish(struct amdcf_softc *sc)
{
	int error;

	error = cfi_write_block(sc);
	free(sc->sc_wrbuf, M_TEMP, sc->sc_wrbufsz);
	sc->sc_wrbuf = NULL;
	sc->sc_wrbufsz = 0;
	sc->sc_wrofs = 0;
	sc->sc_writing = 0;
	return (error);
}

int
cfi_write_block(struct amdcf_softc *sc)
{
	uint8_t *ptr;
	int error, i, s;

	if (sc->sc_wrofs > sc->sc_size)
		panic("CFI: write offset (%x) bigger "
		    "than cfi array size (%zu)\n",
		    sc->sc_wrofs, sc->sc_size);

	if ((sc->sc_wrofs < BOOTLOADER_ADDR) ||
	    ((sc->sc_wrofs + sc->sc_wrbufsz) < BOOTLOADER_ADDR))
		return EOPNOTSUPP;

	error = cfi_erase_block(sc, sc->sc_wrofs);
	if (error)
		goto out;

	/* Write the block. */
	ptr = sc->sc_wrbuf;

	for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {

		/*
		 * Make sure the command to start a write and the
		 * actual write happen back-to-back without any
		 * excessive delays.
		 */
		s = splbio();

		cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
		    CFI_AMD_PROGRAM);
		/* Raw data, so do not use cfi_array_write. */
		cfi_write(sc, sc->sc_wrofs + i, 0, *(ptr)++);

		splx(s);

		error = cfi_wait_ready(sc, sc->sc_wrofs + i,
		    sc->sc_write_timeout, sc->sc_write_max_timeout);
		if (error)
			goto out;
	}

out:
	cfi_array_write(sc, sc->sc_wrofs, 0, sc->sc_rstcmd);
	return error;
}

int
cfi_erase_block(struct amdcf_softc *sc, u_int offset)
{
	int error = 0;

	if (offset > sc->sc_size)
		panic("CFI: erase offset (%x) bigger "
		    "than cfi array size (%zu)\n",
		    sc->sc_wrofs, sc->sc_size);

	/* Erase the block. */
	cfi_amd_write(sc, offset, 0, CFI_AMD_BLOCK_ERASE);

	error = cfi_wait_ready(sc, offset, sc->sc_erase_timeout,
	    sc->sc_erase_max_timeout);

	return error;
}



int
cfi_wait_ready(struct amdcf_softc *sc, u_int ofs, u_int timeout, u_int count)
{
	int done, error;
	u_int st0 = 0, st = 0;

	done = 0;
	error = 0;

	if (!timeout)
		timeout = 100;	/* Default to 100 uS */
	if (!count)
		count = 100;	/* Max timeout is 10 mS */

	while (!done && !error && count) {
		DELAY(timeout);

		count--;

		/*
		 * read sc->sc_width bytes, and check for toggle bit.
		 */
		st0 = cfi_read(sc, ofs, 0);
		st = cfi_read(sc, ofs, 0);
		done = ((st & cfi_make_cmd(0x40, sc->sc_mask)) ==
		    (st0 & cfi_make_cmd(0x40, sc->sc_mask))) ? 1 : 0;

		break;
	}
	if (!done && !error)
		error = ETIMEDOUT;
	if (error)
		printf("\nerror=%d (st 0x%x st0 0x%x) at offset=%x\n",
		    error, st, st0, ofs);
	return error;
}

/*
 * cfi_array_write
 * Replicate the command byte `data' across the full bus-width word
 * according to the array mask sc->sc_mask.
 */
void
cfi_array_write(struct amdcf_softc *sc, u_int ofs, u_int addr, u_int data)
{
	data &= 0xff;
	cfi_write(sc, ofs, addr, cfi_make_cmd(data, sc->sc_mask));
}

void
cfi_amd_write(struct amdcf_softc *sc, u_int ofs, u_int addr, u_int data)
{
	cfi_array_write(sc, ofs, AMD_ADDR_START, CFI_AMD_UNLOCK);
	cfi_array_write(sc, ofs, AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
	cfi_array_write(sc, ofs, addr, data);
}

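/*
 * Every AMD-style command above is preceded by the standard unlock
 * cycles: 0xaa to 0x555 and 0x55 to 0x2aa, followed by the command
 * itself.  cfi_erase_block(), for example, ends the sequence with
 * CFI_AMD_BLOCK_ERASE (0x30) written to the block's base address.
 */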


/*
 * The following routines assume width=1 and shift=2 as that is
 * the case on the Octeon DSR machines.
 * If this assumption fails a new detection routine should be written
 * and called during attach.
 */
uint8_t
cfi_read(struct amdcf_softc *sc, bus_size_t base, bus_size_t offset)
{
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
	    base | (offset * sc->sc_shift));
}

void
cfi_write(struct amdcf_softc *sc, bus_size_t base, bus_size_t offset,
    uint8_t val)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
	    base | (offset * sc->sc_shift), val);
}

int
cfi_make_cmd(uint8_t cmd, u_int mask)
{
	int i;
	u_int data = 0;

	for (i = 0; i < sizeof(int); i ++) {
		if (mask & (0xff << (i*8)))
			data |= cmd << (i*8);
	}

	return data;
}
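/*
 * Example: with the 8-bit mask set up in amdcf_attach() (0x000000ff),
 * cfi_make_cmd(0xf0, 0x000000ff) returns plain 0xf0; a hypothetical
 * 16-bit array with mask 0x0000ffff would get the command replicated
 * into both byte lanes, giving 0xf0f0.
 */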