1 /* $OpenBSD: amdcf.c,v 1.10 2024/05/20 23:13:33 jsg Exp $ */ 2 3 /* 4 * Copyright (c) 2007, Juniper Networks, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the author nor the names of any co-contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 26 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 /* 33 * Copyright (c) 2009 Sam Leffler, Errno Consulting 34 * All rights reserved. 35 * 36 * Redistribution and use in source and binary forms, with or without 37 * modification, are permitted provided that the following conditions 38 * are met: 39 * 1. 
Redistributions of source code must retain the above copyright 40 * notice, this list of conditions and the following disclaimer. 41 * 2. Redistributions in binary form must reproduce the above copyright 42 * notice, this list of conditions and the following disclaimer in the 43 * documentation and/or other materials provided with the distribution. 44 * 45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 46 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 47 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 48 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 49 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 50 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 51 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 52 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 53 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 54 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 55 */ 56 57 /* 58 * Copyright (c) 2015 Paul Irofti. 59 * 60 * Permission to use, copy, modify, and distribute this software for any 61 * purpose with or without fee is hereby granted, provided that the above 62 * copyright notice and this permission notice appear in all copies. 63 * 64 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 65 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 66 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 67 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 68 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 69 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 70 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
 */

/*
 * amdcf(4): disk driver for the memory-mapped AMD/Fujitsu-style CFI
 * flash array on D-Link DSR Octeon boards.  The flash is probed via
 * the CFI query interface at attach time and exposed as an ordinary
 * disk(9) device.  Reads are served byte by byte from the array;
 * writes are compiled out unless AMDCF_DISK_WRITE_ENABLE is defined,
 * and when enabled are staged per erase block and flushed with an
 * erase/program cycle (see cfi_block_start/cfi_block_finish).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/dkio.h>

#include <machine/intr.h>
#include <machine/bus.h>
#include <machine/autoconf.h>

#include <octeon/dev/iobusvar.h>
#include <machine/octeonreg.h>
#include <machine/octeonvar.h>

/* CFI query entry: write CFI_QRY_CMD_DATA to address CFI_QRY_CMD_ADDR. */
#define CFI_QRY_CMD_ADDR	0x55
#define CFI_QRY_CMD_DATA	0x98

/* Query-table offsets: typical/maximum write and erase timeouts. */
#define CFI_QRY_TTO_WRITE	0x1f
#define CFI_QRY_TTO_ERASE	0x21
#define CFI_QRY_MTO_WRITE	0x23
#define CFI_QRY_MTO_ERASE	0x25

/* Query-table offsets: device size (as a power of two) and erase regions. */
#define CFI_QRY_SIZE		0x27
#define CFI_QRY_NREGIONS	0x2c
#define CFI_QRY_REGION0		0x31
#define CFI_QRY_REGION(x)	(CFI_QRY_REGION0 + (x) * 4)

#define CFI_BCS_READ_ARRAY	0xff

#define CFI_DISK_SECSIZE	512
#define CFI_DISK_MAXIOSIZE	65536

/* Size of the bus_space mapping covering the whole flash array. */
#define AMDCF_MAP_SIZE		0x02000000

/* AMD command-set opcodes. */
#define CFI_AMD_BLOCK_ERASE	0x30
#define CFI_AMD_UNLOCK		0xaa
#define CFI_AMD_UNLOCK_ACK	0x55
#define CFI_AMD_PROGRAM		0xa0
#define CFI_AMD_RESET		0xf0

/* Magic unlock-cycle addresses used by the AMD command set. */
#define AMD_ADDR_START		0x555
#define AMD_ADDR_ACK		0x2aa

/* Writes below this flash offset are refused (see cfi_write_block). */
#define BOOTLOADER_ADDR		0xa0000

/* One CFI erase region: r_blocks blocks of r_blksz bytes each. */
struct cfi_region {
	u_int	r_blocks;
	u_int	r_blksz;
};

struct amdcf_softc {
	/* General disk infos */
	struct device	sc_dev;
	struct disk	sc_dk;
	struct bufq	sc_bufq;
	struct buf	*sc_bp;		/* buffer currently transferring */

	int		sc_flags;
#define AMDCF_LOADED	0x10		/* disklabel has been loaded */

	struct iobus_attach_args *sc_io;
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;

	size_t		sc_size;	/* Disk size in bytes */
	u_int		sc_regions;	/* Erase regions. */
	struct cfi_region *sc_region;	/* Array of region info. */

	u_int		sc_width;	/* data-path width, bytes */
	u_int		sc_shift;	/* address scaling for cfi_read/write */
	u_int		sc_mask;	/* byte-lane mask for cfi_make_cmd */

	/*
	 * Timeouts derived from the CFI query table in attach;
	 * consumed by cfi_wait_ready().
	 */
	u_int		sc_erase_timeout;
	u_int		sc_erase_max_timeout;
	u_int		sc_write_timeout;
	u_int		sc_write_max_timeout;
	u_int		sc_rstcmd;	/* command to return to read-array mode */

	/* Write staging: one erase block buffered in RAM while dirty. */
	u_char		*sc_wrbuf;
	u_int		sc_wrbufsz;	/* staging buffer size, bytes */
	u_int		sc_wrofs;	/* flash offset of staged block */
	u_int		sc_writing;	/* nonzero while sc_wrbuf is active */
};

int	amdcf_match(struct device *, void *, void *);
void	amdcf_attach(struct device *, struct device *, void *);
int	amdcf_detach(struct device *, int);

const struct cfattach amdcf_ca = {
	sizeof(struct amdcf_softc), amdcf_match, amdcf_attach, amdcf_detach
};

struct cfdriver amdcf_cd = {
	NULL, "amdcf", DV_DISK
};

cdev_decl(amdcf);
bdev_decl(amdcf);

#define amdcflookup(unit) (struct amdcf_softc *)disk_lookup(&amdcf_cd, (unit))
int	amdcfgetdisklabel(dev_t, struct amdcf_softc *, struct disklabel *, int);

void	amdcfstart(void *);
void	_amdcfstart(struct amdcf_softc *, struct buf *);
void	amdcfdone(void *);

void	amdcf_disk_read(struct amdcf_softc *, struct buf *, off_t);
void	amdcf_disk_write(struct amdcf_softc *, struct buf *, off_t);

int	cfi_block_start(struct amdcf_softc *, u_int);
int	cfi_write_block(struct amdcf_softc *);
int	cfi_erase_block(struct amdcf_softc *, u_int);
int	cfi_block_finish(struct amdcf_softc *);

void	cfi_array_write(struct amdcf_softc *sc, u_int, u_int, u_int);
void	cfi_amd_write(struct amdcf_softc *, u_int, u_int, u_int);

uint8_t	cfi_read(struct amdcf_softc *, bus_size_t, bus_size_t);
void	cfi_write(struct amdcf_softc *, bus_size_t, bus_size_t, uint8_t);
int	cfi_wait_ready(struct amdcf_softc *, u_int, u_int, u_int);
int	cfi_make_cmd(uint8_t, u_int);

/*
 * Autoconf match: accept only on the D-Link DSR-500 board.
 */
int
amdcf_match(struct device *parent, void *match, void *aux)
{
	struct mainbus_attach_args *maa = aux;
	struct cfdata *cf = match;

	if (strcmp(maa->maa_name, cf->cf_driver->cd_name) != 0)
		return 0;

	/* Only for DSR machines */
	if (octeon_board != BOARD_DLINK_DSR_500)
		return 0;

	return 1;
}

/*
 * Map the flash array, read the CFI query table to learn timeouts,
 * device size and erase-region geometry, then attach as a disk(9).
 */
void
amdcf_attach(struct device *parent, struct device *self, void *aux)
{
	struct amdcf_softc *sc = (void *)self;
	u_int blksz, blocks, r;

	sc->sc_io = aux;
	sc->sc_iot = sc->sc_io->aa_bust;

	if (bus_space_map(sc->sc_iot, OCTEON_AMDCF_BASE, AMDCF_MAP_SIZE, 0,
	    &sc->sc_ioh)) {
		/*
		 * NOTE(review): a mapping failure only prints a message;
		 * attach continues and subsequent bus accesses use an
		 * uninitialized handle — confirm whether this should
		 * return early instead.
		 */
		printf(": can't map registers");
	}

	/* should be detected in the generic driver */
	sc->sc_width = 1;
	sc->sc_shift = 2;
	sc->sc_mask = 0x000000ff;
	sc->sc_rstcmd = CFI_AMD_RESET;

	/* Initialize the Query Database from the CF */
	cfi_array_write(sc, 0, 0, sc->sc_rstcmd);
	cfi_write(sc, 0, CFI_QRY_CMD_ADDR, CFI_QRY_CMD_DATA);

	/* Get time-out values for erase and write. */
	sc->sc_write_timeout = 1 << cfi_read(sc, 0, CFI_QRY_TTO_WRITE);
	sc->sc_erase_timeout = 1 << cfi_read(sc, 0, CFI_QRY_TTO_ERASE);
	sc->sc_write_max_timeout = 1 << cfi_read(sc, 0, CFI_QRY_MTO_WRITE);
	sc->sc_erase_max_timeout = 1 << cfi_read(sc, 0, CFI_QRY_MTO_ERASE);

	/* Get the device size. */
	sc->sc_size = 1U << cfi_read(sc, 0, CFI_QRY_SIZE);
	printf(": AMD/Fujitsu %zu bytes\n", sc->sc_size);

	/* Get erase regions. */
	sc->sc_regions = cfi_read(sc, 0, CFI_QRY_NREGIONS);
	sc->sc_region = malloc(sc->sc_regions *
	    sizeof(struct cfi_region), M_TEMP, M_WAITOK | M_ZERO);

	for (r = 0; r < sc->sc_regions; r++) {
		/* Block count and block size are little-endian 16-bit. */
		blocks = cfi_read(sc, 0, CFI_QRY_REGION(r)) |
		    (cfi_read(sc, 0, CFI_QRY_REGION(r) + 1) << 8);
		sc->sc_region[r].r_blocks = blocks + 1;

		blksz = cfi_read(sc, 0, CFI_QRY_REGION(r) + 2) |
		    (cfi_read(sc, 0, CFI_QRY_REGION(r) + 3) << 8);
		/* A stored size of zero denotes 128-byte blocks. */
		sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
		    blksz * 256;
	}

	/* Reset the device to the default state */
	cfi_array_write(sc, 0, 0, sc->sc_rstcmd);

	/*
	 * Initialize disk structures.
	 */
	sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
	bufq_init(&sc->sc_bufq, BUFQ_DEFAULT);

	/* Attach disk. */
	disk_attach(&sc->sc_dev, &sc->sc_dk);

}

/*
 * Autoconf detach: drain pending buffers and tear down the disk.
 */
int
amdcf_detach(struct device *self, int flags)
{
	struct amdcf_softc *sc = (struct amdcf_softc *)self;

	bufq_drain(&sc->sc_bufq);

	disk_gone(amdcfopen, self->dv_unit);

	/* Detach disk. */
	bufq_destroy(&sc->sc_bufq);
	disk_detach(&sc->sc_dk);

	return 0;
}

/*
 * Open a partition; loads the disklabel on the first open of the unit.
 */
int
amdcfopen(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct amdcf_softc *sc;
	int unit, part;
	int error;

	unit = DISKUNIT(dev);
	sc = amdcflookup(unit);
	if (sc == NULL)
		return ENXIO;

	/*
	 * If this is the first open of this device, add a reference
	 * to the adapter.
	 */
	if ((error = disk_lock(&sc->sc_dk)) != 0)
		goto out1;

	if (sc->sc_dk.dk_openmask != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens.
		 */
		if ((sc->sc_flags & AMDCF_LOADED) == 0) {
			error = EIO;
			goto out;
		}
	} else {
		if ((sc->sc_flags & AMDCF_LOADED) == 0) {
			sc->sc_flags |= AMDCF_LOADED;

			/* Load the partition info if not already loaded. */
			if (amdcfgetdisklabel(dev, sc,
			    sc->sc_dk.dk_label, 0) == EIO) {
				error = EIO;
				goto out;
			}
		}
	}

	part = DISKPART(dev);

	if ((error = disk_openpart(&sc->sc_dk, part, fmt, 1)) != 0)
		goto out;

	disk_unlock(&sc->sc_dk);
	device_unref(&sc->sc_dev);
	return 0;

out:
	disk_unlock(&sc->sc_dk);
out1:
	device_unref(&sc->sc_dev);
	return error;
}

/*
 * Load the label information on the named device.  Builds a mostly
 * synthetic label (geometry fields are placeholders) sized from the
 * CFI-reported device size, then lets readdisklabel() pull any real
 * on-disk label.
 */
int
amdcfgetdisklabel(dev_t dev, struct amdcf_softc *sc, struct disklabel *lp,
    int spoofonly)
{
	memset(lp, 0, sizeof(struct disklabel));

	lp->d_secsize = DEV_BSIZE;
	lp->d_nsectors = 1;	/* bogus */
	lp->d_ntracks = 1;	/* bogus */
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
	lp->d_ncylinders = sc->sc_size / lp->d_secpercyl;

	strlcpy(lp->d_typename, "amdcf device", sizeof(lp->d_typename));
	lp->d_type = DTYPE_SCSI;	/* bogus type, can be anything */
	strlcpy(lp->d_packname, "CFI Disk", sizeof(lp->d_packname));
	DL_SETDSIZE(lp, sc->sc_size / DEV_BSIZE);
	lp->d_version = 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);

	/* Call the generic disklabel extraction routine */
	return readdisklabel(DISKLABELDEV(dev), amdcfstrategy, lp, spoofonly);
}

/*
 * Close a partition.
 */
int
amdcfclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct amdcf_softc *sc;
	int part = DISKPART(dev);

	sc = amdcflookup(DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	disk_lock_nointr(&sc->sc_dk);

	disk_closepart(&sc->sc_dk, part, fmt);

	disk_unlock(&sc->sc_dk);

	device_unref(&sc->sc_dev);
	return 0;
}

/*
 * Raw-device read via physio(9).
 */
int
amdcfread(dev_t dev, struct uio *uio, int flags)
{
	return (physio(amdcfstrategy, dev, B_READ, minphys, uio));
}

/*
 * Raw-device write.  Disabled (silently returns success) unless the
 * kernel is built with AMDCF_DISK_WRITE_ENABLE.
 */
int
amdcfwrite(dev_t dev, struct uio *uio, int flags)
{
#ifdef AMDCF_DISK_WRITE_ENABLE
	return (physio(amdcfstrategy, dev, B_WRITE, minphys, uio));
#else
	return 0;
#endif
}

/*
 * Block-layer entry point: validate the request against the label,
 * queue it, and kick the transfer loop at splbio.
 */
void
amdcfstrategy(struct buf *bp)
{
	struct amdcf_softc *sc;
	int s;

	sc = amdcflookup(DISKUNIT(bp->b_dev));
	if (sc == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	/* If device invalidated (e.g. media change, door open), error. */
	if ((sc->sc_flags & AMDCF_LOADED) == 0) {
		bp->b_error = EIO;
		goto bad;
	}

	/* Validate the request. */
	if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1)
		goto done;

	/* Check that the number of sectors can fit in a byte. */
	if ((bp->b_bcount / sc->sc_dk.dk_label->d_secsize) >= (1 << NBBY)) {
		bp->b_error = EINVAL;
		goto bad;
	}

	/* Queue transfer on drive, activate drive and controller if idle. */
	bufq_queue(&sc->sc_bufq, bp);
	s = splbio();
	amdcfstart(sc);
	splx(s);
	device_unref(&sc->sc_dev);
	return;

bad:
	bp->b_flags |= B_ERROR;
	bp->b_resid = bp->b_bcount;
done:
	s = splbio();
	biodone(bp);
	splx(s);
	if (sc != NULL)
		device_unref(&sc->sc_dev);
}

/*
 * Disk ioctls: disklabel get/set/reload plus partition info.
 */
int
amdcfioctl(dev_t dev, u_long xfer, caddr_t addr, int flag, struct proc *p)
{
	struct amdcf_softc *sc;
	struct disklabel *lp;
	int error = 0;

	sc = amdcflookup(DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if ((sc->sc_flags & AMDCF_LOADED) == 0) {
		error = EIO;
		goto exit;
	}

	switch (xfer) {
	case DIOCRLDINFO:
		/* Rebuild the in-core label from scratch. */
		lp = malloc(sizeof(*lp), M_TEMP, M_WAITOK);
		amdcfgetdisklabel(dev, sc, lp, 0);
		bcopy(lp, sc->sc_dk.dk_label, sizeof(*lp));
		free(lp, M_TEMP, sizeof(*lp));
		goto exit;

	case DIOCGPDINFO:
		/* Spoofed (default) label only; does not read the disk. */
		amdcfgetdisklabel(dev, sc, (struct disklabel *)addr, 1);
		goto exit;

	case DIOCGDINFO:
		*(struct disklabel *)addr = *(sc->sc_dk.dk_label);
		goto exit;

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sc->sc_dk.dk_label->d_partitions[DISKPART(dev)];
		goto exit;

	case DIOCWDINFO:
	case DIOCSDINFO:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			goto exit;
		}

		if ((error = disk_lock(&sc->sc_dk)) != 0)
			goto exit;

		error = setdisklabel(sc->sc_dk.dk_label,
		    (struct disklabel *)addr, sc->sc_dk.dk_openmask);
		if (error == 0) {
			/* DIOCWDINFO additionally writes the label to disk. */
			if (xfer == DIOCWDINFO)
				error = writedisklabel(DISKLABELDEV(dev),
				    amdcfstrategy, sc->sc_dk.dk_label);
		}

		disk_unlock(&sc->sc_dk);
		goto exit;

	default:
		error = ENOTTY;
		goto exit;
	}

#ifdef DIAGNOSTIC
	panic("amdcfioctl: impossible");
#endif

exit:
	device_unref(&sc->sc_dev);
	return error;
}

/*
 * Dump core after a system crash.  Not supported on this device.
 */
int
amdcfdump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
	return ENXIO;
}

/*
 * Return the partition size in DEV_BSIZE blocks, opening/closing the
 * device around the query if it was not already open.
 */
daddr_t
amdcfsize(dev_t dev)
{
	struct amdcf_softc *sc;
	struct disklabel *lp;
	int part, omask;
	daddr_t size;

	sc = amdcflookup(DISKUNIT(dev));
	if (sc == NULL)
		return (-1);

	part = DISKPART(dev);
	omask = sc->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && amdcfopen(dev, 0, S_IFBLK, NULL) != 0) {
		size = -1;
		goto exit;
	}

	lp = sc->sc_dk.dk_label;
	size = DL_SECTOBLK(lp, DL_GETPSIZE(&lp->d_partitions[part]));
	if (omask == 0 && amdcfclose(dev, 0, S_IFBLK, NULL) != 0)
		size = -1;

exit:
	device_unref(&sc->sc_dev);
	return size;
}


/*
 * Queue a drive for I/O: drain the buffer queue, transferring each
 * buffer synchronously.  Called at splbio from amdcfstrategy().
 */
void
amdcfstart(void *arg)
{
	struct amdcf_softc *sc = arg;
	struct buf *bp;

	while ((bp = bufq_dequeue(&sc->sc_bufq)) != NULL) {
		/* Transfer this buffer now. */
		_amdcfstart(sc, bp);
	}
}

/*
 * Perform one buffer's transfer: compute the absolute flash offset
 * from the partition offset plus block number, then read or write.
 */
void
_amdcfstart(struct amdcf_softc *sc, struct buf *bp)
{
	off_t off;
	struct partition *p;

	sc->sc_bp = bp;

	/* Fetch buffer's read/write offset */
	p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
	off = DL_GETPOFFSET(p) * sc->sc_dk.dk_label->d_secsize +
	    (u_int64_t)bp->b_blkno * DEV_BSIZE;
	if (off > sc->sc_size) {
		/*
		 * NOTE(review): this error return skips amdcfdone(), so
		 * the buffer is never biodone()'d — confirm intended.
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		return;
	}

	/* Instrumentation. */
	disk_busy(&sc->sc_dk);

	if (bp->b_flags & B_READ)
		amdcf_disk_read(sc, bp, off);
#ifdef AMDCF_DISK_WRITE_ENABLE
	else
		amdcf_disk_write(sc, bp, off);
#endif

	amdcfdone(sc);
}

/*
 * Complete the current buffer: update accounting and biodone() it.
 */
void
amdcfdone(void *arg)
{
	struct amdcf_softc *sc = arg;
	struct buf *bp = sc->sc_bp;

	if (bp->b_error == 0)
		bp->b_resid = 0;
	else
		bp->b_flags |= B_ERROR;

	disk_unbusy(&sc->sc_dk, (bp->b_bcount - bp->b_resid),
	    bp->b_blkno, (bp->b_flags & B_READ));
	biodone(bp);
}

/*
 * Satisfy a read by loading bytes one at a time from the mapped
 * array.  Any pending staged write block is flushed first so reads
 * observe the latest data.
 */
void
amdcf_disk_read(struct amdcf_softc *sc, struct buf *bp, off_t off)
{
	long resid;

	if (sc->sc_writing) {
		bp->b_error = cfi_block_finish(sc);
		if (bp->b_error) {
			bp->b_flags |= B_ERROR;
			return;
		}
	}

	resid = bp->b_bcount;
	uint8_t *dp = (uint8_t *)bp->b_data;
	while (resid > 0 && off < sc->sc_size) {
		*dp++ = cfi_read(sc, off, 0);
		off += 1, resid -= 1;
	}
	bp->b_resid = resid;
}

/*
 * Satisfy a write by staging data into the per-block RAM buffer,
 * switching blocks (and flushing) as the offset moves.
 *
 * NOTE(review): neither `off' nor the b_data source pointer advances
 * inside the loop, so a transfer spanning more than one staging pass
 * would recopy the same data — confirm; this path is compiled out
 * unless AMDCF_DISK_WRITE_ENABLE is defined.
 */
void
amdcf_disk_write(struct amdcf_softc *sc, struct buf *bp, off_t off)
{
	long resid;
	u_int top;

	resid = bp->b_bcount;
	while (resid > 0) {
		/*
		 * Finish the current block if we're about to write
		 * to a different block.
		 */
		if (sc->sc_writing) {
			top = sc->sc_wrofs + sc->sc_wrbufsz;
			if (off < sc->sc_wrofs || off >= top)
				cfi_block_finish(sc);
		}

		/* Start writing to a (new) block if applicable. */
		if (!sc->sc_writing) {
			bp->b_error = cfi_block_start(sc, off);
			if (bp->b_error) {
				bp->b_flags |= B_ERROR;
				return;
			}
		}

		top = sc->sc_wrofs + sc->sc_wrbufsz;
		bcopy(bp->b_data,
		    sc->sc_wrbuf + off - sc->sc_wrofs,
		    MIN(top - off, resid));
		resid -= MIN(top - off, resid);
	}
	bp->b_resid = resid;
}

/*
 * Begin writing into a new block/sector.  We read the sector into
 * memory and keep updating that, until we move into another sector
 * or the process stops writing.  At that time we write the whole
 * sector to flash (see cfi_block_finish).
 */
int
cfi_block_start(struct amdcf_softc *sc, u_int ofs)
{
	u_int rofs, rsz;
	int r;
	uint8_t *ptr;

	/* Locate the erase region containing `ofs'. */
	rofs = 0;
	for (r = 0; r < sc->sc_regions; r++) {
		rsz = sc->sc_region[r].r_blocks * sc->sc_region[r].r_blksz;
		if (ofs < rofs + rsz)
			break;
		rofs += rsz;
	}
	if (r == sc->sc_regions)
		return (EFAULT);

	sc->sc_wrbufsz = sc->sc_region[r].r_blksz;
	sc->sc_wrbuf = malloc(sc->sc_wrbufsz, M_TEMP, M_WAITOK);
	/* Round down to the start of the enclosing block. */
	sc->sc_wrofs = ofs - (ofs - rofs) % sc->sc_wrbufsz;

	ptr = sc->sc_wrbuf;
	/* Read the block from flash for byte-serving. */
	for (r = 0; r < sc->sc_wrbufsz; r++)
		*(ptr)++ = cfi_read(sc, sc->sc_wrofs + r, 0);

	sc->sc_writing = 1;
	return (0);
}

/*
 * Finish updating the current block/sector by writing the compound
 * set of changes to the flash.
747 */ 748 int 749 cfi_block_finish(struct amdcf_softc *sc) 750 { 751 int error; 752 753 error = cfi_write_block(sc); 754 free(sc->sc_wrbuf, M_TEMP, sc->sc_wrbufsz); 755 sc->sc_wrbuf = NULL; 756 sc->sc_wrbufsz = 0; 757 sc->sc_wrofs = 0; 758 sc->sc_writing = 0; 759 return (error); 760 } 761 762 int 763 cfi_write_block(struct amdcf_softc *sc) 764 { 765 uint8_t *ptr; 766 int error, i, s; 767 768 if (sc->sc_wrofs > sc->sc_size) 769 panic("CFI: write offset (%x) bigger " 770 "than cfi array size (%zu)\n", 771 sc->sc_wrofs, sc->sc_size); 772 773 if ((sc->sc_wrofs < BOOTLOADER_ADDR) || 774 ((sc->sc_wrofs + sc->sc_wrbufsz) < BOOTLOADER_ADDR)) 775 return EOPNOTSUPP; 776 777 error = cfi_erase_block(sc, sc->sc_wrofs); 778 if (error) 779 goto out; 780 781 /* Write the block. */ 782 ptr = sc->sc_wrbuf; 783 784 for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) { 785 786 /* 787 * Make sure the command to start a write and the 788 * actual write happens back-to-back without any 789 * excessive delays. 790 */ 791 s = splbio(); 792 793 cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START, 794 CFI_AMD_PROGRAM); 795 /* Raw data do not use cfi_array_write */ 796 cfi_write(sc, sc->sc_wrofs + i, 0, *(ptr)++); 797 798 splx(s); 799 800 error = cfi_wait_ready(sc, sc->sc_wrofs + i, 801 sc->sc_write_timeout, sc->sc_write_max_timeout); 802 if (error) 803 goto out; 804 } 805 806 out: 807 cfi_array_write(sc, sc->sc_wrofs, 0, sc->sc_rstcmd); 808 return error; 809 } 810 811 int 812 cfi_erase_block(struct amdcf_softc *sc, u_int offset) 813 { 814 int error = 0; 815 816 if (offset > sc->sc_size) 817 panic("CFI: erase offset (%x) bigger " 818 "than cfi array size (%zu)\n", 819 sc->sc_wrofs, sc->sc_size); 820 821 /* Erase the block. 
*/ 822 cfi_amd_write(sc, offset, 0, CFI_AMD_BLOCK_ERASE); 823 824 error = cfi_wait_ready(sc, offset, sc->sc_erase_timeout, 825 sc->sc_erase_max_timeout); 826 827 return error; 828 } 829 830 831 832 int 833 cfi_wait_ready(struct amdcf_softc *sc, u_int ofs, u_int timeout, u_int count) 834 { 835 int done, error; 836 u_int st0 = 0, st = 0; 837 838 done = 0; 839 error = 0; 840 841 if (!timeout) 842 timeout = 100; /* Default to 100 uS */ 843 if (!count) 844 count = 100; /* Max timeout is 10 mS */ 845 846 while (!done && !error && count) { 847 DELAY(timeout); 848 849 count--; 850 851 /* 852 * read sc->sc_width bytes, and check for toggle bit. 853 */ 854 st0 = cfi_read(sc, ofs, 0); 855 st = cfi_read(sc, ofs, 0); 856 done = ((st & cfi_make_cmd(0x40, sc->sc_mask)) == 857 (st0 & cfi_make_cmd(0x40, sc->sc_mask))) ? 1 : 0; 858 859 break; 860 } 861 if (!done && !error) 862 error = ETIMEDOUT; 863 if (error) 864 printf("\nerror=%d (st 0x%x st0 0x%x) at offset=%x\n", 865 error, st, st0, ofs); 866 return error; 867 } 868 869 /* 870 * cfi_array_write 871 * fill "bus width" word with value of var data by array mask sc->sc_mask 872 */ 873 void 874 cfi_array_write(struct amdcf_softc *sc, u_int ofs, u_int addr, u_int data) 875 { 876 data &= 0xff; 877 cfi_write(sc, ofs, addr, cfi_make_cmd(data, sc->sc_mask)); 878 } 879 880 void 881 cfi_amd_write(struct amdcf_softc *sc, u_int ofs, u_int addr, u_int data) 882 { 883 cfi_array_write(sc, ofs, AMD_ADDR_START, CFI_AMD_UNLOCK); 884 cfi_array_write(sc, ofs, AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK); 885 cfi_array_write(sc, ofs, addr, data); 886 } 887 888 889 890 /* 891 * The following routines assume width=1 and shift=2 as that is 892 * the case on the Octeon DSR machines. 893 * If this assumption fails a new detection routine should be written 894 * and called during attach. 
895 */ 896 uint8_t 897 cfi_read(struct amdcf_softc *sc, bus_size_t base, bus_size_t offset) 898 { 899 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, 900 base | (offset * sc->sc_shift)); 901 } 902 903 void 904 cfi_write(struct amdcf_softc *sc, bus_size_t base, bus_size_t offset, 905 uint8_t val) 906 { 907 bus_space_write_1(sc->sc_iot, sc->sc_ioh, 908 base | (offset * sc->sc_shift), val); 909 } 910 911 int 912 cfi_make_cmd(uint8_t cmd, u_int mask) 913 { 914 int i; 915 u_int data = 0; 916 917 for (i = 0; i < sizeof(int); i ++) { 918 if (mask & (0xff << (i*8))) 919 data |= cmd << (i*8); 920 } 921 922 return data; 923 } 924