/* $NetBSD: spiflash.c,v 1.23 2019/09/14 15:12:12 tnn Exp $ */

/*-
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * Portions of this code were written by Garrett D'Amore for the
 * Champaign-Urbana Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *      This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spiflash.c,v 1.23 2019/09/14 15:12:12 tnn Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/uio.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/errno.h>

#include <dev/spi/spivar.h>
#include <dev/spi/spiflash.h>

/*
 * This is an MI block driver for SPI flash devices.  It could probably be
 * converted to some more generic framework, if someone wanted to create one
 * for NOR flashes.  Note that some flashes have the ability to handle
 * interrupts.
 */

struct spiflash_softc {
	struct disk	sc_dk;

	struct spiflash_hw_if sc_hw;
	void		*sc_cookie;

	const char	*sc_name;
	struct spi_handle *sc_handle;
	int		sc_device_size;
	int		sc_write_size;
	int		sc_erase_size;
	int		sc_read_size;
	int		sc_device_blks;

	struct bufq_state *sc_waitq;
	struct bufq_state *sc_workq;
	struct bufq_state *sc_doneq;
	lwp_t		*sc_thread;
};

#define	sc_getname	sc_hw.sf_getname
#define	sc_gethandle	sc_hw.sf_gethandle
#define	sc_getsize	sc_hw.sf_getsize
#define	sc_getflags	sc_hw.sf_getflags
#define	sc_erase	sc_hw.sf_erase
#define	sc_write	sc_hw.sf_write
#define	sc_read		sc_hw.sf_read
#define	sc_getstatus	sc_hw.sf_getstatus
#define	sc_setstatus	sc_hw.sf_setstatus

struct spiflash_attach_args {
	const struct spiflash_hw_if *hw;
	void	*cookie;
};

#define	STATIC
STATIC int spiflash_match(device_t, cfdata_t, void *);
STATIC void spiflash_attach(device_t, device_t, void *);
STATIC int spiflash_print(void *, const char *);
STATIC int spiflash_common_erase(spiflash_handle_t, size_t, size_t);
STATIC int spiflash_common_write(spiflash_handle_t, size_t, size_t,
    const uint8_t *);
STATIC int spiflash_common_read(spiflash_handle_t, size_t, size_t, uint8_t *);
STATIC void spiflash_process_done(spiflash_handle_t, int);
STATIC void spiflash_process_read(spiflash_handle_t);
STATIC void spiflash_process_write(spiflash_handle_t);
STATIC void spiflash_thread(void *);
STATIC int spiflash_nsectors(spiflash_handle_t, struct buf *);
STATIC int spiflash_sector(spiflash_handle_t, struct buf *);

CFATTACH_DECL_NEW(spiflash, sizeof(struct spiflash_softc),
    spiflash_match, spiflash_attach, NULL, NULL);

#ifdef	SPIFLASH_DEBUG
#define	DPRINTF(x)	do { printf x; } while (0/*CONSTCOND*/)
#else
#define	DPRINTF(x)	do { } while (0/*CONSTCOND*/)
#endif

extern struct cfdriver spiflash_cd;

dev_type_open(spiflash_open);
dev_type_close(spiflash_close);
dev_type_read(spiflash_read);
dev_type_write(spiflash_write);
dev_type_ioctl(spiflash_ioctl);
dev_type_strategy(spiflash_strategy);

const struct bdevsw spiflash_bdevsw = {
	.d_open = spiflash_open,
	.d_close = spiflash_close,
	.d_strategy = spiflash_strategy,
	.d_ioctl = spiflash_ioctl,
	.d_dump = nodump,
	.d_psize = nosize,
	.d_discard = nodiscard,
	.d_flag = D_DISK,
};

const struct cdevsw spiflash_cdevsw = {
	.d_open = spiflash_open,
	.d_close = spiflash_close,
	.d_read = spiflash_read,
	.d_write = spiflash_write,
	.d_ioctl = spiflash_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK,
};

static struct dkdriver spiflash_dkdriver = {
	.d_strategy = spiflash_strategy
};

spiflash_handle_t
spiflash_attach_mi(const struct spiflash_hw_if *hw, void *cookie,
    device_t dev)
{
	struct spiflash_attach_args sfa;
	sfa.hw = hw;
	sfa.cookie = cookie;

	return (spiflash_handle_t)config_found(dev, &sfa, spiflash_print);
}
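/*
 * Illustrative sketch only (not part of the driver): a machine-dependent
 * front-end that has identified a particular flash chip is expected to
 * fill in a struct spiflash_hw_if and hand it to spiflash_attach_mi()
 * from its own attach routine.  The sf_* member names are the ones
 * accessed through the sc_* defines above; everything named "myflash"
 * below is hypothetical, and the authoritative structure layout lives
 * in dev/spi/spiflash.h.
 *
 *	static const struct spiflash_hw_if myflash_hw_if = {
 *		.sf_getname = myflash_getname,		(device name string)
 *		.sf_gethandle = myflash_gethandle,	(struct spi_handle *)
 *		.sf_getsize = myflash_getsize,		(SPIFLASH_SIZE_DEVICE,
 *							 _ERASE, _WRITE, _READ)
 *		(sf_read, sf_write and sf_erase may be left NULL, in which
 *		 case the spiflash_common_* routines below are used)
 *	};
 *
 *	static void
 *	myflash_attach(device_t parent, device_t self, void *aux)
 *	{
 *		struct myflash_softc *sc = device_private(self);
 *		...
 *		sc->sc_spiflash = spiflash_attach_mi(&myflash_hw_if, sc, self);
 *	}
 */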
int
spiflash_print(void *aux, const char *pnp)
{
	if (pnp != NULL)
		printf("spiflash at %s\n", pnp);

	return UNCONF;
}

int
spiflash_match(device_t parent, cfdata_t cf, void *aux)
{

	return 1;
}

void
spiflash_attach(device_t parent, device_t self, void *aux)
{
	struct spiflash_softc *sc = device_private(self);
	struct spiflash_attach_args *sfa = aux;
	void *cookie = sfa->cookie;

	sc->sc_hw = *sfa->hw;
	sc->sc_cookie = cookie;
	sc->sc_name = sc->sc_getname(cookie);
	sc->sc_handle = sc->sc_gethandle(cookie);
	sc->sc_device_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_DEVICE);
	sc->sc_erase_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_ERASE);
	sc->sc_write_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_WRITE);
	sc->sc_read_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_READ);
	sc->sc_device_blks = sc->sc_device_size / DEV_BSIZE;

	if (sc->sc_read == NULL)
		sc->sc_read = spiflash_common_read;
	if (sc->sc_write == NULL)
		sc->sc_write = spiflash_common_write;
	if (sc->sc_erase == NULL)
		sc->sc_erase = spiflash_common_erase;

	aprint_naive(": SPI flash\n");
	aprint_normal(": %s SPI flash\n", sc->sc_name);
	/* XXX: note that this has to change for boot-sectored flash */
	aprint_normal_dev(self, "%d KB, %d sectors of %d KB each\n",
	    sc->sc_device_size / 1024,
	    sc->sc_device_size / sc->sc_erase_size,
	    sc->sc_erase_size / 1024);

	/* first-come first-served strategy works best for us */
	bufq_alloc(&sc->sc_waitq, "fcfs", BUFQ_SORT_RAWBLOCK);
	bufq_alloc(&sc->sc_workq, "fcfs", BUFQ_SORT_RAWBLOCK);
	bufq_alloc(&sc->sc_doneq, "fcfs", BUFQ_SORT_RAWBLOCK);

	disk_init(&sc->sc_dk, device_xname(self), &spiflash_dkdriver);
	disk_attach(&sc->sc_dk);

	/* arrange to allocate the kthread */
	kthread_create(PRI_NONE, 0, NULL, spiflash_thread, sc,
	    &sc->sc_thread, "spiflash");
}

int
spiflash_open(dev_t dev, int flags, int mode, struct lwp *l)
{
	spiflash_handle_t sc;

	sc = device_lookup_private(&spiflash_cd, DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	/*
	 * XXX: We need to handle partitions here.  The problem is
	 * that it isn't entirely clear to me how to deal with this.
	 * There are devices that could be used "in the raw" with a
	 * NetBSD label, but then you get into devices that have other
	 * kinds of data on them -- some have VxWorks data, some have
	 * RedBoot data, and some have other constraints -- for example
	 * some devices might have a portion that is read-only,
	 * whereas others might have a portion that is read-write.
	 *
	 * For now we just permit access to the entire device.
	 */
	return 0;
}

int
spiflash_close(dev_t dev, int flags, int mode, struct lwp *l)
{
	spiflash_handle_t sc;

	sc = device_lookup_private(&spiflash_cd, DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	return 0;
}

int
spiflash_read(dev_t dev, struct uio *uio, int ioflag)
{

	return physio(spiflash_strategy, NULL, dev, B_READ, minphys, uio);
}

int
spiflash_write(dev_t dev, struct uio *uio, int ioflag)
{

	return physio(spiflash_strategy, NULL, dev, B_WRITE, minphys, uio);
}

int
spiflash_ioctl(dev_t dev, u_long cmd, void *data, int flags, struct lwp *l)
{
	spiflash_handle_t sc;

	sc = device_lookup_private(&spiflash_cd, DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	return EINVAL;
}

void
spiflash_strategy(struct buf *bp)
{
	spiflash_handle_t sc;
	int	s;

	bp->b_resid = bp->b_bcount;

	sc = device_lookup_private(&spiflash_cd, DISKUNIT(bp->b_dev));
	if (sc == NULL) {
		bp->b_error = ENXIO;
		biodone(bp);
		return;
	}

	if (((bp->b_bcount % sc->sc_write_size) != 0) ||
	    (bp->b_blkno < 0)) {
		bp->b_error = EINVAL;
		biodone(bp);
		return;
	}

	/* no work? */
	if (bp->b_bcount == 0) {
		biodone(bp);
		return;
	}

	if (bounds_check_with_mediasize(bp, DEV_BSIZE,
	    sc->sc_device_blks) <= 0) {
		biodone(bp);
		return;
	}

	/* all ready, hand off to thread for async processing */
	s = splbio();
	bufq_put(sc->sc_waitq, bp);
	wakeup(&sc->sc_thread);
	splx(s);
}

void
spiflash_process_done(spiflash_handle_t sc, int err)
{
	struct buf	*bp;
	int		cnt = 0;
	int		flag = 0;

	while ((bp = bufq_get(sc->sc_doneq)) != NULL) {
		flag = bp->b_flags & B_READ;
		if ((bp->b_error = err) == 0)
			bp->b_resid = 0;
		cnt += bp->b_bcount - bp->b_resid;
		biodone(bp);
	}
	disk_unbusy(&sc->sc_dk, cnt, flag);
}

void
spiflash_process_read(spiflash_handle_t sc)
{
	struct buf	*bp;
	int		err = 0;

	disk_busy(&sc->sc_dk);
	while ((bp = bufq_get(sc->sc_workq)) != NULL) {
		size_t addr = bp->b_blkno * DEV_BSIZE;
		uint8_t *data = bp->b_data;
		int cnt = bp->b_resid;

		bufq_put(sc->sc_doneq, bp);

		DPRINTF(("read from addr %x, cnt %d\n", (unsigned)addr, cnt));

		if ((err = sc->sc_read(sc, addr, cnt, data)) != 0) {
			/* error occurred, fail all pending workq bufs */
			bufq_move(sc->sc_doneq, sc->sc_workq);
			break;
		}

		bp->b_resid -= cnt;
		data += cnt;
		addr += cnt;
	}
	spiflash_process_done(sc, err);
}

void
spiflash_process_write(spiflash_handle_t sc)
{
	int	len;
	size_t	base;
	daddr_t	blkno;
	uint8_t	*save;
	int err = 0, neederase = 0;
	struct buf *bp;

	/*
	 * due to other considerations, we are guaranteed that
	 * we will only have multiple buffers if they are all in
	 * the same erase sector.  Therefore we never need to look
	 * beyond the first block to determine how much data we need
	 * to save.
	 */

	bp = bufq_peek(sc->sc_workq);
	len = spiflash_nsectors(sc, bp) * sc->sc_erase_size;
	blkno = bp->b_blkno;
	base = (blkno * DEV_BSIZE) & ~(sc->sc_erase_size - 1);

	/* get ourselves a scratch buffer */
	save = malloc(len, M_DEVBUF, M_WAITOK);

	disk_busy(&sc->sc_dk);
	/* read in as much of the data as we need */
	DPRINTF(("reading in %d bytes\n", len));
	if ((err = sc->sc_read(sc, base, len, save)) != 0) {
		bufq_move(sc->sc_doneq, sc->sc_workq);
		spiflash_process_done(sc, err);
		return;
	}

	/*
	 * now coalesce the writes into the save area, but also
	 * check to see if we need to do an erase
	 */
	while ((bp = bufq_get(sc->sc_workq)) != NULL) {
		uint8_t *data, *dst;
		int resid = bp->b_resid;

		DPRINTF(("coalesce write, blkno %x, count %d, resid %d\n",
		    (unsigned)bp->b_blkno, bp->b_bcount, resid));

		data = bp->b_data;
		dst = save + (bp->b_blkno * DEV_BSIZE) - base;

		/*
		 * NOR flash bits.  We can clear a bit, but we cannot
		 * set a bit without erasing.  (E.g. programming 0xff
		 * down to 0x00 needs no erase, but going from 0x00
		 * back to 0xff does.)  This check should help reduce
		 * unnecessary erases.
		 */
		while (resid) {
			if ((*data) & ~(*dst))
				neederase = 1;
			*dst++ = *data++;
			resid--;
		}

		bufq_put(sc->sc_doneq, bp);
	}

	/*
	 * do the erase, if we need to.
	 */
	if (neederase) {
		DPRINTF(("erasing from %zx - %zx\n", base, base + len));
		if ((err = sc->sc_erase(sc, base, len)) != 0) {
			spiflash_process_done(sc, err);
			return;
		}
	}

	/*
	 * now write our save area, and finish up.
	 */
	DPRINTF(("flashing %d bytes to %zx from %p\n", len, base, save));
	err = sc->sc_write(sc, base, len, save);
	spiflash_process_done(sc, err);
}


int
spiflash_nsectors(spiflash_handle_t sc, struct buf *bp)
{
	unsigned	addr, sector;

	addr = bp->b_blkno * DEV_BSIZE;
	sector = addr / sc->sc_erase_size;

	addr += bp->b_bcount;
	addr--;
	return (((addr / sc->sc_erase_size) - sector) + 1);
}

int
spiflash_sector(spiflash_handle_t sc, struct buf *bp)
{
	unsigned	addr, sector;

	addr = bp->b_blkno * DEV_BSIZE;
	sector = addr / sc->sc_erase_size;

	/* if it spans multiple blocks, error it */
	addr += bp->b_bcount;
	addr--;
	if (sector != (addr / sc->sc_erase_size))
		return -1;

	return sector;
}

void
spiflash_thread(void *arg)
{
	spiflash_handle_t sc = arg;
	struct buf	*bp;
	int		sector;

	(void)splbio();
	for (;;) {
		if ((bp = bufq_get(sc->sc_waitq)) == NULL) {
			tsleep(&sc->sc_thread, PRIBIO, "spiflash_thread", 0);
			continue;
		}

		bufq_put(sc->sc_workq, bp);

		if (bp->b_flags & B_READ) {
			/* just do the read */
			spiflash_process_read(sc);
			continue;
		}

		/*
		 * Because writing a flash filesystem is particularly
		 * painful, involving erase, modify, write, we prefer
		 * to coalesce writes to the same sector together.
		 */

		sector = spiflash_sector(sc, bp);

		/*
		 * If the write spans multiple sectors, skip
		 * coalescing.  (It would be nice if we could break
		 * these up.  minphys is honored for read/write, but
		 * not necessarily for bread.)
		 */
		if (sector < 0)
			goto dowrite;

		while ((bp = bufq_peek(sc->sc_waitq)) != NULL) {
			/* can't deal with read requests! */
			if (bp->b_flags & B_READ)
				break;

			/* is it for the same sector? */
			if (spiflash_sector(sc, bp) != sector)
				break;

			bp = bufq_get(sc->sc_waitq);
			bufq_put(sc->sc_workq, bp);
		}

	dowrite:
		spiflash_process_write(sc);
	}
}
/*
 * SPI flash common implementation.
 */

/*
 * Most devices take on the order of 1 second for each block that they
 * erase.
 */
int
spiflash_common_erase(spiflash_handle_t sc, size_t start, size_t size)
{
	int		rv;

	if ((start % sc->sc_erase_size) || (size % sc->sc_erase_size))
		return EINVAL;

	/* the second test is to test against wrap */
	if ((start > sc->sc_device_size) ||
	    ((start + size) > sc->sc_device_size))
		return EINVAL;

	/*
	 * XXX: check protection status?  Requires master table mapping
	 * sectors to status bits, and so forth.
	 */

	while (size) {
		if ((rv = spiflash_write_enable(sc)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}
		if ((rv = spiflash_cmd(sc, SPIFLASH_CMD_ERASE, 3, start, 0,
		    NULL, NULL)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}

		/*
		 * The devices I have all say typical for sector erase
		 * is ~1sec.  We check ten times that often.  (There
		 * is no way to interrupt on this.)
		 */
		if ((rv = spiflash_wait(sc, hz / 10)) != 0)
			return rv;

		start += sc->sc_erase_size;
		size -= sc->sc_erase_size;

		/* NB: according to the docs I have, the write enable
		 * is automatically cleared upon completion of an erase
		 * command, so there is no need to explicitly disable it.
		 */
	}

	return 0;
}

int
spiflash_common_write(spiflash_handle_t sc, size_t start, size_t size,
    const uint8_t *data)
{
	int		rv;

	if ((start % sc->sc_write_size) || (size % sc->sc_write_size))
		return EINVAL;

	while (size) {
		int cnt;

		if ((rv = spiflash_write_enable(sc)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}

		cnt = uimin(size, sc->sc_write_size);
		if ((rv = spiflash_cmd(sc, SPIFLASH_CMD_PROGRAM, 3, start,
		    cnt, data, NULL)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}

		/*
		 * It seems that most devices can write bits fairly
		 * quickly.  For example, one part I have access to
		 * takes ~5msec to process the entire 256 byte page.
		 * Probably this should be modified to cope with
		 * device-specific timing, and maybe also take into
		 * account systems with higher values of HZ (which
		 * could benefit from sleeping.)
		 */
		if ((rv = spiflash_wait(sc, 0)) != 0)
			return rv;

		data += cnt;
		start += cnt;
		size -= cnt;
	}

	return 0;
}

int
spiflash_common_read(spiflash_handle_t sc, size_t start, size_t size,
    uint8_t *data)
{
	int		rv;

	while (size) {
		int cnt;

		if (sc->sc_read_size > 0)
			cnt = uimin(size, sc->sc_read_size);
		else
			cnt = size;

		if ((rv = spiflash_cmd(sc, SPIFLASH_CMD_READ, 3, start,
		    cnt, NULL, data)) != 0) {
			return rv;
		}

		data += cnt;
		start += cnt;
		size -= cnt;
	}

	return 0;
}

/* read status register */
int
spiflash_read_status(spiflash_handle_t sc, uint8_t *sr)
{

	return spiflash_cmd(sc, SPIFLASH_CMD_RDSR, 0, 0, 1, NULL, sr);
}

int
spiflash_write_enable(spiflash_handle_t sc)
{

	return spiflash_cmd(sc, SPIFLASH_CMD_WREN, 0, 0, 0, NULL, NULL);
}

int
spiflash_write_disable(spiflash_handle_t sc)
{

	return spiflash_cmd(sc, SPIFLASH_CMD_WRDI, 0, 0, 0, NULL, NULL);
}

/*
 * Issue a flash command: one command byte, followed by an optional
 * address of up to three bytes (sent most-significant byte first),
 * followed by an optional data phase that either writes wdata or
 * reads into rdata.
 */
int
spiflash_cmd(spiflash_handle_t sc, uint8_t cmd,
    size_t addrlen, uint32_t addr,
    size_t cnt, const uint8_t *wdata, uint8_t *rdata)
{
	struct spi_transfer	trans;
	struct spi_chunk	chunk1, chunk2;
	char buf[4];
	int i;

	buf[0] = cmd;

	if (addrlen > 3)
		return EINVAL;

	for (i = addrlen; i > 0; i--) {
		buf[i] = addr & 0xff;
		addr >>= 8;
	}
	spi_transfer_init(&trans);
	spi_chunk_init(&chunk1, addrlen + 1, buf, NULL);
	spi_transfer_add(&trans, &chunk1);
	if (cnt) {
		spi_chunk_init(&chunk2, cnt, wdata, rdata);
		spi_transfer_add(&trans, &chunk2);
	}

	spi_transfer(sc->sc_handle, &trans);
	spi_wait(&trans);

	if (trans.st_flags & SPI_F_ERROR)
		return trans.st_errno;
	return 0;
}

int
spiflash_wait(spiflash_handle_t sc, int tmo)
{
	int	rv;
	uint8_t	sr;

	for (;;) {
		if ((rv = spiflash_read_status(sc, &sr)) != 0)
			return rv;

		if ((sr & SPIFLASH_SR_BUSY) == 0)
			break;
		/*
		 * The devices I have all say typical for sector
		 * erase is ~1sec.  We check ten times that often.
		 * (There is no way to interrupt on this.)
		 */
		if (tmo)
			tsleep(&sr, PWAIT, "spiflash_wait", tmo);
	}
	return 0;
}