/* $NetBSD: spiflash.c,v 1.19 2016/08/19 03:23:39 jakllsch Exp $ */

/*-
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * Portions of this code were written by Garrett D'Amore for the
 * Champaign-Urbana Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *      This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spiflash.c,v 1.19 2016/08/19 03:23:39 jakllsch Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/uio.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/errno.h>

#include <dev/spi/spivar.h>
#include <dev/spi/spiflash.h>

/*
 * This is an MI block driver for SPI flash devices.  It could probably be
 * converted to some more generic framework, if someone wanted to create one
 * for NOR flashes.  Note that some flashes have the ability to handle
 * interrupts.
 */

struct spiflash_softc {
	struct disk sc_dk;

	struct spiflash_hw_if sc_hw;
	void *sc_cookie;

	const char *sc_name;
	struct spi_handle *sc_handle;
	int sc_device_size;
	int sc_write_size;
	int sc_erase_size;
	int sc_read_size;
	int sc_device_blks;

	struct bufq_state *sc_waitq;
	struct bufq_state *sc_workq;
	struct bufq_state *sc_doneq;
	lwp_t *sc_thread;
};

#define sc_getname	sc_hw.sf_getname
#define sc_gethandle	sc_hw.sf_gethandle
#define sc_getsize	sc_hw.sf_getsize
#define sc_getflags	sc_hw.sf_getflags
#define sc_erase	sc_hw.sf_erase
#define sc_write	sc_hw.sf_write
#define sc_read		sc_hw.sf_read
#define sc_getstatus	sc_hw.sf_getstatus
#define sc_setstatus	sc_hw.sf_setstatus

struct spiflash_attach_args {
	const struct spiflash_hw_if *hw;
	void *cookie;
};

#define STATIC
STATIC int spiflash_match(device_t, cfdata_t, void *);
STATIC void spiflash_attach(device_t, device_t, void *);
STATIC int spiflash_print(void *, const char *);
STATIC int spiflash_common_erase(spiflash_handle_t, size_t, size_t);
STATIC int spiflash_common_write(spiflash_handle_t, size_t, size_t,
    const uint8_t *);
STATIC int spiflash_common_read(spiflash_handle_t, size_t, size_t, uint8_t *);
STATIC void spiflash_process_done(spiflash_handle_t, int);
STATIC void spiflash_process_read(spiflash_handle_t);
STATIC void spiflash_process_write(spiflash_handle_t);
STATIC void spiflash_thread(void *);
STATIC int spiflash_nsectors(spiflash_handle_t, struct buf *);
STATIC int spiflash_sector(spiflash_handle_t, struct buf *);

CFATTACH_DECL_NEW(spiflash, sizeof(struct spiflash_softc),
    spiflash_match, spiflash_attach, NULL, NULL);

#ifdef SPIFLASH_DEBUG
#define DPRINTF(x)	do { printf x; } while (0/*CONSTCOND*/)
#else
#define DPRINTF(x)	do { } while (0/*CONSTCOND*/)
#endif

extern struct cfdriver spiflash_cd;

dev_type_open(spiflash_open);
dev_type_close(spiflash_close);
dev_type_read(spiflash_read);
dev_type_write(spiflash_write);
dev_type_ioctl(spiflash_ioctl);
dev_type_strategy(spiflash_strategy);

const struct bdevsw spiflash_bdevsw = {
	.d_open = spiflash_open,
	.d_close = spiflash_close,
	.d_strategy = spiflash_strategy,
	.d_ioctl = spiflash_ioctl,
	.d_dump = nodump,
	.d_psize = nosize,
	.d_discard = nodiscard,
	.d_flag = D_DISK,
};

const struct cdevsw spiflash_cdevsw = {
	.d_open = spiflash_open,
	.d_close = spiflash_close,
	.d_read = spiflash_read,
	.d_write = spiflash_write,
	.d_ioctl = spiflash_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK,
};

static struct dkdriver spiflash_dkdriver = {
	.d_strategy = spiflash_strategy
};

spiflash_handle_t
spiflash_attach_mi(const struct spiflash_hw_if *hw, void *cookie,
    device_t dev)
{
	struct spiflash_attach_args sfa;
	sfa.hw = hw;
	sfa.cookie = cookie;

	return (spiflash_handle_t)config_found(dev, &sfa, spiflash_print);
}
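
/*
 * Example (sketch, not from the original sources): a chip-specific
 * front-end would typically fill in a struct spiflash_hw_if and hand it
 * to spiflash_attach_mi() from its own attach routine.  The mychip_*
 * names below are hypothetical; see dev/spi/spiflash.h for the full
 * interface.
 *
 *	static const struct spiflash_hw_if mychip_hw_if = {
 *		.sf_getname = mychip_getname,
 *		.sf_gethandle = mychip_gethandle,
 *		.sf_getsize = mychip_getsize,
 *		.sf_getflags = mychip_getflags,
 *	};
 *
 *	spiflash_attach_mi(&mychip_hw_if, sc, self);
 *
 * The sf_read, sf_write and sf_erase hooks may be left NULL, in which
 * case the spiflash_common_*() implementations below are used.
 */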

int
spiflash_print(void *aux, const char *pnp)
{
	if (pnp != NULL)
		printf("spiflash at %s\n", pnp);

	return UNCONF;
}

int
spiflash_match(device_t parent, cfdata_t cf, void *aux)
{

	return 1;
}

void
spiflash_attach(device_t parent, device_t self, void *aux)
{
	struct spiflash_softc *sc = device_private(self);
	struct spiflash_attach_args *sfa = aux;
	void *cookie = sfa->cookie;

	sc->sc_hw = *sfa->hw;
	sc->sc_cookie = cookie;
	sc->sc_name = sc->sc_getname(cookie);
	sc->sc_handle = sc->sc_gethandle(cookie);
	sc->sc_device_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_DEVICE);
	sc->sc_erase_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_ERASE);
	sc->sc_write_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_WRITE);
	sc->sc_read_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_READ);
	sc->sc_device_blks = sc->sc_device_size / DEV_BSIZE;

	if (sc->sc_read == NULL)
		sc->sc_read = spiflash_common_read;
	if (sc->sc_write == NULL)
		sc->sc_write = spiflash_common_write;
	if (sc->sc_erase == NULL)
		sc->sc_erase = spiflash_common_erase;

	aprint_naive(": SPI flash\n");
	aprint_normal(": %s SPI flash\n", sc->sc_name);
	/* XXX: note that this has to change for boot-sectored flash */
	aprint_normal_dev(self, "%d KB, %d sectors of %d KB each\n",
	    sc->sc_device_size / 1024,
	    sc->sc_device_size / sc->sc_erase_size,
	    sc->sc_erase_size / 1024);

	/* first-come first-served strategy works best for us */
	bufq_alloc(&sc->sc_waitq, "fcfs", BUFQ_SORT_RAWBLOCK);
	bufq_alloc(&sc->sc_workq, "fcfs", BUFQ_SORT_RAWBLOCK);
	bufq_alloc(&sc->sc_doneq, "fcfs", BUFQ_SORT_RAWBLOCK);

	disk_init(&sc->sc_dk, device_xname(self), &spiflash_dkdriver);
	disk_attach(&sc->sc_dk);

	/* arrange to allocate the kthread */
	kthread_create(PRI_NONE, 0, NULL, spiflash_thread, sc,
	    &sc->sc_thread, "spiflash");
}
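
/*
 * I/O pipeline overview (summary of the code below): spiflash_strategy()
 * validates incoming buffers, queues them on sc_waitq and wakes the worker
 * thread.  spiflash_thread() moves buffers onto sc_workq, coalescing writes
 * that fall within the same erase sector.  spiflash_process_read() and
 * spiflash_process_write() perform the transfers and move completed buffers
 * to sc_doneq, where spiflash_process_done() finishes them with biodone().
 */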

int
spiflash_open(dev_t dev, int flags, int mode, struct lwp *l)
{
	spiflash_handle_t sc;

	sc = device_lookup_private(&spiflash_cd, DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	/*
	 * XXX: We need to handle partitions here.  The problem is
	 * that it isn't entirely clear to me how to deal with this.
	 * There are devices that could be used "in the raw" with a
	 * NetBSD label, but then you get into devices that have other
	 * kinds of data on them -- some have VxWorks data, some have
	 * RedBoot data, and some have other constraints -- for example
	 * some devices might have a portion that is read-only,
	 * whereas others might have a portion that is read-write.
	 *
	 * For now we just permit access to the entire device.
	 */
	return 0;
}

int
spiflash_close(dev_t dev, int flags, int mode, struct lwp *l)
{
	spiflash_handle_t sc;

	sc = device_lookup_private(&spiflash_cd, DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	return 0;
}

int
spiflash_read(dev_t dev, struct uio *uio, int ioflag)
{

	return physio(spiflash_strategy, NULL, dev, B_READ, minphys, uio);
}

int
spiflash_write(dev_t dev, struct uio *uio, int ioflag)
{

	return physio(spiflash_strategy, NULL, dev, B_WRITE, minphys, uio);
}

int
spiflash_ioctl(dev_t dev, u_long cmd, void *data, int flags, struct lwp *l)
{
	spiflash_handle_t sc;

	sc = device_lookup_private(&spiflash_cd, DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	return EINVAL;
}

void
spiflash_strategy(struct buf *bp)
{
	spiflash_handle_t sc;
	int s;

	sc = device_lookup_private(&spiflash_cd, DISKUNIT(bp->b_dev));
	if (sc == NULL) {
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}

	if (((bp->b_bcount % sc->sc_write_size) != 0) ||
	    (bp->b_blkno < 0)) {
		bp->b_error = EINVAL;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}

	/* no work? */
	if (bp->b_bcount == 0) {
		biodone(bp);
		return;
	}

	if (bounds_check_with_mediasize(bp, DEV_BSIZE,
	    sc->sc_device_blks) <= 0) {
		biodone(bp);
		return;
	}

	bp->b_resid = bp->b_bcount;

	/* all ready, hand off to thread for async processing */
	s = splbio();
	bufq_put(sc->sc_waitq, bp);
	wakeup(&sc->sc_thread);
	splx(s);
}

void
spiflash_process_done(spiflash_handle_t sc, int err)
{
	struct buf *bp;
	int cnt = 0;
	int flag = 0;

	while ((bp = bufq_get(sc->sc_doneq)) != NULL) {
		flag = bp->b_flags & B_READ;
		if ((bp->b_error = err) == 0)
			bp->b_resid = 0;
		cnt += bp->b_bcount - bp->b_resid;
		biodone(bp);
	}
	disk_unbusy(&sc->sc_dk, cnt, flag);
}

void
spiflash_process_read(spiflash_handle_t sc)
{
	struct buf *bp;
	int err = 0;

	disk_busy(&sc->sc_dk);
	while ((bp = bufq_get(sc->sc_workq)) != NULL) {
		size_t addr = bp->b_blkno * DEV_BSIZE;
		uint8_t *data = bp->b_data;
		int cnt = bp->b_resid;

		bufq_put(sc->sc_doneq, bp);

		DPRINTF(("read from addr %x, cnt %d\n", (unsigned)addr, cnt));

		if ((err = sc->sc_read(sc, addr, cnt, data)) != 0) {
			/* error occurred, fail all pending workq bufs */
			bufq_move(sc->sc_doneq, sc->sc_workq);
			break;
		}

		bp->b_resid -= cnt;
		data += cnt;
		addr += cnt;
	}
	spiflash_process_done(sc, err);
}
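
/*
 * Illustration of the NOR write rule used below (a sketch, not part of
 * the original sources): programming can only clear bits (1 -> 0); an
 * erase is what returns a sector to all 0xff.  Writing 0x3c over a
 * stored 0xf0 would require bits 2 and 3 to go from 0 to 1, so the
 * enclosing erase sector must be erased first; writing 0x30 over 0xf0
 * only clears bits and needs no erase.  spiflash_process_write()
 * detects the former case with the (*data & ~(*dst)) test.
 */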

void
spiflash_process_write(spiflash_handle_t sc)
{
	int len;
	size_t base;
	daddr_t blkno;
	uint8_t *save;
	int err = 0, neederase = 0;
	struct buf *bp;

	/*
	 * due to other considerations, we are guaranteed that
	 * we will only have multiple buffers if they are all in
	 * the same erase sector.  Therefore we never need to look
	 * beyond the first block to determine how much data we need
	 * to save.
	 */

	bp = bufq_peek(sc->sc_workq);
	len = spiflash_nsectors(sc, bp) * sc->sc_erase_size;
	blkno = bp->b_blkno;
	base = (blkno * DEV_BSIZE) & ~(sc->sc_erase_size - 1);

	/* get ourself a scratch buffer */
	save = malloc(len, M_DEVBUF, M_WAITOK);

	disk_busy(&sc->sc_dk);
	/* read in as much of the data as we need */
	DPRINTF(("reading in %d bytes\n", len));
	if ((err = sc->sc_read(sc, base, len, save)) != 0) {
		bufq_move(sc->sc_doneq, sc->sc_workq);
		spiflash_process_done(sc, err);
		free(save, M_DEVBUF);
		return;
	}

	/*
	 * now coalesce the writes into the save area, but also
	 * check to see if we need to do an erase
	 */
	while ((bp = bufq_get(sc->sc_workq)) != NULL) {
		uint8_t *data, *dst;
		int resid = bp->b_resid;

		DPRINTF(("coalesce write, blkno %x, count %d, resid %d\n",
		    (unsigned)bp->b_blkno, bp->b_bcount, resid));

		data = bp->b_data;
		dst = save + (bp->b_blkno * DEV_BSIZE) - base;

		/*
		 * NOR flash bits.  We can clear a bit, but we cannot
		 * set a bit, without erasing.  This should help reduce
		 * unnecessary erases.
		 */
		while (resid) {
			if ((*data) & ~(*dst))
				neederase = 1;
			*dst++ = *data++;
			resid--;
		}

		bufq_put(sc->sc_doneq, bp);
	}

	/*
	 * do the erase, if we need to.
	 */
	if (neederase) {
		DPRINTF(("erasing from %zx - %zx\n", base, base + len));
		if ((err = sc->sc_erase(sc, base, len)) != 0) {
			spiflash_process_done(sc, err);
			free(save, M_DEVBUF);
			return;
		}
	}

	/*
	 * now write our save area, and finish up.
	 */
	DPRINTF(("flashing %d bytes to %zx from %p\n", len, base, save));
	err = sc->sc_write(sc, base, len, save);
	spiflash_process_done(sc, err);

	/* done with the scratch buffer */
	free(save, M_DEVBUF);
}

int
spiflash_nsectors(spiflash_handle_t sc, struct buf *bp)
{
	unsigned addr, sector;

	addr = bp->b_blkno * DEV_BSIZE;
	sector = addr / sc->sc_erase_size;

	addr += bp->b_bcount;
	addr--;
	return (((addr / sc->sc_erase_size) - sector) + 1);
}

int
spiflash_sector(spiflash_handle_t sc, struct buf *bp)
{
	unsigned addr, sector;

	addr = bp->b_blkno * DEV_BSIZE;
	sector = addr / sc->sc_erase_size;

	/* if it spans multiple blocks, error it */
	addr += bp->b_bcount;
	addr--;
	if (sector != (addr / sc->sc_erase_size))
		return -1;

	return sector;
}
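
/*
 * Worked example for spiflash_nsectors()/spiflash_sector() above (a
 * sketch; assumes a DEV_BSIZE of 512 and a 64 KB erase sector): a buffer
 * with b_blkno 127 and b_bcount 1024 covers byte addresses 65024-66047,
 * crossing the 64 KB boundary, so spiflash_sector() returns -1 and
 * spiflash_nsectors() returns 2.  The same buffer at b_blkno 126 stays
 * within sector 0, so spiflash_sector() returns 0 and
 * spiflash_nsectors() returns 1.
 */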

void
spiflash_thread(void *arg)
{
	spiflash_handle_t sc = arg;
	struct buf *bp;
	int sector;

	(void)splbio();
	for (;;) {
		if ((bp = bufq_get(sc->sc_waitq)) == NULL) {
			tsleep(&sc->sc_thread, PRIBIO, "spiflash_thread", 0);
			continue;
		}

		bufq_put(sc->sc_workq, bp);

		if (bp->b_flags & B_READ) {
			/* just do the read */
			spiflash_process_read(sc);
			continue;
		}

		/*
		 * Because writing a flash filesystem is particularly
		 * painful, involving erase, modify, write, we prefer
		 * to coalesce writes to the same sector together.
		 */

		sector = spiflash_sector(sc, bp);

		/*
		 * if the write spans multiple sectors, skip
		 * coalescing.  (It would be nice if we could break
		 * these up.  minphys is honored for read/write, but
		 * not necessarily for bread.)
		 */
		if (sector < 0)
			goto dowrite;

		while ((bp = bufq_peek(sc->sc_waitq)) != NULL) {
			/* can't deal with read requests! */
			if (bp->b_flags & B_READ)
				break;

			/* is it for the same sector? */
			if (spiflash_sector(sc, bp) != sector)
				break;

			bp = bufq_get(sc->sc_waitq);
			bufq_put(sc->sc_workq, bp);
		}

	dowrite:
		spiflash_process_write(sc);
	}
}

/*
 * SPI flash common implementation.
 */

/*
 * Most devices take on the order of 1 second for each block that they
 * erase.
 */
int
spiflash_common_erase(spiflash_handle_t sc, size_t start, size_t size)
{
	int rv;

	if ((start % sc->sc_erase_size) || (size % sc->sc_erase_size))
		return EINVAL;

	/* the second test is to test against wrap */
	if ((start > sc->sc_device_size) ||
	    ((start + size) > sc->sc_device_size))
		return EINVAL;

	/*
	 * XXX: check protection status?  Requires master table mapping
	 * sectors to status bits, and so forth.
	 */

	while (size) {
		if ((rv = spiflash_write_enable(sc)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}
		if ((rv = spiflash_cmd(sc, SPIFLASH_CMD_ERASE, 3, start, 0,
		    NULL, NULL)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}

		/*
		 * The devices I have all say typical for sector erase
		 * is ~1sec.  We check ten times that often.  (There
		 * is no way to interrupt on this.)
		 */
		if ((rv = spiflash_wait(sc, hz / 10)) != 0)
			return rv;

		start += sc->sc_erase_size;
		size -= sc->sc_erase_size;

		/* NB: according to the docs I have, the write enable
		 * is automatically cleared upon completion of an erase
		 * command, so there is no need to explicitly disable it.
		 */
	}

	return 0;
}

int
spiflash_common_write(spiflash_handle_t sc, size_t start, size_t size,
    const uint8_t *data)
{
	int rv;

	if ((start % sc->sc_write_size) || (size % sc->sc_write_size))
		return EINVAL;

	while (size) {
		int cnt;

		if ((rv = spiflash_write_enable(sc)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}

		cnt = min(size, sc->sc_write_size);
		if ((rv = spiflash_cmd(sc, SPIFLASH_CMD_PROGRAM, 3, start,
		    cnt, data, NULL)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}

		/*
		 * It seems that most devices can write bits fairly
		 * quickly.  For example, one part I have access to
		 * takes ~5msec to process the entire 256 byte page.
		 * Probably this should be modified to cope with
		 * device-specific timing, and maybe also take into
		 * account systems with higher values of HZ (which
		 * could benefit from sleeping.)
		 */
		if ((rv = spiflash_wait(sc, 0)) != 0)
			return rv;

		data += cnt;
		start += cnt;
		size -= cnt;
	}

	return 0;
}

int
spiflash_common_read(spiflash_handle_t sc, size_t start, size_t size,
    uint8_t *data)
{
	int rv;

	while (size) {
		int cnt;

		if (sc->sc_read_size > 0)
			cnt = min(size, sc->sc_read_size);
		else
			cnt = size;

		if ((rv = spiflash_cmd(sc, SPIFLASH_CMD_READ, 3, start,
		    cnt, NULL, data)) != 0) {
			return rv;
		}

		data += cnt;
		start += cnt;
		size -= cnt;
	}

	return 0;
}

/* read status register */
int
spiflash_read_status(spiflash_handle_t sc, uint8_t *sr)
{

	return spiflash_cmd(sc, SPIFLASH_CMD_RDSR, 0, 0, 1, NULL, sr);
}

int
spiflash_write_enable(spiflash_handle_t sc)
{

	return spiflash_cmd(sc, SPIFLASH_CMD_WREN, 0, 0, 0, NULL, NULL);
}

int
spiflash_write_disable(spiflash_handle_t sc)
{

	return spiflash_cmd(sc, SPIFLASH_CMD_WRDI, 0, 0, 0, NULL, NULL);
}
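
/*
 * spiflash_cmd() framing (descriptive summary): a command goes out as a
 * single SPI transfer made of two chunks -- a header chunk holding the
 * opcode byte plus up to three big-endian address bytes, followed, when
 * cnt is non-zero, by a data chunk of cnt bytes that is either written
 * from wdata or read into rdata.  For example, a 16-byte read at offset
 * 0x1234 sends { SPIFLASH_CMD_READ, 0x00, 0x12, 0x34 } and then clocks
 * 16 bytes back into the caller's buffer.
 */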

int
spiflash_cmd(spiflash_handle_t sc, uint8_t cmd,
    size_t addrlen, uint32_t addr,
    size_t cnt, const uint8_t *wdata, uint8_t *rdata)
{
	struct spi_transfer trans;
	struct spi_chunk chunk1, chunk2;
	char buf[4];
	int i;

	buf[0] = cmd;

	if (addrlen > 3)
		return EINVAL;

	for (i = addrlen; i > 0; i--) {
		buf[i] = addr & 0xff;
		addr >>= 8;
	}
	spi_transfer_init(&trans);
	spi_chunk_init(&chunk1, addrlen + 1, buf, NULL);
	spi_transfer_add(&trans, &chunk1);
	if (cnt) {
		spi_chunk_init(&chunk2, cnt, wdata, rdata);
		spi_transfer_add(&trans, &chunk2);
	}

	spi_transfer(sc->sc_handle, &trans);
	spi_wait(&trans);

	if (trans.st_flags & SPI_F_ERROR)
		return trans.st_errno;
	return 0;
}

int
spiflash_wait(spiflash_handle_t sc, int tmo)
{
	int rv;
	uint8_t sr;

	for (;;) {
		if ((rv = spiflash_read_status(sc, &sr)) != 0)
			return rv;

		if ((sr & SPIFLASH_SR_BUSY) == 0)
			break;
		/*
		 * The devices I have all say typical for sector
		 * erase is ~1sec.  We check ten times that often.
		 * (There is no way to interrupt on this.)
		 */
		if (tmo)
			tsleep(&sr, PWAIT, "spiflash_wait", tmo);
	}
	return 0;
}