/* $NetBSD: spiflash.c,v 1.1 2006/10/07 07:21:13 gdamore Exp $ */

/*-
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * Portions of this code were written by Garrett D'Amore for the
 * Champaign-Urbana Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *      This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spiflash.c,v 1.1 2006/10/07 07:21:13 gdamore Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/uio.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/errno.h>

#include <dev/spi/spivar.h>
#include <dev/spi/spiflash.h>

/*
 * This is an MI block driver for SPI flash devices.  It could probably be
 * converted to some more generic framework, if someone wanted to create one
 * for NOR flashes.  Note that some flashes have the ability to handle
 * interrupts.
 */
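
/*
 * A hardware-specific front-end is expected to fill in a spiflash_hw_if
 * (sf_getname, sf_gethandle, sf_getsize, and optionally sf_erase, sf_write,
 * sf_read, sf_getstatus, sf_setstatus) and pass it to spiflash_attach_mi()
 * from its own attach routine.  A minimal sketch, using a hypothetical
 * "myflash" front-end, might look like this:
 *
 *	static const struct spiflash_hw_if myflash_hw_if = {
 *		.sf_getname = myflash_getname,
 *		.sf_gethandle = myflash_gethandle,
 *		.sf_getsize = myflash_getsize,
 *	};
 *
 *	static void
 *	myflash_attach(struct device *parent, struct device *self, void *aux)
 *	{
 *		struct myflash_softc *sc = device_private(self);
 *
 *		... probe the chip, record sizes and the spi_handle in sc ...
 *
 *		spiflash_attach_mi(&myflash_hw_if, sc, self);
 *	}
 *
 * Operations left NULL in the spiflash_hw_if fall back to the
 * spiflash_common_* routines below.
 */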

struct spiflash_softc {
	struct device	sc_dev;
	struct disk	sc_dk;

	struct spiflash_hw_if sc_hw;
	void		*sc_cookie;

	const char	*sc_name;
	struct spi_handle *sc_handle;
	int		sc_device_size;
	int		sc_write_size;
	int		sc_erase_size;
	int		sc_read_size;
	int		sc_device_blks;

	struct bufq_state *sc_bufq;
	struct proc	*sc_thread;
};

#define	sc_getname	sc_hw.sf_getname
#define	sc_gethandle	sc_hw.sf_gethandle
#define	sc_getsize	sc_hw.sf_getsize
#define	sc_getflags	sc_hw.sf_getflags
#define	sc_erase	sc_hw.sf_erase
#define	sc_write	sc_hw.sf_write
#define	sc_read		sc_hw.sf_read
#define	sc_getstatus	sc_hw.sf_getstatus
#define	sc_setstatus	sc_hw.sf_setstatus

struct spiflash_attach_args {
	const struct spiflash_hw_if *hw;
	void	*cookie;
};

#define	STATIC
STATIC int spiflash_match(struct device *, struct cfdata *, void *);
STATIC void spiflash_attach(struct device *, struct device *, void *);
STATIC int spiflash_print(void *, const char *);
STATIC int spiflash_common_erase(spiflash_handle_t, size_t, size_t);
STATIC int spiflash_common_write(spiflash_handle_t, size_t, size_t,
    const uint8_t *);
STATIC int spiflash_common_read(spiflash_handle_t, size_t, size_t, uint8_t *);
STATIC void spiflash_process(spiflash_handle_t, struct buf *);
STATIC void spiflash_thread(void *);
STATIC void spiflash_thread_create(void *);

CFATTACH_DECL(spiflash, sizeof(struct spiflash_softc),
    spiflash_match, spiflash_attach, NULL, NULL);

extern struct cfdriver spiflash_cd;

dev_type_open(spiflash_open);
dev_type_close(spiflash_close);
dev_type_read(spiflash_read);
dev_type_write(spiflash_write);
dev_type_ioctl(spiflash_ioctl);
dev_type_strategy(spiflash_strategy);

const struct bdevsw spiflash_bdevsw = {
	.d_open = spiflash_open,
	.d_close = spiflash_close,
	.d_strategy = spiflash_strategy,
	.d_ioctl = spiflash_ioctl,
	.d_dump = nodump,
	.d_psize = nosize,
	.d_type = D_DISK,
};

const struct cdevsw spiflash_cdevsw = {
	.d_open = spiflash_open,
	.d_close = spiflash_close,
	.d_read = spiflash_read,
	.d_write = spiflash_write,
	.d_ioctl = spiflash_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_type = D_DISK,
};

static struct dkdriver spiflash_dkdriver = { spiflash_strategy, NULL };

spiflash_handle_t
spiflash_attach_mi(const struct spiflash_hw_if *hw, void *cookie,
    struct device *dev)
{
	struct spiflash_attach_args sfa;
	sfa.hw = hw;
	sfa.cookie = cookie;

	return (spiflash_handle_t)config_found(dev, &sfa, spiflash_print);
}

int
spiflash_print(void *aux, const char *pnp)
{
	if (pnp != NULL)
		printf("spiflash at %s\n", pnp);

	return UNCONF;
}

int
spiflash_match(struct device *parent, struct cfdata *cf, void *aux)
{

	return 1;
}

void
spiflash_attach(struct device *parent, struct device *self, void *aux)
{
	struct spiflash_softc *sc = device_private(self);
	struct spiflash_attach_args *sfa = aux;
	void *cookie = sfa->cookie;

	sc->sc_hw = *sfa->hw;
	sc->sc_cookie = cookie;
	sc->sc_name = sc->sc_getname(cookie);
	sc->sc_handle = sc->sc_gethandle(cookie);
	sc->sc_device_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_DEVICE);
	sc->sc_erase_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_ERASE);
	sc->sc_write_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_WRITE);
	sc->sc_read_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_READ);
	sc->sc_device_blks = sc->sc_device_size / DEV_BSIZE;

	if (sc->sc_read == NULL)
		sc->sc_read = spiflash_common_read;
	if (sc->sc_write == NULL)
		sc->sc_write = spiflash_common_write;
	if (sc->sc_erase == NULL)
		sc->sc_erase = spiflash_common_erase;

	aprint_naive(": SPI flash\n");
	aprint_normal(": %s SPI flash\n", sc->sc_name);
	/* XXX: note that this has to change for boot-sectored flash */
	aprint_normal("%s: %d KB, %d sectors of %d KB each\n",
	    sc->sc_dev.dv_xname, sc->sc_device_size / 1024,
	    sc->sc_device_size / sc->sc_erase_size,
	    sc->sc_erase_size / 1024);

	/* first-come first-served strategy works best for us */
	bufq_alloc(&sc->sc_bufq, "fcfs", BUFQ_SORT_RAWBLOCK);

	/* arrange to allocate the kthread */
	kthread_create(spiflash_thread_create, sc);

	sc->sc_dk.dk_driver = &spiflash_dkdriver;
	sc->sc_dk.dk_name = sc->sc_dev.dv_xname;

	disk_attach(&sc->sc_dk);
}

int
spiflash_open(dev_t dev, int flags, int mode, struct lwp *l)
{
	spiflash_handle_t sc;

	if ((sc = device_lookup(&spiflash_cd, DISKUNIT(dev))) == NULL)
		return ENXIO;

	/*
	 * XXX: We need to handle partitions here.  The problem is
	 * that it isn't entirely clear to me how to deal with this.
	 * There are devices that could be used "in the raw" with a
	 * NetBSD label, but then you get into devices that have other
	 * kinds of data on them -- some have VxWorks data, some have
	 * RedBoot data, and some have other constraints -- for example
	 * some devices might have a portion that is read-only,
	 * whereas others might have a portion that is read-write.
	 *
	 * For now we just permit access to the entire device.
	 */
	return 0;
}

int
spiflash_close(dev_t dev, int flags, int mode, struct lwp *l)
{
	spiflash_handle_t sc;

	if ((sc = device_lookup(&spiflash_cd, DISKUNIT(dev))) == NULL)
		return ENXIO;

	return 0;
}

int
spiflash_read(dev_t dev, struct uio *uio, int ioflag)
{

	return physio(spiflash_strategy, NULL, dev, B_READ, minphys, uio);
}

int
spiflash_write(dev_t dev, struct uio *uio, int ioflag)
{

	return physio(spiflash_strategy, NULL, dev, B_WRITE, minphys, uio);
}

int
spiflash_ioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct lwp *l)
{
	spiflash_handle_t sc;

	if ((sc = device_lookup(&spiflash_cd, DISKUNIT(dev))) == NULL)
		return ENXIO;

	return EINVAL;
}

void
spiflash_strategy(struct buf *bp)
{
	spiflash_handle_t sc;
	int sz;
	int s;

	sc = device_lookup(&spiflash_cd, DISKUNIT(bp->b_dev));
	if (sc == NULL) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return;
	}

	if ((bp->b_bcount % sc->sc_write_size) ||
	    (bp->b_blkno < 0)) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return;
	}

	/* no work? */
	if (bp->b_bcount == 0) {
		biodone(bp);
		return;
	}

	sz = bp->b_bcount / DEV_BSIZE;

	if ((bp->b_blkno + sz) > sc->sc_device_blks) {
		sz = sc->sc_device_blks - bp->b_blkno;
		/* exactly at end of media?  return EOF */
		if (sz == 0) {
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			return;
		}
		if (sz < 0) {
			/* past end of disk */
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR;
			biodone(bp);
			return;
		}
		/* otherwise truncate it */
		bp->b_bcount = sz << DEV_BSHIFT;
	}

	bp->b_resid = bp->b_bcount;

	/* all ready, hand off to thread for async processing */
	s = splbio();
	BUFQ_PUT(sc->sc_bufq, bp);
	wakeup(&sc->sc_thread);
	splx(s);
}

void
spiflash_process(spiflash_handle_t sc, struct buf *bp)
{
	int cnt;
	size_t addr;
	uint8_t *data;

	addr = bp->b_blkno * DEV_BSIZE;
	data = bp->b_data;

	while (bp->b_resid > 0) {
		cnt = max(sc->sc_write_size, DEV_BSIZE);
		if (bp->b_flags & B_READ) {
			bp->b_error = sc->sc_read(sc, addr, cnt, data);
		} else {
			bp->b_error = sc->sc_write(sc, addr, cnt, data);
		}
		if (bp->b_error) {
			bp->b_flags |= B_ERROR;
			biodone(bp);
			return;
		}
		bp->b_resid -= cnt;
		data += cnt;
		addr += cnt;
	}
	biodone(bp);
}

void
spiflash_thread(void *arg)
{
	spiflash_handle_t sc = arg;
	struct buf *bp;
	int s;

	s = splbio();
	for (;;) {
		if ((bp = BUFQ_GET(sc->sc_bufq)) == NULL) {
			tsleep(&sc->sc_thread, PRIBIO, "spiflash_thread", 0);
			continue;
		}

		spiflash_process(sc, bp);
	}
}

void
spiflash_thread_create(void *arg)
{
	spiflash_handle_t sc = arg;

	kthread_create1(spiflash_thread, arg, &sc->sc_thread,
	    "spiflash_thread");
}

/*
 * SPI flash common implementation.
 */

/*
 * Most devices take on the order of 1 second for each sector that they
 * erase.
 */
int
spiflash_common_erase(spiflash_handle_t sc, size_t start, size_t size)
{
	int rv;

	if ((start % sc->sc_erase_size) || (size % sc->sc_erase_size))
		return EINVAL;

	/* the second test is to test against wrap */
	if ((start > sc->sc_device_size) ||
	    ((start + size) > sc->sc_device_size))
		return EINVAL;

	/*
	 * XXX: check protection status?  Requires master table mapping
	 * sectors to status bits, and so forth.
	 */

	while (size) {
		if ((rv = spiflash_write_enable(sc)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}
		if ((rv = spiflash_cmd(sc, SPIFLASH_CMD_ERASE, 3, start, 0,
		    NULL, NULL)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}

		/*
		 * The devices I have all say typical for sector erase
		 * is ~1sec.  We check ten times that often.  (There
		 * is no way to interrupt on this.)
		 */
		if ((rv = spiflash_wait(sc, hz / 10)) != 0)
			return rv;

		start += sc->sc_erase_size;
		size -= sc->sc_erase_size;

		/* NB: according to the docs I have, the write enable
		 * is automatically cleared upon completion of an erase
		 * command, so there is no need to explicitly disable it.
		 */
	}

	return 0;
}

int
spiflash_common_write(spiflash_handle_t sc, size_t start, size_t size,
    const uint8_t *data)
{
	int rv;

	if ((start % sc->sc_write_size) || (size % sc->sc_write_size))
		return EINVAL;

	/* the second test is to test against wrap */
	if ((start > sc->sc_device_size) ||
	    ((start + size) > sc->sc_device_size))
		return EINVAL;

	while (size) {
		int cnt;

		if ((rv = spiflash_write_enable(sc)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}

		cnt = min(size, sc->sc_write_size);
		if ((rv = spiflash_cmd(sc, SPIFLASH_CMD_PROGRAM, 3, start,
		    cnt, data, NULL)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}

		/*
		 * It seems that most devices can write bits fairly
		 * quickly.  For example, one part I have access to
		 * takes ~5msec to process the entire 256 byte page.
		 * Probably this should be modified to cope with
		 * device-specific timing, and maybe also take into
		 * account systems with higher values of HZ (which
		 * could benefit from sleeping.)
		 */
		if ((rv = spiflash_wait(sc, 0)) != 0)
			return rv;

		start += cnt;
		size -= cnt;
		data += cnt;
	}

	return 0;
}

int
spiflash_common_read(spiflash_handle_t sc, size_t start, size_t size,
    uint8_t *data)
{
	int rv;
	int align;

	align = sc->sc_write_size;
	if (sc->sc_read_size > 0)
		align = sc->sc_read_size;

	if ((start % align) || (size % align))
		return EINVAL;

	/* the second test is to test against wrap */
	if ((start > sc->sc_device_size) ||
	    ((start + size) > sc->sc_device_size))
		return EINVAL;

	while (size) {
		int cnt;

		if ((rv = spiflash_write_enable(sc)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}

		if (sc->sc_read_size > 0)
			cnt = min(size, sc->sc_read_size);
		else
			cnt = size;

		if ((rv = spiflash_cmd(sc, SPIFLASH_CMD_READ, 3, start,
		    cnt, NULL, data)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}

		start += cnt;
		size -= cnt;
		data += cnt;
	}

	return 0;
}

/* read status register */
int
spiflash_read_status(spiflash_handle_t sc, uint8_t *sr)
{

	return spiflash_cmd(sc, SPIFLASH_CMD_RDSR, 0, 0, 1, NULL, sr);
}

int
spiflash_write_enable(spiflash_handle_t sc)
{

	return spiflash_cmd(sc, SPIFLASH_CMD_WREN, 0, 0, 0, NULL, NULL);
}

int
spiflash_write_disable(spiflash_handle_t sc)
{

	return spiflash_cmd(sc, SPIFLASH_CMD_WRDI, 0, 0, 0, NULL, NULL);
}

int
spiflash_cmd(spiflash_handle_t sc, uint8_t cmd,
    size_t addrlen, uint32_t addr,
    size_t cnt, const uint8_t *wdata, uint8_t *rdata)
{
	struct spi_transfer trans;
	struct spi_chunk chunk1, chunk2;
	char buf[4];
	int i;

	buf[0] = cmd;

	if (addrlen > 3)
		return EINVAL;

	/* the address is shifted out most-significant byte first */
	for (i = addrlen; i > 0; i--) {
		buf[i] = (addr >> ((addrlen - i) * 8)) & 0xff;
	}
	spi_transfer_init(&trans);
	spi_chunk_init(&chunk1, addrlen + 1, buf, NULL);
	spi_transfer_add(&trans, &chunk1);
	if (cnt) {
		spi_chunk_init(&chunk2, cnt, wdata, rdata);
		spi_transfer_add(&trans, &chunk2);
	}

	spi_transfer(sc->sc_handle, &trans);
	spi_wait(&trans);

	if (trans.st_flags & SPI_F_ERROR)
		return trans.st_errno;
	return 0;
}
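
/*
 * Usage sketch (illustrative only): with the usual 3-byte addressing, a
 * 16-byte read at offset 0x1000 would be issued as
 *
 *	spiflash_cmd(sc, SPIFLASH_CMD_READ, 3, 0x1000, 16, NULL, buf);
 *
 * i.e. one opcode byte plus three address bytes clocked out, followed by
 * 16 bytes clocked in.  Commands with no address or data phase, such as
 * SPIFLASH_CMD_WREN above, pass 0 for both addrlen and cnt.
 */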

int
spiflash_wait(spiflash_handle_t sc, int tmo)
{
	int rv;
	uint8_t sr;

	for (;;) {
		if ((rv = spiflash_read_status(sc, &sr)) != 0)
			return rv;

		if ((sr & SPIFLASH_SR_BUSY) == 0)
			break;
		/*
		 * The devices I have all say typical for sector
		 * erase is ~1sec.  We check ten times that often.
		 * (There is no way to interrupt on this.)
		 */
		if (tmo)
			tsleep(&sr, PWAIT, "spiflash_wait", tmo);
	}
	return 0;
}