/* $NetBSD: spiflash.c,v 1.27 2022/10/26 21:56:19 andvar Exp $ */

/*-
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * Portions of this code were written by Garrett D'Amore for the
 * Champaign-Urbana Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *      This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spiflash.c,v 1.27 2022/10/26 21:56:19 andvar Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/uio.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/errno.h>

#include <dev/spi/spivar.h>
#include <dev/spi/spiflash.h>

/*
 * This is an MI block driver for SPI flash devices.  It could probably be
 * converted to some more generic framework, if someone wanted to create one
 * for NOR flashes.  Note that some flashes have the ability to handle
 * interrupts.
 */
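
/*
 * Illustrative sketch, not part of this driver: a machine-dependent
 * back-end would attach the MI layer by filling in a spiflash_hw_if
 * with its accessors and calling spiflash_attach_mi() from its own
 * attach routine.  All of the mychip_* names and the sizes below are
 * hypothetical; the hw_if member types are inferred from how they are
 * used later in this file.
 *
 *	static const char *
 *	mychip_getname(void *cookie)
 *	{
 *		return "MyChip serial flash";
 *	}
 *
 *	static struct spi_handle *
 *	mychip_gethandle(void *cookie)
 *	{
 *		struct mychip_softc *msc = cookie;
 *
 *		return msc->msc_spi;
 *	}
 *
 *	static int
 *	mychip_getsize(void *cookie, int idx)
 *	{
 *		switch (idx) {
 *		case SPIFLASH_SIZE_DEVICE:	return 4 * 1024 * 1024;
 *		case SPIFLASH_SIZE_ERASE:	return 64 * 1024;
 *		case SPIFLASH_SIZE_WRITE:	return 256;
 *		case SPIFLASH_SIZE_READ:	return 0;
 *		default:			return -1;
 *		}
 *	}
 *
 *	static const struct spiflash_hw_if mychip_hw_if = {
 *		.sf_getname = mychip_getname,
 *		.sf_gethandle = mychip_gethandle,
 *		.sf_getsize = mychip_getsize,
 *	};
 *
 * Leaving sf_erase, sf_write and sf_read NULL selects the common
 * implementations below.  The MD attach routine then does:
 *
 *	spiflash_attach_mi(&mychip_hw_if, msc, self);
 */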

struct spiflash_softc {
	struct disk sc_dk;

	struct spiflash_hw_if sc_hw;
	void *sc_cookie;

	const char *sc_name;
	struct spi_handle *sc_handle;
	int sc_device_size;
	int sc_write_size;
	int sc_erase_size;
	int sc_read_size;
	int sc_device_blks;

	struct bufq_state *sc_waitq;
	struct bufq_state *sc_workq;
	struct bufq_state *sc_doneq;
	lwp_t *sc_thread;
};

#define sc_getname sc_hw.sf_getname
#define sc_gethandle sc_hw.sf_gethandle
#define sc_getsize sc_hw.sf_getsize
#define sc_getflags sc_hw.sf_getflags
#define sc_erase sc_hw.sf_erase
#define sc_write sc_hw.sf_write
#define sc_read sc_hw.sf_read
#define sc_getstatus sc_hw.sf_getstatus
#define sc_setstatus sc_hw.sf_setstatus

struct spiflash_attach_args {
	const struct spiflash_hw_if *hw;
	void *cookie;
};

#define STATIC
STATIC int spiflash_match(device_t, cfdata_t, void *);
STATIC void spiflash_attach(device_t, device_t, void *);
STATIC int spiflash_print(void *, const char *);
STATIC int spiflash_common_erase(spiflash_handle_t, size_t, size_t);
STATIC int spiflash_common_write(spiflash_handle_t, size_t, size_t,
    const uint8_t *);
STATIC int spiflash_common_read(spiflash_handle_t, size_t, size_t, uint8_t *);
STATIC void spiflash_process_done(spiflash_handle_t, int);
STATIC void spiflash_process_read(spiflash_handle_t);
STATIC void spiflash_process_write(spiflash_handle_t);
STATIC void spiflash_thread(void *);
STATIC int spiflash_nsectors(spiflash_handle_t, struct buf *);
STATIC int spiflash_sector(spiflash_handle_t, struct buf *);

CFATTACH_DECL_NEW(spiflash, sizeof(struct spiflash_softc),
    spiflash_match, spiflash_attach, NULL, NULL);

#ifdef SPIFLASH_DEBUG
#define DPRINTF(x) do { printf x; } while (0/*CONSTCOND*/)
#else
#define DPRINTF(x) do { } while (0/*CONSTCOND*/)
#endif

extern struct cfdriver spiflash_cd;

dev_type_open(spiflash_open);
dev_type_close(spiflash_close);
dev_type_read(spiflash_read);
dev_type_write(spiflash_write);
dev_type_ioctl(spiflash_ioctl);
dev_type_strategy(spiflash_strategy);

const struct bdevsw spiflash_bdevsw = {
	.d_open = spiflash_open,
	.d_close = spiflash_close,
	.d_strategy = spiflash_strategy,
	.d_ioctl = spiflash_ioctl,
	.d_dump = nodump,
	.d_psize = nosize,
	.d_discard = nodiscard,
	.d_flag = D_DISK,
};

const struct cdevsw spiflash_cdevsw = {
	.d_open = spiflash_open,
	.d_close = spiflash_close,
	.d_read = spiflash_read,
	.d_write = spiflash_write,
	.d_ioctl = spiflash_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK,
};

static struct dkdriver spiflash_dkdriver = {
	.d_strategy = spiflash_strategy
};

spiflash_handle_t
spiflash_attach_mi(const struct spiflash_hw_if *hw, void *cookie,
    device_t dev)
{
	struct spiflash_attach_args sfa;
	sfa.hw = hw;
	sfa.cookie = cookie;

	return (spiflash_handle_t)config_found(dev, &sfa, spiflash_print,
	    CFARGS_NONE);
}

int
spiflash_print(void *aux, const char *pnp)
{
	if (pnp != NULL)
		printf("spiflash at %s\n", pnp);

	return UNCONF;
}

int
spiflash_match(device_t parent, cfdata_t cf, void *aux)
{

	return 1;
}

void
spiflash_attach(device_t parent, device_t self, void *aux)
{
	struct spiflash_softc *sc = device_private(self);
	struct spiflash_attach_args *sfa = aux;
	void *cookie = sfa->cookie;

	sc->sc_hw = *sfa->hw;
	sc->sc_cookie = cookie;
	sc->sc_name = sc->sc_getname(cookie);
	sc->sc_handle = sc->sc_gethandle(cookie);
	sc->sc_device_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_DEVICE);
	sc->sc_erase_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_ERASE);
	sc->sc_write_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_WRITE);
	sc->sc_read_size = sc->sc_getsize(cookie, SPIFLASH_SIZE_READ);
	sc->sc_device_blks = sc->sc_device_size / DEV_BSIZE;

	if (sc->sc_read == NULL)
		sc->sc_read = spiflash_common_read;
	if (sc->sc_write == NULL)
		sc->sc_write = spiflash_common_write;
	if (sc->sc_erase == NULL)
		sc->sc_erase = spiflash_common_erase;

	aprint_naive(": SPI flash\n");
	aprint_normal(": %s SPI flash\n", sc->sc_name);
	/* XXX: note that this has to change for boot-sectored flash */
	aprint_normal_dev(self, "%d KB, %d sectors of %d KB each\n",
	    sc->sc_device_size / 1024,
	    sc->sc_device_size / sc->sc_erase_size,
	    sc->sc_erase_size / 1024);
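	/*
	 * With illustrative numbers, a 4 MB part with 64 KB erase
	 * sectors would be reported above as "4096 KB, 64 sectors of
	 * 64 KB each".
	 */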

	/* first-come first-served strategy works best for us */
	bufq_alloc(&sc->sc_waitq, "fcfs", BUFQ_SORT_RAWBLOCK);
	bufq_alloc(&sc->sc_workq, "fcfs", BUFQ_SORT_RAWBLOCK);
	bufq_alloc(&sc->sc_doneq, "fcfs", BUFQ_SORT_RAWBLOCK);

	disk_init(&sc->sc_dk, device_xname(self), &spiflash_dkdriver);
	disk_attach(&sc->sc_dk);

	/* arrange to allocate the kthread */
	kthread_create(PRI_NONE, 0, NULL, spiflash_thread, sc,
	    &sc->sc_thread, "spiflash");
}

int
spiflash_open(dev_t dev, int flags, int mode, struct lwp *l)
{
	spiflash_handle_t sc;

	sc = device_lookup_private(&spiflash_cd, DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	/*
	 * XXX: We need to handle partitions here.  The problem is
	 * that it isn't entirely clear to me how to deal with this.
	 * There are devices that could be used "in the raw" with a
	 * NetBSD label, but then you get into devices that have other
	 * kinds of data on them -- some have VxWorks data, some have
	 * RedBoot data, and some have other constraints -- for example
	 * some devices might have a portion that is read-only,
	 * whereas others might have a portion that is read-write.
	 *
	 * For now we just permit access to the entire device.
	 */
	return 0;
}

int
spiflash_close(dev_t dev, int flags, int mode, struct lwp *l)
{
	spiflash_handle_t sc;

	sc = device_lookup_private(&spiflash_cd, DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	return 0;
}

int
spiflash_read(dev_t dev, struct uio *uio, int ioflag)
{

	return physio(spiflash_strategy, NULL, dev, B_READ, minphys, uio);
}

int
spiflash_write(dev_t dev, struct uio *uio, int ioflag)
{

	return physio(spiflash_strategy, NULL, dev, B_WRITE, minphys, uio);
}

int
spiflash_ioctl(dev_t dev, u_long cmd, void *data, int flags, struct lwp *l)
{
	spiflash_handle_t sc;

	sc = device_lookup_private(&spiflash_cd, DISKUNIT(dev));
	if (sc == NULL)
		return ENXIO;

	return EINVAL;
}

void
spiflash_strategy(struct buf *bp)
{
	spiflash_handle_t sc;
	int s;

	bp->b_resid = bp->b_bcount;

	sc = device_lookup_private(&spiflash_cd, DISKUNIT(bp->b_dev));
	if (sc == NULL) {
		bp->b_error = ENXIO;
		biodone(bp);
		return;
	}

	if (((bp->b_bcount % sc->sc_write_size) != 0) ||
	    (bp->b_blkno < 0)) {
		bp->b_error = EINVAL;
		biodone(bp);
		return;
	}
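	/*
	 * For example, with a hypothetical 256-byte write granularity
	 * (sc_write_size), a 512-byte transfer passes the check above,
	 * while a 100-byte transfer is rejected with EINVAL.
	 */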

	/* no work? */
	if (bp->b_bcount == 0) {
		biodone(bp);
		return;
	}

	if (bounds_check_with_mediasize(bp, DEV_BSIZE,
	    sc->sc_device_blks) <= 0) {
		biodone(bp);
		return;
	}

	/* all ready, hand off to thread for async processing */
	s = splbio();
	bufq_put(sc->sc_waitq, bp);
	wakeup(&sc->sc_thread);
	splx(s);
}

void
spiflash_process_done(spiflash_handle_t sc, int err)
{
	struct buf *bp;
	int cnt = 0;
	int flag = 0;

	while ((bp = bufq_get(sc->sc_doneq)) != NULL) {
		flag = bp->b_flags & B_READ;
		if ((bp->b_error = err) == 0)
			bp->b_resid = 0;
		cnt += bp->b_bcount - bp->b_resid;
		biodone(bp);
	}
	disk_unbusy(&sc->sc_dk, cnt, flag);
}

void
spiflash_process_read(spiflash_handle_t sc)
{
	struct buf *bp;
	int err = 0;

	disk_busy(&sc->sc_dk);
	while ((bp = bufq_get(sc->sc_workq)) != NULL) {
		size_t addr = bp->b_blkno * DEV_BSIZE;
		uint8_t *data = bp->b_data;
		int cnt = bp->b_resid;

		bufq_put(sc->sc_doneq, bp);

		DPRINTF(("read from addr %x, cnt %d\n", (unsigned)addr, cnt));

		if ((err = sc->sc_read(sc, addr, cnt, data)) != 0) {
			/* error occurred, fail all pending workq bufs */
			bufq_move(sc->sc_doneq, sc->sc_workq);
			break;
		}

		bp->b_resid -= cnt;
		data += cnt;
		addr += cnt;
	}
	spiflash_process_done(sc, err);
}

void
spiflash_process_write(spiflash_handle_t sc)
{
	int len;
	size_t base;
	daddr_t blkno;
	uint8_t *save;
	int err = 0, neederase = 0;
	struct buf *bp;

	/*
	 * due to other considerations, we are guaranteed that
	 * we will only have multiple buffers if they are all in
	 * the same erase sector.  Therefore we never need to look
	 * beyond the first block to determine how much data we need
	 * to save.
	 */

	bp = bufq_peek(sc->sc_workq);
	len = spiflash_nsectors(sc, bp) * sc->sc_erase_size;
	blkno = bp->b_blkno;
	base = (blkno * DEV_BSIZE) & ~(sc->sc_erase_size - 1);
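	/*
	 * Illustrative arithmetic, assuming 64 KB erase sectors and a
	 * DEV_BSIZE of 512: a write starting at blkno 129 (byte offset
	 * 66048) rounds base down to 65536, the start of the second
	 * erase sector, and len spans every sector the buffer touches.
	 */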

	/* get ourselves a scratch buffer */
	save = malloc(len, M_DEVBUF, M_WAITOK);

	disk_busy(&sc->sc_dk);
	/* read in as much of the data as we need */
	DPRINTF(("reading in %d bytes\n", len));
	if ((err = sc->sc_read(sc, base, len, save)) != 0) {
		free(save, M_DEVBUF);
		bufq_move(sc->sc_doneq, sc->sc_workq);
		spiflash_process_done(sc, err);
		return;
	}

	/*
	 * now coalesce the writes into the save area, but also
	 * check to see if we need to do an erase
	 */
	while ((bp = bufq_get(sc->sc_workq)) != NULL) {
		uint8_t *data, *dst;
		int resid = bp->b_resid;

		DPRINTF(("coalesce write, blkno %x, count %d, resid %d\n",
		    (unsigned)bp->b_blkno, bp->b_bcount, resid));

		data = bp->b_data;
		dst = save + (bp->b_blkno * DEV_BSIZE) - base;

		/*
		 * NOR flash semantics: a bit can be cleared without an
		 * erase, but it cannot be set.  Checking for bits that
		 * would have to be set lets us skip unnecessary erases.
		 */
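		/*
		 * For example, with current contents 0xFF a new byte of
		 * 0x0F programs cleanly (0x0F & ~0xFF == 0), but with
		 * current contents 0xF0 it does not (0x0F & ~0xF0 ==
		 * 0x0F), so an erase is required first.
		 */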
		while (resid) {
			if ((*data) & ~(*dst))
				neederase = 1;
			*dst++ = *data++;
			resid--;
		}

		bufq_put(sc->sc_doneq, bp);
	}

	/*
	 * do the erase, if we need to.
	 */
	if (neederase) {
		DPRINTF(("erasing from %zx - %zx\n", base, base + len));
		if ((err = sc->sc_erase(sc, base, len)) != 0) {
			free(save, M_DEVBUF);
			spiflash_process_done(sc, err);
			return;
		}
	}

	/*
	 * now write our save area, and finish up.
	 */
	DPRINTF(("flashing %d bytes to %zx from %p\n", len, base, save));
	err = sc->sc_write(sc, base, len, save);
	free(save, M_DEVBUF);
	spiflash_process_done(sc, err);
}

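/*
 * Count how many erase sectors a transfer touches.  As a worked example
 * with hypothetical sizes (64 KB erase sectors, DEV_BSIZE of 512): a
 * 1 KB transfer at blkno 127 starts at byte 65024 in sector 0 and ends
 * at byte 66047 in sector 1, so it spans two sectors.
 */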
int
spiflash_nsectors(spiflash_handle_t sc, struct buf *bp)
{
	unsigned addr, sector;

	addr = bp->b_blkno * DEV_BSIZE;
	sector = addr / sc->sc_erase_size;

	addr += bp->b_bcount;
	addr--;
	return (((addr / sc->sc_erase_size) - sector) + 1);
}

int
spiflash_sector(spiflash_handle_t sc, struct buf *bp)
{
	unsigned addr, sector;

	addr = bp->b_blkno * DEV_BSIZE;
	sector = addr / sc->sc_erase_size;

	/* if it spans multiple erase sectors, error it */
	addr += bp->b_bcount;
	addr--;
	if (sector != (addr / sc->sc_erase_size))
		return -1;

	return sector;
}

void
spiflash_thread(void *arg)
{
	spiflash_handle_t sc = arg;
	struct buf *bp;
	int sector;

	(void)splbio();
	for (;;) {
		if ((bp = bufq_get(sc->sc_waitq)) == NULL) {
			tsleep(&sc->sc_thread, PRIBIO, "spiflash_thread", 0);
			continue;
		}

		bufq_put(sc->sc_workq, bp);

		if (bp->b_flags & B_READ) {
			/* just do the read */
			spiflash_process_read(sc);
			continue;
		}

		/*
		 * Because writing a flash filesystem is particularly
		 * painful, involving erase, modify, write, we prefer
		 * to coalesce writes to the same sector together.
		 */

		sector = spiflash_sector(sc, bp);

		/*
		 * if the write spans multiple sectors, skip
		 * coalescing.  (It would be nice if we could break
		 * these up.  minphys is honored for read/write, but
		 * not necessarily for bread.)
		 */
		if (sector < 0)
			goto dowrite;

		while ((bp = bufq_peek(sc->sc_waitq)) != NULL) {
			/* can't deal with read requests! */
			if (bp->b_flags & B_READ)
				break;

			/* is it for the same sector? */
			if (spiflash_sector(sc, bp) != sector)
				break;

			bp = bufq_get(sc->sc_waitq);
			bufq_put(sc->sc_workq, bp);
		}

	dowrite:
		spiflash_process_write(sc);
	}
}

/*
 * SPI flash common implementation.
 */

/*
 * Most devices take on the order of 1 second for each sector that they
 * erase.
 */
int
spiflash_common_erase(spiflash_handle_t sc, size_t start, size_t size)
{
	int rv;

	if ((start % sc->sc_erase_size) || (size % sc->sc_erase_size))
		return EINVAL;

	/* the second test is to test against wrap */
	if ((start > sc->sc_device_size) ||
	    ((start + size) > sc->sc_device_size))
		return EINVAL;

	/*
	 * XXX: check protection status?  Requires master table mapping
	 * sectors to status bits, and so forth.
	 */

	while (size) {
		if ((rv = spiflash_write_enable(sc)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}
		if ((rv = spiflash_cmd(sc, SPIFLASH_CMD_ERASE, 3, start, 0,
		    NULL, NULL)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}

		/*
		 * The devices I have all say typical for sector erase
		 * is ~1sec.  We check ten times that often.  (There
		 * is no way to interrupt on this.)
		 */
		if ((rv = spiflash_wait(sc, hz / 10)) != 0)
			return rv;

		start += sc->sc_erase_size;
		size -= sc->sc_erase_size;

		/* NB: according to the docs I have, the write enable
		 * is automatically cleared upon completion of an erase
		 * command, so there is no need to explicitly disable it.
		 */
	}

	return 0;
}
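
/*
 * Illustrative use of spiflash_common_erase(), with hypothetical sizes:
 * given 64 KB erase sectors, a call with start 0 and size 2 * 65536
 * erases the first two sectors one at a time, while an unaligned request
 * such as start 100 or size 1000 is rejected with EINVAL.
 */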

int
spiflash_common_write(spiflash_handle_t sc, size_t start, size_t size,
    const uint8_t *data)
{
	int rv;

	if ((start % sc->sc_write_size) || (size % sc->sc_write_size))
		return EINVAL;

	while (size) {
		int cnt;

		if ((rv = spiflash_write_enable(sc)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}

		cnt = uimin(size, sc->sc_write_size);
		if ((rv = spiflash_cmd(sc, SPIFLASH_CMD_PROGRAM, 3, start,
		    cnt, data, NULL)) != 0) {
			spiflash_write_disable(sc);
			return rv;
		}

		/*
		 * It seems that most devices can write bits fairly
		 * quickly.  For example, one part I have access to
		 * takes ~5msec to process the entire 256 byte page.
		 * Probably this should be modified to cope with
		 * device-specific timing, and maybe also take into
		 * account systems with higher values of HZ (which
		 * could benefit from sleeping.)
		 */
		if ((rv = spiflash_wait(sc, 0)) != 0)
			return rv;

		data += cnt;
		start += cnt;
		size -= cnt;
	}

	return 0;
}

int
spiflash_common_read(spiflash_handle_t sc, size_t start, size_t size,
    uint8_t *data)
{
	int rv;

	while (size) {
		int cnt;

		if (sc->sc_read_size > 0)
			cnt = uimin(size, sc->sc_read_size);
		else
			cnt = size;

		if ((rv = spiflash_cmd(sc, SPIFLASH_CMD_READ, 3, start,
		    cnt, NULL, data)) != 0) {
			return rv;
		}

		data += cnt;
		start += cnt;
		size -= cnt;
	}

	return 0;
}

/* read status register */
int
spiflash_read_status(spiflash_handle_t sc, uint8_t *sr)
{

	return spiflash_cmd(sc, SPIFLASH_CMD_RDSR, 0, 0, 1, NULL, sr);
}

int
spiflash_write_enable(spiflash_handle_t sc)
{

	return spiflash_cmd(sc, SPIFLASH_CMD_WREN, 0, 0, 0, NULL, NULL);
}

int
spiflash_write_disable(spiflash_handle_t sc)
{

	return spiflash_cmd(sc, SPIFLASH_CMD_WRDI, 0, 0, 0, NULL, NULL);
}

int
spiflash_cmd(spiflash_handle_t sc, uint8_t cmd,
    size_t addrlen, uint32_t addr,
    size_t cnt, const uint8_t *wdata, uint8_t *rdata)
{
	struct spi_transfer trans;
	struct spi_chunk chunk1, chunk2;
	char buf[4];
	int i;

	buf[0] = cmd;

	if (addrlen > 3)
		return EINVAL;

	for (i = addrlen; i > 0; i--) {
		buf[i] = addr & 0xff;
		addr >>= 8;
	}
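	/*
	 * The address is sent big-endian, immediately after the command
	 * byte.  For example, a 3-byte address of 0x012345 with the
	 * common 0x03 READ opcode would produce buf[] = 03 01 23 45.
	 */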
	spi_transfer_init(&trans);
	spi_chunk_init(&chunk1, addrlen + 1, buf, NULL);
	spi_transfer_add(&trans, &chunk1);
	if (cnt) {
		spi_chunk_init(&chunk2, cnt, wdata, rdata);
		spi_transfer_add(&trans, &chunk2);
	}

	spi_transfer(sc->sc_handle, &trans);
	spi_wait(&trans);

	if (trans.st_flags & SPI_F_ERROR)
		return trans.st_errno;
	return 0;
}

int
spiflash_wait(spiflash_handle_t sc, int tmo)
{
	int rv;
	uint8_t sr;

	for (;;) {
		if ((rv = spiflash_read_status(sc, &sr)) != 0)
			return rv;

		if ((sr & SPIFLASH_SR_BUSY) == 0)
			break;
		/*
		 * The devices I have all say typical for sector
		 * erase is ~1sec.  We check ten times that often.
		 * (There is no way to interrupt on this.)
		 */
		if (tmo)
			tsleep(&sr, PWAIT, "spiflash_wait", tmo);
	}
	return 0;
}
