/*	$NetBSD: chfs_wbuf.c,v 1.7 2014/10/18 08:33:29 snj Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 * University of Szeged, Hungary
 * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <dev/flash/flash.h>
#include <sys/uio.h>
#include "chfs.h"

#define DBG_WBUF 1 /* XXX unused, but should be */

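/*
 * Alignment helpers for the write buffer:
 *  PAD(x)        - round x up to the next 4-byte boundary
 *  EB_ADDRESS(x) - start offset of the erase block containing x
 *  PAGE_DIV(x)   - offset x rounded down to a write buffer page boundary
 *  PAGE_MOD(x)   - offset of x within its write buffer page
 */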
#define PAD(x) (((x)+3)&~3)

#define EB_ADDRESS(x) ( rounddown((x), chmp->chm_ebh->eb_size) )

#define PAGE_DIV(x) ( rounddown((x), chmp->chm_wbuf_pagesize) )
#define PAGE_MOD(x) ( (x) % (chmp->chm_wbuf_pagesize) )

/*
 * writebuffer flush options: WBUF_SETPAD pads the rest of the page with a
 * padding node before the buffer is written, WBUF_NOPAD writes it as it is
 */
enum {
        WBUF_NOPAD,
        WBUF_SETPAD
};

/*
 * chfs_flush_wbuf - write wbuf to the flash
 * Returns zero in case of success.
 */
static int
chfs_flush_wbuf(struct chfs_mount *chmp, int pad)
{
        int ret;
        size_t retlen;
        struct chfs_node_ref *nref;
        struct chfs_flash_padding_node* padnode;

        KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
        KASSERT(mutex_owned(&chmp->chm_lock_sizes));
        KASSERT(rw_write_held(&chmp->chm_lock_wbuf));
        KASSERT(pad == WBUF_SETPAD || pad == WBUF_NOPAD);

        /* check padding option */
        if (pad == WBUF_SETPAD) {
                chmp->chm_wbuf_len = PAD(chmp->chm_wbuf_len);
                memset(chmp->chm_wbuf + chmp->chm_wbuf_len, 0,
                    chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len);

                /* add a padding node */
                padnode = (void *)(chmp->chm_wbuf + chmp->chm_wbuf_len);
                padnode->magic = htole16(CHFS_FS_MAGIC_BITMASK);
                padnode->type = htole16(CHFS_NODETYPE_PADDING);
                padnode->length = htole32(chmp->chm_wbuf_pagesize
                    - chmp->chm_wbuf_len);
                padnode->hdr_crc = htole32(crc32(0, (uint8_t *)padnode,
                    sizeof(*padnode)-4));

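                /*
                 * reference the padding node in the current erase block and
                 * mark it obsolete right away: it only fills out the page
                 * and carries no data
                 */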
                nref = chfs_alloc_node_ref(chmp->chm_nextblock);
                nref->nref_offset = chmp->chm_wbuf_ofs + chmp->chm_wbuf_len;
                nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
                    CHFS_OBSOLETE_NODE_MASK;
                chmp->chm_wbuf_len = chmp->chm_wbuf_pagesize;

                /* change sizes after padding node */
                chfs_change_size_free(chmp, chmp->chm_nextblock,
                    -padnode->length);
                chfs_change_size_wasted(chmp, chmp->chm_nextblock,
                    padnode->length);
        }

        /* write out the buffer */
        ret = chfs_write_leb(chmp, chmp->chm_nextblock->lnr, chmp->chm_wbuf,
            chmp->chm_wbuf_ofs, chmp->chm_wbuf_len, &retlen);
        if (ret) {
                return ret;
        }

        /* reset the buffer */
        memset(chmp->chm_wbuf, 0xff, chmp->chm_wbuf_pagesize);
        chmp->chm_wbuf_ofs += chmp->chm_wbuf_pagesize;
        chmp->chm_wbuf_len = 0;

        return 0;
}


/*
 * chfs_fill_wbuf - write data to wbuf
 * Returns the number of bytes that were actually copied into the wbuf.
 */
static size_t
chfs_fill_wbuf(struct chfs_mount *chmp, const u_char *buf, size_t len)
{
        /*
         * the wbuf is empty and there is at least a full page of data:
         * buffer nothing, the caller writes it out directly
         */
        if (len && !chmp->chm_wbuf_len && (len >= chmp->chm_wbuf_pagesize)) {
                return 0;
        }
        /* clamp the length to the space left in the wbuf */
        if (len > (chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len)) {
                len = chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len;
        }
        /* write into the wbuf */
        memcpy(chmp->chm_wbuf + chmp->chm_wbuf_len, buf, len);

        /* update the actual length of writebuffer */
        chmp->chm_wbuf_len += (int) len;
        return len;
}

/*
 * chfs_write_wbuf - write to wbuf and then the flash
 * Returns zero in case of success.
 */
int
chfs_write_wbuf(struct chfs_mount* chmp, const struct iovec *invecs, long count,
    off_t to, size_t *retlen)
{
        int invec, ret = 0;
        size_t wbuf_retlen, donelen = 0;
        int outvec_to = to;

        int lnr = chmp->chm_nextblock->lnr;

        KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
        KASSERT(mutex_owned(&chmp->chm_lock_sizes));
        KASSERT(!rw_write_held(&chmp->chm_lock_wbuf));

        rw_enter(&chmp->chm_lock_wbuf, RW_WRITER);

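        /*
         * the wbuf offset has not been set yet; start the wbuf at the page
         * that contains the target offset
         */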
        if (chmp->chm_wbuf_ofs == 0xffffffff) {
                chmp->chm_wbuf_ofs = PAGE_DIV(to);
                chmp->chm_wbuf_len = PAGE_MOD(to);
                memset(chmp->chm_wbuf, 0xff, chmp->chm_wbuf_pagesize);
        }

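        /*
         * the target offset falls into a different erase block than the
         * buffered data: flush the current wbuf (padded) and move the wbuf
         * to the new block
         */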
        if (EB_ADDRESS(to) != EB_ADDRESS(chmp->chm_wbuf_ofs)) {
                if (chmp->chm_wbuf_len) {
                        ret = chfs_flush_wbuf(chmp, WBUF_SETPAD);
                        if (ret)
                                goto outerr;
                }
                chmp->chm_wbuf_ofs = PAGE_DIV(to);
                chmp->chm_wbuf_len = PAGE_MOD(to);
        }

        if (to != PAD(chmp->chm_wbuf_ofs + chmp->chm_wbuf_len)) {
                dbg("to: %llu != %zu\n", (unsigned long long)to,
                    PAD(chmp->chm_wbuf_ofs + chmp->chm_wbuf_len));
                dbg("Non-contiguous write\n");
                panic("BUG\n");
        }

        /* adjust alignment offset */
        if (chmp->chm_wbuf_len != PAGE_MOD(to)) {
                chmp->chm_wbuf_len = PAGE_MOD(to);
                /* take care of alignment to next page */
                if (!chmp->chm_wbuf_len) {
                        chmp->chm_wbuf_len += chmp->chm_wbuf_pagesize;
                        ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
                        if (ret)
                                goto outerr;
                }
        }

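        /*
         * each iovec is handled in three steps: top up the partially filled
         * wbuf, write any whole pages of the remainder directly to the
         * flash, then buffer the tail in the wbuf again
         */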
        for (invec = 0; invec < count; invec++) {
                int vlen = invecs[invec].iov_len;
                u_char* v = invecs[invec].iov_base;

                /* fill the whole wbuf */
                wbuf_retlen = chfs_fill_wbuf(chmp, v, vlen);
                if (chmp->chm_wbuf_len == chmp->chm_wbuf_pagesize) {
                        ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
                        if (ret) {
                                goto outerr;
                        }
                }

                vlen -= wbuf_retlen;
                outvec_to += wbuf_retlen;
                v += wbuf_retlen;
                donelen += wbuf_retlen;

                /* if more data remains than fits in the wbuf, write whole
                 * pages of it directly until the rest fits in the wbuf */
                if (vlen >= chmp->chm_wbuf_pagesize) {
                        ret = chfs_write_leb(chmp, lnr, v, outvec_to, PAGE_DIV(vlen), &wbuf_retlen);
                        vlen -= wbuf_retlen;
                        outvec_to += wbuf_retlen;
                        chmp->chm_wbuf_ofs = outvec_to;
                        v += wbuf_retlen;
                        donelen += wbuf_retlen;
                }

                /* write the residual data to the wbuf */
                wbuf_retlen = chfs_fill_wbuf(chmp, v, vlen);
                if (chmp->chm_wbuf_len == chmp->chm_wbuf_pagesize) {
                        ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
                        if (ret)
                                goto outerr;
                }

                outvec_to += wbuf_retlen;
                donelen += wbuf_retlen;
        }
        *retlen = donelen;
        rw_exit(&chmp->chm_lock_wbuf);
        return ret;

outerr:
        /* the wbuf lock is still held here; release it before returning */
        rw_exit(&chmp->chm_lock_wbuf);
        *retlen = 0;
        return ret;
}

/*
 * chfs_flush_pending_wbuf - write wbuf to the flash
 * Used when we must flush the wbuf right now.
 * If the wbuf has free space, it is padded up to the wbuf size before the
 * write.
 */
int chfs_flush_pending_wbuf(struct chfs_mount *chmp)
{
        int err;
        KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
        mutex_enter(&chmp->chm_lock_sizes);
        rw_enter(&chmp->chm_lock_wbuf, RW_WRITER);
        err = chfs_flush_wbuf(chmp, WBUF_SETPAD);
        rw_exit(&chmp->chm_lock_wbuf);
        mutex_exit(&chmp->chm_lock_sizes);
        return err;
}