/* $NetBSD: fdfs.c,v 1.10 2013/06/18 18:18:57 christos Exp $ */

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Buffer cache routines for a file-descriptor backed filesystem.
 * This is part of lfs_cleanerd, so there is also a "segment pointer":
 * preloaded segment buffers that we can make cache buffers out of
 * without duplicating memory or reading the data again.
 */
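
/*
 * Typical usage, as a rough sketch (illustrative only; the exact call
 * sequence in lfs_cleanerd differs):
 *
 *	vp = fd_vget(fd, bsize, segsize, nseg);
 *	if (fd_preload(vp, segstart) == 0)
 *		blkp = fd_ptrget(vp, segstart);
 *	...
 *	fd_release_all(vp);
 *	fd_reclaim(vp);
 *
 * Here fd, bsize, segsize, nseg, segstart, vp and blkp are placeholders
 * supplied by the caller.
 */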

#include <err.h>
#include <fcntl.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <sys/syslog.h>
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/stat.h>

#include "vnode.h"
#include "bufcache.h"
#include "fdfs.h"
#include "kernelops.h"

/*
 * Return a "vnode" interface to a given file descriptor.
 */
struct uvnode *
fd_vget(int fd, int bsize, int segsize, int nseg)
{
	struct fdfs *fs;
	struct uvnode *vp;
	int i;

	fs = malloc(sizeof(*fs));
	if (fs == NULL)
		return NULL;
	if (segsize > 0) {
		fs->fd_bufp = malloc(nseg * sizeof(struct fd_buf));
		if (fs->fd_bufp == NULL) {
			free(fs);
			return NULL;
		}
		for (i = 0; i < nseg; i++) {
			fs->fd_bufp[i].start = 0x0;
			fs->fd_bufp[i].end = 0x0;
			fs->fd_bufp[i].buf = malloc(segsize);
			if (fs->fd_bufp[i].buf == NULL) {
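				/* Unwind the segment buffers allocated so far. */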
				while (--i >= 0)
					free(fs->fd_bufp[i].buf);
				free(fs->fd_bufp);
				free(fs);
				return NULL;
			}
		}
	} else
		fs->fd_bufp = NULL;

	fs->fd_fd = fd;
	fs->fd_bufc = nseg;
	fs->fd_bufi = 0;
	fs->fd_bsize = bsize;
	fs->fd_ssize = segsize;

	vp = malloc(sizeof(*vp));
	if (vp == NULL) {
		if (fs->fd_bufp) {
			for (i = 0; i < nseg; i++)
				free(fs->fd_bufp[i].buf);
			free(fs->fd_bufp);
		}
		free(fs);
		return NULL;
	}
	memset(vp, 0, sizeof(*vp));
	vp->v_fd = fd;
	vp->v_fs = fs;
	vp->v_usecount = 0;
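	/* Hook up the fd-backed implementations of the vnode operations. */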
	vp->v_strategy_op = fd_vop_strategy;
	vp->v_bwrite_op = fd_vop_bwrite;
	vp->v_bmap_op = fd_vop_bmap;
	LIST_INIT(&vp->v_cleanblkhd);
	LIST_INIT(&vp->v_dirtyblkhd);
	vp->v_data = NULL;

	return vp;
}

/*
 * Deallocate a vnode.
 */
void
fd_reclaim(struct uvnode *vp)
{
	int i;
	struct ubuf *bp;
	struct fdfs *fs;

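	/* Destroy any buffers still associated with this vnode. */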
	while ((bp = LIST_FIRST(&vp->v_dirtyblkhd)) != NULL) {
		bremfree(bp);
		buf_destroy(bp);
	}
	while ((bp = LIST_FIRST(&vp->v_cleanblkhd)) != NULL) {
		bremfree(bp);
		buf_destroy(bp);
	}

	fs = (struct fdfs *)vp->v_fs;
	if (fs->fd_bufp != NULL) {
		/* fd_bufp is NULL if no segment buffers were requested. */
		for (i = 0; i < fs->fd_bufc; i++)
			free(fs->fd_bufp[i].buf);
		free(fs->fd_bufp);
	}
	free(fs);
	memset(vp, 0, sizeof(*vp));
}

/*
 * We won't be using that last segment after all.
 */
void
fd_release(struct uvnode *vp)
{
	--((struct fdfs *)vp->v_fs)->fd_bufi;
}

/*
 * Reset buffer pointer to first buffer.
 */
void
fd_release_all(struct uvnode *vp)
{
	((struct fdfs *)vp->v_fs)->fd_bufi = 0;
}

/*
 * Prepare a segment buffer which we will expect to read from.
 * fd_bufi is only incremented once we have succeeded in allocating
 * the space, if necessary, and in reading the segment.
 */
int
fd_preload(struct uvnode *vp, daddr_t start)
{
	struct fdfs *fs = (struct fdfs *)vp->v_fs;
	struct fd_buf *t;
	int r;

	/* We might need to allocate more buffers. */
	if (fs->fd_bufi == fs->fd_bufc) {
		++fs->fd_bufc;
		syslog(LOG_DEBUG, "increasing number of segment buffers to %d",
		    fs->fd_bufc);
		t = realloc(fs->fd_bufp, fs->fd_bufc * sizeof(struct fd_buf));
		if (t == NULL) {
			syslog(LOG_NOTICE, "failed resizing table to %d",
			    fs->fd_bufc);
			return -1;
		}
		fs->fd_bufp = t;
		fs->fd_bufp[fs->fd_bufi].start = 0x0;
		fs->fd_bufp[fs->fd_bufi].end = 0x0;
		fs->fd_bufp[fs->fd_bufi].buf = malloc(fs->fd_ssize);
		if (fs->fd_bufp[fs->fd_bufi].buf == NULL) {
			syslog(LOG_NOTICE, "failed to allocate buffer #%d",
			    fs->fd_bufc);
			--fs->fd_bufc;
			return -1;
		}
	}

	/* Read the current buffer. */
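	/* start and end are in units of fd_bsize-sized blocks. */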
	fs->fd_bufp[fs->fd_bufi].start = start;
	fs->fd_bufp[fs->fd_bufi].end = start + fs->fd_ssize / fs->fd_bsize;

	if ((r = kops.ko_pread(fs->fd_fd, fs->fd_bufp[fs->fd_bufi].buf,
	    (size_t)fs->fd_ssize, start * fs->fd_bsize)) < 0) {
		syslog(LOG_ERR, "preload to segment buffer %d", fs->fd_bufi);
		return r;
	}

	fs->fd_bufi = fs->fd_bufi + 1;
	return 0;
}

/*
 * Get a pointer to a block contained in one of the segment buffers,
 * as if from bread() but avoiding the buffer cache.
 */
char *
fd_ptrget(struct uvnode * vp, daddr_t bn)
{
	int i;
	struct fdfs *fs;

	fs = (struct fdfs *)vp->v_fs;
	for (i = 0; i < fs->fd_bufc; i++) {
		if (bn >= fs->fd_bufp[i].start && bn < fs->fd_bufp[i].end) {
			return fs->fd_bufp[i].buf +
			    (bn - fs->fd_bufp[i].start) * fs->fd_bsize;
		}
	}
	return NULL;
}

/*
 * Strategy routine.  We can read from the segment buffer if requested.
 */
int
fd_vop_strategy(struct ubuf * bp)
{
	struct fdfs *fs;
	char *cp;
	int count;

	fs = (struct fdfs *)bp->b_vp->v_fs;
	if (bp->b_flags & B_READ) {
		if ((cp = fd_ptrget(bp->b_vp, bp->b_blkno)) != NULL) {
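			/*
			 * The block is already in a preloaded segment
			 * buffer: point b_data at it, and flag the buffer
			 * so the segment data is not freed when the buffer
			 * is destroyed.
			 */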
			free(bp->b_data);
			bp->b_data = cp;
			bp->b_flags |= (B_DONTFREE | B_DONE);
			return 0;
		}
		count = kops.ko_pread(bp->b_vp->v_fd, bp->b_data, bp->b_bcount,
		    bp->b_blkno * fs->fd_bsize);
		if (count == bp->b_bcount)
			bp->b_flags |= B_DONE;
	} else {
		count = kops.ko_pwrite(bp->b_vp->v_fd, bp->b_data, bp->b_bcount,
		    bp->b_blkno * fs->fd_bsize);
		if (count < 0) {
			perror("pwrite");
			return -1;
		}
		bp->b_flags &= ~B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}
	return 0;
}

/*
 * Delayed write.
 */
int
fd_vop_bwrite(struct ubuf * bp)
{
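	/*
	 * Mark the buffer dirty and move it to the vnode's dirty list;
	 * the actual write happens later, when the buffer cache flushes
	 * the block.
	 */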
	bp->b_flags |= B_DELWRI;
	reassignbuf(bp, bp->b_vp);
	brelse(bp, 0);
	return 0;
}

/*
 * Map lbn to disk address. Since we are using the file
 * descriptor as the "disk", the disk address is meaningless
 * and we just return the block address.
 */
int
fd_vop_bmap(struct uvnode * vp, daddr_t lbn, daddr_t * daddrp)
{
	*daddrp = lbn;
	return 0;
}