/*	$NetBSD: uvm_readahead.c,v 1.16 2023/09/23 18:21:12 ad Exp $	*/

/*-
 * Copyright (c)2003, 2005, 2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * uvm_object read-ahead
 *
 * TODO:
 *	- tune.
 *	- handle multiple streams.
 *	- find a better way to deal with PGO_LOCKED pager requests.
 *	  (currently just ignored)
 *	- consider the amount of memory in the system.
 *	- consider the speed of the underlying device.
 *	- consider filesystem block size / block layout.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.16 2023/09/23 18:21:12 ad Exp $");

#include <sys/param.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#if defined(READAHEAD_DEBUG)
#define	DPRINTF(a)	printf a
#else /* defined(READAHEAD_DEBUG) */
#define	DPRINTF(a)	/* nothing */
#endif /* defined(READAHEAD_DEBUG) */

/*
 * uvm_ractx: read-ahead context.
 */

struct uvm_ractx {
	int ra_flags;
#define	RA_VALID	1
	off_t ra_winstart;	/* window start offset */
	size_t ra_winsize;	/* window size */
	off_t ra_next;		/* next offset to read-ahead */
};
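
/*
 * The context describes a sliding window: [ra_winstart, ra_winstart +
 * ra_winsize) is the range we expect to be accessed next, and ra_next
 * is the offset up to which read-ahead i/o has already been issued.
 */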

#if defined(sun2) || defined(sun3)
/* XXX: on sun2 and sun3 MAXPHYS is 0xe000 */
#undef MAXPHYS
#define	MAXPHYS		0x8000	/* XXX */
#endif

#define	RA_WINSIZE_INIT		MAXPHYS			/* initial window size */
#define	RA_WINSIZE_MAX		(MAXPHYS * 16)		/* max window size */
#define	RA_WINSIZE_SEQENTIAL	RA_WINSIZE_MAX		/* fixed window size used for
							   SEQUENTIAL hint */
#define	RA_MINSIZE		(MAXPHYS * 2)		/* min size to start i/o */
#define	RA_IOCHUNK		MAXPHYS			/* read-ahead i/o chunk size */

static off_t ra_startio(struct uvm_object *, off_t, size_t);
static struct uvm_ractx *ra_allocctx(void);
static void ra_freectx(struct uvm_ractx *);

/*
 * uvm_ra_init: initialize readahead module.
 */

void
uvm_ra_init(void)
{

}

static struct uvm_ractx *
ra_allocctx(void)
{

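	/*
	 * Read-ahead is purely an optimization: allocate with KM_NOSLEEP
	 * and let callers cope with a NULL context.
	 */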
	return kmem_alloc(sizeof(struct uvm_ractx), KM_NOSLEEP);
}

static void
ra_freectx(struct uvm_ractx *ra)
{

	kmem_free(ra, sizeof(struct uvm_ractx));
}

/*
 * ra_startio: start i/o for read-ahead.
 *
 * => start i/o for each RA_IOCHUNK sized chunk.
 * => return offset to which we started i/o.
 */

static off_t
ra_startio(struct uvm_object *uobj, off_t off, size_t sz)
{
	const off_t endoff = off + sz;

	DPRINTF(("%s: uobj=%p, off=%" PRIu64 ", endoff=%" PRIu64 "\n",
	    __func__, uobj, off, endoff));

	KASSERT(rw_write_held(uobj->vmobjlock));

	/*
	 * Don't issue read-ahead if the last page of the range is already
	 * cached.  The assumption is that, since the access is sequential,
	 * the intermediate pages have similar LRU status and are therefore
	 * likely to still be cached as well.  This speeds up the cached
	 * case, since it avoids the lookups and temporary allocations that
	 * a full pgo_get would perform.
	 */
	struct vm_page *pg = uvm_pagelookup(uobj, trunc_page(endoff - 1));
	if (pg != NULL) {
		DPRINTF(("%s: off=%" PRIu64 ", sz=%zu already cached\n",
		    __func__, off, sz));
		return endoff;
	}

	off = trunc_page(off);
	while (off < endoff) {
		const size_t chunksize = RA_IOCHUNK;
		int error;
		size_t donebytes;
		int npages;
		int orignpages;
		size_t bytelen;

		KASSERT((chunksize & (chunksize - 1)) == 0);
		KASSERT((off & PAGE_MASK) == 0);
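		/*
		 * Read in RA_IOCHUNK-aligned chunks: round off up to the
		 * next chunksize boundary, so the first chunk may be short
		 * and all subsequent i/o starts chunk-aligned.
		 */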
		bytelen = ((off + chunksize) & -(off_t)chunksize) - off;
		KASSERT((bytelen & PAGE_MASK) == 0);
		npages = orignpages = bytelen >> PAGE_SHIFT;
		KASSERT(npages != 0);

		/*
		 * use UVM_ADV_RANDOM to avoid recursion.
		 */

		error = (*uobj->pgops->pgo_get)(uobj, off, NULL,
		    &npages, 0, VM_PROT_READ, UVM_ADV_RANDOM, PGO_NOTIMESTAMP);
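		/*
		 * pgo_get is entered with the object locked and returns
		 * with it unlocked; re-acquire the lock before the next
		 * pgo_get call and before returning to our caller, who
		 * expects it to still be held.
		 */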
		rw_enter(uobj->vmobjlock, RW_WRITER);
		DPRINTF(("%s: off=%" PRIu64 ", bytelen=%zu -> %d\n",
		    __func__, off, bytelen, error));
		if (error != 0 && error != EBUSY) {
			if (error != EINVAL) {	/* maybe past EOF */
				DPRINTF(("%s: error=%d\n", __func__, error));
			}
			break;
		}
		KASSERT(orignpages == npages);
		donebytes = orignpages << PAGE_SHIFT;
		off += donebytes;
	}

	return off;
}

/* ------------------------------------------------------------ */

/*
 * uvm_ra_allocctx: allocate a context.
 */

struct uvm_ractx *
uvm_ra_allocctx(void)
{
	struct uvm_ractx *ra;

	ra = ra_allocctx();
	if (ra != NULL) {
		ra->ra_flags = 0;
	}

	return ra;
}

/*
 * uvm_ra_freectx: free a context.
 */

void
uvm_ra_freectx(struct uvm_ractx *ra)
{

	KASSERT(ra != NULL);
	ra_freectx(ra);
}

/*
 * uvm_ra_request: update a read-ahead context and start i/o if appropriate.
 *
 * => called when [reqoff, reqoff+reqsize) is requested.
 * => object must be locked by caller, will return locked.
 */

void
uvm_ra_request(struct uvm_ractx *ra, int advice, struct uvm_object *uobj,
    off_t reqoff, size_t reqsize)
{

	KASSERT(rw_write_held(uobj->vmobjlock));

	if (ra == NULL || advice == UVM_ADV_RANDOM) {
		return;
	}

	if (advice == UVM_ADV_SEQUENTIAL) {

		/*
		 * always do read-ahead with a large window.
		 */

		if ((ra->ra_flags & RA_VALID) == 0) {
			ra->ra_winstart = ra->ra_next = 0;
			ra->ra_flags |= RA_VALID;
		}
		if (reqoff < ra->ra_winstart) {
			ra->ra_next = reqoff;
		}
		ra->ra_winsize = RA_WINSIZE_SEQENTIAL;
		goto do_readahead;
	}

	/*
	 * a request with the UVM_ADV_NORMAL hint (i.e. no hint).
	 *
	 * we keep a sliding window in order to determine:
	 *	- whether the previous read-ahead was successful or not.
	 *	- how many bytes to read ahead.
	 */

	/*
	 * if it's the first request for this context,
	 * initialize context and return.
	 */

	if ((ra->ra_flags & RA_VALID) == 0) {
 initialize:
		ra->ra_winstart = ra->ra_next = reqoff + reqsize;
		ra->ra_winsize = RA_WINSIZE_INIT;
		ra->ra_flags |= RA_VALID;
		goto done;
	}

	/*
	 * if it isn't in our window,
	 * initialize context and return.
	 * (read-ahead miss)
	 */

	if (reqoff < ra->ra_winstart ||
	    ra->ra_winstart + ra->ra_winsize < reqoff) {

		/*
		 * ... unless we seem to be reading the same chunk repeatedly.
		 *
		 * XXX should have some margin?
		 */

		if (reqoff + reqsize == ra->ra_winstart) {
			DPRINTF(("%s: %p: same block: off=%" PRIu64
			    ", size=%zu, winstart=%" PRIu64 "\n",
			    __func__, ra, reqoff, reqsize, ra->ra_winstart));
			goto done;
		}
		goto initialize;
	}

	/*
	 * it's in our window. (read-ahead hit)
	 *	- start read-ahead i/o if appropriate.
	 *	- advance and enlarge window.
	 */

 do_readahead:

	/*
	 * don't bother to read-ahead behind current request.
	 */

	if (reqoff > ra->ra_next) {
		ra->ra_next = reqoff;
	}

	/*
	 * try to make [reqoff, reqoff+ra_winsize) in-core.
	 * note that [reqoff, ra_next) is considered already done.
	 */

	if (reqoff + ra->ra_winsize > ra->ra_next) {
		off_t raoff = MAX(reqoff, ra->ra_next);
		size_t rasize = reqoff + ra->ra_winsize - ra->ra_next;
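
		/*
		 * ra_next >= reqoff holds here (see the adjustment above),
		 * so raoff == ra_next and rasize is the part of the window
		 * that has not been read ahead yet.
		 */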

#if defined(DIAGNOSTIC)
		if (rasize > RA_WINSIZE_MAX) {
			printf("%s: corrupted context\n", __func__);
			rasize = RA_WINSIZE_MAX;
		}
#endif /* defined(DIAGNOSTIC) */

		/*
		 * issue read-ahead only if we can start big enough i/o.
		 * otherwise we end up with a stream of small i/o.
		 */

		if (rasize >= RA_MINSIZE) {
			off_t next;

			next = ra_startio(uobj, raoff, rasize);
			ra->ra_next = next;
		}
	}

	/*
	 * update window.
	 *
	 * enlarge window by reqsize, so that it grows in a predictable manner
	 * regardless of the size of each read(2).
	 */

	ra->ra_winstart = reqoff + reqsize;
	ra->ra_winsize = MIN(RA_WINSIZE_MAX, ra->ra_winsize + reqsize);

 done:;
}

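/*
 * uvm_readahead: ad-hoc read-ahead for the given range of the object.
 *
 * => object must be unlocked by caller; vmobjlock is taken and released
 *    internally.
 * => errors from the pager are ignored; always returns 0.
 */
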
int
uvm_readahead(struct uvm_object *uobj, off_t off, off_t size)
{

	/*
	 * don't allow too much read-ahead.
	 */
	if (size > RA_WINSIZE_MAX) {
		size = RA_WINSIZE_MAX;
	}
	rw_enter(uobj->vmobjlock, RW_WRITER);
	ra_startio(uobj, off, size);
	rw_exit(uobj->vmobjlock);
	return 0;
}