/* This file implements the disk cache.
 *
 * If they exist anywhere, cached pages are always in a private
 * VM data structure.
 *
 * They might also be any combination of:
 *    - mapped in by a filesystem for reading/writing by it
 *    - mapped in by a process as the result of an mmap call (future)
 *
 * This file manages the data structure of all cache blocks, and
 * mapping them in and out of filesystems.
 */
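
/* Filesystems drive this cache through the four requests handled below:
 * do_setcache() donates pages holding block data to the cache,
 * do_mapcache() maps previously cached blocks back into the caller, and
 * do_forgetcache()/do_clearcache() invalidate cached pages for a block
 * range or for a whole device, respectively.
 */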

#include <assert.h>
#include <string.h>

#include <minix/hash.h>

#include <machine/vmparam.h>

#include "proto.h"
#include "vm.h"
#include "region.h"
#include "glo.h"
#include "cache.h"

static int cache_reference(struct phys_region *pr, struct phys_region *pr2);
static int cache_unreference(struct phys_region *pr);
static int cache_sanitycheck(struct phys_region *pr, const char *file, int line);
static int cache_writable(struct phys_region *pr);
static int cache_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l);
static int cache_lowshrink(struct vir_region *vr, vir_bytes len);
static int cache_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb, void *state,
	int len, int *io);
static int cache_pt_flags(struct vir_region *vr);

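/* The memory type hooks that VM invokes for cache-backed regions. */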
struct mem_type mem_type_cache = {
	.name = "cache memory",
	.ev_reference = cache_reference,
	.ev_unreference = cache_unreference,
	.ev_resize = cache_resize,
	.ev_lowshrink = cache_lowshrink,
	.ev_sanitycheck = cache_sanitycheck,
	.ev_pagefault = cache_pagefault,
	.writable = cache_writable,
	.pt_flags = cache_pt_flags,
};

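/* Extra page table entry flags for cache pages; on ARM they are mapped
 * with the cacheable PTE flag.
 */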
static int cache_pt_flags(struct vir_region *vr)
{
#if defined(__arm__)
	return ARM_VM_PTE_CACHED;
#else
	return 0;
#endif
}

static int cache_reference(struct phys_region *pr, struct phys_region *pr2)
{
	return OK;
}

static int cache_unreference(struct phys_region *pr)
{
	/* Cache pages are unreferenced the same way anonymous memory is. */
	return mem_type_anon.ev_unreference(pr);
}

static int cache_sanitycheck(struct phys_region *pr, const char *file, int line)
{
	MYASSERT(usedpages_add(pr->ph->phys, VM_PAGE_SIZE) == OK);
	return OK;
}

static int cache_writable(struct phys_region *pr)
{
	/* Cache blocks are currently used only by filesystems, so they are
	 * always writable.
	 */
	assert(pr->ph->refcount > 0);
	return pr->ph->phys != MAP_NONE;
}

static int cache_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l)
{
	printf("VM: cannot resize cache blocks.\n");
	return ENOMEM;
}

static int cache_lowshrink(struct vir_region *vr, vir_bytes len)
{
	return OK;
}

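/* Map a run of cached pages, identified by device and device offset (and
 * optionally inode and inode offset), into the calling filesystem's
 * address space.
 */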
int
do_mapcache(message *msg)
{
	dev_t dev = msg->m_vmmcp.dev;
	uint64_t dev_off = msg->m_vmmcp.dev_offset;
	off_t ino_off = msg->m_vmmcp.ino_offset;
	int n;
	phys_bytes bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;
	phys_bytes alloc_bytes;
	struct vir_region *vr;
	struct vmproc *caller;
	vir_bytes offset;
	int io = 0;

	if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
		printf("VM: unaligned cache operation\n");
		return EFAULT;
	}

	if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
	caller = &vmproc[n];

	if(bytes < VM_PAGE_SIZE) return EINVAL;

	alloc_bytes = bytes;
#ifdef _MINIX_MAGIC
	/* Make sure there is a 1-page hole available before the region,
	 * in case instrumentation needs to allocate in-band metadata later.
	 * This does effectively halve the usable part of the caller's address
	 * space, though, so only do this if we are instrumenting at all.
	 * Also make sure it falls within the mmap range, so that it is
	 * transferred upon live update. This again cuts the usable part of
	 * the address space for caching purposes in half.
	 */
	alloc_bytes += VM_PAGE_SIZE;
#endif
	if(!(vr = map_page_region(caller, VM_MMAPBASE, VM_MMAPTOP,
	    alloc_bytes, VR_ANON | VR_WRITABLE, 0, &mem_type_cache))) {
		printf("VM: map_page_region failed\n");
		return ENOMEM;
	}
#ifdef _MINIX_MAGIC
	map_unmap_region(caller, vr, 0, VM_PAGE_SIZE);
#endif

	assert(vr->length == bytes);

	for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		struct cached_page *hb;

		assert(vr->length == bytes);
		assert(offset < vr->length);

		if(!(hb = find_cached_page_bydev(dev, dev_off + offset,
		    msg->m_vmmcp.ino, ino_off + offset, 1)) ||
		    (hb->flags & VMSF_ONCE)) {
			map_unmap_region(caller, vr, 0, bytes);
			return ENOENT;
		}

		/* Stash the cached page where cache_pagefault() will find
		 * it, then fault the page in to link it into the region.
		 */
		assert(!vr->param.pb_cache);
		vr->param.pb_cache = hb->page;

		assert(vr->length == bytes);
		assert(offset < vr->length);

		if(map_pf(caller, vr, offset, 1, NULL, NULL, 0, &io) != OK) {
			map_unmap_region(caller, vr, 0, bytes);
			printf("VM: map_pf failed\n");
			return ENOMEM;
		}
		assert(!vr->param.pb_cache);
	}

	memset(msg, 0, sizeof(*msg));

	msg->m_vmmcp_reply.addr = (void *) vr->vaddr;

	assert(vr);

#if CACHE_SANITY
	cache_sanitycheck_internal();
#endif

	return OK;
}

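/* Page fault handler for cache regions. do_mapcache() stashes the cached
 * page in the region's param.pb_cache and then forces a fault on that page;
 * all we do here is link the stashed page into the faulting slot.
 */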
static int cache_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb,
	void *state, int len, int *io)
{
	vir_bytes offset = ph->offset;
	assert(ph->ph->phys == MAP_NONE);
	assert(region->param.pb_cache);
	pb_unreferenced(region, ph, 0);
	pb_link(ph, region->param.pb_cache, offset, region);
	region->param.pb_cache = NULL;

	return OK;
}

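/* Enter a filesystem's pages into the cache: the caller donates anonymous
 * pages it has filled with block data, identified from here on by device
 * and device offset (and optionally inode and inode offset).
 */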
int
do_setcache(message *msg)
{
	int r;
	dev_t dev = msg->m_vmmcp.dev;
	uint64_t dev_off = msg->m_vmmcp.dev_offset;
	off_t ino_off = msg->m_vmmcp.ino_offset;
	int flags = msg->m_vmmcp.flags;
	int n;
	struct vmproc *caller;
	phys_bytes offset;
	phys_bytes bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;

	if(bytes < VM_PAGE_SIZE) return EINVAL;

	if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
		printf("VM: unaligned cache operation\n");
		return EFAULT;
	}

	if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
	caller = &vmproc[n];

	for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		struct vir_region *region;
		struct phys_region *phys_region = NULL;
		vir_bytes v = (vir_bytes) msg->m_vmmcp.block + offset;
		struct cached_page *hb;

		if(!(region = map_lookup(caller, v, &phys_region))) {
			printf("VM: error: no reasonable memory region given (offset 0x%lx, 0x%lx)\n", offset, v);
			return EFAULT;
		}

		if(!phys_region) {
			printf("VM: error: no available memory region given\n");
			return EFAULT;
		}

		if((hb = find_cached_page_bydev(dev, dev_off + offset,
		    msg->m_vmmcp.ino, ino_off + offset, 1))) {
			/* The block's inode info has been updated by the
			 * lookup.
			 */
			if(hb->page != phys_region->ph ||
			    (hb->flags & VMSF_ONCE)) {
				/* The previous cache entry has become
				 * obsolete; make a new one. rmcache()
				 * removes it from the cache and frees
				 * the page if it isn't mapped in anywhere
				 * else.
				 */
				rmcache(hb);
			} else {
				/* The block was already there; only its
				 * inode info might have changed, which is
				 * fine.
				 */
				continue;
			}
		}

		if(phys_region->memtype != &mem_type_anon &&
		    phys_region->memtype != &mem_type_anon_contig) {
			printf("VM: error: no reasonable memory type\n");
			return EFAULT;
		}

		if(phys_region->ph->refcount != 1) {
			printf("VM: error: no reasonable refcount\n");
			return EFAULT;
		}

		phys_region->memtype = &mem_type_cache;

		if((r = addcache(dev, dev_off + offset, msg->m_vmmcp.ino,
		    ino_off + offset, flags, phys_region->ph)) != OK) {
			printf("VM: addcache failed\n");
			return r;
		}
	}

#if CACHE_SANITY
	cache_sanitycheck_internal();
#endif

	return OK;
}

/*
 * Forget all pages associated with a particular block in the cache.
 */
int
do_forgetcache(message *msg)
{
	struct cached_page *hb;
	dev_t dev;
	uint64_t dev_off;
	phys_bytes bytes, offset;

	dev = msg->m_vmmcp.dev;
	dev_off = msg->m_vmmcp.dev_offset;
	bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;

	if (bytes < VM_PAGE_SIZE)
		return EINVAL;

	if (dev_off % PAGE_SIZE) {
		printf("VM: unaligned cache operation\n");
		return EFAULT;
	}

	for (offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		if ((hb = find_cached_page_bydev(dev, dev_off + offset,
		    VMC_NO_INODE, 0 /*ino_off*/, 0 /*touchlru*/)) != NULL)
			rmcache(hb);
	}

	return OK;
}

/*
 * A filesystem wants to invalidate all pages belonging to a certain device.
 */
int
do_clearcache(message *msg)
{
	dev_t dev;

	dev = msg->m_vmmcp.dev;

	clear_cache_bydev(dev);

	return OK;
}