/* minix/servers/vm/mem_cache.c */

/* This file implements the disk cache.
 *
 * If they exist anywhere, cached pages are always kept in a private
 * VM data structure.
 *
 * In addition, they may be any combination of:
 *    - mapped in by a file system for reading/writing by it
 *    - mapped in by a process as the result of an mmap call (future)
 *
 * This file manages the data structure of all cache blocks, and
 * the mapping of those blocks into and out of file systems.
 */
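
/* A minimal sketch of how a file system is expected to drive this cache,
 * assuming the libsys wrappers vm_set_cacheblock() and vm_map_cacheblock()
 * (which issue the VM_SETCACHEPAGE and VM_MAPCACHEPAGE requests handled
 * below); argument lists are abbreviated here:
 *
 *	// The FS has read a device block into a page-aligned anon buffer:
 *	vm_set_cacheblock(buf, dev, dev_off, ino, ino_off, ...);
 *
 *	// Later, ask VM to map the cached block back in:
 *	buf = vm_map_cacheblock(dev, dev_off, ino, ino_off, ...);
 *	// A failure return means the block is gone and must be re-read.
 */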

#include <assert.h>
#include <string.h>

#include <minix/hash.h>

#include <machine/vmparam.h>

#include "proto.h"
#include "vm.h"
#include "region.h"
#include "glo.h"
#include "cache.h"

static int cache_reference(struct phys_region *pr, struct phys_region *pr2);
static int cache_unreference(struct phys_region *pr);
static int cache_sanitycheck(struct phys_region *pr, const char *file, int line);
static int cache_writable(struct phys_region *pr);
static int cache_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l);
static int cache_lowshrink(struct vir_region *vr, vir_bytes len);
static int cache_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb, void *state,
	int len, int *io);
static int cache_pt_flags(struct vir_region *vr);

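/* The region layer calls back into this table for events on cache-backed
 * regions. A rough sketch of when each handler fires, based on how the
 * other mem_type implementations in this server are used:
 *
 *	vr = map_page_region(vmp, ..., &mem_type_cache);
 *	// - ev_pagefault: a page in vr is faulted in (e.g. via map_pf())
 *	// - ev_unreference: a physical page loses its last reference
 *	// - ev_resize / ev_lowshrink: the region is grown or shrunk
 */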
struct mem_type mem_type_cache = {
	.name = "cache memory",
	.ev_reference = cache_reference,
	.ev_unreference = cache_unreference,
	.ev_resize = cache_resize,
	.ev_lowshrink = cache_lowshrink,
	.ev_sanitycheck = cache_sanitycheck,
	.ev_pagefault = cache_pagefault,
	.writable = cache_writable,
	.pt_flags = cache_pt_flags,
};

static int cache_pt_flags(struct vir_region *vr)
{
#if defined(__arm__)
	/* Cache pages must be mapped cacheable on ARM. */
	return ARM_VM_PTE_CACHED;
#else
	return 0;
#endif
}

static int cache_reference(struct phys_region *pr, struct phys_region *pr2)
{
	/* Cache pages need no per-reference bookkeeping. */
	return OK;
}

static int cache_unreference(struct phys_region *pr)
{
	/* Cache pages are freed the same way as anonymous memory. */
	return mem_type_anon.ev_unreference(pr);
}

static int cache_sanitycheck(struct phys_region *pr, const char *file, int line)
{
	MYASSERT(usedpages_add(pr->ph->phys, VM_PAGE_SIZE) == OK);
	return OK;
}

static int cache_writable(struct phys_region *pr)
{
	/* Cache blocks are currently used only by file systems, so they are
	 * always writable.
	 */
	assert(pr->ph->refcount > 0);
	return pr->ph->phys != MAP_NONE;
}

static int cache_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l)
{
	printf("VM: cannot resize cache blocks.\n");
	return ENOMEM;
}

static int cache_lowshrink(struct vir_region *vr, vir_bytes len)
{
	return OK;
}

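/* Handle a VM_MAPCACHEPAGE request: map an already-cached range of device
 * blocks into the caller's address space. Every page in the range must be
 * present in the cache (and not marked VMSF_ONCE), or the whole mapping is
 * undone and ENOENT is returned.
 */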
int
do_mapcache(message *msg)
{
	dev_t dev = msg->m_vmmcp.dev;
	uint64_t dev_off = msg->m_vmmcp.dev_offset;
	off_t ino_off = msg->m_vmmcp.ino_offset;
	int n;
	phys_bytes bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;
	phys_bytes alloc_bytes;
	struct vir_region *vr;
	struct vmproc *caller;
	vir_bytes offset;
	int io = 0;

	if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
		printf("VM: unaligned cache operation\n");
		return EFAULT;
	}

	if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
	caller = &vmproc[n];

	if(bytes < VM_PAGE_SIZE) return EINVAL;

	alloc_bytes = bytes;
#ifdef _MINIX_MAGIC
	/* Make sure there is a 1-page hole available before the region,
	 * in case instrumentation needs to allocate in-band metadata later.
	 * This effectively halves the usable part of the caller's address
	 * space, though, so only do it if we are instrumenting at all.
	 */
	alloc_bytes += VM_PAGE_SIZE;
#endif
	if (!(vr = map_page_region(caller, VM_PAGE_SIZE, VM_DATATOP,
	    alloc_bytes, VR_ANON | VR_WRITABLE, 0, &mem_type_cache))) {
		printf("VM: map_page_region failed\n");
		return ENOMEM;
	}
#ifdef _MINIX_MAGIC
	/* Unmap the extra page again, leaving the hole in place. */
	map_unmap_region(caller, vr, 0, VM_PAGE_SIZE);
#endif

	assert(vr->length == bytes);

	for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		struct cached_page *hb;

		assert(vr->length == bytes);
		assert(offset < vr->length);

		if(!(hb = find_cached_page_bydev(dev, dev_off + offset,
		    msg->m_vmmcp.ino, ino_off + offset, 1)) ||
		    (hb->flags & VMSF_ONCE)) {
			map_unmap_region(caller, vr, 0, bytes);
			return ENOENT;
		}

		assert(!vr->param.pb_cache);
		vr->param.pb_cache = hb->page;

		assert(vr->length == bytes);
		assert(offset < vr->length);

		if(map_pf(caller, vr, offset, 1, NULL, NULL, 0, &io) != OK) {
			map_unmap_region(caller, vr, 0, bytes);
			printf("VM: map_pf failed\n");
			return ENOMEM;
		}
		assert(!vr->param.pb_cache);
	}

	memset(msg, 0, sizeof(*msg));

	assert(vr);
	msg->m_vmmcp_reply.addr = (void *) vr->vaddr;

#if CACHE_SANITY
	cache_sanitycheck_internal();
#endif

	return OK;
}

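/* do_mapcache() and cache_pagefault() cooperate through vr->param.pb_cache:
 * the mapping loop stores the cached page's physblock there and then calls
 * map_pf(), whose fault path lands here, where the pre-existing physblock
 * is linked into the region instead of a fresh page being allocated. In
 * sketch form (names as used above):
 *
 *	vr->param.pb_cache = hb->page;          // do_mapcache()
 *	map_pf(caller, vr, offset, 1, ...);     // triggers cache_pagefault()
 *	// cache_pagefault() links pb_cache into the region and clears it,
 *	// so do_mapcache() can assert(!vr->param.pb_cache) afterwards.
 */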
static int cache_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb,
	void *state, int len, int *io)
{
	vir_bytes offset = ph->offset;
	assert(ph->ph->phys == MAP_NONE);
	assert(region->param.pb_cache);
	/* Replace the unallocated page with the cached physical page. */
	pb_unreferenced(region, ph, 0);
	pb_link(ph, region->param.pb_cache, offset, region);
	region->param.pb_cache = NULL;

	return OK;
}

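/* Handle a VM_SETCACHEPAGE request: a file system offers a range of its own
 * anonymous pages as cache blocks for the given device/inode position, so
 * that VM can hand them out again later through do_mapcache().
 */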
int
do_setcache(message *msg)
{
	int r;
	dev_t dev = msg->m_vmmcp.dev;
	uint64_t dev_off = msg->m_vmmcp.dev_offset;
	off_t ino_off = msg->m_vmmcp.ino_offset;
	int flags = msg->m_vmmcp.flags;
	int n;
	struct vmproc *caller;
	phys_bytes offset;
	phys_bytes bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;

	if(bytes < VM_PAGE_SIZE) return EINVAL;

	if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
		printf("VM: unaligned cache operation\n");
		return EFAULT;
	}

	if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
	caller = &vmproc[n];

	for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		struct vir_region *region;
		struct phys_region *phys_region = NULL;
		vir_bytes v = (vir_bytes) msg->m_vmmcp.block + offset;
		struct cached_page *hb;

		if(!(region = map_lookup(caller, v, &phys_region))) {
			printf("VM: error: no reasonable memory region given "
			    "(offset 0x%lx, 0x%lx)\n", offset, v);
			return EFAULT;
		}

		if(!phys_region) {
			printf("VM: error: no available memory region given\n");
			return EFAULT;
		}

		if((hb=find_cached_page_bydev(dev, dev_off + offset,
			msg->m_vmmcp.ino, ino_off + offset, 1))) {
			/* A cache entry for this block already exists. */
			if(hb->page != phys_region->ph ||
			    (hb->flags & VMSF_ONCE)) {
				/* The previous cache entry has become
				 * obsolete; make a new one. rmcache()
				 * removes it from the cache and frees
				 * the page if it isn't mapped in anywhere
				 * else.
				 */
				rmcache(hb);
			} else {
				/* The block was already there; only the inode
				 * information may have changed, which is fine.
				 */
				continue;
			}
		}

		if(phys_region->memtype != &mem_type_anon &&
			phys_region->memtype != &mem_type_anon_contig) {
			printf("VM: error: no reasonable memory type\n");
			return EFAULT;
		}

		if(phys_region->ph->refcount != 1) {
			printf("VM: error: no reasonable refcount\n");
			return EFAULT;
		}

		phys_region->memtype = &mem_type_cache;

		if((r=addcache(dev, dev_off + offset, msg->m_vmmcp.ino,
		    ino_off + offset, flags, phys_region->ph)) != OK) {
			printf("VM: addcache failed\n");
			return r;
		}
	}

#if CACHE_SANITY
	cache_sanitycheck_internal();
#endif

	return OK;
}

/*
 * Forget all cached pages associated with a particular block.
 */
int
do_forgetcache(message *msg)
{
	struct cached_page *hb;
	dev_t dev;
	uint64_t dev_off;
	phys_bytes bytes, offset;

	dev = msg->m_vmmcp.dev;
	dev_off = msg->m_vmmcp.dev_offset;
	bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;

	if (bytes < VM_PAGE_SIZE)
		return EINVAL;

	if (dev_off % PAGE_SIZE) {
		printf("VM: unaligned cache operation\n");
		return EFAULT;
	}

	for (offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		if ((hb = find_cached_page_bydev(dev, dev_off + offset,
		    VMC_NO_INODE, 0 /*ino_off*/, 0 /*touchlru*/)) != NULL)
			rmcache(hb);
	}

	return OK;
}

/*
 * A file system wants to invalidate all pages belonging to a certain device.
 */
int
do_clearcache(message *msg)
{
	dev_t dev;

	dev = msg->m_vmmcp.dev;

	clear_cache_bydev(dev);

	return OK;
}