xref: /minix3/minix/servers/vm/mem_cache.c (revision 4506a0eebfee22b2a5198e1438f5780efdd94aa4)
1433d6423SLionel Sambuc 
2433d6423SLionel Sambuc /* This file implements the disk cache.
3433d6423SLionel Sambuc  *
4433d6423SLionel Sambuc  * If they exist anywhere, cached pages are always in a private
5433d6423SLionel Sambuc  * VM datastructure.
6433d6423SLionel Sambuc  *
7433d6423SLionel Sambuc  * They might also be any combination of:
8433d6423SLionel Sambuc  *    - be mapped in by a filesystem for reading/writing by it
9433d6423SLionel Sambuc  *    - be mapped in by a process as the result of an mmap call (future)
10433d6423SLionel Sambuc  *
11433d6423SLionel Sambuc  * This file manages the datastructure of all cache blocks, and
12433d6423SLionel Sambuc  * mapping them in and out of filesystems.
13433d6423SLionel Sambuc  */
14433d6423SLionel Sambuc 
15433d6423SLionel Sambuc #include <assert.h>
16433d6423SLionel Sambuc #include <string.h>
17433d6423SLionel Sambuc 
18433d6423SLionel Sambuc #include <minix/hash.h>
19433d6423SLionel Sambuc 
20433d6423SLionel Sambuc #include <machine/vmparam.h>
21433d6423SLionel Sambuc 
22433d6423SLionel Sambuc #include "proto.h"
23433d6423SLionel Sambuc #include "vm.h"
24433d6423SLionel Sambuc #include "region.h"
25433d6423SLionel Sambuc #include "glo.h"
26433d6423SLionel Sambuc #include "cache.h"
27433d6423SLionel Sambuc 
/* Event handlers implementing the "cache memory" type; wired into the
 * generic region code through the mem_type_cache table below.
 */
static int cache_reference(struct phys_region *pr, struct phys_region *pr2);
static int cache_unreference(struct phys_region *pr);
static int cache_sanitycheck(struct phys_region *pr, const char *file, int line);
static int cache_writable(struct phys_region *pr);
static int cache_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l);
static int cache_lowshrink(struct vir_region *vr, vir_bytes len);
static int cache_pagefault(struct vmproc *vmp, struct vir_region *region,
        struct phys_region *ph, int write, vfs_callback_t cb, void *state,
	int len, int *io);
static int cache_pt_flags(struct vir_region *vr);

/* Method table for cache-backed memory regions: the generic region code
 * calls these hooks for reference counting, resizing, sanity checking,
 * page fault resolution and page table flag selection.
 */
struct mem_type mem_type_cache = {
	.name = "cache memory",
	.ev_reference = cache_reference,
	.ev_unreference = cache_unreference,
	.ev_resize = cache_resize,
	.ev_lowshrink = cache_lowshrink,
	.ev_sanitycheck = cache_sanitycheck,
	.ev_pagefault = cache_pagefault,
	.writable = cache_writable,
	.pt_flags = cache_pt_flags,
};
50433d6423SLionel Sambuc 
/* Return the architecture-specific page table entry flags to use for
 * mappings of cache pages: on ARM the mapping is marked cacheable;
 * other architectures need no extra bits.
 */
static int
cache_pt_flags(struct vir_region *vr)
{
#if !defined(__arm__)
	return 0;
#else
	return ARM_VM_PTE_CACHED;
#endif
}
58433d6423SLionel Sambuc 
59433d6423SLionel Sambuc 
cache_reference(struct phys_region * pr,struct phys_region * pr2)60433d6423SLionel Sambuc static int cache_reference(struct phys_region *pr, struct phys_region *pr2)
61433d6423SLionel Sambuc {
62433d6423SLionel Sambuc 	return OK;
63433d6423SLionel Sambuc }
64433d6423SLionel Sambuc 
cache_unreference(struct phys_region * pr)65433d6423SLionel Sambuc static int cache_unreference(struct phys_region *pr)
66433d6423SLionel Sambuc {
67433d6423SLionel Sambuc 	return mem_type_anon.ev_unreference(pr);
68433d6423SLionel Sambuc }
69433d6423SLionel Sambuc 
cache_sanitycheck(struct phys_region * pr,const char * file,int line)70433d6423SLionel Sambuc static int cache_sanitycheck(struct phys_region *pr, const char *file, int line)
71433d6423SLionel Sambuc {
72433d6423SLionel Sambuc 	MYASSERT(usedpages_add(pr->ph->phys, VM_PAGE_SIZE) == OK);
73433d6423SLionel Sambuc 	return OK;
74433d6423SLionel Sambuc }
75433d6423SLionel Sambuc 
cache_writable(struct phys_region * pr)76433d6423SLionel Sambuc static int cache_writable(struct phys_region *pr)
77433d6423SLionel Sambuc {
78433d6423SLionel Sambuc 	/* Cache blocks are at the moment only used by filesystems so always writable. */
79433d6423SLionel Sambuc 	assert(pr->ph->refcount > 0);
80433d6423SLionel Sambuc 	return pr->ph->phys != MAP_NONE;
81433d6423SLionel Sambuc }
82433d6423SLionel Sambuc 
/* Resize hook: cache regions have a fixed size, so refuse any resize. */
static int cache_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l)
{
	printf("VM: cannot resize cache blocks.\n");
	return ENOMEM;
}
88433d6423SLionel Sambuc 
/* Low-end shrink hook: nothing to do for cache regions. */
static int cache_lowshrink(struct vir_region *vr, vir_bytes len)
{
	return OK;
}
9336f477c2SCristiano Giuffrida 
/*
 * Map a range of already-cached device blocks into the caller's (a file
 * system's) address space.  Every requested page must be present in the
 * cache; otherwise the partial mapping is undone and ENOENT is returned.
 * On success the reply message carries the virtual address of the new
 * mapping.
 */
int
do_mapcache(message *msg)
{
	dev_t dev = msg->m_vmmcp.dev;		/* device holding the blocks */
	uint64_t dev_off = msg->m_vmmcp.dev_offset;
	off_t ino_off = msg->m_vmmcp.ino_offset;
	int n;
	phys_bytes bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;
	phys_bytes alloc_bytes;
	struct vir_region *vr;
	struct vmproc *caller;
	vir_bytes offset;
	int io = 0;

	/* Both the device and the inode offset must be page-aligned. */
	if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
		printf("VM: unaligned cache operation\n");
		return EFAULT;
	}

	if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
	caller = &vmproc[n];

	if(bytes < VM_PAGE_SIZE) return EINVAL;

	alloc_bytes = bytes;
#ifdef _MINIX_MAGIC
	/* Make sure there is a 1-page hole available before the region,
	 * in case instrumentation needs to allocate in-band metadata later.
	 * This does effectively halve the usable part of the caller's address
	 * space, though, so only do this if we are instrumenting at all.
	 * Also make sure it falls within the mmap range, so that it is
	 * transferred upon live update.  This again cuts the usable part of
	 * the address space for caching purposes in half.
	 */
	alloc_bytes += VM_PAGE_SIZE;
#endif
	/* Reserve a fresh, writable region in the caller's mmap range,
	 * backed by the cache memory type so that faults on it are
	 * resolved by cache_pagefault() below.
	 */
	if (!(vr = map_page_region(caller, VM_MMAPBASE, VM_MMAPTOP,
	    alloc_bytes, VR_ANON | VR_WRITABLE, 0, &mem_type_cache))) {
		printf("VM: map_page_region failed\n");
		return ENOMEM;
	}
#ifdef _MINIX_MAGIC
	/* Give back the extra hole page reserved above. */
	map_unmap_region(caller, vr, 0, VM_PAGE_SIZE);
#endif

	assert(vr->length == bytes);

	/* For each page: look up the cached page, stash it in
	 * vr->param.pb_cache, and force a page fault with map_pf().  The
	 * fault is handled by cache_pagefault(), which links the stashed
	 * page into the region and clears pb_cache again.
	 */
	for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		struct cached_page *hb;

		assert(vr->length == bytes);
		assert(offset < vr->length);

		/* Pages marked VMSF_ONCE must not be mapped in a second
		 * time; treat them like a cache miss.
		 */
		if(!(hb = find_cached_page_bydev(dev, dev_off + offset,
		    msg->m_vmmcp.ino, ino_off + offset, 1)) ||
		    (hb->flags & VMSF_ONCE)) {
			map_unmap_region(caller, vr, 0, bytes);
			return ENOENT;
		}

		assert(!vr->param.pb_cache);
		vr->param.pb_cache = hb->page;

		assert(vr->length == bytes);
		assert(offset < vr->length);

		if(map_pf(caller, vr, offset, 1, NULL, NULL, 0, &io) != OK) {
			map_unmap_region(caller, vr, 0, bytes);
			printf("VM: map_pf failed\n");
			return ENOMEM;
		}
		/* cache_pagefault() must have consumed the stashed page. */
		assert(!vr->param.pb_cache);
	}

	/* Reply with the virtual address of the mapping. */
	memset(msg, 0, sizeof(*msg));

	msg->m_vmmcp_reply.addr = (void *) vr->vaddr;

 	assert(vr);

#if CACHE_SANITY
	cache_sanitycheck_internal();
#endif

	return OK;
}
180433d6423SLionel Sambuc 
/* Page fault handler for cache regions.  do_mapcache() stashes the cached
 * physical page for the faulting offset in region->param.pb_cache right
 * before forcing a fault with map_pf(); here we detach the region's
 * current (not yet backed) page and link in the stashed cache page
 * instead.  No VFS callback or I/O is ever needed.
 */
static int cache_pagefault(struct vmproc *vmp, struct vir_region *region,
        struct phys_region *ph, int write, vfs_callback_t cb,
	void *state, int len, int *io)
{
	vir_bytes offset = ph->offset;
	/* The faulting page must not have physical memory yet, and a
	 * cache page must have been stashed for us by do_mapcache().
	 */
	assert(ph->ph->phys == MAP_NONE);
	assert(region->param.pb_cache);
	pb_unreferenced(region, ph, 0);
	pb_link(ph, region->param.pb_cache, offset, region);
	/* Consume the stash; do_mapcache() asserts it is cleared. */
	region->param.pb_cache = NULL;

	return OK;
}
194433d6423SLionel Sambuc 
/*
 * A file system tells VM that a range of anonymous pages in its own
 * address space now holds the contents of consecutive device (and
 * optionally inode) blocks; convert those pages to cache memory and
 * enter them into the cache.
 */
int
do_setcache(message *msg)
{
	int r;
	dev_t dev = msg->m_vmmcp.dev;
	uint64_t dev_off = msg->m_vmmcp.dev_offset;
	off_t ino_off = msg->m_vmmcp.ino_offset;
	int flags = msg->m_vmmcp.flags;
	int n;
	struct vmproc *caller;
	phys_bytes offset;
	phys_bytes bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;

	if(bytes < VM_PAGE_SIZE) return EINVAL;

	/* Both the device and the inode offset must be page-aligned. */
	if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
		printf("VM: unaligned cache operation\n");
		return EFAULT;
	}

	if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
	caller = &vmproc[n];

	for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
		struct vir_region *region;
		struct phys_region *phys_region = NULL;
		/* Virtual address of this page in the caller's space. */
		vir_bytes v = (vir_bytes) msg->m_vmmcp.block + offset;
                struct cached_page *hb;

		if(!(region = map_lookup(caller, v, &phys_region))) {
			printf("VM: error: no reasonable memory region given (offset 0x%lx, 0x%lx)\n", offset, v);
			return EFAULT;
		}

		if(!phys_region) {
			printf("VM: error: no available memory region given\n");
			return EFAULT;
		}

		if((hb=find_cached_page_bydev(dev, dev_off + offset,
			msg->m_vmmcp.ino, ino_off + offset, 1))) {
			/* block inode info updated */
			if(hb->page != phys_region->ph ||
			    (hb->flags & VMSF_ONCE)) {
				/* previous cache entry has become
				 * obsolete; make a new one. rmcache
				 * removes it from the cache and frees
				 * the page if it isn't mapped in anywhere
				 * else.
				 */
                        	rmcache(hb);
			} else {
				/* block was already there, inode info might've changed which is fine */
				continue;
			}
		}

		/* Only plain anonymous pages may be turned into cache
		 * pages.
		 */
		if(phys_region->memtype != &mem_type_anon &&
			phys_region->memtype != &mem_type_anon_contig) {
			printf("VM: error: no reasonable memory type\n");
			return EFAULT;
		}

		/* The caller must hold the only reference to the page. */
		if(phys_region->ph->refcount != 1) {
			printf("VM: error: no reasonable refcount\n");
			return EFAULT;
		}

		phys_region->memtype = &mem_type_cache;

		/* NOTE(review): if addcache() fails here, the page's
		 * memtype has already been switched to cache memory —
		 * confirm callers treat this error as fatal for the block.
		 */
		if((r=addcache(dev, dev_off + offset, msg->m_vmmcp.ino,
		    ino_off + offset, flags, phys_region->ph)) != OK) {
			printf("VM: addcache failed\n");
			return r;
		}
	}

#if CACHE_SANITY
	cache_sanitycheck_internal();
#endif

	return OK;
}
278433d6423SLionel Sambuc 
279433d6423SLionel Sambuc /*
280e94f856bSDavid van Moolenbroek  * Forget all pages associated to a particular block in the cache.
281e94f856bSDavid van Moolenbroek  */
282e94f856bSDavid van Moolenbroek int
do_forgetcache(message * msg)283e94f856bSDavid van Moolenbroek do_forgetcache(message *msg)
284e94f856bSDavid van Moolenbroek {
285e94f856bSDavid van Moolenbroek 	struct cached_page *hb;
286e94f856bSDavid van Moolenbroek 	dev_t dev;
287e94f856bSDavid van Moolenbroek 	uint64_t dev_off;
288e94f856bSDavid van Moolenbroek 	phys_bytes bytes, offset;
289e94f856bSDavid van Moolenbroek 
290e94f856bSDavid van Moolenbroek 	dev = msg->m_vmmcp.dev;
291e94f856bSDavid van Moolenbroek 	dev_off = msg->m_vmmcp.dev_offset;
292e94f856bSDavid van Moolenbroek 	bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;
293e94f856bSDavid van Moolenbroek 
294e94f856bSDavid van Moolenbroek 	if (bytes < VM_PAGE_SIZE)
295e94f856bSDavid van Moolenbroek 		return EINVAL;
296e94f856bSDavid van Moolenbroek 
297e94f856bSDavid van Moolenbroek 	if (dev_off % PAGE_SIZE) {
298e94f856bSDavid van Moolenbroek 		printf("VM: unaligned cache operation\n");
299e94f856bSDavid van Moolenbroek 		return EFAULT;
300e94f856bSDavid van Moolenbroek 	}
301e94f856bSDavid van Moolenbroek 
302e94f856bSDavid van Moolenbroek 	for (offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
303e94f856bSDavid van Moolenbroek 		if ((hb = find_cached_page_bydev(dev, dev_off + offset,
304e94f856bSDavid van Moolenbroek 		    VMC_NO_INODE, 0 /*ino_off*/, 0 /*touchlru*/)) != NULL)
305e94f856bSDavid van Moolenbroek 			rmcache(hb);
306e94f856bSDavid van Moolenbroek 	}
307e94f856bSDavid van Moolenbroek 
308e94f856bSDavid van Moolenbroek 	return OK;
309e94f856bSDavid van Moolenbroek }
310e94f856bSDavid van Moolenbroek 
311e94f856bSDavid van Moolenbroek /*
312433d6423SLionel Sambuc  * A file system wants to invalidate all pages belonging to a certain device.
313433d6423SLionel Sambuc  */
314433d6423SLionel Sambuc int
do_clearcache(message * msg)315433d6423SLionel Sambuc do_clearcache(message *msg)
316433d6423SLionel Sambuc {
317433d6423SLionel Sambuc 	dev_t dev;
318433d6423SLionel Sambuc 
319433d6423SLionel Sambuc 	dev = msg->m_vmmcp.dev;
320433d6423SLionel Sambuc 
321433d6423SLionel Sambuc 	clear_cache_bydev(dev);
322433d6423SLionel Sambuc 
323433d6423SLionel Sambuc 	return OK;
324433d6423SLionel Sambuc }
325