/* minix/servers/vm/cache.c */

/* File that implements the data structure, insert, lookup and remove
 * functions for file system cache blocks.
 *
 * Cache blocks can be mapped into the memory of processes by the
 * 'cache' and 'file' memory types.
 */

#include <assert.h>
#include <string.h>

#include <minix/hash.h>

#include "proto.h"
#include "vm.h"
#include "region.h"
#include "glo.h"
#include "cache.h"

/* cache data structure */
#define HASHSIZE 65536

static struct cached_page *cache_hash_bydev[HASHSIZE];
static struct cached_page *cache_hash_byino[HASHSIZE];
static struct cached_page *lru_oldest = NULL, *lru_newest = NULL;

static u32_t cached_pages = 0;

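/* The LRU list is doubly linked through the 'older' and 'newer'
 * pointers, with lru_oldest and lru_newest at its ends.  lru_rm()
 * unlinks a page from the list, patching its neighbours and the end
 * pointers, and decrements the cached page count.
 */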
static void lru_rm(struct cached_page *hb)
{
	struct cached_page *newer = hb->newer, *older = hb->older;
	assert(lru_newest);
	assert(lru_oldest);
	if(newer) {
		assert(newer->older == hb);
		newer->older = older;
	}
	if(older) {
		assert(older->newer == hb);
		older->newer = newer;
	}

	if(lru_newest == hb) { assert(!newer); lru_newest = older; }
	if(lru_oldest == hb) { assert(!older); lru_oldest = newer; }

	if(lru_newest) assert(lru_newest->newer == NULL);
	if(lru_oldest) assert(lru_oldest->older == NULL);

	cached_pages--;
}

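/* Append a page at the 'newest' end of the LRU list and increment
 * the cached page count.
 */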
static void lru_add(struct cached_page *hb)
{
	if(lru_newest) {
		assert(lru_oldest);
		assert(!lru_newest->newer);
		lru_newest->newer = hb;
	} else {
		assert(!lru_oldest);
		lru_oldest = hb;
	}

	hb->older = lru_newest;
	hb->newer = NULL;
	lru_newest = hb;

	cached_pages++;
}

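/* Mark a page as most recently used by moving it to the newest end
 * of the LRU list.
 */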
void cache_lru_touch(struct cached_page *hb)
{
	lru_rm(hb);
	lru_add(hb);
}

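/* Hash a (u32_t, u64_t) key pair, such as a device number and offset
 * or an inode number and offset, into a hash table bucket index.
 */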
static __inline u32_t makehash(u32_t p1, u64_t p2)
{
	u32_t offlo = ex64lo(p2), offhi = ex64hi(p2),
		v = 0x12345678;
	hash_mix(p1, offlo, offhi);
	hash_final(offlo, offhi, v);

	return v % HASHSIZE;
}

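/* Verify the internal consistency of the cache: every page must be
 * findable through its device hash chain, the inode hash must contain
 * exactly the pages with a known inode, and the LRU list must link up
 * both ways and match the cached page count.
 */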
#if CACHE_SANITY
void cache_sanitycheck_internal(void)
{
	int h;
	int n = 0;
	int byino = 0;
	int withino = 0;
	int bydev_total = 0, lru_total = 0;
	struct cached_page *cp;

	for(h = 0; h < HASHSIZE; h++) {
		for(cp = cache_hash_bydev[h]; cp; cp = cp->hash_next_dev) {
			assert(cp->dev != NO_DEV);
			assert(h == makehash(cp->dev, cp->dev_offset));
			/* Pass 0 for touchlru so that the check does
			 * not reorder the LRU list.
			 */
			assert(cp == find_cached_page_bydev(cp->dev,
				cp->dev_offset, cp->ino, cp->ino_offset, 0));
			if(cp->ino != VMC_NO_INODE) withino++;
			bydev_total++;
			n++;
			assert(n < 1500000);
		}
		for(cp = cache_hash_byino[h]; cp; cp = cp->hash_next_ino) {
			assert(cp->dev != NO_DEV);
			assert(cp->ino != VMC_NO_INODE);
			assert(h == makehash(cp->ino, cp->ino_offset));
			byino++;
			n++;
			assert(n < 1500000);
		}
	}

	assert(byino == withino);

	if(lru_newest) {
		assert(lru_oldest);
		assert(!lru_newest->newer);
		assert(!lru_oldest->older);
	} else {
		assert(!lru_oldest);
	}

	for(cp = lru_oldest; cp; cp = cp->newer) {
		struct cached_page *newer = cp->newer,
			*older = cp->older;
		if(newer) assert(newer->older == cp);
		if(older) assert(older->newer == cp);
		lru_total++;
	}

	assert(lru_total == bydev_total);

	assert(lru_total == cached_pages);
}
#endif

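/* rmhash_f() expands into a function that unlinks a page from a
 * singly linked hash chain; it is instantiated once for the inode
 * hash and once for the device hash below.
 */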
#define rmhash_f(fname, nextfield)				\
static void							\
fname(struct cached_page *cp, struct cached_page **head)	\
{								\
	struct cached_page *hb;					\
	if(*head == cp) { *head = cp->nextfield; return; }	\
	for(hb = *head; hb && cp != hb->nextfield; hb = hb->nextfield) ; \
	assert(hb); assert(hb->nextfield == cp);		\
	hb->nextfield = cp->nextfield;				\
	return;							\
}

rmhash_f(rmhash_byino, hash_next_ino)
rmhash_f(rmhash_bydev, hash_next_dev)

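/* Insert a page at the head of its inode hash chain. */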
static void addcache_byino(struct cached_page *hb)
{
	int hv_ino = makehash(hb->ino, hb->ino_offset);
	assert(hb->ino != VMC_NO_INODE);
	hb->hash_next_ino = cache_hash_byino[hv_ino];
	cache_hash_byino[hv_ino] = hb;
}

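/* Associate a page with a new (inode, offset) pair, first removing
 * it from the inode hash if it was already associated with one.
 */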
static void
update_inohash(struct cached_page *hb, ino_t ino, u64_t ino_off)
{
	assert(ino != VMC_NO_INODE);
	if(hb->ino != VMC_NO_INODE) {
		int h = makehash(hb->ino, hb->ino_offset);
		rmhash_byino(hb, &cache_hash_byino[h]);
	}
	hb->ino = ino;
	hb->ino_offset = ino_off;
	addcache_byino(hb);
}

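/* Look up a page by device and device offset.  If the caller supplies
 * an inode association the page does not have yet, record it as a
 * side effect; optionally touch the page in the LRU.
 */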
struct cached_page *
find_cached_page_bydev(dev_t dev, u64_t dev_off, ino_t ino, u64_t ino_off,
	int touchlru)
{
	struct cached_page *hb;

	for(hb = cache_hash_bydev[makehash(dev, dev_off)]; hb; hb = hb->hash_next_dev) {
		if(hb->dev == dev && hb->dev_offset == dev_off) {
			if(ino != VMC_NO_INODE) {
				if(hb->ino != ino || hb->ino_offset != ino_off) {
					update_inohash(hb, ino, ino_off);
				}
			}

			if(touchlru) cache_lru_touch(hb);

			return hb;
		}
	}

	return NULL;
}

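/* Look up a page by inode and inode offset; optionally touch it in
 * the LRU.
 */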
struct cached_page *
find_cached_page_byino(dev_t dev, ino_t ino, u64_t ino_off, int touchlru)
{
	struct cached_page *hb;

	assert(ino != VMC_NO_INODE);
	assert(dev != NO_DEV);

	for(hb = cache_hash_byino[makehash(ino, ino_off)]; hb; hb = hb->hash_next_ino) {
		if(hb->dev == dev && hb->ino == ino && hb->ino_offset == ino_off) {
			if(touchlru) cache_lru_touch(hb);

			return hb;
		}
	}

	return NULL;
}

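/* Add a page to the cache: hash it by device, and by inode if one is
 * given, put it on the LRU, and take a reference to the underlying
 * physical block.
 */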
int addcache(dev_t dev, u64_t dev_off, ino_t ino, u64_t ino_off, int flags,
	struct phys_block *pb)
{
	int hv_dev;
	struct cached_page *hb;

	if(pb->flags & PBF_INCACHE) {
		printf("VM: already in cache\n");
		return EINVAL;
	}

	if(!SLABALLOC(hb)) {
		printf("VM: no memory for cache node\n");
		return ENOMEM;
	}

	assert(dev != NO_DEV);
#if CACHE_SANITY
	assert(!find_cached_page_bydev(dev, dev_off, ino, ino_off, 0));
#endif

	hb->dev = dev;
	hb->dev_offset = dev_off;
	hb->ino = ino;
	hb->ino_offset = ino_off;
	hb->flags = flags & VMSF_ONCE;
	hb->page = pb;
	hb->page->refcount++;	/* block also referenced by cache now */
	hb->page->flags |= PBF_INCACHE;

	hv_dev = makehash(dev, dev_off);

	hb->hash_next_dev = cache_hash_bydev[hv_dev];
	cache_hash_bydev[hv_dev] = hb;

	if(hb->ino != VMC_NO_INODE)
		addcache_byino(hb);

	lru_add(hb);

	return OK;
}

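/* Remove a page from the cache, dropping the cache's reference to
 * the underlying physical block and freeing the block if that was
 * the last reference.
 */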
void rmcache(struct cached_page *cp)
{
	struct phys_block *pb = cp->page;
	int hv_dev = makehash(cp->dev, cp->dev_offset);

	assert(cp->page->flags & PBF_INCACHE);

	cp->page->flags &= ~PBF_INCACHE;

	rmhash_bydev(cp, &cache_hash_bydev[hv_dev]);
	if(cp->ino != VMC_NO_INODE) {
		int hv_ino = makehash(cp->ino, cp->ino_offset);
		rmhash_byino(cp, &cache_hash_byino[hv_ino]);
	}

	assert(cp->page->refcount >= 1);
	cp->page->refcount--;

	lru_rm(cp);

	if(pb->refcount == 0) {
		assert(pb->phys != MAP_NONE);
		free_mem(ABS2CLICK(pb->phys), 1);
		SLABFREE(pb);
	}

	SLABFREE(cp);
}

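/* Walk the LRU list from oldest to newest, evicting pages that are
 * referenced only by the cache, until 'pages' pages have been freed
 * or the list is exhausted.  Return the number of pages freed.
 */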
int cache_freepages(int pages)
{
	struct cached_page *cp, *newercp;
	int freed = 0;
	int oldsteps = 0;
	int skips = 0;

	for(cp = lru_oldest; cp && freed < pages; cp = newercp) {
		newercp = cp->newer;
		assert(cp->page->refcount >= 1);
		if(cp->page->refcount == 1) {
			rmcache(cp);
			freed++;
			skips = 0;
		} else skips++;
		oldsteps++;
	}

	return freed;
}

/*
 * Remove all pages that are associated with the given device.
 */
void
clear_cache_bydev(dev_t dev)
{
	struct cached_page *cp, *ncp;
	int h;

	for (h = 0; h < HASHSIZE; h++) {
		for (cp = cache_hash_bydev[h]; cp != NULL; cp = ncp) {
			ncp = cp->hash_next_dev;

			if (cp->dev == dev)
				rmcache(cp);
		}
	}
}

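/* Report the number of cached pages in the VM statistics structure. */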
void get_stats_info(struct vm_stats_info *vsi)
{
	vsi->vsi_cached = cached_pages;
}