1 /*	$OpenBSD: uvm_amap.c,v 1.89 2021/03/26 13:40:05 mpi Exp $	*/
2 /*	$NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $	*/
3 
4 /*
5  * Copyright (c) 1997 Charles D. Cranor and Washington University.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * uvm_amap.c: amap operations
31  *
32  * this file contains functions that perform operations on amaps.  see
33  * uvm_amap.h for a brief explanation of the role of amaps in uvm.
34  */
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/malloc.h>
39 #include <sys/kernel.h>
40 #include <sys/pool.h>
41 #include <sys/atomic.h>
42 
43 #include <uvm/uvm.h>
44 #include <uvm/uvm_swap.h>
45 
46 /*
47  * pools for allocation of vm_amap structures.  note that in order to
48  * avoid an endless loop, the amap pool's allocator cannot allocate
49  * memory from an amap (it currently goes through the kernel uobj, so
50  * we are ok).
51  */
52 
53 struct pool uvm_amap_pool;
54 struct pool uvm_small_amap_pool[UVM_AMAP_CHUNK];
55 struct pool uvm_amap_chunk_pool;
56 
57 LIST_HEAD(, vm_amap) amap_list;
58 struct rwlock amap_list_lock = RWLOCK_INITIALIZER("amaplstlk");
59 #define amap_lock_list()	rw_enter_write(&amap_list_lock)
60 #define amap_unlock_list()	rw_exit_write(&amap_list_lock)
61 
62 static char amap_small_pool_names[UVM_AMAP_CHUNK][9];
63 
64 /*
65  * local functions
66  */
67 
68 static struct vm_amap *amap_alloc1(int, int, int);
69 static inline void amap_list_insert(struct vm_amap *);
70 static inline void amap_list_remove(struct vm_amap *);
71 
72 struct vm_amap_chunk *amap_chunk_get(struct vm_amap *, int, int, int);
73 void amap_chunk_free(struct vm_amap *, struct vm_amap_chunk *);
74 
75 /*
76  * if we enable PPREF, then we have a couple of extra functions that
77  * we need to prototype here...
78  */
79 
80 #ifdef UVM_AMAP_PPREF
81 
82 #define PPREF_NONE ((int *) -1)	/* not using ppref */
83 
84 void	amap_pp_adjref(struct vm_amap *, int, vsize_t, int);
85 void	amap_pp_establish(struct vm_amap *);
86 void	amap_wiperange_chunk(struct vm_amap *, struct vm_amap_chunk *, int,
87 	    int);
88 void	amap_wiperange(struct vm_amap *, int, int);
89 
90 #endif	/* UVM_AMAP_PPREF */
91 
92 static inline void
93 amap_list_insert(struct vm_amap *amap)
94 {
95 	amap_lock_list();
96 	LIST_INSERT_HEAD(&amap_list, amap, am_list);
97 	amap_unlock_list();
98 }
99 
100 static inline void
101 amap_list_remove(struct vm_amap *amap)
102 {
103 	amap_lock_list();
104 	LIST_REMOVE(amap, am_list);
105 	amap_unlock_list();
106 }
107 
108 /*
109  * amap_chunk_get: look up the chunk for a slot. if create is non-zero,
110  * the chunk is created if it does not yet exist.
111  *
112  * => returns the chunk on success or NULL on error
113  */
114 struct vm_amap_chunk *
115 amap_chunk_get(struct vm_amap *amap, int slot, int create, int waitf)
116 {
117 	int bucket = UVM_AMAP_BUCKET(amap, slot);
118 	int baseslot = AMAP_BASE_SLOT(slot);
119 	int n;
120 	struct vm_amap_chunk *chunk, *newchunk, *pchunk = NULL;
121 
122 	if (UVM_AMAP_SMALL(amap))
123 		return &amap->am_small;
124 
125 	for (chunk = amap->am_buckets[bucket]; chunk != NULL;
126 	    chunk = TAILQ_NEXT(chunk, ac_list)) {
127 		if (UVM_AMAP_BUCKET(amap, chunk->ac_baseslot) != bucket)
128 			break;
129 		if (chunk->ac_baseslot == baseslot)
130 			return chunk;
131 		pchunk = chunk;
132 	}
133 	if (!create)
134 		return NULL;
135 
136 	if (amap->am_nslot - baseslot >= UVM_AMAP_CHUNK)
137 		n = UVM_AMAP_CHUNK;
138 	else
139 		n = amap->am_nslot - baseslot;
140 
141 	newchunk = pool_get(&uvm_amap_chunk_pool, waitf | PR_ZERO);
142 	if (newchunk == NULL)
143 		return NULL;
144 
145 	if (pchunk == NULL) {
146 		TAILQ_INSERT_TAIL(&amap->am_chunks, newchunk, ac_list);
147 		KASSERT(amap->am_buckets[bucket] == NULL);
148 		amap->am_buckets[bucket] = newchunk;
149 	} else
150 		TAILQ_INSERT_AFTER(&amap->am_chunks, pchunk, newchunk,
151 		    ac_list);
152 
153 	amap->am_ncused++;
154 	newchunk->ac_baseslot = baseslot;
155 	newchunk->ac_nslot = n;
156 	return newchunk;
157 }
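
/*
 * Illustrative sketch (not part of the original code; the helper name is
 * made up): this mirrors how amap_lookup() below uses amap_chunk_get().
 * Given an absolute slot number, the chunk (if any) holds the anon at the
 * slot's offset within that chunk.  The amap is assumed to be locked.
 */
static inline struct vm_anon *
amap_chunk_anon_sketch(struct vm_amap *amap, int slot)
{
	struct vm_amap_chunk *chunk;

	chunk = amap_chunk_get(amap, slot, 0, PR_NOWAIT);
	if (chunk == NULL)
		return NULL;			/* no chunk, so no anon here */
	return chunk->ac_anon[UVM_AMAP_SLOTIDX(slot)];
}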
158 
159 void
160 amap_chunk_free(struct vm_amap *amap, struct vm_amap_chunk *chunk)
161 {
162 	int bucket = UVM_AMAP_BUCKET(amap, chunk->ac_baseslot);
163 	struct vm_amap_chunk *nchunk;
164 
165 	if (UVM_AMAP_SMALL(amap))
166 		return;
167 
168 	nchunk = TAILQ_NEXT(chunk, ac_list);
169 	TAILQ_REMOVE(&amap->am_chunks, chunk, ac_list);
170 	if (amap->am_buckets[bucket] == chunk) {
171 		if (nchunk != NULL &&
172 		    UVM_AMAP_BUCKET(amap, nchunk->ac_baseslot) == bucket)
173 			amap->am_buckets[bucket] = nchunk;
174 		else
175 			amap->am_buckets[bucket] = NULL;
176 
177 	}
178 	pool_put(&uvm_amap_chunk_pool, chunk);
179 	amap->am_ncused--;
180 }
181 
182 #ifdef UVM_AMAP_PPREF
183 /*
184  * what is ppref?   ppref is an _optional_ amap feature which is used
185  * to keep track of reference counts on a per-page basis.  it is enabled
186  * when UVM_AMAP_PPREF is defined.
187  *
188  * when enabled, an array of ints is allocated for the pprefs.  this
189  * array is allocated only when a partial reference is added to the
190  * map (either by unmapping part of the amap, or gaining a reference
191  * to only a part of an amap).  if the allocation of the array fails
192  * (M_NOWAIT), then we set the array pointer to PPREF_NONE to indicate
193  * that we tried to do ppref's but couldn't alloc the array so just
194  * give up (after all, this is an optional feature!).
195  *
196  * the array is divided into page sized "chunks."   for chunks of length 1,
197  * the chunk reference count plus one is stored in that chunk's slot.
198  * for chunks of length > 1 the first slot contains (the reference count
199  * plus one) * -1.    [the negative value indicates that the length is
200  * greater than one.]   the second slot of the chunk contains the length
201  * of the chunk.   here is an example:
202  *
203  * actual REFS:  2  2  2  2  3  1  1  0  0  0  4  4  0  1  1  1
204  *       ppref: -3  4  x  x  4 -2  2 -1  3  x -5  2  1 -2  3  x
205  *              <----------><-><----><-------><----><-><------->
206  * (x = don't care)
207  *
208  * this allows one int to contain the ref count for the whole
209  * chunk.    note that the "plus one" part is needed because a reference
210  * count of zero is neither positive nor negative (we need a way to tell
211  * whether we've got one zero or a bunch of them).
212  *
213  * here are some in-line functions to help us.
214  */
215 
216 /*
217  * pp_getreflen: get the reference and length for a specific offset
218  *
219  * => ppref's amap must be locked
220  */
221 static inline void
222 pp_getreflen(int *ppref, int offset, int *refp, int *lenp)
223 {
224 
225 	if (ppref[offset] > 0) {		/* chunk size must be 1 */
226 		*refp = ppref[offset] - 1;	/* don't forget to adjust */
227 		*lenp = 1;
228 	} else {
229 		*refp = (ppref[offset] * -1) - 1;
230 		*lenp = ppref[offset+1];
231 	}
232 }
233 
234 /*
235  * pp_setreflen: set the reference and length for a specific offset
236  *
237  * => ppref's amap must be locked
238  */
239 static inline void
240 pp_setreflen(int *ppref, int offset, int ref, int len)
241 {
242 	if (len == 1) {
243 		ppref[offset] = ref + 1;
244 	} else {
245 		ppref[offset] = (ref + 1) * -1;
246 		ppref[offset+1] = len;
247 	}
248 }
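
/*
 * Worked example (illustrative only; this helper is not referenced
 * anywhere): encode the first three runs of the table in the big comment
 * above -- refs {2,2,2,2}, {3}, {1,1} -- and decode the first one again.
 * Length-1 runs store ref+1 directly; longer runs store -(ref+1) followed
 * by the run length, which is exactly what pp_getreflen() expects.
 */
static inline void
amap_ppref_sketch(void)
{
	int ppref[7] = { 0 };
	int ref, len;

	pp_setreflen(ppref, 0, 2, 4);	/* slots 0-3, ref 2: {-3, 4, x, x} */
	pp_setreflen(ppref, 4, 3, 1);	/* slot 4, ref 3: {4} */
	pp_setreflen(ppref, 5, 1, 2);	/* slots 5-6, ref 1: {-2, 2} */

	pp_getreflen(ppref, 0, &ref, &len);	/* yields ref == 2, len == 4 */
}
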
249 #endif /* UVM_AMAP_PPREF */
250 
251 /*
252  * amap_init: called at boot time to init global amap data structures
253  */
254 
255 void
256 amap_init(void)
257 {
258 	int i;
259 	size_t size;
260 
261 	/* Initialize the vm_amap pool. */
262 	pool_init(&uvm_amap_pool, sizeof(struct vm_amap),
263 	    0, IPL_MPFLOOR, PR_WAITOK, "amappl", NULL);
264 	pool_sethiwat(&uvm_amap_pool, 4096);
265 
266 	/* initialize small amap pools */
267 	for (i = 0; i < nitems(uvm_small_amap_pool); i++) {
268 		snprintf(amap_small_pool_names[i],
269 		    sizeof(amap_small_pool_names[0]), "amappl%d", i + 1);
270 		size = offsetof(struct vm_amap, am_small.ac_anon) +
271 		    (i + 1) * sizeof(struct vm_anon *);
272 		pool_init(&uvm_small_amap_pool[i], size, 0, IPL_MPFLOOR,
273 		    PR_WAITOK, amap_small_pool_names[i], NULL);
274 	}
275 
276 	pool_init(&uvm_amap_chunk_pool, sizeof(struct vm_amap_chunk) +
277 	    UVM_AMAP_CHUNK * sizeof(struct vm_anon *),
278 	    0, IPL_MPFLOOR, PR_WAITOK, "amapchunkpl", NULL);
279 	pool_sethiwat(&uvm_amap_chunk_pool, 4096);
280 }
281 
282 /*
283  * amap_alloc1: allocate an amap, but do not initialise the overlay.
284  *
285  * => Note: lock is not set.
286  */
287 static inline struct vm_amap *
288 amap_alloc1(int slots, int waitf, int lazyalloc)
289 {
290 	struct vm_amap *amap;
291 	struct vm_amap_chunk *chunk, *tmp;
292 	int chunks, log_chunks, chunkperbucket = 1, hashshift = 0;
293 	int buckets, i, n;
294 	int pwaitf = (waitf & M_WAITOK) ? PR_WAITOK : PR_NOWAIT;
295 
296 	KASSERT(slots > 0);
297 
298 	/*
299 	 * Cast to unsigned so that rounding up cannot cause integer overflow
300 	 * if slots is large.
301 	 */
302 	chunks = roundup((unsigned int)slots, UVM_AMAP_CHUNK) / UVM_AMAP_CHUNK;
303 
304 	if (lazyalloc) {
305 		/*
306 		 * Basically, the amap is a hash map where the number of
307 		 * buckets is fixed. We select the number of buckets using the
308 		 * following strategy:
309 		 *
310 		 * 1. The maximal number of entries to search in a bucket upon
311 		 * a collision should be less than or equal to
312 		 * log2(slots / UVM_AMAP_CHUNK). This is the worst-case number
313 		 * of lookups we would have if we could chunk the amap. The
314 		 * log2(n) comes from the fact that amaps are chunked by
315 		 * splitting up their vm_map_entries and organizing those
316 		 * in a binary search tree.
317 		 *
318 		 * 2. The maximal number of entries in a bucket must be a
319 		 * power of two.
320 		 *
321 		 * The maximal number of entries per bucket is used to hash
322 		 * a slot to a bucket.
323 		 *
324 		 * In the future, this strategy could be refined to make it
325 		 * even harder, or impossible, for the total amount of KVA
326 		 * needed for the hash buckets of all amaps to exceed the
327 		 * maximal amount of KVA memory reserved for amaps.
328 		 */
329 		for (log_chunks = 1; (chunks >> log_chunks) > 0; log_chunks++)
330 			continue;
331 
332 		chunkperbucket = 1 << hashshift;
333 		while (chunkperbucket + 1 < log_chunks) {
334 			hashshift++;
335 			chunkperbucket = 1 << hashshift;
336 		}
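
		/*
		 * Worked example of the sizing above (illustrative,
		 * assuming UVM_AMAP_CHUNK is 16): for slots = 10000,
		 * chunks = 625 and the for loop above leaves
		 * log_chunks = 10 (625 >> 10 == 0).  The while loop then
		 * raises hashshift until chunkperbucket + 1 >= 10, ending
		 * with hashshift = 4 and chunkperbucket = 16, which later
		 * yields howmany(625, 16) = 40 hash buckets.
		 */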
337 	}
338 
339 	if (slots > UVM_AMAP_CHUNK)
340 		amap = pool_get(&uvm_amap_pool, pwaitf);
341 	else
342 		amap = pool_get(&uvm_small_amap_pool[slots - 1],
343 		    pwaitf | PR_ZERO);
344 	if (amap == NULL)
345 		return NULL;
346 
347 	amap->am_lock = NULL;
348 	amap->am_ref = 1;
349 	amap->am_flags = 0;
350 #ifdef UVM_AMAP_PPREF
351 	amap->am_ppref = NULL;
352 #endif
353 	amap->am_nslot = slots;
354 	amap->am_nused = 0;
355 
356 	if (UVM_AMAP_SMALL(amap)) {
357 		amap->am_small.ac_nslot = slots;
358 		return amap;
359 	}
360 
361 	amap->am_ncused = 0;
362 	TAILQ_INIT(&amap->am_chunks);
363 	amap->am_hashshift = hashshift;
364 	amap->am_buckets = NULL;
365 
366 	buckets = howmany(chunks, chunkperbucket);
367 	amap->am_buckets = mallocarray(buckets, sizeof(*amap->am_buckets),
368 	    M_UVMAMAP, waitf | (lazyalloc ? M_ZERO : 0));
369 	if (amap->am_buckets == NULL)
370 		goto fail1;
371 	amap->am_nbuckets = buckets;
372 
373 	if (!lazyalloc) {
374 		for (i = 0; i < buckets; i++) {
375 			if (i == buckets - 1) {
376 				n = slots % UVM_AMAP_CHUNK;
377 				if (n == 0)
378 					n = UVM_AMAP_CHUNK;
379 			} else
380 				n = UVM_AMAP_CHUNK;
381 
382 			chunk = pool_get(&uvm_amap_chunk_pool,
383 			    PR_ZERO | pwaitf);
384 			if (chunk == NULL)
385 				goto fail1;
386 
387 			amap->am_buckets[i] = chunk;
388 			amap->am_ncused++;
389 			chunk->ac_baseslot = i * UVM_AMAP_CHUNK;
390 			chunk->ac_nslot = n;
391 			TAILQ_INSERT_TAIL(&amap->am_chunks, chunk, ac_list);
392 		}
393 	}
394 
395 	return amap;
396 
397 fail1:
398 	free(amap->am_buckets, M_UVMAMAP, buckets * sizeof(*amap->am_buckets));
399 	TAILQ_FOREACH_SAFE(chunk, &amap->am_chunks, ac_list, tmp)
400 		pool_put(&uvm_amap_chunk_pool, chunk);
401 	pool_put(&uvm_amap_pool, amap);
402 	return NULL;
403 }
404 
405 static void
406 amap_lock_alloc(struct vm_amap *amap)
407 {
408 	rw_obj_alloc(&amap->am_lock, "amaplk");
409 }
410 
411 /*
412  * amap_alloc: allocate an amap to manage "sz" bytes of anonymous VM
413  *
414  * => caller should ensure sz is a multiple of PAGE_SIZE
415  * => reference count to new amap is set to one
416  * => new amap is returned unlocked
417  */
418 struct vm_amap *
419 amap_alloc(vaddr_t sz, int waitf, int lazyalloc)
420 {
421 	struct vm_amap *amap;
422 	size_t slots;
423 
424 	AMAP_B2SLOT(slots, sz);		/* load slots */
425 	if (slots > INT_MAX)
426 		return NULL;
427 
428 	amap = amap_alloc1(slots, waitf, lazyalloc);
429 	if (amap != NULL) {
430 		amap_lock_alloc(amap);
431 		amap_list_insert(amap);
432 	}
433 
434 	return amap;
435 }
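
/*
 * Illustrative sketch (not part of the original code): a minimal
 * allocate-then-release cycle for a standalone amap covering four pages.
 * Real callers normally attach the amap to a map entry (see amap_copy()
 * below) instead of handling it directly like this.
 */
static inline void
amap_lifecycle_sketch(void)
{
	struct vm_amap *amap;

	amap = amap_alloc(4 * PAGE_SIZE, M_WAITOK, 0);
	if (amap == NULL)
		return;
	/* ... amap_add()/amap_lookup() would go here, with the amap locked */
	amap_unref(amap, 0, 4, 1);	/* drop the only reference; frees it */
}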
436 
437 
438 /*
439  * amap_free: free an amap
440  *
441  * => the amap must be unlocked
442  * => the amap should have a zero reference count and be empty
443  */
444 void
445 amap_free(struct vm_amap *amap)
446 {
447 	struct vm_amap_chunk *chunk, *tmp;
448 
449 	KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
450 	KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);
451 
452 	if (amap->am_lock != NULL) {
453 		KASSERT(amap->am_lock == NULL || !rw_write_held(amap->am_lock));
454 		rw_obj_free(amap->am_lock);
455 	}
456 
457 #ifdef UVM_AMAP_PPREF
458 	if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
459 		free(amap->am_ppref, M_UVMAMAP, amap->am_nslot * sizeof(int));
460 #endif
461 
462 	if (UVM_AMAP_SMALL(amap))
463 		pool_put(&uvm_small_amap_pool[amap->am_nslot - 1], amap);
464 	else {
465 		TAILQ_FOREACH_SAFE(chunk, &amap->am_chunks, ac_list, tmp)
466 		    pool_put(&uvm_amap_chunk_pool, chunk);
467 		free(amap->am_buckets, M_UVMAMAP,
468 		    amap->am_nbuckets * sizeof(*amap->am_buckets));
469 		pool_put(&uvm_amap_pool, amap);
470 	}
471 }
472 
473 /*
474  * amap_wipeout: wipeout all anon's in an amap; then free the amap!
475  *
476  * => Called from amap_unref(), when reference count drops to zero.
477  * => amap must be locked.
478  */
479 void
480 amap_wipeout(struct vm_amap *amap)
481 {
482 	int slot;
483 	struct vm_anon *anon;
484 	struct vm_amap_chunk *chunk;
485 	struct pglist pgl;
486 
487 	KASSERT(rw_write_held(amap->am_lock));
488 	KASSERT(amap->am_ref == 0);
489 
490 	if (__predict_false((amap->am_flags & AMAP_SWAPOFF) != 0)) {
491 		/*
492 		 * Note: amap_swap_off() will call us again.
493 		 */
494 		amap_unlock(amap);
495 		return;
496 	}
497 
498 	TAILQ_INIT(&pgl);
499 	amap_list_remove(amap);
500 
501 	AMAP_CHUNK_FOREACH(chunk, amap) {
502 		int i, refs, map = chunk->ac_usedmap;
503 
504 		for (i = ffs(map); i != 0; i = ffs(map)) {
505 			slot = i - 1;
506 			map ^= 1 << slot;
507 			anon = chunk->ac_anon[slot];
508 
509 			if (anon == NULL || anon->an_ref == 0)
510 				panic("amap_wipeout: corrupt amap");
511 			KASSERT(anon->an_lock == amap->am_lock);
512 
513 			/*
514 			 * Drop the reference.
515 			 */
516 			refs = --anon->an_ref;
517 			if (refs == 0) {
518 				uvm_anfree_list(anon, &pgl);
519 			}
520 		}
521 	}
522 	/* free the pages */
523 	uvm_pglistfree(&pgl);
524 
525 	/*
526 	 * Finally, destroy the amap.
527 	 */
528 	amap->am_ref = 0;	/* ... already zero; checked above */
529 	amap->am_nused = 0;
530 	amap_unlock(amap);
531 	amap_free(amap);
532 }
533 
534 /*
535  * amap_copy: ensure that a map entry's "needs_copy" flag is false
536  *	by copying the amap if necessary.
537  *
538  * => an entry with a null amap pointer will get a new (blank) one.
539  * => the map that the map entry belongs to must be locked by caller.
540  * => the amap currently attached to "entry" (if any) must be unlocked.
541  * => if canchunk is true, then we may clip the entry into a chunk
542  * => "startva" and "endva" are used only if canchunk is true.  they are
543  *     used to limit chunking (e.g. if you have a large space that you
544  *     know you are going to need to allocate amaps for, there is no point
545  *     in allowing that to be chunked)
546  */
547 
548 void
549 amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
550     boolean_t canchunk, vaddr_t startva, vaddr_t endva)
551 {
552 	struct vm_amap *amap, *srcamap;
553 	int slots, lcv, lazyalloc = 0;
554 	vaddr_t chunksize;
555 	int i, j, k, n, srcslot;
556 	struct vm_amap_chunk *chunk = NULL, *srcchunk = NULL;
557 	struct vm_anon *anon;
558 
559 	KASSERT(map != kernel_map);		/* we use sleeping locks */
560 
561 	/*
562 	 * Is there an amap to copy?  If not, create one.
563 	 */
564 	if (entry->aref.ar_amap == NULL) {
565 		/*
566 		 * Check to see if we have a large amap that we can
567 		 * chunk.  We align startva/endva to chunk-sized
568 		 * boundaries and then clip to them.
569 		 *
570 		 * If we cannot chunk the amap, allocate it in a way
571 		 * that makes it grow or shrink dynamically with
572 		 * the number of slots.
573 		 */
574 		if (atop(entry->end - entry->start) >= UVM_AMAP_LARGE) {
575 			if (canchunk) {
576 				/* convert slots to bytes */
577 				chunksize = UVM_AMAP_CHUNK << PAGE_SHIFT;
578 				startva = (startva / chunksize) * chunksize;
579 				endva = roundup(endva, chunksize);
580 				UVM_MAP_CLIP_START(map, entry, startva);
581 				/* watch out for endva wrap-around! */
582 				if (endva >= startva)
583 					UVM_MAP_CLIP_END(map, entry, endva);
584 			} else
585 				lazyalloc = 1;
586 		}
587 
588 		entry->aref.ar_pageoff = 0;
589 		entry->aref.ar_amap = amap_alloc(entry->end - entry->start,
590 		    waitf, lazyalloc);
591 		if (entry->aref.ar_amap != NULL)
592 			entry->etype &= ~UVM_ET_NEEDSCOPY;
593 		return;
594 	}
595 
596 	/*
597 	 * First check and see if we are the only map entry referencing
598 	 * the amap we currently have.  If so, then just take it over instead
599 	 * of copying it.  Note that we are reading am_ref without the lock
600 	 * held, as the value can only be one if we have the only reference
601 	 * to the amap (via our locked map).  If the value is greater than
602 	 * one, then allocate a new amap and re-check the value.
603 	 */
604 	if (entry->aref.ar_amap->am_ref == 1) {
605 		entry->etype &= ~UVM_ET_NEEDSCOPY;
606 		return;
607 	}
608 
609 	/*
610 	 * Allocate a new amap (note: not initialised, etc).
611 	 */
612 	AMAP_B2SLOT(slots, entry->end - entry->start);
613 	if (!UVM_AMAP_SMALL(entry->aref.ar_amap) &&
614 	    entry->aref.ar_amap->am_hashshift != 0)
615 		lazyalloc = 1;
616 	amap = amap_alloc1(slots, waitf, lazyalloc);
617 	if (amap == NULL)
618 		return;
619 	srcamap = entry->aref.ar_amap;
620 
621 	amap_lock(srcamap);
622 
623 	/*
624 	 * Re-check the reference count with the lock held.  If it has
625 	 * dropped to one - we can take over the existing map.
626 	 */
627 	if (srcamap->am_ref == 1) {
628 		/* Just take over the existing amap. */
629 		entry->etype &= ~UVM_ET_NEEDSCOPY;
630 		amap_unlock(srcamap);
631 		/* Destroy the new (unused) amap. */
632 		amap->am_ref--;
633 		amap_free(amap);
634 		return;
635 	}
636 
637 	/*
638 	 * Copy the slots.
639 	 */
640 	for (lcv = 0; lcv < slots; lcv += n) {
641 		srcslot = entry->aref.ar_pageoff + lcv;
642 		i = UVM_AMAP_SLOTIDX(lcv);
643 		j = UVM_AMAP_SLOTIDX(srcslot);
644 		n = UVM_AMAP_CHUNK;
645 		if (i > j)
646 			n -= i;
647 		else
648 			n -= j;
649 		if (lcv + n > slots)
650 			n = slots - lcv;
651 
652 		srcchunk = amap_chunk_get(srcamap, srcslot, 0, PR_NOWAIT);
653 		if (srcchunk == NULL)
654 			continue;
655 
656 		chunk = amap_chunk_get(amap, lcv, 1, PR_NOWAIT);
657 		if (chunk == NULL) {
658 			amap_unlock(srcamap);
659 			amap->am_ref = 0;
660 			amap_wipeout(amap);
661 			return;
662 		}
663 
664 		for (k = 0; k < n; i++, j++, k++) {
665 			chunk->ac_anon[i] = anon = srcchunk->ac_anon[j];
666 			if (anon == NULL)
667 				continue;
668 
669 			KASSERT(anon->an_lock == srcamap->am_lock);
670 			KASSERT(anon->an_ref > 0);
671 			chunk->ac_usedmap |= (1 << i);
672 			anon->an_ref++;
673 			amap->am_nused++;
674 		}
675 	}
676 
677 	/*
678 	 * Drop our reference to the old amap (srcamap) and unlock.
679 	 * Since the reference count on srcamap is greater than one
680 	 * (we checked above), it cannot drop to zero while it is locked.
681 	 */
682 	srcamap->am_ref--;
683 	KASSERT(srcamap->am_ref > 0);
684 
685 	if (srcamap->am_ref == 1 && (srcamap->am_flags & AMAP_SHARED) != 0)
686 		srcamap->am_flags &= ~AMAP_SHARED;   /* clear shared flag */
687 #ifdef UVM_AMAP_PPREF
688 	if (srcamap->am_ppref && srcamap->am_ppref != PPREF_NONE) {
689 		amap_pp_adjref(srcamap, entry->aref.ar_pageoff,
690 		    (entry->end - entry->start) >> PAGE_SHIFT, -1);
691 	}
692 #endif
693 
694 	/*
695 	 * If we referenced any anons, then share the source amap's lock.
696 	 * Otherwise, we have nothing in common, so allocate a new one.
697 	 */
698 	KASSERT(amap->am_lock == NULL);
699 	if (amap->am_nused != 0) {
700 		amap->am_lock = srcamap->am_lock;
701 		rw_obj_hold(amap->am_lock);
702 	}
703 	amap_unlock(srcamap);
704 
705 	if (amap->am_lock == NULL)
706 		amap_lock_alloc(amap);
707 
708 	/*
709 	 * Install new amap.
710 	 */
711 	entry->aref.ar_pageoff = 0;
712 	entry->aref.ar_amap = amap;
713 	entry->etype &= ~UVM_ET_NEEDSCOPY;
714 
715 	amap_list_insert(amap);
716 }
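
/*
 * Illustrative sketch (not part of the original code): how map code
 * typically resolves a needs-copy entry before allowing writes to it.
 * The map is assumed to be locked by the caller, as required above, and
 * the entry's own bounds are used to limit chunking.
 */
static inline void
amap_copy_sketch(struct vm_map *map, struct vm_map_entry *entry)
{
	if (UVM_ET_ISNEEDSCOPY(entry))
		amap_copy(map, entry, M_WAITOK, TRUE,
		    entry->start, entry->end);
}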
717 
718 /*
719  * amap_cow_now: resolve all copy-on-write faults in an amap now for fork(2)
720  *
721  *	called during fork(2) when the parent process has a wired map
722  *	entry.   in that case we want to avoid write-protecting pages
723  *	in the parent's map (e.g. like what you'd do for a COW page)
724  *	so we resolve the COW here.
725  *
726  * => assume parent's entry was wired, thus all pages are resident.
727  * => the parent and child vm_map must both be locked.
728  * => caller passes child's map/entry in to us
729  * => XXXCDC: out of memory should cause fork to fail, but there is
730  *	currently no easy way to do this (needs fix)
731  */
732 
733 void
734 amap_cow_now(struct vm_map *map, struct vm_map_entry *entry)
735 {
736 	struct vm_amap *amap = entry->aref.ar_amap;
737 	int slot;
738 	struct vm_anon *anon, *nanon;
739 	struct vm_page *pg, *npg;
740 	struct vm_amap_chunk *chunk;
741 
742 	/*
743 	 * note that if we unlock the amap then we must ReStart the chunk
744 	 * loop because some other process could reorder the anons in the
745 	 * chunk's ac_anon[] arrays on us while the lock is dropped.
746 	 */
747 ReStart:
748 	amap_lock(amap);
749 	AMAP_CHUNK_FOREACH(chunk, amap) {
750 		int i, map = chunk->ac_usedmap;
751 
752 		for (i = ffs(map); i != 0; i = ffs(map)) {
753 			slot = i - 1;
754 			map ^= 1 << slot;
755 			anon = chunk->ac_anon[slot];
756 			pg = anon->an_page;
757 			KASSERT(anon->an_lock == amap->am_lock);
758 
759 			/*
760 			 * The old page must be resident since the parent is
761 			 * wired.
762 			 */
763 			KASSERT(pg != NULL);
764 
765 			/*
766 			 * if the anon ref count is one, we are safe (the child
767 			 * has exclusive access to the page).
768 			 */
769 			if (anon->an_ref <= 1)
770 				continue;
771 
772 			/*
773 			 * If the page is busy, then we have to unlock, wait for
774 			 * it and then restart.
775 			 */
776 			if (pg->pg_flags & PG_BUSY) {
777 				atomic_setbits_int(&pg->pg_flags, PG_WANTED);
778 				rwsleep_nsec(pg, amap->am_lock, PVM | PNORELOCK,
779 				    "cownow", INFSLP);
780 				goto ReStart;
781 			}
782 
783 			/*
784 			 * Perform a copy-on-write.
785 			 * First - get a new anon and a page.
786 			 */
787 			nanon = uvm_analloc();
788 			if (nanon != NULL) {
789 				/* the new anon will share the amap's lock */
790 				nanon->an_lock = amap->am_lock;
791 				npg = uvm_pagealloc(NULL, 0, nanon, 0);
792 			} else
793 				npg = NULL;	/* XXX: quiet gcc warning */
794 
795 			if (nanon == NULL || npg == NULL) {
796 				/* out of memory */
797 				amap_unlock(amap);
798 				if (nanon != NULL) {
799 					nanon->an_lock = NULL;
800 					nanon->an_ref--;
801 					KASSERT(nanon->an_ref == 0);
802 					uvm_anfree(nanon);
803 				}
804 				uvm_wait("cownowpage");
805 				goto ReStart;
806 			}
807 
808 			/*
809 			 * Copy the data and replace anon with the new one.
810 			 * Also, set up its lock (it shares the amap's lock).
811 			 */
812 			uvm_pagecopy(pg, npg);
813 			anon->an_ref--;
814 			KASSERT(anon->an_ref > 0);
815 			chunk->ac_anon[slot] = nanon;
816 
817 			/*
818 			 * Drop PG_BUSY on new page.  Since its owner was write
819 			 * locked all this time - it cannot be PG_RELEASED or
820 			 * PG_WANTED.
821 			 */
822 			atomic_clearbits_int(&npg->pg_flags, PG_BUSY|PG_FAKE);
823 			UVM_PAGE_OWN(npg, NULL);
824 			uvm_lock_pageq();
825 			uvm_pageactivate(npg);
826 			uvm_unlock_pageq();
827 		}
828 	}
829 	amap_unlock(amap);
830 }
831 
832 /*
833  * amap_splitref: split a single reference into two separate references
834  *
835  * => called from uvm_map's clip routines
836  * => origref's map should be locked
837  * => origref->ar_amap should be unlocked (we will lock)
838  */
839 void
840 amap_splitref(struct vm_aref *origref, struct vm_aref *splitref, vaddr_t offset)
841 {
842 	struct vm_amap *amap = origref->ar_amap;
843 	int leftslots;
844 
845 	KASSERT(splitref->ar_amap == amap);
846 	AMAP_B2SLOT(leftslots, offset);
847 	if (leftslots == 0)
848 		panic("amap_splitref: split at zero offset");
849 
850 	amap_lock(amap);
851 
852 	if (amap->am_nslot - origref->ar_pageoff - leftslots <= 0)
853 		panic("amap_splitref: map size check failed");
854 
855 #ifdef UVM_AMAP_PPREF
856 	/* Establish ppref before we add a duplicate reference to the amap. */
857 	if (amap->am_ppref == NULL)
858 		amap_pp_establish(amap);
859 #endif
860 
861 	/* Note: not a share reference. */
862 	amap->am_ref++;
863 	splitref->ar_amap = amap;
864 	splitref->ar_pageoff = origref->ar_pageoff + leftslots;
865 	amap_unlock(amap);
866 }
867 
868 #ifdef UVM_AMAP_PPREF
869 
870 /*
871  * amap_pp_establish: add a ppref array to an amap, if possible.
872  *
873  * => amap should be locked by caller
874  */
875 void
876 amap_pp_establish(struct vm_amap *amap)
877 {
878 
879 	KASSERT(rw_write_held(amap->am_lock));
880 	amap->am_ppref = mallocarray(amap->am_nslot, sizeof(int),
881 	    M_UVMAMAP, M_NOWAIT|M_ZERO);
882 
883 	if (amap->am_ppref == NULL) {
884 		/* Failure - just do not use ppref. */
885 		amap->am_ppref = PPREF_NONE;
886 		return;
887 	}
888 
889 	pp_setreflen(amap->am_ppref, 0, amap->am_ref, amap->am_nslot);
890 }
891 
892 /*
893  * amap_pp_adjref: adjust reference count to a part of an amap using the
894  * per-page reference count array.
895  *
896  * => caller must check that ppref != PPREF_NONE before calling.
897  * => map and amap must be locked.
898  */
899 void
900 amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval)
901 {
902  	int stopslot, *ppref, lcv, prevlcv;
903  	int ref, len, prevref, prevlen;
904 
905 	KASSERT(rw_write_held(amap->am_lock));
906 
907 	stopslot = curslot + slotlen;
908 	ppref = amap->am_ppref;
909  	prevlcv = 0;
910 
911 	/*
912 	 * Advance to the correct place in the array, fragment if needed.
913 	 */
914 	for (lcv = 0 ; lcv < curslot ; lcv += len) {
915 		pp_getreflen(ppref, lcv, &ref, &len);
916 		if (lcv + len > curslot) {     /* goes past start? */
917 			pp_setreflen(ppref, lcv, ref, curslot - lcv);
918 			pp_setreflen(ppref, curslot, ref, len - (curslot - lcv));
919 			len = curslot - lcv;   /* new length of entry @ lcv */
920 		}
921 		prevlcv = lcv;
922 	}
923 	if (lcv != 0)
924 		pp_getreflen(ppref, prevlcv, &prevref, &prevlen);
925 	else {
926 		/*
927 		 * Ensure that the "prevref == ref" test below always
928 		 * fails, since we are starting from the beginning of
929 		 * the ppref array; that is, there is no previous chunk.
930 		 */
931 		prevref = -1;
932 		prevlen = 0;
933 	}
934 
935 	/*
936 	 * Now adjust reference counts in range.  Merge the first
937 	 * changed entry with the last unchanged entry if possible.
938 	 */
939 	if (lcv != curslot)
940 		panic("amap_pp_adjref: overshot target");
941 
942 	for (/* lcv already set */; lcv < stopslot ; lcv += len) {
943 		pp_getreflen(ppref, lcv, &ref, &len);
944 		if (lcv + len > stopslot) {     /* goes past end? */
945 			pp_setreflen(ppref, lcv, ref, stopslot - lcv);
946 			pp_setreflen(ppref, stopslot, ref,
947 			    len - (stopslot - lcv));
948 			len = stopslot - lcv;
949 		}
950 		ref += adjval;
951 		if (ref < 0)
952 			panic("amap_pp_adjref: negative reference count");
953 		if (lcv == prevlcv + prevlen && ref == prevref) {
954 			pp_setreflen(ppref, prevlcv, ref, prevlen + len);
955 		} else {
956 			pp_setreflen(ppref, lcv, ref, len);
957 		}
958 		if (ref == 0)
959 			amap_wiperange(amap, lcv, len);
960 	}
961 
962 }
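
/*
 * Worked example (illustrative): suppose ppref encodes a single run of
 * reference count 2 over slots 0-15 and one reference is dropped on
 * slots 4-7, i.e. amap_pp_adjref(amap, 4, 4, -1).  The first loop above
 * fragments the run into {0-3: ref 2} and {4-15: ref 2}; the second loop
 * splits off {8-15: ref 2} and lowers the middle piece, leaving
 * {0-3: ref 2}, {4-7: ref 1}, {8-15: ref 2}.
 */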
963 
964 void
965 amap_wiperange_chunk(struct vm_amap *amap, struct vm_amap_chunk *chunk,
966     int slotoff, int slots)
967 {
968 	int curslot, i, map;
969 	int startbase, endbase;
970 	struct vm_anon *anon;
971 
972 	startbase = AMAP_BASE_SLOT(slotoff);
973 	endbase = AMAP_BASE_SLOT(slotoff + slots - 1);
974 
975 	map = chunk->ac_usedmap;
976 	if (startbase == chunk->ac_baseslot)
977 		map &= ~((1 << (slotoff - startbase)) - 1);
978 	if (endbase == chunk->ac_baseslot)
979 		map &= (1 << (slotoff + slots - endbase)) - 1;
980 
981 	for (i = ffs(map); i != 0; i = ffs(map)) {
982 		int refs;
983 
984 		curslot = i - 1;
985 		map ^= 1 << curslot;
986 		chunk->ac_usedmap ^= 1 << curslot;
987 		anon = chunk->ac_anon[curslot];
988 		KASSERT(anon->an_lock == amap->am_lock);
989 
990 		/* remove it from the amap */
991 		chunk->ac_anon[curslot] = NULL;
992 
993 		amap->am_nused--;
994 
995 		/* drop anon reference count */
996 		refs = --anon->an_ref;
997 		if (refs == 0) {
998 			uvm_anfree(anon);
999 		}
1000 
1001 		/*
1002 		 * done with this anon, next ...!
1003 		 */
1004 
1005 	}	/* end of 'for' loop */
1006 }
1007 
1008 /*
1009  * amap_wiperange: wipe out a range of an amap.
1010  * Note: different from amap_wipeout because the amap is kept intact.
1011  *
1012  * => Both map and amap must be locked by caller.
1013  */
1014 void
1015 amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
1016 {
1017 	int bucket, startbucket, endbucket;
1018 	struct vm_amap_chunk *chunk, *nchunk;
1019 
1020 	KASSERT(rw_write_held(amap->am_lock));
1021 
1022 	startbucket = UVM_AMAP_BUCKET(amap, slotoff);
1023 	endbucket = UVM_AMAP_BUCKET(amap, slotoff + slots - 1);
1024 
1025 	/*
1026 	 * We can either traverse the amap by am_chunks or by am_buckets.
1027 	 * Determine which way is less expensive.
1028 	 */
1029 	if (UVM_AMAP_SMALL(amap))
1030 		amap_wiperange_chunk(amap, &amap->am_small, slotoff, slots);
1031 	else if (endbucket + 1 - startbucket >= amap->am_ncused) {
1032 		TAILQ_FOREACH_SAFE(chunk, &amap->am_chunks, ac_list, nchunk) {
1033 			if (chunk->ac_baseslot + chunk->ac_nslot <= slotoff)
1034 				continue;
1035 			if (chunk->ac_baseslot >= slotoff + slots)
1036 				continue;
1037 
1038 			amap_wiperange_chunk(amap, chunk, slotoff, slots);
1039 			if (chunk->ac_usedmap == 0)
1040 				amap_chunk_free(amap, chunk);
1041 		}
1042 	} else {
1043 		for (bucket = startbucket; bucket <= endbucket; bucket++) {
1044 			for (chunk = amap->am_buckets[bucket]; chunk != NULL;
1045 			    chunk = nchunk) {
1046 				nchunk = TAILQ_NEXT(chunk, ac_list);
1047 
1048 				if (UVM_AMAP_BUCKET(amap, chunk->ac_baseslot) !=
1049 				    bucket)
1050 					break;
1051 				if (chunk->ac_baseslot + chunk->ac_nslot <=
1052 				    slotoff)
1053 					continue;
1054 				if (chunk->ac_baseslot >= slotoff + slots)
1055 					continue;
1056 
1057 				amap_wiperange_chunk(amap, chunk, slotoff,
1058 				    slots);
1059 				if (chunk->ac_usedmap == 0)
1060 					amap_chunk_free(amap, chunk);
1061 			}
1062 		}
1063 	}
1064 }
1065 
1066 #endif
1067 
1068 /*
1069  * amap_swap_off: pagein anonymous pages in amaps and drop swap slots.
1070  *
1071  * => note that we don't always traverse all anons.
1072  *    e.g. amaps being wiped out, or anons that have been released.
1073  * => return TRUE if failed.
1074  */
1075 
1076 boolean_t
1077 amap_swap_off(int startslot, int endslot)
1078 {
1079 	struct vm_amap *am;
1080 	struct vm_amap *am_next;
1081 	struct vm_amap marker;
1082 	boolean_t rv = FALSE;
1083 
1084 	amap_lock_list();
1085 	for (am = LIST_FIRST(&amap_list); am != NULL && !rv; am = am_next) {
1086 		int i, map;
1087 		struct vm_amap_chunk *chunk;
1088 
1089 		amap_lock(am);
1090 		if (am->am_nused == 0) {
1091 			amap_unlock(am);
1092 			am_next = LIST_NEXT(am, am_list);
1093 			continue;
1094 		}
1095 
1096 		LIST_INSERT_AFTER(am, &marker, am_list);
1097 		amap_unlock_list();
1098 
1099 again:
1100 		AMAP_CHUNK_FOREACH(chunk, am) {
1101 			map = chunk->ac_usedmap;
1102 
1103 			for (i = ffs(map); i != 0; i = ffs(map)) {
1104 				int swslot;
1105 				int slot = i - 1;
1106 				struct vm_anon *anon;
1107 
1108 				map ^= 1 << slot;
1109 				anon = chunk->ac_anon[slot];
1110 
1111 				swslot = anon->an_swslot;
1112 				if (swslot < startslot || endslot <= swslot) {
1113 					continue;
1114 				}
1115 
1116 				am->am_flags |= AMAP_SWAPOFF;
1117 
1118 				rv = uvm_anon_pagein(am, anon);
1119 				amap_lock(am);
1120 
1121 				am->am_flags &= ~AMAP_SWAPOFF;
1122 				if (amap_refs(am) == 0) {
1123 					amap_wipeout(am);
1124 					am = NULL;
1125 					goto nextamap;
1126 				}
1127 				if (rv)
1128 					goto nextamap;
1129 				goto again;
1130 			}
1131 		}
1132 nextamap:
1133 		if (am != NULL)
1134 			amap_unlock(am);
1135 		amap_lock_list();
1136 		am_next = LIST_NEXT(&marker, am_list);
1137 		LIST_REMOVE(&marker, am_list);
1138 	}
1139 	amap_unlock_list();
1140 
1141 	return rv;
1142 }
1143 
1144 /*
1145  * amap_lookup: look up a page in an amap.
1146  *
1147  * => amap should be locked by caller.
1148  */
1149 struct vm_anon *
1150 amap_lookup(struct vm_aref *aref, vaddr_t offset)
1151 {
1152 	int slot;
1153 	struct vm_amap *amap = aref->ar_amap;
1154 	struct vm_amap_chunk *chunk;
1155 
1156 	AMAP_B2SLOT(slot, offset);
1157 	slot += aref->ar_pageoff;
1158 	KASSERT(slot < amap->am_nslot);
1159 
1160 	chunk = amap_chunk_get(amap, slot, 0, PR_NOWAIT);
1161 	if (chunk == NULL)
1162 		return NULL;
1163 
1164 	return chunk->ac_anon[UVM_AMAP_SLOTIDX(slot)];
1165 }
1166 
1167 /*
1168  * amap_lookups: look up a range of pages in an amap.
1169  *
1170  * => amap should be locked by caller.
1171  * => XXXCDC: this interface is biased toward array-based amaps.  fix.
1172  */
1173 void
1174 amap_lookups(struct vm_aref *aref, vaddr_t offset,
1175     struct vm_anon **anons, int npages)
1176 {
1177 	int i, lcv, n, slot;
1178 	struct vm_amap *amap = aref->ar_amap;
1179 	struct vm_amap_chunk *chunk = NULL;
1180 
1181 	AMAP_B2SLOT(slot, offset);
1182 	slot += aref->ar_pageoff;
1183 
1184 	KASSERT((slot + (npages - 1)) < amap->am_nslot);
1185 
1186 	for (i = 0, lcv = slot; lcv < slot + npages; i += n, lcv += n) {
1187 		n = UVM_AMAP_CHUNK - UVM_AMAP_SLOTIDX(lcv);
1188 		if (lcv + n > slot + npages)
1189 			n = slot + npages - lcv;
1190 
1191 		chunk = amap_chunk_get(amap, lcv, 0, PR_NOWAIT);
1192 		if (chunk == NULL)
1193 			memset(&anons[i], 0, n * sizeof(*anons));
1194 		else
1195 			memcpy(&anons[i],
1196 			    &chunk->ac_anon[UVM_AMAP_SLOTIDX(lcv)],
1197 			    n * sizeof(*anons));
1198 	}
1199 }
1200 
1201 /*
1202  * amap_populate: ensure that the amap can store an anon for the page at
1203  * offset. This function can sleep until memory to store the anon is
1204  * available.
1205  */
1206 void
1207 amap_populate(struct vm_aref *aref, vaddr_t offset)
1208 {
1209 	int slot;
1210 	struct vm_amap *amap = aref->ar_amap;
1211 	struct vm_amap_chunk *chunk;
1212 
1213 	AMAP_B2SLOT(slot, offset);
1214 	slot += aref->ar_pageoff;
1215 	KASSERT(slot < amap->am_nslot);
1216 
1217 	chunk = amap_chunk_get(amap, slot, 1, PR_WAITOK);
1218 	KASSERT(chunk != NULL);
1219 }
1220 
1221 /*
1222  * amap_add: add (or replace) a page to an amap.
1223  *
1224  * => amap should be locked by caller.
1225  * => anon must have the lock associated with this amap.
1226  */
1227 int
1228 amap_add(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon,
1229     boolean_t replace)
1230 {
1231 	int slot;
1232 	struct vm_amap *amap = aref->ar_amap;
1233 	struct vm_amap_chunk *chunk;
1234 
1235 	AMAP_B2SLOT(slot, offset);
1236 	slot += aref->ar_pageoff;
1237 	KASSERT(slot < amap->am_nslot);
1238 
1239 	chunk = amap_chunk_get(amap, slot, 1, PR_NOWAIT);
1240 	if (chunk == NULL)
1241 		return 1;
1242 
1243 	slot = UVM_AMAP_SLOTIDX(slot);
1244 	if (replace) {
1245 		struct vm_anon *oanon = chunk->ac_anon[slot];
1246 
1247 		KASSERT(oanon != NULL);
1248 		if (oanon->an_page && (amap->am_flags & AMAP_SHARED) != 0) {
1249 			pmap_page_protect(oanon->an_page, PROT_NONE);
1250 			/*
1251 			 * XXX: suppose page is supposed to be wired somewhere?
1252 			 */
1253 		}
1254 	} else {   /* !replace */
1255 		if (chunk->ac_anon[slot] != NULL)
1256 			panic("amap_add: slot in use");
1257 
1258 		chunk->ac_usedmap |= 1 << slot;
1259 		amap->am_nused++;
1260 	}
1261 	chunk->ac_anon[slot] = anon;
1262 
1263 	return 0;
1264 }
1265 
1266 /*
1267  * amap_unadd: remove a page from an amap.
1268  *
1269  * => amap should be locked by caller.
1270  */
1271 void
1272 amap_unadd(struct vm_aref *aref, vaddr_t offset)
1273 {
1274 	struct vm_amap *amap = aref->ar_amap;
1275 	struct vm_amap_chunk *chunk;
1276 	int slot;
1277 
1278 	KASSERT(rw_write_held(amap->am_lock));
1279 
1280 	AMAP_B2SLOT(slot, offset);
1281 	slot += aref->ar_pageoff;
1282 	KASSERT(slot < amap->am_nslot);
1283 	chunk = amap_chunk_get(amap, slot, 0, PR_NOWAIT);
1284 	KASSERT(chunk != NULL);
1285 
1286 	slot = UVM_AMAP_SLOTIDX(slot);
1287 	KASSERT(chunk->ac_anon[slot] != NULL);
1288 
1289 	chunk->ac_anon[slot] = NULL;
1290 	chunk->ac_usedmap &= ~(1 << slot);
1291 	amap->am_nused--;
1292 
1293 	if (chunk->ac_usedmap == 0)
1294 		amap_chunk_free(amap, chunk);
1295 }
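
/*
 * Illustrative sketch (not part of the original code): installing and then
 * removing a single anon, roughly as the fault path would.  "aref",
 * "offset" (a byte offset into the aref) and "anon" are hypothetical
 * arguments; the anon is assumed to already share the amap's lock, as
 * amap_add() requires.
 */
static inline void
amap_slot_sketch(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon)
{
	struct vm_amap *amap = aref->ar_amap;

	amap_populate(aref, offset);	/* may sleep to create the chunk */
	amap_lock(amap);
	if (amap_lookup(aref, offset) == NULL &&
	    amap_add(aref, offset, anon, FALSE) == 0) {
		/* the anon is now visible in the amap; take it out again */
		amap_unadd(aref, offset);
	}
	amap_unlock(amap);
}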
1296 
1297 /*
1298  * amap_adjref_anons: adjust the reference count(s) on amap and its anons.
1299  */
1300 static void
1301 amap_adjref_anons(struct vm_amap *amap, vaddr_t offset, vsize_t len,
1302     int refv, boolean_t all)
1303 {
1304 #ifdef UVM_AMAP_PPREF
1305 	KASSERT(rw_write_held(amap->am_lock));
1306 
1307 	/*
1308 	 * We must establish the ppref array before changing am_ref
1309 	 * so that the ppref values match the current amap refcount.
1310 	 */
1311 	if (amap->am_ppref == NULL && !all && len != amap->am_nslot) {
1312 		amap_pp_establish(amap);
1313 	}
1314 #endif
1315 
1316 	amap->am_ref += refv;
1317 
1318 #ifdef UVM_AMAP_PPREF
1319 	if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
1320 		if (all) {
1321 			amap_pp_adjref(amap, 0, amap->am_nslot, refv);
1322 		} else {
1323 			amap_pp_adjref(amap, offset, len, refv);
1324 		}
1325 	}
1326 #endif
1327 	amap_unlock(amap);
1328 }
1329 
1330 /*
1331  * amap_ref: gain a reference to an amap.
1332  *
1333  * => amap must not be locked (we will lock).
1334  * => "offset" and "len" are in units of pages.
1335  * => Called at fork time to gain the child's reference.
1336  */
1337 void
1338 amap_ref(struct vm_amap *amap, vaddr_t offset, vsize_t len, int flags)
1339 {
1340 	amap_lock(amap);
1341 	if (flags & AMAP_SHARED)
1342 		amap->am_flags |= AMAP_SHARED;
1343 	amap_adjref_anons(amap, offset, len, 1, (flags & AMAP_REFALL) != 0);
1344 }
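
/*
 * Illustrative sketch (not part of the original code): gaining one extra
 * shared reference covering the pages spanned by a (hypothetical) map
 * entry.  Note that "len" is passed in pages, as required above.
 */
static inline void
amap_ref_sketch(struct vm_map_entry *entry)
{
	amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
	    atop(entry->end - entry->start), AMAP_SHARED);
}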
1345 
1346 /*
1347  * amap_unref: remove a reference to an amap.
1348  *
1349  * => All pmap-level references to this amap must be already removed.
1350  * => Called from uvm_unmap_detach(); entry is already removed from the map.
1351  * => We will lock amap, so it must be unlocked.
1352  */
1353 void
1354 amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, boolean_t all)
1355 {
1356 	amap_lock(amap);
1357 
1358 	KASSERT(amap->am_ref > 0);
1359 
1360 	if (amap->am_ref == 1) {
1361 		/*
1362 		 * If the last reference - wipeout and destroy the amap.
1363 		 */
1364 		amap->am_ref--;
1365 		amap_wipeout(amap);
1366 		return;
1367 	}
1368 
1369 	/*
1370 	 * Otherwise, drop the reference count(s) on anons.
1371 	 */
1372 	if (amap->am_ref == 2 && (amap->am_flags & AMAP_SHARED) != 0) {
1373 		amap->am_flags &= ~AMAP_SHARED;
1374 	}
1375 	amap_adjref_anons(amap, offset, len, -1, all);
1376 }
1377