/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_object.c	7.7 (Berkeley) 08/28/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include "param.h"
#include "malloc.h"

#include "vm.h"
#include "vm_page.h"

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */
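
/*
 *	For orientation, a sketch of the typical object lifecycle as
 *	seen by a hypothetical client of the interfaces below (purely
 *	illustrative; not code from this module):
 *
 *		object = vm_object_allocate(size);	create: ref_count == 1
 *		vm_object_reference(object);		share:  ref_count == 2
 *		vm_object_deallocate(object);		drop one reference
 *		vm_object_deallocate(object);		last one: object is
 *							cached or destroyed
 */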

struct vm_object	kernel_object_store;
struct vm_object	kmem_object_store;

#define	VM_OBJECT_HASH_COUNT	157

int		vm_cache_max = 100;	/* can patch if necessary */
queue_head_t	vm_object_hashtable[VM_OBJECT_HASH_COUNT];

long	object_collapses = 0;
long	object_bypasses  = 0;

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void vm_object_init()
{
	register int	i;

	queue_init(&vm_object_cached_list);
	queue_init(&vm_object_list);
	vm_object_count = 0;
	simple_lock_init(&vm_cache_lock);
	simple_lock_init(&vm_object_list_lock);

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		queue_init(&vm_object_hashtable[i]);

	kernel_object = &kernel_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
			kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(VM_KMEM_SIZE + VM_MBUF_SIZE, kmem_object);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

vm_object_t vm_object_allocate(size)
	vm_size_t	size;
{
	register vm_object_t	result;

	result = (vm_object_t)
		malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK);

	_vm_object_allocate(size, result);

	return(result);
}

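/*
 *	_vm_object_allocate:
 *
 *	Initialize the given (caller-allocated) object structure with
 *	the given size and a single reference, and place it on the
 *	global object list.
 */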
_vm_object_allocate(size, object)
	vm_size_t		size;
	register vm_object_t	object;
{
	queue_init(&object->memq);
	vm_object_lock_init(object);
	object->ref_count = 1;
	object->resident_page_count = 0;
	object->size = size;
	object->flags = OBJ_INTERNAL;	/* vm_allocate_with_pager will reset */
	object->paging_in_progress = 0;
	object->copy = NULL;

	/*
	 *	Object starts out read-write, with no pager.
	 */

	object->pager = NULL;
	object->paging_offset = 0;
	object->shadow = NULL;
	object->shadow_offset = (vm_offset_t) 0;

	simple_lock(&vm_object_list_lock);
	queue_enter(&vm_object_list, object, vm_object_t, object_list);
	vm_object_count++;
	simple_unlock(&vm_object_list_lock);
}

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void vm_object_reference(object)
	register vm_object_t	object;
{
	if (object == NULL)
		return;

	vm_object_lock(object);
	object->ref_count++;
	vm_object_unlock(object);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void vm_object_deallocate(object)
	register vm_object_t	object;
{
	vm_object_t	temp;

	while (object != NULL) {

		/*
		 *	The cache holds a reference (uncounted) to
		 *	the object; we must lock it before removing
		 *	the object.
		 */

		vm_object_cache_lock();

		/*
		 *	Lose the reference
		 */
		vm_object_lock(object);
		if (--(object->ref_count) != 0) {

			/*
			 *	If there are still references, then
			 *	we are done.
			 */
			vm_object_unlock(object);
			vm_object_cache_unlock();
			return;
		}

		/*
		 *	See if this object can persist.  If so, enter
		 *	it in the cache, then deactivate all of its
		 *	pages.
		 */

		if (object->flags & OBJ_CANPERSIST) {

			queue_enter(&vm_object_cached_list, object,
				vm_object_t, cached_list);
			vm_object_cached++;
			vm_object_cache_unlock();

			vm_object_deactivate_pages(object);
			vm_object_unlock(object);

			vm_object_cache_trim();
			return;
		}

		/*
		 *	Make sure no one can look us up now.
		 */
		vm_object_remove(object->pager);
		vm_object_cache_unlock();

		temp = object->shadow;
		vm_object_terminate(object);
			/* unlocks and deallocates object */
		object = temp;
	}
}
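
/*
 *	Note that the loop above walks down the shadow chain: destroying
 *	an object drops the reference it held on its shadow, which may
 *	in turn reach a reference count of zero, and so on.  Releasing
 *	the last reference to the top of a chain
 *
 *		object -> shadow -> shadow'
 *
 *	can thus ripple all the way down it.
 */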

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 */
void vm_object_terminate(object)
	register vm_object_t	object;
{
	register vm_page_t	p;
	vm_object_t		shadow_object;

	/*
	 *	Detach the object from its shadow if we are the shadow's
	 *	copy.
	 */
	if ((shadow_object = object->shadow) != NULL) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object)
			shadow_object->copy = NULL;
#if 0
		else if (shadow_object->copy != NULL)
			panic("vm_object_terminate: copy/shadow inconsistency");
#endif
		vm_object_unlock(shadow_object);
	}

	/*
	 *	Wait until the pageout daemon is through
	 *	with the object.
	 */

	while (object->paging_in_progress != 0) {
		vm_object_sleep((int)object, object, FALSE);
		vm_object_lock(object);
	}

	/*
	 *	While the paging system is locked,
	 *	pull the object's pages off the active
	 *	and inactive queues.  This keeps the
	 *	pageout daemon from playing with them
	 *	during vm_pager_deallocate.
	 *
	 *	We can't free the pages yet, because the
	 *	object's pager may have to write them out
	 *	before deallocating the paging space.
	 */

	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		VM_PAGE_CHECK(p);

		vm_page_lock_queues();
		if (p->active) {
			queue_remove(&vm_page_queue_active, p, vm_page_t,
						pageq);
			p->active = FALSE;
			cnt.v_active_count--;
		}

		if (p->inactive) {
			queue_remove(&vm_page_queue_inactive, p, vm_page_t,
						pageq);
			p->inactive = FALSE;
			cnt.v_inactive_count--;
		}
		vm_page_unlock_queues();
		p = (vm_page_t) queue_next(&p->listq);
	}

	vm_object_unlock(object);

	if (object->paging_in_progress != 0)
		panic("vm_object_terminate: pageout in progress");

	/*
	 *	Clean and free the pages, as appropriate.
	 *	All references to the object are gone,
	 *	so we don't need to lock it.
	 */

	if ((object->flags & OBJ_INTERNAL) == 0) {
		vm_object_lock(object);
		vm_object_page_clean(object, 0, 0);
		vm_object_unlock(object);
	}
	while (!queue_empty(&object->memq)) {
		p = (vm_page_t) queue_first(&object->memq);

		VM_PAGE_CHECK(p);

		vm_page_lock_queues();
		vm_page_free(p);
		vm_page_unlock_queues();
	}

	/*
	 *	Let the pager know object is dead.
	 */

	if (object->pager != NULL)
		vm_pager_deallocate(object->pager);

	simple_lock(&vm_object_list_lock);
	queue_remove(&vm_object_list, object, vm_object_t, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	/*
	 *	Free the space for the object.
	 */

	free((caddr_t)object, M_VMOBJ);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
vm_object_page_clean(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;

	if (object->pager == NULL)
		return;

again:
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if (start == end ||
		    (p->offset >= start && p->offset < end)) {
			if (p->clean && pmap_is_modified(VM_PAGE_TO_PHYS(p)))
				p->clean = FALSE;
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
			if (!p->clean) {
				p->busy = TRUE;
				object->paging_in_progress++;
				vm_object_unlock(object);
				(void) vm_pager_put(object->pager, p, TRUE);
				vm_object_lock(object);
				object->paging_in_progress--;
				p->busy = FALSE;
				PAGE_WAKEUP(p);
				goto again;
			}
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
}
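
/*
 *	The "goto again" above restarts the scan from the head of the
 *	list: the object lock is dropped around vm_pager_put, so the
 *	page list may have changed while the page was being written.
 */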

/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
vm_object_deactivate_pages(object)
	register vm_object_t	object;
{
	register vm_page_t	p, next;

	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		next = (vm_page_t) queue_next(&p->listq);
		vm_page_lock_queues();
		vm_page_deactivate(p);
		vm_page_unlock_queues();
		p = next;
	}
}

/*
 *	Trim the object cache to size.
 */
vm_object_cache_trim()
{
	register vm_object_t	object;

	vm_object_cache_lock();
	while (vm_object_cached > vm_cache_max) {
		object = (vm_object_t) queue_first(&vm_object_cached_list);
		vm_object_cache_unlock();

		if (object != vm_object_lookup(object->pager))
			panic("vm_object_cache_trim: I'm sooo confused.");

		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}

/*
 *	vm_object_shutdown()
 *
 *	Shut down the object system.  Unfortunately, while we
 *	may be trying to do this, init is happily waiting for
 *	processes to exit, and therefore will be causing some objects
 *	to be deallocated.  To handle this, we gain a fake reference
 *	to all objects we release paging areas for.  This will prevent
 *	a duplicate deallocation.  This routine is probably full of
 *	race conditions!
 */

void vm_object_shutdown()
{
	register vm_object_t	object;

	/*
	 *	Clean up the object cache *before* we screw up the reference
	 *	counts on all of the objects.
	 */

	vm_object_cache_clear();

	printf("free paging spaces: ");

	/*
	 *	First we gain a reference to each object so that
	 *	no one else will deallocate them.
	 */

	simple_lock(&vm_object_list_lock);
	object = (vm_object_t) queue_first(&vm_object_list);
	while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
		vm_object_reference(object);
		object = (vm_object_t) queue_next(&object->object_list);
	}
	simple_unlock(&vm_object_list_lock);

	/*
	 *	Now we deallocate all the paging areas.  We don't need
	 *	to lock anything because we've reduced to a single
	 *	processor while shutting down.  This also assumes that
	 *	no new objects are being created.
	 */

	object = (vm_object_t) queue_first(&vm_object_list);
	while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
		if (object->pager != NULL)
			vm_pager_deallocate(object->pager);
		object = (vm_object_t) queue_next(&object->object_list);
		printf(".");
	}
	printf("done.\n");
}

/*
 *	vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
 */
void vm_object_pmap_copy(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;

	if (object == NULL)
		return;

	vm_object_lock(object);
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if ((start <= p->offset) && (p->offset < end)) {
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
			p->copy_on_write = TRUE;
		}
		p = (vm_page_t) queue_next(&p->listq);
	}
	vm_object_unlock(object);
}

/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void vm_object_pmap_remove(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p;

	if (object == NULL)
		return;

	vm_object_lock(object);
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if ((start <= p->offset) && (p->offset < end))
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
		p = (vm_page_t) queue_next(&p->listq);
	}
	vm_object_unlock(object);
}

/*
 *	vm_object_copy:
 *
 *	Create a new object which is a copy of an existing
 *	object, and mark all of the pages in the existing
 *	object 'copy-on-write'.  The new object has one reference.
 *	Returns the new object.
 *
 *	May defer the copy until later if the object is backed
 *	only by the default pager (or by no pager at all), in which
 *	case the caller is told to make a shadow instead.
 */
void vm_object_copy(src_object, src_offset, size,
		    dst_object, dst_offset, src_needs_copy)
	register vm_object_t	src_object;
	vm_offset_t		src_offset;
	vm_size_t		size;
	vm_object_t		*dst_object;	/* OUT */
	vm_offset_t		*dst_offset;	/* OUT */
	boolean_t		*src_needs_copy;	/* OUT */
{
	register vm_object_t	new_copy;
	register vm_object_t	old_copy;
	vm_offset_t		new_start, new_end;

	register vm_page_t	p;

	if (src_object == NULL) {
		/*
		 *	Nothing to copy
		 */
		*dst_object = NULL;
		*dst_offset = 0;
		*src_needs_copy = FALSE;
		return;
	}

	/*
	 *	If the object's pager is null_pager or the
	 *	default pager, we don't have to make a copy
	 *	of it.  Instead, we set the needs copy flag and
	 *	make a shadow later.
	 */

	vm_object_lock(src_object);
	if (src_object->pager == NULL ||
	    (src_object->flags & OBJ_INTERNAL)) {

		/*
		 *	Make another reference to the object
		 */
		src_object->ref_count++;

		/*
		 *	Mark all of the pages copy-on-write.
		 */
		for (p = (vm_page_t) queue_first(&src_object->memq);
		     !queue_end(&src_object->memq, (queue_entry_t)p);
		     p = (vm_page_t) queue_next(&p->listq)) {
			if (src_offset <= p->offset &&
			    p->offset < src_offset + size)
				p->copy_on_write = TRUE;
		}
		vm_object_unlock(src_object);

		*dst_object = src_object;
		*dst_offset = src_offset;

		/*
		 *	Must make a shadow when write is desired
		 */
		*src_needs_copy = TRUE;
		return;
	}

	/*
	 *	Try to collapse the object before copying it.
	 */
	vm_object_collapse(src_object);

	/*
	 *	If the object has a pager, the pager wants to
	 *	see all of the changes.  We need a copy-object
	 *	for the changed pages.
	 *
	 *	If there is a copy-object, and it is empty,
	 *	no changes have been made to the object since the
	 *	copy-object was made.  We can use the same copy-
	 *	object.
	 */

    Retry1:
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);

			/* should spin a bit here... */
			vm_object_lock(src_object);
			goto Retry1;
		}

		if (old_copy->resident_page_count == 0 &&
		    old_copy->pager == NULL) {
			/*
			 *	Return another reference to
			 *	the existing copy-object.
			 */
			old_copy->ref_count++;
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			*dst_object = old_copy;
			*dst_offset = src_offset;
			*src_needs_copy = FALSE;
			return;
		}
		vm_object_unlock(old_copy);
	}
	vm_object_unlock(src_object);

	/*
	 *	If the object has a pager, the pager wants
	 *	to see all of the changes.  We must make
	 *	a copy-object and put the changed pages there.
	 *
	 *	The copy-object is always made large enough to
	 *	completely shadow the original object, since
	 *	it may have several users who want to shadow
	 *	the original object at different points.
	 */

	new_copy = vm_object_allocate(src_object->size);

    Retry2:
	vm_object_lock(src_object);
	/*
	 *	Copy object may have changed while we were unlocked
	 */
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 *	Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);
			goto Retry2;
		}

		/*
		 *	Consistency check
		 */
		if (old_copy->shadow != src_object ||
		    old_copy->shadow_offset != (vm_offset_t) 0)
			panic("vm_object_copy: copy/shadow inconsistency");

		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		src_object->ref_count--;	/* remove ref. from old_copy */
		old_copy->shadow = new_copy;
		new_copy->ref_count++;		/* locking not needed - we
						   have the only pointer */
		vm_object_unlock(old_copy);	/* done with old_copy */
	}

	new_start = (vm_offset_t) 0;	/* always shadow original at 0 */
	new_end   = (vm_offset_t) new_copy->size; /* for the whole object */

	/*
	 *	Point the new copy at the existing object.
	 */

	new_copy->shadow = src_object;
	new_copy->shadow_offset = new_start;
	src_object->ref_count++;
	src_object->copy = new_copy;

	/*
	 *	Mark all the affected pages of the existing object
	 *	copy-on-write.
	 */
	p = (vm_page_t) queue_first(&src_object->memq);
	while (!queue_end(&src_object->memq, (queue_entry_t) p)) {
		if ((new_start <= p->offset) && (p->offset < new_end))
			p->copy_on_write = TRUE;
		p = (vm_page_t) queue_next(&p->listq);
	}

	vm_object_unlock(src_object);

	*dst_object = new_copy;
	*dst_offset = src_offset - new_start;
	*src_needs_copy = FALSE;
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */

void vm_object_shadow(object, offset, length)
	vm_object_t	*object;	/* IN/OUT */
	vm_offset_t	*offset;	/* IN/OUT */
	vm_size_t	length;
{
	register vm_object_t	source;
	register vm_object_t	result;

	source = *object;

	/*
	 *	Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 *	The new object shadows the source object, adding
	 *	a reference to it.  Our caller changes his reference
	 *	to point to the new object, removing a reference to
	 *	the source object.  Net result: no change of reference
	 *	count.
	 */
	result->shadow = source;

	/*
	 *	Store the offset into the source object,
	 *	and fix up the offset into the new object.
	 */

	result->shadow_offset = *offset;

	/*
	 *	Return the new object and offset.
	 */

	*offset = 0;
	*object = result;
}
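
/*
 *	Illustrative use (a hypothetical copy-on-write caller; not code
 *	from this module):
 *
 *		vm_object_t	object;		holds one reference
 *		vm_offset_t	offset;
 *
 *		vm_object_shadow(&object, &offset, length);
 *
 *	On return, object points to a fresh shadow and offset is 0;
 *	the caller's original reference has been consumed by the shadow.
 */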

/*
 *	Set the specified object's pager to the specified pager.
 */

void vm_object_setpager(object, pager, paging_offset,
			read_only)
	vm_object_t	object;
	vm_pager_t	pager;
	vm_offset_t	paging_offset;
	boolean_t	read_only;
{
#ifdef	lint
	read_only++;	/* No longer used */
#endif	/* lint */

	vm_object_lock(object);			/* XXX ? */
	object->pager = pager;
	object->paging_offset = paging_offset;
	vm_object_unlock(object);			/* XXX ? */
}

/*
 *	vm_object_hash hashes the pager/id pair.
 */

#define vm_object_hash(pager) \
	(((unsigned)pager)%VM_OBJECT_HASH_COUNT)
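
/*
 *	The bucket count (157) is prime, so pager pointers, which tend
 *	to share their low-order alignment bits, still spread across
 *	the table.  Typical use:
 *
 *		bucket = &vm_object_hashtable[vm_object_hash(pager)];
 */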

/*
 *	vm_object_lookup looks in the object cache for an object with the
 *	specified pager and paging id.
 */

vm_object_t vm_object_lookup(pager)
	vm_pager_t	pager;
{
	register queue_t		bucket;
	register vm_object_hash_entry_t	entry;
	vm_object_t			object;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	vm_object_cache_lock();

	entry = (vm_object_hash_entry_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) entry)) {
		object = entry->object;
		if (object->pager == pager) {
			vm_object_lock(object);
			if (object->ref_count == 0) {
				queue_remove(&vm_object_cached_list, object,
						vm_object_t, cached_list);
				vm_object_cached--;
			}
			object->ref_count++;
			vm_object_unlock(object);
			vm_object_cache_unlock();
			return(object);
		}
		entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links);
	}

	vm_object_cache_unlock();
	return(NULL);
}

/*
 *	vm_object_enter enters the specified object/pager/id into
 *	the hash table.
 */

void vm_object_enter(object, pager)
	vm_object_t	object;
	vm_pager_t	pager;
{
	register queue_t		bucket;
	register vm_object_hash_entry_t	entry;

	/*
	 *	We don't cache null objects, and we can't cache
	 *	objects with the null pager.
	 */

	if (object == NULL)
		return;
	if (pager == NULL)
		return;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];
	entry = (vm_object_hash_entry_t)
		malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK);
	entry->object = object;
	object->flags |= OBJ_CANPERSIST;

	vm_object_cache_lock();
	queue_enter(bucket, entry, vm_object_hash_entry_t, hash_links);
	vm_object_cache_unlock();
}

/*
 *	vm_object_remove:
 *
 *	Remove the pager from the hash table.
 *	Note:  This assumes that the object cache
 *	is locked.  XXX this should be fixed
 *	by reorganizing vm_object_deallocate.
 */
vm_object_remove(pager)
	register vm_pager_t	pager;
{
	register queue_t		bucket;
	register vm_object_hash_entry_t	entry;
	register vm_object_t		object;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	entry = (vm_object_hash_entry_t) queue_first(bucket);
	while (!queue_end(bucket, (queue_entry_t) entry)) {
		object = entry->object;
		if (object->pager == pager) {
			queue_remove(bucket, entry, vm_object_hash_entry_t,
					hash_links);
			free((caddr_t)entry, M_VMOBJHASH);
			break;
		}
		entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links);
	}
}

/*
 *	vm_object_cache_clear removes all objects from the cache.
 */

void vm_object_cache_clear()
{
	register vm_object_t	object;

	/*
	 *	Remove each object in the cache by scanning down the
	 *	list of cached objects.
	 */
	vm_object_cache_lock();
	while (!queue_empty(&vm_object_cached_list)) {
		object = (vm_object_t) queue_first(&vm_object_cached_list);
		vm_object_cache_unlock();

		/*
		 * Note: it is important that we use vm_object_lookup
		 * to gain a reference, and not vm_object_reference, because
		 * the logic for removing an object from the cache lies in
		 * lookup.
		 */
		if (object != vm_object_lookup(object->pager))
			panic("vm_object_cache_clear: I'm sooo confused.");
		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}

boolean_t	vm_object_collapse_allowed = TRUE;
/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 *
 *	Requires that the object be locked and the page
 *	queues be unlocked.
 *
 */
void vm_object_collapse(object)
	register vm_object_t	object;
{
	register vm_object_t	backing_object;
	register vm_offset_t	backing_offset;
	register vm_size_t	size;
	register vm_offset_t	new_offset;
	register vm_page_t	p, pp;

	if (!vm_object_collapse_allowed)
		return;

	while (TRUE) {
		/*
		 *	Verify that the conditions are right for collapse:
		 *
		 *	The object exists and no pages in it are currently
		 *	being paged out (or have ever been paged out).
		 */
		if (object == NULL ||
		    object->paging_in_progress != 0 ||
		    object->pager != NULL)
			return;

		/*
		 *		There is a backing object, and
		 */

		if ((backing_object = object->shadow) == NULL)
			return;

		vm_object_lock(backing_object);
		/*
		 *	...
		 *		The backing object is not read_only,
		 *		and no pages in the backing object are
		 *		currently being paged out.
		 *		The backing object is internal.
		 */

		if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 *	The backing object can't be a copy-object:
		 *	the shadow_offset for the copy-object must stay
		 *	as 0.  Furthermore (for the 'we have all the
		 *	pages' case), if we bypass backing_object and
		 *	just shadow the next object in the chain, old
		 *	pages from that object would then have to be copied
		 *	BOTH into the (former) backing_object and into the
		 *	parent object.
		 */
		if (backing_object->shadow != NULL &&
		    backing_object->shadow->copy != NULL) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 *	We know that we can either collapse the backing
		 *	object (if the parent is the only reference to
		 *	it) or (perhaps) remove the parent's reference
		 *	to it.
		 */

		backing_offset = object->shadow_offset;
		size = object->size;

		/*
		 *	If there is exactly one reference to the backing
		 *	object, we can collapse it into the parent.
		 */

		if (backing_object->ref_count == 1) {

			/*
			 *	We can collapse the backing object.
			 *
			 *	Move all in-memory pages from backing_object
			 *	to the parent.  Pages that have been paged out
			 *	will be overwritten by any of the parent's
			 *	pages that shadow them.
			 */

			while (!queue_empty(&backing_object->memq)) {

				p = (vm_page_t)
					queue_first(&backing_object->memq);

				new_offset = (p->offset - backing_offset);

				/*
				 *	If the parent has a page here, or if
				 *	this page falls outside the parent,
				 *	dispose of it.
				 *
				 *	Otherwise, move it as planned.
				 */

				if (p->offset < backing_offset ||
				    new_offset >= size) {
					vm_page_lock_queues();
					vm_page_free(p);
					vm_page_unlock_queues();
				} else {
				    pp = vm_page_lookup(object, new_offset);
				    if (pp != NULL && !pp->fake) {
					vm_page_lock_queues();
					vm_page_free(p);
					vm_page_unlock_queues();
				    }
				    else {
					if (pp) {
					    /* may be someone waiting for it */
					    PAGE_WAKEUP(pp);
					    vm_page_lock_queues();
					    vm_page_free(pp);
					    vm_page_unlock_queues();
					}
					vm_page_rename(p, object, new_offset);
				    }
				}
			}

			/*
			 *	Move the pager from backing_object to object.
			 *
			 *	XXX We're only using part of the paging space
			 *	for keeps now... we ought to discard the
			 *	unused portion.
			 */

			object->pager = backing_object->pager;
			object->paging_offset += backing_offset;

			backing_object->pager = NULL;

			/*
			 *	Object now shadows whatever backing_object did.
			 *	Note that the reference to backing_object->shadow
			 *	moves from within backing_object to within object.
			 */

			object->shadow = backing_object->shadow;
			object->shadow_offset += backing_object->shadow_offset;
			if (object->shadow != NULL &&
			    object->shadow->copy != NULL) {
				panic("vm_object_collapse: we collapsed a copy-object!");
			}
			/*
			 *	Discard backing_object.
			 *
			 *	Since the backing object has no pages, no
			 *	pager left, and no object references within it,
			 *	all that is necessary is to dispose of it.
			 */

			vm_object_unlock(backing_object);

			simple_lock(&vm_object_list_lock);
			queue_remove(&vm_object_list, backing_object,
						vm_object_t, object_list);
			vm_object_count--;
			simple_unlock(&vm_object_list_lock);

			free((caddr_t)backing_object, M_VMOBJ);

			object_collapses++;
		}
		else {
			/*
			 *	If all of the pages in the backing object are
			 *	shadowed by the parent object, the parent
			 *	object no longer has to shadow the backing
			 *	object; it can shadow the next one in the
			 *	chain.
			 *
			 *	The backing object must not be paged out - we'd
			 *	have to check all of the paged-out pages, as
			 *	well.
			 */

			if (backing_object->pager != NULL) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 *	Should have a check for a 'small' number
			 *	of pages here.
			 */

			p = (vm_page_t) queue_first(&backing_object->memq);
			while (!queue_end(&backing_object->memq,
					  (queue_entry_t) p)) {

				new_offset = (p->offset - backing_offset);

				/*
				 *	If the parent has a page here, or if
				 *	this page falls outside the parent,
				 *	keep going.
				 *
				 *	Otherwise, the backing_object must be
				 *	left in the chain.
				 */

				if (p->offset >= backing_offset &&
				    new_offset < size &&
				    ((pp = vm_page_lookup(object, new_offset))
				      == NULL ||
				     pp->fake)) {
					/*
					 *	Page still needed.
					 *	Can't go any further.
					 */
					vm_object_unlock(backing_object);
					return;
				}
				p = (vm_page_t) queue_next(&p->listq);
			}

			/*
			 *	Make the parent shadow the next object
			 *	in the chain.  Deallocating backing_object
			 *	will not remove it, since its reference
			 *	count is at least 2.
			 */

			vm_object_reference(object->shadow = backing_object->shadow);
			object->shadow_offset += backing_object->shadow_offset;

			/*	Drop the reference count on backing_object.
			 *	Since its ref_count was at least 2, it
			 *	will not vanish; so we don't need to call
			 *	vm_object_deallocate.
			 */
			backing_object->ref_count--;
			vm_object_unlock(backing_object);

			object_bypasses++;
		}

		/*
		 *	Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void vm_object_page_remove(object, start, end)
	register vm_object_t	object;
	register vm_offset_t	start;
	register vm_offset_t	end;
{
	register vm_page_t	p, next;

	if (object == NULL)
		return;

	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		next = (vm_page_t) queue_next(&p->listq);
		if ((start <= p->offset) && (p->offset < end)) {
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
			vm_page_lock_queues();
			vm_page_free(p);
			vm_page_unlock_queues();
		}
		p = next;
	}
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	Returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The objects must *not* be locked.
 */
boolean_t vm_object_coalesce(prev_object, next_object,
			prev_offset, next_offset,
			prev_size, next_size)
	register vm_object_t	prev_object;
	vm_object_t	next_object;
	vm_offset_t	prev_offset, next_offset;
	vm_size_t	prev_size, next_size;
{
	vm_size_t	newsize;

#ifdef	lint
	next_offset++;
#endif	/* lint */

	if (next_object != NULL) {
		return(FALSE);
	}

	if (prev_object == NULL) {
		return(TRUE);
	}

	vm_object_lock(prev_object);

	/*
	 *	Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 *	Can't coalesce if:
	 *	. more than one reference
	 *	. paged out
	 *	. shadows another object
	 *	. has a copy elsewhere
	 *	(any of which mean that the pages not mapped to
	 *	prev_entry may be in use anyway)
	 */

	if (prev_object->ref_count > 1 ||
		prev_object->pager != NULL ||
		prev_object->shadow != NULL ||
		prev_object->copy != NULL) {
		vm_object_unlock(prev_object);
		return(FALSE);
	}

	/*
	 *	Remove any pages that may still be in the object from
	 *	a previous deallocation.
	 */

	vm_object_page_remove(prev_object,
			prev_offset + prev_size,
			prev_offset + prev_size + next_size);

	/*
	 *	Extend the object if necessary.
	 */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev_object->size)
		prev_object->size = newsize;

	vm_object_unlock(prev_object);
	return(TRUE);
}
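
/*
 *	Illustrative use (a hypothetical caller extending a mapping by
 *	next_size bytes; not code from this module):
 *
 *		if (vm_object_coalesce(prev_object, NULL,
 *				prev_offset, (vm_offset_t) 0,
 *				prev_size, next_size))
 *			the new region can simply share prev_object,
 *			starting at offset prev_offset + prev_size.
 */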

/*
 *	vm_object_print:	[ debug ]
 */
void vm_object_print(object, full)
	vm_object_t	object;
	boolean_t	full;
{
	register vm_page_t	p;
	extern indent;

	register int count;

	if (object == NULL)
		return;

	iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
		(int) object, (int) object->size,
		object->resident_page_count, object->ref_count);
	printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n",
	       (int) object->pager, (int) object->paging_offset,
	       (int) object->shadow, (int) object->shadow_offset);
	printf("cache: next=0x%x, prev=0x%x\n",
	       object->cached_list.next, object->cached_list.prev);

	if (!full)
		return;

	indent += 2;
	count = 0;
	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		if (count == 0)
			iprintf("memory:=");
		else if (count == 6) {
			printf("\n");
			iprintf(" ...");
			count = 0;
		} else
			printf(",");
		count++;

		printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p));
		p = (vm_page_t) queue_next(&p->listq);
	}
	if (count != 0)
		printf("\n");
	indent -= 2;
}