/* xref: /csrg-svn/sys/vm/vm_swap.c (revision 8772) */
1 /*	vm_swap.c	4.10	82/10/21	*/
2 
3 #include "../h/param.h"
4 #include "../h/systm.h"
5 #include "../h/buf.h"
6 #include "../h/conf.h"
7 #include "../h/dir.h"
8 #include "../h/user.h"
9 #include "../h/inode.h"
10 #include "../h/map.h"
11 #include "../h/uio.h"
12 
/* Dedicated buffer header for raw (character-device) swap transfers. */
struct	buf rswbuf;
/*
 * Indirect driver for multi-controller paging.
 *
 * Translate a logical swap block number into a (device, block) pair
 * and hand the request to that device's strategy routine.  Swap space
 * is interleaved across the nswdev devices in chunks of DMMAX blocks:
 * logical chunk i lives on device i % nswdev at chunk offset
 * i / nswdev on that device.
 */
swstrategy(bp)
	register struct buf *bp;
{
	int sz, off, seg;
	dev_t dev;

#ifdef GENERIC
	/*
	 * A mini-root gets copied into the front of the swap
	 * and we run over top of the swap area just long
	 * enough for us to do a mkfs and restor of the real
	 * root (sure beats rewriting standalone restor).
	 */
#define	MINIROOTSIZE	2048
	if (rootdev == dumpdev)
		bp->b_blkno += MINIROOTSIZE;
#endif
	/* Transfer size in 512-byte blocks, rounded up. */
	sz = (bp->b_bcount+511)/512;
	/* Offset of the start of the transfer within its DMMAX chunk. */
	off = bp->b_blkno % DMMAX;
	/*
	 * Fail transfers that run past the end of swap or would
	 * cross a chunk boundary (and hence span two devices).
	 */
	if (bp->b_blkno+sz > nswap || off+sz > DMMAX) {
		bp->b_flags |= B_ERROR;
		iodone(bp);
		return;
	}
	seg = bp->b_blkno / DMMAX;	/* logical interleave chunk number */
	dev = swdevt[seg % nswdev].sw_dev;
	seg /= nswdev;			/* chunk index on the chosen device */
	bp->b_blkno = seg*DMMAX + off;	/* physical block on that device */
	bp->b_dev = dev;
	if (dev == 0)
		panic("swstrategy");
	(*bdevsw[major(dev)].d_strategy)(bp);
}
50 
/*
 * Raw read of the swap device: perform physical I/O directly to the
 * user's buffer through swstrategy, using the shared rswbuf header.
 */
swread(dev, uio)
	dev_t dev;
	struct uio *uio;
{

	physio(swstrategy, &rswbuf, dev, B_READ, minphys, uio);
}
58 
/*
 * Raw write to the swap device: perform physical I/O directly from the
 * user's buffer through swstrategy, using the shared rswbuf header.
 */
swwrite(dev, uio)
	dev_t dev;
	struct uio *uio;
{

	physio(swstrategy, &rswbuf, dev, B_WRITE, minphys, uio);
}
66 
/*
 * System call swapon(name) enables swapping on device name,
 * which must appear in the swdevt table.  On failure u.u_error
 * is set: ENOTBLK if name is not a block device, ENXIO if its
 * major number is out of range, EBUSY if swapping is already
 * enabled on the device, ENODEV if it is not a configured
 * swap device.
 */
oswapon()
{
	register struct inode *ip;
	dev_t dev;
	register struct swdevt *sp;

	/* Look up the user-supplied pathname (uchar fetches it from user space). */
	ip = namei(uchar, 0, 1);
	if (ip == NULL)
		return;			/* namei already set u.u_error */
	if ((ip->i_mode&IFMT) != IFBLK) {
		u.u_error = ENOTBLK;
		iput(ip);
		return;
	}
	dev = (dev_t)ip->i_rdev;
	iput(ip);			/* device number saved; inode no longer needed */
	if (major(dev) >= nblkdev) {
		u.u_error = ENXIO;
		return;
	}
	/*
	 * Search starting at second table entry,
	 * since first (primary swap area) is freed at boot.
	 */
	for (sp = &swdevt[1]; sp->sw_dev; sp++)
		if (sp->sw_dev == dev) {
			if (sp->sw_freed) {
				u.u_error = EBUSY;
				return;
			}
			/* Make this device's share of swap available for allocation. */
			swfree(sp - swdevt);
			return;
		}
	u.u_error = ENODEV;
}
107 
/*
 * Swfree(index) frees the index'th portion of the swap map.
 * Each of the nswdev devices provides 1/nswdev'th of the swap
 * space, which is laid out in DMMAX-block chunks interleaved
 * circularly among the devices (see swstrategy for the mapping).
 */
swfree(index)
	int index;
{
	register swblk_t vsbase;
	register long blk;

	swdevt[index].sw_freed = 1;
	/*
	 * Walk every chunk belonging to this device; vsbase is the
	 * chunk's starting block in the interleaved swap address space.
	 */
	for (vsbase = index*DMMAX; vsbase < nswap; vsbase += nswdev*DMMAX) {
		blk = nswap - vsbase;		/* the final chunk may be short */
		if (blk > DMMAX)
			blk = DMMAX;
		if (vsbase == 0) {
			/*
			 * Can't free a block starting at 0 in the swapmap
			 * but need some space for argmap so use 1/2 this
			 * hunk which needs special treatment anyways.
			 */
			argdev = swdevt[0].sw_dev;
			/* First half (less one click) seeds the argument map. */
			rminit(argmap, blk/2-CLSIZE, CLSIZE,
			    "argmap", ARGMAPSIZE);
			/*
			 * First of all chunks... initialize the swapmap
			 * the second half of the hunk.
			 */
			rminit(swapmap, blk/2, blk/2, "swap", nswapmap);
		} else
			rmfree(swapmap, blk, vsbase);
	}
}
143