/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)spec_vnops.c	7.26 (Berkeley) 05/04/90
 */

#include "param.h"
#include "systm.h"
#include "user.h"
#include "kernel.h"
#include "conf.h"
#include "buf.h"
#include "mount.h"
#include "vnode.h"
#include "specdev.h"
#include "stat.h"
#include "errno.h"
#include "ioctl.h"
#include "file.h"
#include "disklabel.h"

/* symbolic sleep message strings for devices */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";

int	spec_lookup(),
	spec_open(),
	spec_read(),
	spec_write(),
	spec_strategy(),
	spec_bmap(),
	spec_ioctl(),
	spec_select(),
	spec_lock(),
	spec_unlock(),
	spec_close(),
	spec_print(),
	spec_ebadf(),
	spec_badop(),
	spec_nullop();

struct vnodeops spec_vnodeops = {
	spec_lookup,		/* lookup */
	spec_badop,		/* create */
	spec_badop,		/* mknod */
	spec_open,		/* open */
	spec_close,		/* close */
	spec_ebadf,		/* access */
	spec_ebadf,		/* getattr */
	spec_ebadf,		/* setattr */
	spec_read,		/* read */
	spec_write,		/* write */
	spec_ioctl,		/* ioctl */
	spec_select,		/* select */
	spec_badop,		/* mmap */
	spec_nullop,		/* fsync */
	spec_badop,		/* seek */
	spec_badop,		/* remove */
	spec_badop,		/* link */
	spec_badop,		/* rename */
	spec_badop,		/* mkdir */
	spec_badop,		/* rmdir */
	spec_badop,		/* symlink */
	spec_badop,		/* readdir */
	spec_badop,		/* readlink */
	spec_badop,		/* abortop */
	spec_nullop,		/* inactive */
	spec_nullop,		/* reclaim */
	spec_lock,		/* lock */
	spec_unlock,		/* unlock */
	spec_bmap,		/* bmap */
	spec_strategy,		/* strategy */
	spec_print,		/* print */
	spec_nullop,		/* islocked */
};
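/*
 * Callers do not enter these routines directly; they go through the
 * VOP_* macros in vnode.h, which dispatch through the vnode's
 * operations vector, along the lines of (a sketch, not the literal
 * definition):
 *
 *	#define VOP_READ(vp, uio, ioflag, cred) \
 *		(*((vp)->v_op->vn_read))(vp, uio, ioflag, cred)
 *
 * so that VOP_READ() on a special-file vnode lands in spec_read().
 */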

/*
 * Trivial lookup routine that always fails.
 */
spec_lookup(vp, ndp)
	struct vnode *vp;
	struct nameidata *ndp;
{

	ndp->ni_dvp = vp;
	ndp->ni_vp = NULL;
	return (ENOTDIR);
}

/*
 * Open is called to allow the handler of a special file
 * to initialize and validate the device before actual I/O.
 */
/* ARGSUSED */
spec_open(vp, mode, cred)
	register struct vnode *vp;
	int mode;
	struct ucred *cred;
{
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		return ((*cdevsw[maj].d_open)(dev, mode, S_IFCHR));

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		if (error = mountedon(vp))
			return (error);
		return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK));
	}
	return (0);
}

/*
 * Vnode op for read
 */
spec_read(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct buf *bp;
	daddr_t bn;
	long bsize, bscale;
	struct partinfo dpart;
	register int n, on;
	int error = 0;
	extern int mem_no;

	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
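		/*
		 * Character device reads can block indefinitely (on a
		 * terminal, for instance), so drop the vnode lock while
		 * the driver runs and reacquire it afterwards.
		 */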
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
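		/*
		 * If the driver can report its partition layout and the
		 * partition holds a BSD file system, use the file system
		 * block size (fragments per block times fragment size)
		 * in preference to the block device default; e.g. 1024-byte
		 * fragments at 8 per block give a bsize of 8192.
		 */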
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize / DEV_BSIZE;
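		/*
		 * Transfer loop: bn is the device block rounded down to a
		 * bsize boundary (in DEV_BSIZE units), on the byte offset
		 * within that block, and n the byte count for this pass;
		 * e.g. with bsize 8192 (bscale 16), offset 12288 gives
		 * bn 16 and on 4096.  If this read follows the previous
		 * one sequentially (v_lastr), read ahead the next block.
		 */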
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
			if (vp->v_lastr + bscale == bn)
				error = breada(vp, bn, (int)bsize, bn + bscale,
					(int)bsize, NOCRED, &bp);
			else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize)
				bp->b_flags |= B_AGE;
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 */
spec_write(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct buf *bp;
	daddr_t bn;
	int bsize, blkmask;
	struct partinfo dpart;
	register int n, on, i;
	int count, error = 0;
	extern int mem_no;

	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Negative offsets allowed only for /dev/kmem
		 */
		if (uio->uio_offset < 0 && major(vp->v_rdev) != mem_no)
			return (EINVAL);
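		/*
		 * As in spec_read(), drop the vnode lock across the
		 * potentially long-running driver call.
		 */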
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ioflag);
		VOP_LOCK(vp);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART,
		    (caddr_t)&dpart, FREAD) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		blkmask = (bsize / DEV_BSIZE) - 1;
		do {
			bn = (uio->uio_offset / DEV_BSIZE) &~ blkmask;
			on = uio->uio_offset % bsize;
			n = MIN((unsigned)(bsize - on), uio->uio_resid);
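			/*
			 * Purge these device blocks from the page hash
			 * (munhash) before overwriting them, so that the
			 * new contents are not, presumably, shadowed by
			 * stale cached text pages.
			 */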
			count = howmany(bsize, CLBYTES);
			for (i = 0; i < count; i++)
				munhash(vp, bn + i * (CLBYTES / DEV_BSIZE));
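			/*
			 * A write covering the whole block can allocate a
			 * buffer with getblk() without reading the old
			 * contents; a partial write must read-modify-write
			 * through bread().
			 */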
			if (n == bsize)
				bp = getblk(vp, bn, bsize);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = MIN(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove(bp->b_un.b_addr + on, n, uio);
			if (n + on == bsize) {
				bp->b_flags |= B_AGE;
				bawrite(bp);
			} else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
spec_ioctl(vp, com, data, fflag, cred)
	struct vnode *vp;
	int com;
	caddr_t data;
	int fflag;
	struct ucred *cred;
{
	dev_t dev = vp->v_rdev;

	switch (vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data, fflag));

	case VBLK:
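		/*
		 * Command 0 with B_TAPE as the argument is evidently a
		 * kernel-internal probe: report whether the device's
		 * bdevsw entry is flagged as a tape (0 if so, 1 if not)
		 * without calling the driver.
		 */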
		if (com == 0 && (int)data == B_TAPE) {
			if (bdevsw[major(dev)].d_flags & B_TAPE)
				return (0);
			else
				return (1);
		}
		return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data, fflag));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
spec_select(vp, which, fflags, cred)
	struct vnode *vp;
	int which, fflags;
	struct ucred *cred;
{
	register dev_t dev;

	switch (vp->v_type) {

	default:
		return (1);		/* XXX */

	case VCHR:
		dev = vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, which);
	}
}

/*
 * Just call the device strategy routine
 */
spec_strategy(bp)
	register struct buf *bp;
{

	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
spec_bmap(vp, bn, vpp, bnp)
	struct vnode *vp;
	daddr_t bn;
	struct vnode **vpp;
	daddr_t *bnp;
{

	if (vpp != NULL)
		*vpp = vp;
	if (bnp != NULL)
		*bnp = bn;
	return (0);
}

/*
 * At the moment we do not do any locking.
 */
/* ARGSUSED */
spec_lock(vp)
	struct vnode *vp;
{

	return (0);
}

/* ARGSUSED */
spec_unlock(vp)
	struct vnode *vp;
{

	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
spec_close(vp, flag, cred)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;
{
	dev_t dev = vp->v_rdev;
	int (*cfunc)();
	int mode;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * If the vnode is locked, we are in the midst of
		 * forcibly closing the device; otherwise we only
		 * close on the last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		cfunc = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vflushbuf(vp, 0);
		if (vinvalbuf(vp, 1))
			return (0);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		cfunc = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*cfunc)(dev, flag, mode));
}

/*
 * Print out the contents of a special device vnode.
 */
spec_print(vp)
	struct vnode *vp;
{

	printf("tag VT_NON, dev %d, %d\n", major(vp->v_rdev),
		minor(vp->v_rdev));
}

/*
 * Special device failed operation
 */
spec_ebadf()
{

	return (EBADF);
}

/*
 * Special device bad operation
 */
spec_badop()
{

	panic("spec_badop called");
	/* NOTREACHED */
}

/*
 * Special device null operation
 */
spec_nullop()
{

	return (0);
}