/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/kern_physio.c,v 1.46.2.4 2003/11/14 09:51:47 simokawa Exp $
 * $DragonFly: src/sys/kern/kern_physio.c,v 1.26 2008/08/10 20:03:14 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/device.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

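/*
 * Biodone callback for the polled I/O below: mark the buffer done and
 * wake up the thread sleeping on the bio in physio().
 */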
static void
physwakeup(struct bio *bio)
{
	bio->bio_buf->b_cmd = BUF_CMD_DONE;
	wakeup(bio);
}

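/*
 * Perform direct ("raw") I/O between the caller's buffer and the device,
 * bypassing the buffer cache.  Each iovec is carved into chunks limited
 * by the pbuf's KVA size and the device's si_iosize_max.  User buffers
 * are normally mapped directly into the pbuf; insufficiently aligned
 * buffers are staged through a bounce buffer with copyin/copyout.
 */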
static int
physio(cdev_t dev, struct uio *uio, int ioflag)
{
	int i;
	int error;
	int chk_blockno;
	int saflags;
	int iolen;
	int bcount;
	int bounceit;
	caddr_t ubase;
	struct buf *bp;

	bp = getpbuf(NULL);
	saflags = bp->b_flags;
	error = 0;

	/* XXX: sanity check */
	if (dev->si_iosize_max < PAGE_SIZE) {
		kprintf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = DFLTPHYS;
	}

	/* Must be a real uio */
	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

	/*
	 * Don't check block number overflow for D_MEM.  (chk_blockno is
	 * computed here but is no longer consulted below in this revision.)
	 */
	if ((dev_dflags(dev) & D_TYPEMASK) == D_MEM)
		chk_blockno = 0;
	else
		chk_blockno = 1;

	for (i = 0; i < uio->uio_iovcnt; i++) {
		while (uio->uio_iov[i].iov_len) {
			if (uio->uio_rw == UIO_READ)
				bp->b_cmd = BUF_CMD_READ;
			else
				bp->b_cmd = BUF_CMD_WRITE;
			bp->b_flags = saflags;
			bcount = uio->uio_iov[i].iov_len;

			reinitbufbio(bp);	/* clear translation cache */
			bp->b_bio1.bio_offset = uio->uio_offset;
			bp->b_bio1.bio_done = physwakeup;

			/*
			 * Set up for mapping the request into kernel memory.
			 *
			 * We can only transfer as much as fits in a pbuf,
			 * which is MAXPHYS, and no more than the device's
			 * si_iosize_max limit.
			 *
			 * If not using bounce pages the base address of the
			 * user mapping into the pbuf may be offset, further
			 * reducing how much will actually fit in the pbuf.
			 */
			if (bcount > dev->si_iosize_max)
				bcount = dev->si_iosize_max;

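			/*
			 * A user base address that is not 16-byte aligned
			 * forces the bounce path.  For a direct mapping,
			 * iolen is the page offset of the base address,
			 * which can cost up to a page of usable pbuf KVA.
			 */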
			ubase = uio->uio_iov[i].iov_base;
			bounceit = (int)(((vm_offset_t)ubase) & 15);
			iolen = ((vm_offset_t)ubase) & PAGE_MASK;
			if (bounceit) {
				if (bcount > bp->b_kvasize)
					bcount = bp->b_kvasize;
			} else {
				if ((bcount + iolen) > bp->b_kvasize) {
					bcount = bp->b_kvasize;
					if (iolen != 0)
						bcount -= PAGE_SIZE;
				}
			}

			/*
			 * If we have to use a bounce buffer, allocate kernel
			 * memory and copyin/copyout.  Otherwise map the
			 * user buffer directly into kernel memory without
			 * copying.
			 */
			if (uio->uio_segflg == UIO_USERSPACE) {
				if (bounceit) {
					bp->b_data = bp->b_kvabase;
					bp->b_bcount = bcount;
					vm_hold_load_pages(bp,
					    (vm_offset_t)bp->b_data,
					    (vm_offset_t)bp->b_data + bcount);
					if (uio->uio_rw == UIO_WRITE) {
						error = copyin(ubase,
						    bp->b_data, bcount);
						if (error) {
							vm_hold_free_pages(bp,
							    (vm_offset_t)bp->b_data,
							    (vm_offset_t)bp->b_data + bcount);
							goto doerror;
						}
					}
				} else if (vmapbuf(bp, ubase, bcount) < 0) {
					error = EFAULT;
					goto doerror;
				}
			} else {
				bp->b_data = uio->uio_iov[i].iov_base;
				bp->b_bcount = bcount;
			}
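			/*
			 * Issue the I/O and sleep until physwakeup() marks
			 * the buffer done.  The critical section prevents
			 * a biodone-time wakeup from racing the check of
			 * b_cmd against going to sleep.
			 */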
			dev_dstrategy(dev, &bp->b_bio1);
			crit_enter();
			while (bp->b_cmd != BUF_CMD_DONE)
				tsleep(&bp->b_bio1, 0, "physstr", 0);
			crit_exit();

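			/*
			 * iolen is the number of bytes actually
			 * transferred.  Drain the bounce buffer on a read
			 * and release its pages, or tear down the direct
			 * user mapping.
			 */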
			iolen = bp->b_bcount - bp->b_resid;
			if (uio->uio_segflg == UIO_USERSPACE) {
				if (bounceit) {
					if (uio->uio_rw == UIO_READ && iolen) {
						error = copyout(bp->b_data,
						    ubase, iolen);
						if (error) {
							bp->b_flags |= B_ERROR;
							bp->b_error = error;
						}
					}
					vm_hold_free_pages(bp,
					    (vm_offset_t)bp->b_data,
					    (vm_offset_t)bp->b_data + bcount);
				} else {
					vunmapbuf(bp);
				}
			}
			if (iolen == 0 && !(bp->b_flags & B_ERROR))
				goto doerror;	/* EOF */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base += iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if (bp->b_flags & B_ERROR) {
				error = bp->b_error;
				goto doerror;
			}
		}
	}
doerror:
	relpbuf(bp, NULL);
	return (error);
}

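/*
 * Device read/write entry points.  Both directions funnel through
 * physio(); uio_rw in the passed uio selects the transfer direction.
 */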
int
physread(struct dev_read_args *ap)
{
	return (physio(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

int
physwrite(struct dev_write_args *ap)
{
	return (physio(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}