xref: /netbsd-src/sys/dev/cgd.c (revision 53d1339bf7f9c7367b35a9e1ebe693f9b047a47b)
1 /* $NetBSD: cgd.c,v 1.139 2020/08/01 02:15:49 riastradh Exp $ */
2 
3 /*-
4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Roland C. Dowdeswell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.139 2020/08/01 02:15:49 riastradh Exp $");
34 
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/buf.h>
38 #include <sys/bufq.h>
39 #include <sys/conf.h>
40 #include <sys/cpu.h>
41 #include <sys/device.h>
42 #include <sys/disk.h>
43 #include <sys/disklabel.h>
44 #include <sys/errno.h>
45 #include <sys/fcntl.h>
46 #include <sys/ioctl.h>
47 #include <sys/kmem.h>
48 #include <sys/module.h>
49 #include <sys/namei.h> /* for pathbuf */
50 #include <sys/pool.h>
51 #include <sys/proc.h>
52 #include <sys/syslog.h>
53 #include <sys/systm.h>
54 #include <sys/vnode.h>
55 #include <sys/workqueue.h>
56 
57 #include <dev/cgd_crypto.h>
58 #include <dev/cgdvar.h>
59 #include <dev/dkvar.h>
60 
61 #include <miscfs/specfs/specdev.h> /* for v_rdev */
62 
63 #include "ioconf.h"
64 
/*
 * One cipher self-test vector: algorithm name, key/plaintext/expected-
 * ciphertext triple, and the geometry (block size, sector size, block
 * number) under which the test is run by cgd_selftest().
 */
struct selftest_params {
	const char *alg;	/* cipher name, e.g. "aes-xts" */
	int encblkno8;		/* nonzero: exercise the encblkno8 IV
				 * variant -- NOTE(review): confirm against
				 * cgd_selftest(), not visible here */
	int blocksize;	/* number of bytes */
	int secsize;	/* sector size in bytes */
	daddr_t blkno;	/* sector number the vector was generated for */
	int keylen;	/* number of bits */
	int txtlen;	/* number of bytes */
	const uint8_t *key;
	const uint8_t *ptxt;	/* plaintext, txtlen bytes */
	const uint8_t *ctxt;	/* expected ciphertext, txtlen bytes */
};
77 
78 /* Entry Point Functions */
79 
80 static dev_type_open(cgdopen);
81 static dev_type_close(cgdclose);
82 static dev_type_read(cgdread);
83 static dev_type_write(cgdwrite);
84 static dev_type_ioctl(cgdioctl);
85 static dev_type_strategy(cgdstrategy);
86 static dev_type_dump(cgddump);
87 static dev_type_size(cgdsize);
88 
/*
 * Block device switch.  cgd is a disk-like device (D_DISK) and all
 * entry points are multiprocessor safe (D_MPSAFE).
 */
const struct bdevsw cgd_bdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_ioctl = cgdioctl,
	.d_dump = cgddump,
	.d_psize = cgdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};
99 
/*
 * Character device switch.  Raw reads/writes go through cgdread() and
 * cgdwrite(); everything tty/poll/mmap-related is stubbed out.
 */
const struct cdevsw cgd_cdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_read = cgdread,
	.d_write = cgdwrite,
	.d_ioctl = cgdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};
114 
/*
 * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
 * Plaintext half of the AES-XTS-256 self-test vector.
 */
static const uint8_t selftest_aes_xts_256_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};
128 
/*
 * Expected ciphertext for the AES-XTS-256 vector above (64 bytes).
 * This array was previously declared [512] although only 64 bytes are
 * ever initialized or compared (txtlen == sizeof(ptxt) == 64); the
 * size is fixed to [64] to match every other ctxt array in this file
 * and to stop wasting 448 zero bytes of rodata.
 */
static const uint8_t selftest_aes_xts_256_ctxt[64] = {
	0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
	0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
	0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
	0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
	0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
	0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
	0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
	0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
};
139 
/*
 * 256-bit AES-XTS key for the vector above.
 * NOTE(review): the array is 33 bytes for a 32-byte key; the trailing
 * NUL appears deliberate -- confirm consumers only read keylen/8 bytes.
 */
static const uint8_t selftest_aes_xts_256_key[33] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0
};

/*
 * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
 * Plaintext, expected ciphertext, and 512-bit key for the AES-XTS-512
 * self-test (key array again carries a trailing NUL byte).
 */
static const uint8_t selftest_aes_xts_512_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};

static const uint8_t selftest_aes_xts_512_ctxt[64] = {
	0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
	0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
	0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
	0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
	0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
	0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
	0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
	0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
};

static const uint8_t selftest_aes_xts_512_key[65] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
	0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
	0
};
184 
/*
 * AES-CBC self-test vectors.  The same 32-byte key buffer serves both
 * the 128-bit test (first 16 bytes) and the 256-bit test (all 32).
 */
static const uint8_t selftest_aes_cbc_key[32] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
};

static const uint8_t selftest_aes_cbc_128_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

static const uint8_t selftest_aes_cbc_128_ctxt[64] = { /* blkno=1 */
	0x93, 0x94, 0x56, 0x36, 0x83, 0xbc, 0xff, 0xa4,
	0xe0, 0x24, 0x34, 0x12, 0xbe, 0xfa, 0xb0, 0x7d,
	0x88, 0x1e, 0xc5, 0x57, 0x55, 0x23, 0x05, 0x0c,
	0x69, 0xa5, 0xc1, 0xda, 0x64, 0xee, 0x74, 0x10,
	0xc2, 0xc5, 0xe6, 0x66, 0xd6, 0xa7, 0x49, 0x1c,
	0x9d, 0x40, 0xb5, 0x0c, 0x9b, 0x6e, 0x1c, 0xe6,
	0xb1, 0x7a, 0x1c, 0xe7, 0x5a, 0xfe, 0xf9, 0x2a,
	0x78, 0xfa, 0xb7, 0x7b, 0x08, 0xdf, 0x8e, 0x51,
};

static const uint8_t selftest_aes_cbc_256_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};

static const uint8_t selftest_aes_cbc_256_ctxt[64] = { /* blkno=0xffff */
	0x6c, 0xa3, 0x15, 0x17, 0x51, 0x90, 0xe9, 0x69,
	0x08, 0x36, 0x7b, 0xa6, 0xbb, 0xd1, 0x0b, 0x9e,
	0xcd, 0x6b, 0x1e, 0xaf, 0xb6, 0x2e, 0x62, 0x7d,
	0x8e, 0xde, 0xf0, 0xed, 0x0d, 0x44, 0xe7, 0x31,
	0x26, 0xcf, 0xd5, 0x0b, 0x3e, 0x95, 0x59, 0x89,
	0xdf, 0x5d, 0xd6, 0x9a, 0x00, 0x66, 0xcc, 0x7f,
	0x45, 0xd3, 0x06, 0x58, 0xed, 0xef, 0x49, 0x47,
	0x87, 0x89, 0x17, 0x7d, 0x08, 0x56, 0x50, 0xe1,
};
235 
/*
 * 3DES-CBC self-test vector (192-bit key including parity bits),
 * run at blkno 1.
 */
static const uint8_t selftest_3des_cbc_key[24] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
};

static const uint8_t selftest_3des_cbc_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

static const uint8_t selftest_3des_cbc_ctxt[64] = {
	0xa2, 0xfe, 0x81, 0xaa, 0x10, 0x6c, 0xea, 0xb9,
	0x11, 0x58, 0x1f, 0x29, 0xb5, 0x86, 0x71, 0x56,
	0xe9, 0x25, 0x1d, 0x07, 0xb1, 0x69, 0x59, 0x6c,
	0x96, 0x80, 0xf7, 0x54, 0x38, 0xaa, 0xa7, 0xe4,
	0xe8, 0x81, 0xf5, 0x00, 0xbb, 0x1c, 0x00, 0x3c,
	0xba, 0x38, 0x45, 0x97, 0x4c, 0xcf, 0x84, 0x14,
	0x46, 0x86, 0xd9, 0xf4, 0xc5, 0xe2, 0xf0, 0x54,
	0xde, 0x41, 0xf6, 0xa1, 0xef, 0x1b, 0x0a, 0xea,
};
263 
/*
 * Blowfish-CBC self-test vector (448-bit key), run at blkno 1.
 */
static const uint8_t selftest_bf_cbc_key[56] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
};

static const uint8_t selftest_bf_cbc_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

static const uint8_t selftest_bf_cbc_ctxt[64] = {
	0xec, 0xa2, 0xc0, 0x0e, 0xa9, 0x7f, 0x04, 0x1e,
	0x2e, 0x4f, 0x64, 0x07, 0x67, 0x3e, 0xf4, 0x58,
	0x61, 0x5f, 0xd3, 0x50, 0x5e, 0xd3, 0x4d, 0x34,
	0xa0, 0x53, 0xbe, 0x47, 0x75, 0x69, 0x3b, 0x1f,
	0x86, 0xf2, 0xae, 0x8b, 0xb7, 0x91, 0xda, 0xd4,
	0x2b, 0xa5, 0x47, 0x9b, 0x7d, 0x13, 0x30, 0xdd,
	0x7b, 0xad, 0x86, 0x57, 0x51, 0x11, 0x74, 0x42,
	0xb8, 0xbf, 0x69, 0x17, 0x20, 0x0a, 0xf7, 0xda,
};
295 
/*
 * AES-CBC encblkno8 self-test vector: an all-zero 64-byte buffer is
 * used as both the 128-bit key (first 16 bytes) and the plaintext.
 * The array is zero-initialized implicitly (static storage).
 */
static const uint8_t selftest_aes_cbc_encblkno8_zero64[64];
static const uint8_t selftest_aes_cbc_encblkno8_ctxt[64] = {
	0xa2, 0x06, 0x26, 0x26, 0xac, 0xdc, 0xe7, 0xcf,
	0x47, 0x68, 0x24, 0x0e, 0xfa, 0x40, 0x44, 0x83,
	0x07, 0xe1, 0xf4, 0x5d, 0x53, 0x47, 0xa0, 0xfe,
	0xc0, 0x6e, 0x4e, 0xf8, 0x9d, 0x98, 0x63, 0xb8,
	0x2c, 0x27, 0xfa, 0x3a, 0xd5, 0x40, 0xda, 0xdb,
	0xe6, 0xc3, 0xe4, 0xfb, 0x85, 0x53, 0xfb, 0x78,
	0x5d, 0xbd, 0x8f, 0x4c, 0x1a, 0x04, 0x9c, 0x88,
	0x85, 0xec, 0x3c, 0x56, 0x46, 0x1a, 0x6e, 0xf5,
};
307 
/*
 * Table of self-test vectors run by cgd_selftest() at attach time.
 * NOTE(review): not static -- confirm no other translation unit
 * references this table before narrowing its linkage.
 */
const struct selftest_params selftests[] = {
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_xts_256_ptxt),
		.key  = selftest_aes_xts_256_key,
		.ptxt = selftest_aes_xts_256_ptxt,
		.ctxt = selftest_aes_xts_256_ctxt
	},
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 512,
		.txtlen = sizeof(selftest_aes_xts_512_ptxt),
		.key  = selftest_aes_xts_512_key,
		.ptxt = selftest_aes_xts_512_ptxt,
		.ctxt = selftest_aes_xts_512_ctxt
	},
	{
		.alg = "aes-cbc",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 128,
		.txtlen = sizeof(selftest_aes_cbc_128_ptxt),
		.key  = selftest_aes_cbc_key,
		.ptxt = selftest_aes_cbc_128_ptxt,
		.ctxt = selftest_aes_cbc_128_ctxt,
	},
	{
		.alg = "aes-cbc",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_cbc_256_ptxt),
		.key  = selftest_aes_cbc_key,
		.ptxt = selftest_aes_cbc_256_ptxt,
		.ctxt = selftest_aes_cbc_256_ctxt,
	},
	{
		.alg = "3des-cbc",
		.blocksize = 8,
		.secsize = 512,
		.blkno = 1,
		.keylen = 192,	/* 168 + 3*8 parity bits */
		.txtlen = sizeof(selftest_3des_cbc_ptxt),
		.key  = selftest_3des_cbc_key,
		.ptxt = selftest_3des_cbc_ptxt,
		.ctxt = selftest_3des_cbc_ctxt,
	},
	{
		.alg = "blowfish-cbc",
		.blocksize = 8,
		.secsize = 512,
		.blkno = 1,
		.keylen = 448,
		.txtlen = sizeof(selftest_bf_cbc_ptxt),
		.key  = selftest_bf_cbc_key,
		.ptxt = selftest_bf_cbc_ptxt,
		.ctxt = selftest_bf_cbc_ctxt,
	},
	{
		/* Zero key and zero plaintext, encblkno8 IV variant. */
		.alg = "aes-cbc",
		.encblkno8 = 1,
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0,
		.keylen = 128,
		.txtlen = sizeof(selftest_aes_cbc_encblkno8_zero64),
		.key = selftest_aes_cbc_encblkno8_zero64,
		.ptxt = selftest_aes_cbc_encblkno8_zero64,
		.ctxt = selftest_aes_cbc_encblkno8_ctxt,
	},
};
388 
389 static int cgd_match(device_t, cfdata_t, void *);
390 static void cgd_attach(device_t, device_t, void *);
391 static int cgd_detach(device_t, int);
392 static struct cgd_softc	*cgd_spawn(int);
393 static struct cgd_worker *cgd_create_one_worker(void);
394 static void cgd_destroy_one_worker(struct cgd_worker *);
395 static struct cgd_worker *cgd_create_worker(void);
396 static void cgd_destroy_worker(struct cgd_worker *);
397 static int cgd_destroy(device_t);
398 
399 /* Internal Functions */
400 
401 static int	cgd_diskstart(device_t, struct buf *);
402 static void	cgd_diskstart2(struct cgd_softc *, struct cgd_xfer *);
403 static void	cgdiodone(struct buf *);
404 static void	cgd_iodone2(struct cgd_softc *, struct cgd_xfer *);
405 static void	cgd_enqueue(struct cgd_softc *, struct cgd_xfer *);
406 static void	cgd_process(struct work *, void *);
407 static int	cgd_dumpblocks(device_t, void *, daddr_t, int);
408 
409 static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
410 static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
411 static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
412 static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
413 			struct lwp *);
414 static void	cgd_cipher(struct cgd_softc *, void *, const void *,
415 			   size_t, daddr_t, size_t, int);
416 
417 static void	cgd_selftest(void);
418 
419 static const struct dkdriver cgddkdriver = {
420         .d_minphys  = minphys,
421         .d_open = cgdopen,
422         .d_close = cgdclose,
423         .d_strategy = cgdstrategy,
424         .d_iosize = NULL,
425         .d_diskstart = cgd_diskstart,
426         .d_dumpblocks = cgd_dumpblocks,
427         .d_lastclose = NULL
428 };
429 
430 CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
431     cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
432 
433 /* DIAGNOSTIC and DEBUG definitions */
434 
435 #if defined(CGDDEBUG) && !defined(DEBUG)
436 #define DEBUG
437 #endif
438 
439 #ifdef DEBUG
440 int cgddebug = 0;
441 
442 #define CGDB_FOLLOW	0x1
443 #define CGDB_IO	0x2
444 #define CGDB_CRYPTO	0x4
445 
446 #define IFDEBUG(x,y)		if (cgddebug & (x)) y
447 #define DPRINTF(x,y)		IFDEBUG(x, printf y)
448 #define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)
449 
450 static void	hexprint(const char *, void *, int);
451 
452 #else
453 #define IFDEBUG(x,y)
454 #define DPRINTF(x,y)
455 #define DPRINTF_FOLLOW(y)
456 #endif
457 
458 /* Global variables */
459 
460 static kmutex_t cgd_spawning_mtx;
461 static kcondvar_t cgd_spawning_cv;
462 static bool cgd_spawning;
463 static struct cgd_worker *cgd_worker;
464 static u_int cgd_refcnt;	/* number of users of cgd_worker */
465 
466 /* Utility Functions */
467 
468 #define CGDUNIT(x)		DISKUNIT(x)
469 
470 /* The code */
471 
472 static int
473 cgd_lock(bool intr)
474 {
475 	int error = 0;
476 
477 	mutex_enter(&cgd_spawning_mtx);
478 	while (cgd_spawning) {
479 		if (intr)
480 			error = cv_wait_sig(&cgd_spawning_cv, &cgd_spawning_mtx);
481 		else
482 			cv_wait(&cgd_spawning_cv, &cgd_spawning_mtx);
483 	}
484 	if (error == 0)
485 		cgd_spawning = true;
486 	mutex_exit(&cgd_spawning_mtx);
487 	return error;
488 }
489 
/*
 * Release the "spawning" token taken by cgd_lock() and wake all
 * waiters.
 */
static void
cgd_unlock(void)
{
	mutex_enter(&cgd_spawning_mtx);
	cgd_spawning = false;
	cv_broadcast(&cgd_spawning_cv);
	mutex_exit(&cgd_spawning_mtx);
}
498 
/*
 * Map a dev_t to the unit's softc, or NULL if the unit does not exist.
 */
static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	return device_lookup_private(&cgd_cd, CGDUNIT(dev));
}
504 
/*
 * Autoconf match: cgd is a pseudo-device, so it always matches.
 */
static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}
511 
/*
 * Autoconf attach: initialize per-instance locking, dk(4) framework
 * state and the disk structure, then register (empty) pmf(9) hooks.
 */
static void
cgd_attach(device_t parent, device_t self, void *aux)
{
	struct cgd_softc *sc = device_private(self);

	/* sc_lock is taken from the I/O completion path, hence IPL_BIO. */
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_cv, "cgdcv");
	dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self,
		    "unable to register power management hooks\n");
}
526 
527 
/*
 * Autoconf detach: refuse while the raw partition is open; if the
 * device is still configured, unconfigure it via cgd_ioctl_clr()
 * first, then tear down the per-instance state created in
 * cgd_attach().  Returns 0 or an errno.
 */
static int
cgd_detach(device_t self, int flags)
{
	int ret;
	const int pmask = 1 << RAW_PART;
	struct cgd_softc *sc = device_private(self);
	struct dk_softc *dksc = &sc->sc_dksc;

	if (DK_BUSY(dksc, pmask))
		return EBUSY;

	if (DK_ATTACHED(dksc) &&
	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
		return ret;

	disk_destroy(&dksc->sc_dkdev);
	cv_destroy(&sc->sc_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}
549 
/*
 * Pseudo-device attach hook.  In the built-in (non-module) case,
 * initialize the spawning lock and register the cfattach; in both
 * cases run the cipher self-tests.
 * NOTE(review): in the _MODULE case the mutex/cv are presumably set
 * up by the module init path -- confirm.
 */
void
cgdattach(int num)
{
#ifndef _MODULE
	int error;

	mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&cgd_spawning_cv, "cgspwn");

	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	if (error != 0)
		aprint_error("%s: unable to register cfattach\n",
		    cgd_cd.cd_name);
#endif

	cgd_selftest();
}
567 
568 static struct cgd_softc *
569 cgd_spawn(int unit)
570 {
571 	cfdata_t cf;
572 	struct cgd_worker *cw;
573 	struct cgd_softc *sc;
574 
575 	cf = kmem_alloc(sizeof(*cf), KM_SLEEP);
576 	cf->cf_name = cgd_cd.cd_name;
577 	cf->cf_atname = cgd_cd.cd_name;
578 	cf->cf_unit = unit;
579 	cf->cf_fstate = FSTATE_STAR;
580 
581 	cw = cgd_create_one_worker();
582 	if (cw == NULL) {
583 		kmem_free(cf, sizeof(*cf));
584 		return NULL;
585 	}
586 
587 	sc = device_private(config_attach_pseudo(cf));
588 	if (sc == NULL) {
589 		cgd_destroy_one_worker(cw);
590 		return NULL;
591 	}
592 
593 	sc->sc_worker = cw;
594 
595 	return sc;
596 }
597 
/*
 * Destroy a spawned instance: detach the device, drop our reference
 * on the shared worker, and free the cfdata allocated in cgd_spawn().
 * Returns 0 or the config_detach() error (in which case nothing is
 * freed).  Caller must hold the spawning token.
 */
static int
cgd_destroy(device_t dev)
{
	struct cgd_softc *sc = device_private(dev);
	struct cgd_worker *cw = sc->sc_worker;
	cfdata_t cf;
	int error;

	/* Grab cf before detach; sc is gone once config_detach() returns. */
	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error)
		return error;

	cgd_destroy_one_worker(cw);

	kmem_free(cf, sizeof(*cf));
	return 0;
}
616 
/*
 * Mark the instance busy for ioctl processing, sleeping until any
 * other ioctl in progress finishes.  Paired with cgd_unbusy().
 */
static void
cgd_busy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	while (sc->sc_busy)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	sc->sc_busy = true;
	mutex_exit(&sc->sc_lock);
}
627 
/*
 * Clear the busy flag set by cgd_busy() and wake all waiters.
 */
static void
cgd_unbusy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	sc->sc_busy = false;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_lock);
}
637 
638 static struct cgd_worker *
639 cgd_create_one_worker(void)
640 {
641 	KASSERT(cgd_spawning);
642 
643 	if (cgd_refcnt++ == 0) {
644 		KASSERT(cgd_worker == NULL);
645 		cgd_worker = cgd_create_worker();
646 	}
647 
648 	KASSERT(cgd_worker != NULL);
649 	return cgd_worker;
650 }
651 
/*
 * Drop a reference on the shared worker; the last reference destroys
 * it.  Caller must hold the spawning token.
 */
static void
cgd_destroy_one_worker(struct cgd_worker *cw)
{
	KASSERT(cgd_spawning);
	KASSERT(cw == cgd_worker);

	if (--cgd_refcnt == 0) {
		cgd_destroy_worker(cgd_worker);
		cgd_worker = NULL;
	}
}
663 
/*
 * Create the shared crypto worker: a per-CPU, MP-safe workqueue run
 * with FPU access (WQ_FPU, needed by the AES implementations), plus a
 * pool of cgd_xfer transfer contexts.  Returns NULL if the workqueue
 * cannot be created.
 */
static struct cgd_worker *
cgd_create_worker(void)
{
	struct cgd_worker *cw;
	struct workqueue *wq;
	struct pool *cp;
	int error;

	cw = kmem_alloc(sizeof(struct cgd_worker), KM_SLEEP);
	cp = kmem_alloc(sizeof(struct pool), KM_SLEEP);

	error = workqueue_create(&wq, "cgd", cgd_process, NULL,
	    PRI_BIO, IPL_BIO, WQ_FPU|WQ_MPSAFE|WQ_PERCPU);
	if (error) {
		kmem_free(cp, sizeof(struct pool));
		kmem_free(cw, sizeof(struct cgd_worker));
		return NULL;
	}

	cw->cw_cpool = cp;
	cw->cw_wq = wq;
	pool_init(cw->cw_cpool, sizeof(struct cgd_xfer), 0,
	    0, 0, "cgdcpl", NULL, IPL_BIO);
	mutex_init(&cw->cw_lock, MUTEX_DEFAULT, IPL_BIO);

	return cw;
}
691 
/*
 * Tear down the shared worker created by cgd_create_worker(): the
 * lock, the xfer pool and the workqueue, then the worker itself.
 */
static void
cgd_destroy_worker(struct cgd_worker *cw)
{
	mutex_destroy(&cw->cw_lock);

	if (cw->cw_cpool) {
		pool_destroy(cw->cw_cpool);
		kmem_free(cw->cw_cpool, sizeof(struct pool));
	}
	if (cw->cw_wq)
		workqueue_destroy(cw->cw_wq);

	kmem_free(cw, sizeof(struct cgd_worker));
}
706 
/*
 * Open entry point (block and character).  Spawns (clones) a new
 * pseudo-device instance on first open of a nonexistent unit, then
 * defers to dk_open().  Returns ENXIO if the instance cannot be
 * created, or an error from cgd_lock()/dk_open().
 */
static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct	cgd_softc *sc;
	int error;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));

	/* Interruptible: open is a user-initiated operation. */
	error = cgd_lock(true);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL)
		sc = cgd_spawn(CGDUNIT(dev));
	cgd_unlock();
	if (sc == NULL)
		return ENXIO;

	return dk_open(&sc->sc_dksc, dev, flags, fmt, l);
}
727 
/*
 * Close entry point.  After the last close of a unit that is no
 * longer configured, the pseudo-device instance itself is destroyed
 * (undoing the clone made in cgdopen()).
 */
static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct	cgd_softc *sc;
	struct	dk_softc *dksc;
	int error;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));

	/* Not interruptible: close must not fail with EINTR. */
	error = cgd_lock(false);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL) {
		error = ENXIO;
		goto done;
	}

	dksc = &sc->sc_dksc;
	if ((error =  dk_close(dksc, dev, flags, fmt, l)) != 0)
		goto done;

	/* Unconfigured and fully closed: reap the instance. */
	if (!DK_ATTACHED(dksc)) {
		if ((error = cgd_destroy(sc->sc_dksc.sc_dev)) != 0) {
			device_printf(dksc->sc_dev,
			    "unable to detach instance\n");
			goto done;
		}
	}

done:
	cgd_unlock();

	return error;
}
763 
/*
 * Strategy entry point: queue a buffer through the dk(4) framework.
 * Buffers whose data address is not 4-byte aligned are rejected with
 * EINVAL (presumably a requirement of the cipher code -- TODO confirm).
 * NOTE(review): sc is not checked for NULL; this relies on strategy
 * only being called on an open (hence existing) unit -- confirm.
 */
static void
cgdstrategy(struct buf *bp)
{
	struct	cgd_softc *sc = getcgd_softc(bp->b_dev);

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));

	/*
	 * Reject unaligned writes.
	 */
	if (((uintptr_t)bp->b_data & 3) != 0) {
		bp->b_error = EINVAL;
		goto bail;
	}

	dk_strategy(&sc->sc_dksc, bp);
	return;

bail:
	/* Nothing transferred; complete the buffer with the error set. */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
	return;
}
788 
789 static int
790 cgdsize(dev_t dev)
791 {
792 	struct cgd_softc *sc = getcgd_softc(dev);
793 
794 	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
795 	if (!sc)
796 		return -1;
797 	return dk_size(&sc->sc_dksc, dev);
798 }
799 
800 /*
801  * cgd_{get,put}data are functions that deal with getting a buffer
802  * for the new encrypted data.
803  * We can no longer have a buffer per device, we need a buffer per
804  * work queue...
805  */
806 
807 static void *
808 cgd_getdata(struct cgd_softc *sc, unsigned long size)
809 {
810 	void *data = NULL;
811 
812 	mutex_enter(&sc->sc_lock);
813 	if (!sc->sc_data_used) {
814 		sc->sc_data_used = true;
815 		data = sc->sc_data;
816 	}
817 	mutex_exit(&sc->sc_lock);
818 
819 	if (data)
820 		return data;
821 
822 	return kmem_intr_alloc(size, KM_NOSLEEP);
823 }
824 
825 static void
826 cgd_putdata(struct cgd_softc *sc, void *data, unsigned long size)
827 {
828 
829 	if (data == sc->sc_data) {
830 		mutex_enter(&sc->sc_lock);
831 		sc->sc_data_used = false;
832 		mutex_exit(&sc->sc_lock);
833 	} else
834 		kmem_intr_free(data, size);
835 }
836 
/*
 * dk(4) start routine: begin one transfer.  All resources (shadow
 * buffer, xfer context, and for writes a ciphertext buffer) are
 * acquired up front; EAGAIN tells the framework to requeue the buffer
 * and retry later.  Writes are encrypted asynchronously on the worker
 * queue before being issued; reads are issued immediately and
 * decrypted in cgdiodone().
 */
static int
cgd_diskstart(device_t dev, struct buf *bp)
{
	struct	cgd_softc *sc = device_private(dev);
	struct	cgd_worker *cw = sc->sc_worker;
	struct	dk_softc *dksc = &sc->sc_dksc;
	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct	cgd_xfer *cx;
	struct	buf *nbp;
	void *	newaddr;
	daddr_t	bn;

	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(sc->sc_tvn, false);
	if (nbp == NULL)
		return EAGAIN;

	cx = pool_get(cw->cw_cpool, PR_NOWAIT);
	if (cx == NULL) {
		putiobuf(nbp);
		return EAGAIN;
	}

	cx->cx_sc = sc;
	cx->cx_obp = bp;
	cx->cx_nbp = nbp;
	cx->cx_srcv = cx->cx_dstv = bp->b_data;
	cx->cx_blkno = bn;
	cx->cx_secsize = dg->dg_secsize;

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.
	 */
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(sc, bp->b_bcount);
		if (!newaddr) {
			pool_put(cw->cw_cpool, cx);
			putiobuf(nbp);
			return EAGAIN;
		}

		cx->cx_dstv = newaddr;
		cx->cx_len = bp->b_bcount;
		cx->cx_dir = CGD_CIPHER_ENCRYPT;

		/* Encrypt on the worker; cgd_diskstart2() runs after. */
		cgd_enqueue(sc, cx);
		return 0;
	}

	/* Read: no cipher work needed before issuing the I/O. */
	cgd_diskstart2(sc, cx);
	return 0;
}
897 
/*
 * Second half of starting a transfer: fill in the shadow buffer
 * (pointing at the ciphertext for writes, the caller's data for
 * reads) and issue it to the underlying vnode.  For writes the
 * vnode's output counter must be bumped, as for any async write.
 */
static void
cgd_diskstart2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct	vnode *vp;
	struct	buf *bp;
	struct	buf *nbp;

	bp = cx->cx_obp;
	nbp = cx->cx_nbp;

	nbp->b_data = cx->cx_dstv;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	/* Translate the sector number into DEV_BSIZE units. */
	nbp->b_blkno = btodb(cx->cx_blkno * cx->cx_secsize);
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = cx;

	BIO_COPYPRIO(nbp, bp);

	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
	VOP_STRATEGY(sc->sc_tvn, nbp);
}
928 
/*
 * I/O completion for the shadow buffer issued in cgd_diskstart2().
 * Errors are propagated to the original buffer.  Completed reads are
 * handed to the worker queue for decryption (finished later in
 * cgd_iodone2()); writes go straight to cgd_iodone2().
 */
static void
cgdiodone(struct buf *nbp)
{
	struct	cgd_xfer *cx = nbp->b_private;
	struct	buf *obp = cx->cx_obp;
	struct	cgd_softc *sc = getcgd_softc(obp->b_dev);
	struct	dk_softc *dksc = &sc->sc_dksc;
	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	daddr_t	bn;

	KDASSERT(sc);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
		nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 *       we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ) {
		/* Convert back from DEV_BSIZE units to sectors. */
		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;

		/* Decrypt in place in the caller's buffer. */
		cx->cx_obp     = obp;
		cx->cx_nbp     = nbp;
		cx->cx_dstv    = obp->b_data;
		cx->cx_srcv    = obp->b_data;
		cx->cx_len     = obp->b_bcount;
		cx->cx_blkno   = bn;
		cx->cx_secsize = dg->dg_secsize;
		cx->cx_dir     = CGD_CIPHER_DECRYPT;

		cgd_enqueue(sc, cx);
		return;
	}

	cgd_iodone2(sc, cx);
}
977 
/*
 * Final completion: release the xfer context, the ciphertext buffer
 * (writes only) and the shadow buffer, then report the result to the
 * dk(4) framework and kick the queue for the next transfer.
 */
static void
cgd_iodone2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct buf *obp = cx->cx_obp;
	struct buf *nbp = cx->cx_nbp;
	struct dk_softc *dksc = &sc->sc_dksc;

	pool_put(cw->cw_cpool, cx);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(sc, nbp->b_data, nbp->b_bcount);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;

	dk_done(dksc, obp);
	dk_start(dksc, NULL);
}
1002 
/*
 * Crash-dump entry point: encrypt the caller's memory into a scratch
 * buffer and hand it to the underlying device's dump routine.
 * Runs in dump context, so only non-sleeping allocation is available.
 */
static int
cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct cgd_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	size_t nbytes, blksize;
	void *buf;
	int error;

	/*
	 * dk_dump gives us units of disklabel sectors.  Everything
	 * else in cgd uses units of diskgeom sectors.  These had
	 * better agree; otherwise we need to figure out how to convert
	 * between them.
	 */
	KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
	    "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
	    dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
	blksize = dg->dg_secsize;

	/*
	 * Compute the number of bytes in this request, which dk_dump
	 * has `helpfully' converted to a number of blocks for us.
	 */
	nbytes = nblk*blksize;

	/* Try to acquire a buffer to store the ciphertext.  */
	buf = cgd_getdata(sc, nbytes);
	if (buf == NULL)
		/* Out of memory: give up.  */
		return ENOMEM;

	/* Encrypt the caller's data into the temporary buffer.  */
	cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);

	/* Pass it on to the underlying disk device.  */
	error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);

	/* Release the buffer.  */
	cgd_putdata(sc, buf, nbytes);

	/* Return any error from the underlying disk device.  */
	return error;
}
1048 
1049 /* XXX: we should probably put these into dksubr.c, mostly */
1050 static int
1051 cgdread(dev_t dev, struct uio *uio, int flags)
1052 {
1053 	struct	cgd_softc *sc;
1054 	struct	dk_softc *dksc;
1055 
1056 	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
1057 	    (unsigned long long)dev, uio, flags));
1058 	sc = getcgd_softc(dev);
1059 	if (sc == NULL)
1060 		return ENXIO;
1061 	dksc = &sc->sc_dksc;
1062 	if (!DK_ATTACHED(dksc))
1063 		return ENXIO;
1064 	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
1065 }
1066 
/* XXX: we should probably put these into dksubr.c, mostly */
/*
 * Raw character-device write: bounce through physio() into the
 * strategy routine.  ENXIO if the unit does not exist or is not
 * configured.
 */
static int
cgdwrite(dev_t dev, struct uio *uio, int flags)
{
	struct	cgd_softc *sc;
	struct	dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
	sc = getcgd_softc(dev);
	if (sc == NULL)
		return ENXIO;
	dksc = &sc->sc_dksc;
	if (!DK_ATTACHED(dksc))
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
}
1083 
static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct	cgd_softc *sc;
	struct	dk_softc *dksc;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;
	int	error;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	/*
	 * First pass: permission checks and softc lookup.  CGDIOCGET is
	 * handled entirely here because it must work even on units that
	 * are not yet configured; everything else needs a valid softc.
	 */
	switch (cmd) {
	case CGDIOCGET:
		return cgd_ioctl_get(dev, data, l);
	case CGDIOCSET:
	case CGDIOCCLR:
		/* Configure/unconfigure require the device open writable. */
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		sc = getcgd_softc(dev);
		if (sc == NULL)
			return ENXIO;
		dksc = &sc->sc_dksc;
		break;
	}

	/* Second pass: perform the operation. */
	switch (cmd) {
	case CGDIOCSET:
		/* Configure: only allowed when not already attached. */
		cgd_busy(sc);
		if (DK_ATTACHED(dksc))
			error = EBUSY;
		else
			error = cgd_ioctl_set(sc, data, l);
		cgd_unbusy(sc);
		break;
	case CGDIOCCLR:
		/* Unconfigure: only allowed when no partition is in use. */
		cgd_busy(sc);
		if (DK_BUSY(&sc->sc_dksc, pmask))
			error = EBUSY;
		else
			error = cgd_ioctl_clr(sc, l);
		cgd_unbusy(sc);
		break;
	case DIOCGCACHE:
	case DIOCCACHESYNC:
		cgd_busy(sc);
		if (!DK_ATTACHED(dksc)) {
			cgd_unbusy(sc);
			error = ENOENT;
			break;
		}
		/*
		 * We pass this call down to the underlying disk.
		 */
		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
		cgd_unbusy(sc);
		break;
	case DIOCGSECTORALIGN: {
		struct disk_sectoralign *dsa = data;

		cgd_busy(sc);
		if (!DK_ATTACHED(dksc)) {
			cgd_unbusy(sc);
			error = ENOENT;
			break;
		}

		/* Get the underlying disk's sector alignment.  */
		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
		if (error) {
			cgd_unbusy(sc);
			break;
		}

		/* Adjust for the disklabel partition if necessary.  */
		if (part != RAW_PART) {
			struct disklabel *lp = dksc->sc_dkdev.dk_label;
			daddr_t offset = lp->d_partitions[part].p_offset;
			uint32_t r = offset % dsa->dsa_alignment;

			/* Shift the first-aligned sector by the partition
			 * offset's remainder modulo the alignment. */
			if (r < dsa->dsa_firstaligned)
				dsa->dsa_firstaligned = dsa->dsa_firstaligned
				    - r;
			else
				dsa->dsa_firstaligned = (dsa->dsa_firstaligned
				    + dsa->dsa_alignment) - r;
		}
		cgd_unbusy(sc);
		break;
	}
	case DIOCGSTRATEGY:
	case DIOCSSTRATEGY:
		if (!DK_ATTACHED(dksc)) {
			error = ENOENT;
			break;
		}
		/*FALLTHROUGH*/
	default:
		/* Everything else is handled by the generic dk layer. */
		error = dk_ioctl(dksc, dev, cmd, data, flag, l);
		break;
	case CGDIOCGET:
		/* Already fully handled by the first switch: unreachable. */
		KASSERT(0);
		error = EINVAL;
	}

	return error;
}
1193 
1194 static int
1195 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
1196 {
1197 	struct	cgd_softc *sc;
1198 
1199 	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
1200 	    dev, blkno, va, (unsigned long)size));
1201 	sc = getcgd_softc(dev);
1202 	if (sc == NULL)
1203 		return ENXIO;
1204 	return dk_dump(&sc->sc_dksc, dev, blkno, va, size, DK_DUMP_RECURSIVE);
1205 }
1206 
/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

/*
 * IV-method names accepted from userland.  "encblkno"/"encblkno8"
 * select the historical mistaken encblkno8 mode (see cgd_cipher and
 * the warning in cgd_ioctl_set); "encblkno1" is the intended one.
 * The divisor `d' converts blocksizes that were historically
 * expressed in bits (see the comment in cgd_ioctl_set).
 */
static const struct {
	const char *n;	/* method name from userland */
	int v;		/* corresponding cf_mode value */
	int d;		/* blocksize divisor applied in cgd_ioctl_set */
} encblkno[] = {
	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};
1222 
/*
 * cgd_ioctl_set: configure a cgd unit.  Opens the underlying block
 * device named by the user, copies in the algorithm name, IV method,
 * and key, initializes the cipher, and attaches the disk.  On any
 * failure the key buffer is freed and the target vnode is closed.
 */
/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *sc, void *data, struct lwp *l)
{
	struct	 cgd_ioctl *ci = data;
	struct	 vnode *vp;
	int	 ret;
	size_t	 i;
	size_t	 keybytes;			/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char	 *inbuf;
	struct dk_softc *dksc = &sc->sc_dksc;

	cp = ci->ci_disk;

	/* Open the underlying block device by its user-supplied path. */
	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	ret = vn_bdev_openpath(pb, &vp, l);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

	/* Scratch buffer reused for the alg name, IV method, and key. */
	inbuf = kmem_alloc(MAX_KEYSIZE, KM_SLEEP);

	if ((ret = cgdinit(sc, cp, vp, l)) != 0)
		goto bail;

	/* Look up the cipher by the user-supplied algorithm name. */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	sc->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!sc->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	/* Match the IV method name against the encblkno table. */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	/* ci_keylen is in bits; +1 byte of slack for a partial byte. */
	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	/* Copy in the key material itself. */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	sc->sc_cdata.cf_blocksize = ci->ci_blocksize;
	sc->sc_cdata.cf_mode = encblkno[i].v;

	/*
	 * Print a warning if the user selected the legacy encblkno8
	 * mistake, and reject it altogether for ciphers that it
	 * doesn't apply to.
	 */
	if (encblkno[i].v != CGD_CIPHER_CBC_ENCBLKNO1) {
		if (strcmp(sc->sc_cfuncs->cf_name, "aes-cbc") &&
		    strcmp(sc->sc_cfuncs->cf_name, "3des-cbc") &&
		    strcmp(sc->sc_cfuncs->cf_name, "blowfish-cbc")) {
			log(LOG_WARNING, "cgd: %s only makes sense for cbc,"
			    " not for %s; ignoring\n",
			    encblkno[i].n, sc->sc_cfuncs->cf_name);
			sc->sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
		} else {
			log(LOG_WARNING, "cgd: enabling legacy encblkno8\n");
		}
	}

	/* Initialize the cipher; cf_init may rewrite cf_blocksize. */
	sc->sc_cdata.cf_keylen = ci->ci_keylen;
	sc->sc_cdata.cf_priv = sc->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &sc->sc_cdata.cf_blocksize);
	if (sc->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
	    log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		sc->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
	    sc->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	sc->sc_cdata.cf_blocksize /= encblkno[i].d;
	/* Zeroize the key material; plain memset could be optimized away. */
	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
	if (!sc->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	kmem_free(inbuf, MAX_KEYSIZE);

	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);

	/* Staging buffer for ciphertext (see cgd_getdata/cgd_putdata). */
	sc->sc_data = kmem_alloc(MAXPHYS, KM_SLEEP);
	sc->sc_data_used = false;

	/* Attach the disk. */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	return 0;

bail:
	kmem_free(inbuf, MAX_KEYSIZE);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}
1353 
/*
 * cgd_ioctl_clr: unconfigure a cgd unit, undoing everything that
 * cgd_ioctl_set and cgdinit set up.  The caller (cgdioctl) has
 * already checked that no partition is busy.
 */
/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *sc, struct lwp *l)
{
	struct	dk_softc *dksc = &sc->sc_dksc;

	if (!DK_ATTACHED(dksc))
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	/* Release the target device, cipher state, and buffers. */
	(void)vn_close(sc->sc_tvn, FREAD|FWRITE, l->l_cred);
	sc->sc_cfuncs->cf_destroy(sc->sc_cdata.cf_priv);
	kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	kmem_free(sc->sc_data, MAXPHYS);
	sc->sc_data_used = false;
	dk_detach(dksc);
	disk_detach(&dksc->sc_dkdev);

	return 0;
}
1380 
/*
 * cgd_ioctl_get: fill in a cgd_user structure describing the unit's
 * current configuration.  Works on unconfigured or nonexistent units,
 * in which case the fields are zeroed.
 */
static int
cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
{
	struct cgd_softc *sc;
	struct cgd_user *cgu;
	int unit, error;

	unit = CGDUNIT(dev);
	cgu = (struct cgd_user *)data;

	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
			   dev, unit, data, l));

	/* XXX, we always return this units data, so if cgu_unit is
	 * not -1, that field doesn't match the rest
	 */
	if (cgu->cgu_unit == -1)
		cgu->cgu_unit = unit;

	if (cgu->cgu_unit < 0)
		return EINVAL;	/* XXX: should this be ENXIO? */

	/* Serialize against concurrent configure/unconfigure. */
	error = cgd_lock(false);
	if (error)
		return error;

	sc = device_lookup_private(&cgd_cd, unit);
	if (sc == NULL || !DK_ATTACHED(&sc->sc_dksc)) {
		/* No such unit, or not configured: report zeroes. */
		cgu->cgu_dev = 0;
		cgu->cgu_alg[0] = '\0';
		cgu->cgu_blocksize = 0;
		cgu->cgu_mode = 0;
		cgu->cgu_keylen = 0;
	}
	else {
		mutex_enter(&sc->sc_lock);
		cgu->cgu_dev = sc->sc_tdev;
		/*
		 * NOTE(review): strncpy does not NUL-terminate if cf_name
		 * fills cgu_alg exactly -- presumably all cipher names are
		 * shorter than the field; verify against cgd_crypto.c.
		 */
		strncpy(cgu->cgu_alg, sc->sc_cfuncs->cf_name,
		    sizeof(cgu->cgu_alg));
		cgu->cgu_blocksize = sc->sc_cdata.cf_blocksize;
		cgu->cgu_mode = sc->sc_cdata.cf_mode;
		cgu->cgu_keylen = sc->sc_cdata.cf_keylen;
		mutex_exit(&sc->sc_lock);
	}

	cgd_unlock();
	return 0;
}
1429 
/*
 * cgdinit: record the target device's path and dev_t in the softc and
 * construct an initial synthetic geometry from the target's size.
 * Note that `bail' is reached on success as well: tmppath is only
 * needed transiently, and sc_tpath is freed only on error.
 */
static int
cgdinit(struct cgd_softc *sc, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct	disk_geom *dg;
	int	ret;
	char	*tmppath;
	uint64_t psize;
	unsigned secsize;
	struct dk_softc *dksc = &sc->sc_dksc;

	sc->sc_tvn = vp;
	sc->sc_tpath = NULL;

	/* Copy in and keep the user-supplied path of the target device. */
	tmppath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &sc->sc_tpathlen);
	if (ret)
		goto bail;
	sc->sc_tpath = kmem_alloc(sc->sc_tpathlen, KM_SLEEP);
	memcpy(sc->sc_tpath, tmppath, sc->sc_tpathlen);

	sc->sc_tdev = vp->v_rdev;

	/* Size the cgd after the underlying device. */
	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
		goto bail;

	if (psize == 0) {
		ret = ENODEV;
		goto bail;
	}

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	dg = &dksc->sc_dkdev.dk_geom;
	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = psize;
	dg->dg_secsize = secsize;
	dg->dg_ntracks = 1;
	dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;	/* 1 MB per track */
	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;

bail:
	kmem_free(tmppath, MAXPATHLEN);
	if (ret && sc->sc_tpath)
		kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	return ret;
}
1481 
1482 /*
1483  * Our generic cipher entry point.  This takes care of the
1484  * IV mode and passes off the work to the specific cipher.
1485  * We implement here the IV method ``encrypted block
1486  * number''.
1487  *
1488  * XXXrcd: for now we rely on our own crypto framework defined
1489  *         in dev/cgd_crypto.c.  This will change when we
1490  *         get a generic kernel crypto framework.
1491  */
1492 
1493 static void
1494 blkno2blkno_buf(char *sbuf, daddr_t blkno)
1495 {
1496 	int	i;
1497 
1498 	/* Set up the blkno in blkno_buf, here we do not care much
1499 	 * about the final layout of the information as long as we
1500 	 * can guarantee that each sector will have a different IV
1501 	 * and that the endianness of the machine will not affect
1502 	 * the representation that we have chosen.
1503 	 *
1504 	 * We choose this representation, because it does not rely
1505 	 * on the size of buf (which is the blocksize of the cipher),
1506 	 * but allows daddr_t to grow without breaking existing
1507 	 * disks.
1508 	 *
1509 	 * Note that blkno2blkno_buf does not take a size as input,
1510 	 * and hence must be called on a pre-zeroed buffer of length
1511 	 * greater than or equal to sizeof(daddr_t).
1512 	 */
1513 	for (i=0; i < sizeof(daddr_t); i++) {
1514 		*sbuf++ = blkno & 0xff;
1515 		blkno >>= 8;
1516 	}
1517 }
1518 
1519 static struct cpu_info *
1520 cgd_cpu(struct cgd_softc *sc)
1521 {
1522 	struct cgd_worker *cw = sc->sc_worker;
1523 	struct cpu_info *ci = NULL;
1524 	u_int cidx, i;
1525 
1526 	if (cw->cw_busy == 0) {
1527 		cw->cw_last = cpu_index(curcpu());
1528 		return NULL;
1529 	}
1530 
1531 	for (i=0, cidx = cw->cw_last+1; i<maxcpus; ++i, ++cidx) {
1532 		if (cidx >= maxcpus)
1533 			cidx = 0;
1534 		ci = cpu_lookup(cidx);
1535 		if (ci) {
1536 			cw->cw_last = cidx;
1537 			break;
1538 		}
1539 	}
1540 
1541 	return ci;
1542 }
1543 
1544 static void
1545 cgd_enqueue(struct cgd_softc *sc, struct cgd_xfer *cx)
1546 {
1547 	struct cgd_worker *cw = sc->sc_worker;
1548 	struct cpu_info *ci;
1549 
1550 	mutex_enter(&cw->cw_lock);
1551 	ci = cgd_cpu(sc);
1552 	cw->cw_busy++;
1553 	mutex_exit(&cw->cw_lock);
1554 
1555 	workqueue_enqueue(cw->cw_wq, &cx->cx_work, ci);
1556 }
1557 
1558 static void
1559 cgd_process(struct work *wk, void *arg)
1560 {
1561 	struct cgd_xfer *cx = (struct cgd_xfer *)wk;
1562 	struct cgd_softc *sc = cx->cx_sc;
1563 	struct cgd_worker *cw = sc->sc_worker;
1564 
1565 	cgd_cipher(sc, cx->cx_dstv, cx->cx_srcv, cx->cx_len,
1566 	    cx->cx_blkno, cx->cx_secsize, cx->cx_dir);
1567 
1568 	if (cx->cx_dir == CGD_CIPHER_ENCRYPT) {
1569 		cgd_diskstart2(sc, cx);
1570 	} else {
1571 		cgd_iodone2(sc, cx);
1572 	}
1573 
1574 	mutex_enter(&cw->cw_lock);
1575 	if (cw->cw_busy > 0)
1576 		cw->cw_busy--;
1577 	mutex_exit(&cw->cw_lock);
1578 }
1579 
/*
 * cgd_cipher: transform `len' bytes from srcv into dstv, one
 * `secsize'-sized sector at a time, deriving each sector's IV seed
 * from its block number.  `dir' is CGD_CIPHER_ENCRYPT or
 * CGD_CIPHER_DECRYPT.  Encryption of the block-number buffer itself
 * happens inside the cipher dispatch.
 */
static void
cgd_cipher(struct cgd_softc *sc, void *dstv, const void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char		*dst = dstv;
	const char	*src = srcv;
	cfunc_cipher	*cipher = sc->sc_cfuncs->cf_cipher;
	size_t		blocksize = sc->sc_cdata.cf_blocksize;
	size_t		todo;
	char		blkno_buf[CGD_MAXBLOCKSIZE] __aligned(CGD_BLOCKALIGN);

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	/*
	 * In encblkno8 mode cf_blocksize was kept in bits (divisor 1 in
	 * the encblkno table), so convert to bytes here.
	 */
	if (sc->sc_cdata.cf_mode == CGD_CIPHER_CBC_ENCBLKNO8)
		blocksize /= 8;

	KASSERT(len % blocksize == 0);
	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	KASSERT(sizeof(daddr_t) <= blocksize);
	KASSERT(blocksize <= CGD_MAXBLOCKSIZE);

	/* Process one sector per iteration, advancing the block number. */
	for (; len > 0; len -= todo) {
		todo = MIN(len, secsize);

		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));

		/*
		 * Handle bollocksed up encblkno8 mistake.  We used to
		 * compute the encryption of a zero block with blkno as
		 * the CBC IV -- except in an early mistake arising
		 * from bit/byte confusion, we actually computed the
		 * encryption of the last of _eight_ zero blocks under
		 * CBC as the CBC IV.
		 *
		 * Encrypting the block number is handled inside the
		 * cipher dispatch now (even though in practice, both
		 * CBC and XTS will do the same thing), so we have to
		 * simulate the block number that would yield the same
		 * result.  So we encrypt _six_ zero blocks -- the
		 * first one and the last one are handled inside the
		 * cipher dispatch.
		 */
		if (sc->sc_cdata.cf_mode == CGD_CIPHER_CBC_ENCBLKNO8) {
			static const uint8_t zero[CGD_MAXBLOCKSIZE];
			uint8_t iv[CGD_MAXBLOCKSIZE];

			memcpy(iv, blkno_buf, blocksize);
			cipher(sc->sc_cdata.cf_priv, blkno_buf, zero,
			    6*blocksize, iv, CGD_CIPHER_ENCRYPT);
			memmove(blkno_buf, blkno_buf + 5*blocksize, blocksize);
		}

		cipher(sc->sc_cdata.cf_priv, dst, src, todo, blkno_buf, dir);

		dst += todo;
		src += todo;
		blkno++;
	}
}
1642 
#ifdef DEBUG
/* Print `len' bytes of buf in hex, prefixed with the `start' label. */
static void
hexprint(const char *start, void *buf, int len)
{
	unsigned char *p = buf;

	KASSERTMSG(len >= 0, "hexprint: called with len < 0");
	printf("%s: len=%06d 0x", start, len);
	for (; len > 0; len--)
		printf("%02x", *p++);
}
#endif
1655 
/*
 * cgd_selftest: run the compiled-in known-answer tests.  For each
 * entry in selftests[], set up a throwaway softc, encrypt the
 * plaintext and compare against the expected ciphertext, then decrypt
 * and compare against the original plaintext.  Panics on mismatch.
 */
static void
cgd_selftest(void)
{
	struct cgd_softc sc;
	void *buf;

	for (size_t i = 0; i < __arraycount(selftests); i++) {
		const char *alg = selftests[i].alg;
		int encblkno8 = selftests[i].encblkno8;
		const uint8_t *key = selftests[i].key;
		int keylen = selftests[i].keylen;
		int txtlen = selftests[i].txtlen;

		aprint_verbose("cgd: self-test %s-%d%s\n", alg, keylen,
		    encblkno8 ? " (encblkno8)" : "");

		memset(&sc, 0, sizeof(sc));

		sc.sc_cfuncs = cryptfuncs_find(alg);
		if (sc.sc_cfuncs == NULL)
			panic("%s not implemented", alg);

		/* Blocksize in bits here, as in cgd_ioctl_set. */
		sc.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
		sc.sc_cdata.cf_mode = encblkno8 ? CGD_CIPHER_CBC_ENCBLKNO8 :
		    CGD_CIPHER_CBC_ENCBLKNO1;
		sc.sc_cdata.cf_keylen = keylen;

		sc.sc_cdata.cf_priv = sc.sc_cfuncs->cf_init(keylen,
		    key, &sc.sc_cdata.cf_blocksize);
		if (sc.sc_cdata.cf_priv == NULL)
			panic("cf_priv is NULL");
		if (sc.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
			panic("bad block size %zu", sc.sc_cdata.cf_blocksize);

		/* Convert bits back to bytes, mirroring cgd_ioctl_set. */
		if (!encblkno8)
			sc.sc_cdata.cf_blocksize /= 8;

		buf = kmem_alloc(txtlen, KM_SLEEP);
		memcpy(buf, selftests[i].ptxt, txtlen);

		/* Encrypt in place, compare against the known answer. */
		cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
				selftests[i].secsize, CGD_CIPHER_ENCRYPT);
		if (memcmp(buf, selftests[i].ctxt, txtlen) != 0) {
			hexdump(printf, "was", buf, txtlen);
			hexdump(printf, "exp", selftests[i].ctxt, txtlen);
			panic("cgd %s-%d encryption is broken [%zu]",
			    selftests[i].alg, keylen, i);
		}

		/* Decrypt in place; we must get the plaintext back. */
		cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
				selftests[i].secsize, CGD_CIPHER_DECRYPT);
		if (memcmp(buf, selftests[i].ptxt, txtlen) != 0) {
			hexdump(printf, "was", buf, txtlen);
			hexdump(printf, "exp", selftests[i].ptxt, txtlen);
			panic("cgd %s-%d decryption is broken [%zu]",
			    selftests[i].alg, keylen, i);
		}

		kmem_free(buf, txtlen);
		sc.sc_cfuncs->cf_destroy(sc.sc_cdata.cf_priv);
	}

	aprint_verbose("cgd: self-tests passed\n");
}
1720 
1721 MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr,bufq_fcfs");
1722 
1723 #ifdef _MODULE
1724 CFDRIVER_DECL(cgd, DV_DISK, NULL);
1725 
1726 devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
1727 #endif
1728 
1729 static int
1730 cgd_modcmd(modcmd_t cmd, void *arg)
1731 {
1732 	int error = 0;
1733 
1734 	switch (cmd) {
1735 	case MODULE_CMD_INIT:
1736 #ifdef _MODULE
1737 		mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
1738 		cv_init(&cgd_spawning_cv, "cgspwn");
1739 
1740 		error = config_cfdriver_attach(&cgd_cd);
1741 		if (error)
1742 			break;
1743 
1744 		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1745 	        if (error) {
1746 			config_cfdriver_detach(&cgd_cd);
1747 			aprint_error("%s: unable to register cfattach for"
1748 			    "%s, error %d\n", __func__, cgd_cd.cd_name, error);
1749 			break;
1750 		}
1751 		/*
1752 		 * Attach the {b,c}devsw's
1753 		 */
1754 		error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1755 		    &cgd_cdevsw, &cgd_cmajor);
1756 
1757 		/*
1758 		 * If devsw_attach fails, remove from autoconf database
1759 		 */
1760 		if (error) {
1761 			config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1762 			config_cfdriver_detach(&cgd_cd);
1763 			aprint_error("%s: unable to attach %s devsw, "
1764 			    "error %d", __func__, cgd_cd.cd_name, error);
1765 			break;
1766 		}
1767 #endif
1768 		break;
1769 
1770 	case MODULE_CMD_FINI:
1771 #ifdef _MODULE
1772 		/*
1773 		 * Remove {b,c}devsw's
1774 		 */
1775 		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
1776 
1777 		/*
1778 		 * Now remove device from autoconf database
1779 		 */
1780 		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
1781 		if (error) {
1782 			(void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1783 			    &cgd_cdevsw, &cgd_cmajor);
1784 			aprint_error("%s: failed to detach %s cfattach, "
1785 			    "error %d\n", __func__, cgd_cd.cd_name, error);
1786  			break;
1787 		}
1788 		error = config_cfdriver_detach(&cgd_cd);
1789 		if (error) {
1790 			(void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
1791 			(void)devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
1792 			    &cgd_cdevsw, &cgd_cmajor);
1793 			aprint_error("%s: failed to detach %s cfdriver, "
1794 			    "error %d\n", __func__, cgd_cd.cd_name, error);
1795 			break;
1796 		}
1797 
1798 		cv_destroy(&cgd_spawning_cv);
1799 		mutex_destroy(&cgd_spawning_mtx);
1800 #endif
1801 		break;
1802 
1803 	case MODULE_CMD_STAT:
1804 		error = ENOTTY;
1805 		break;
1806 	default:
1807 		error = ENOTTY;
1808 		break;
1809 	}
1810 
1811 	return error;
1812 }
1813