/*	$OpenBSD: via.c,v 1.8 2006/11/17 07:47:56 tom Exp $	*/
/*	$NetBSD: via_padlock.c,v 1.35 2022/05/22 11:39:27 riastradh Exp $	*/

/*-
 * Copyright (c) 2003 Jason Wright
 * Copyright (c) 2003, 2004 Theo de Raadt
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: via_padlock.c,v 1.35 2022/05/22 11:39:27 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/cpu.h>

#include <x86/specialreg.h>

#include <machine/cpufunc.h>
#include <machine/cpuvar.h>

#include <crypto/aes/aes_bear.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <opencrypto/cryptosoft_xform.c>

#include <x86/via_padlock.h>

static int via_padlock_match(device_t, cfdata_t, void *);
static void via_padlock_attach(device_t, device_t, void *);
static int via_padlock_detach(device_t, int);
static void via_padlock_attach_intr(device_t);

CFATTACH_DECL_NEW(
    padlock,
    sizeof(struct via_padlock_softc),
    via_padlock_match,
    via_padlock_attach,
    via_padlock_detach,
    NULL
);

int via_padlock_crypto_newsession(void *, uint32_t *, struct cryptoini *);
int via_padlock_crypto_process(void *, struct cryptop *, int);
int via_padlock_crypto_swauth(struct cryptop *, struct cryptodesc *,
    struct swcr_data *, void *);
int via_padlock_crypto_encdec(struct cryptop *, struct cryptodesc *,
    struct via_padlock_session *, struct via_padlock_softc *, void *);
void via_padlock_crypto_freesession(void *, uint64_t);
static __inline void via_padlock_cbc(void *, void *, void *, void *, int,
    void *);

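/*
 * via_c3_ace_init: obtain an opencrypto driver ID and register the
 * algorithms this driver handles (AES-CBC on the PadLock ACE; the HMAC
 * modes are passed through to cryptosoft).
 */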
static void
via_c3_ace_init(struct via_padlock_softc *sc)
{
	/*
	 * There is no reason to call into the kernel to use this
	 * driver from userspace, because the crypto instructions can
	 * be directly accessed there.  Setting CRYPTOCAP_F_SOFTWARE
	 * has approximately the right semantics though the name is
	 * confusing (however, consider that crypto via unprivileged
	 * instructions _is_ "just software" in some sense).
	 */
	sc->sc_cid = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
	if (sc->sc_cid < 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not get a crypto driver ID\n");
		return;
	}

	sc->sc_cid_attached = true;

	/*
	 * Ask the opencrypto subsystem to register ourselves.  Although
	 * we don't support hardware offloading for various HMAC algorithms,
	 * we will handle them, because opencrypto prefers drivers that
	 * support all requested algorithms.
	 *
	 * XXX We should actually implement the HMAC modes this hardware
	 * XXX can accelerate (wrap its plain SHA1/SHA2 as HMAC) and
	 * XXX strongly consider removing those passed through to cryptosoft.
	 * XXX As it stands, we can "steal" sessions from drivers which could
	 * XXX better accelerate them.
	 *
	 * XXX Note the ordering dependency between when this (or any
	 * XXX crypto driver) attaches and when cryptosoft does.  We are
	 * XXX basically counting on the swcrypto pseudo-device to just
	 * XXX happen to attach last, or _it_ will steal every session
	 * XXX from _us_!
	 */
#define REGISTER(alg) \
	crypto_register(sc->sc_cid, alg, 0, 0, \
	    via_padlock_crypto_newsession, via_padlock_crypto_freesession, \
	    via_padlock_crypto_process, sc);

	REGISTER(CRYPTO_AES_CBC);
	REGISTER(CRYPTO_MD5_HMAC_96);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC_96);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC_96);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_SHA2_HMAC);
}

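/*
 * via_padlock_crypto_newsession: find or allocate a session slot.  For
 * AES-CBC, expand the key with the constant-time (BearSSL-derived) key
 * schedule and record the PadLock control word; for the HMAC algorithms,
 * precompute inner/outer contexts for use by cryptosoft.
 */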
int
via_padlock_crypto_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct via_padlock_softc *sc = arg;
	struct via_padlock_session *ses = NULL;
	const struct swcr_auth_hash *axf;
	struct swcr_data *swd;
	int sesn, i, cw0;

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = malloc(sizeof(*ses), M_DEVBUF,
		    M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = malloc((sesn + 1) * sizeof(*ses), M_DEVBUF,
			    M_NOWAIT);
			if (ses == NULL)
				return (ENOMEM);
			memcpy(ses, sc->sc_sessions, sesn * sizeof(*ses));
			memset(sc->sc_sessions, 0, sesn * sizeof(*ses));
			free(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	memset(ses, 0, sizeof(*ses));
	ses->ses_used = 1;

	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_AES_CBC:
			memset(ses->ses_ekey, 0, sizeof(ses->ses_ekey));
			memset(ses->ses_dkey, 0, sizeof(ses->ses_dkey));

			switch (c->cri_klen) {
			case 128:
				br_aes_ct_keysched_stdenc(ses->ses_ekey,
				    c->cri_key, 16);
				br_aes_ct_keysched_stddec(ses->ses_dkey,
				    c->cri_key, 16);
				cw0 = C3_CRYPT_CWLO_KEY128;
				break;
			case 192:
				br_aes_ct_keysched_stdenc(ses->ses_ekey,
				    c->cri_key, 24);
				br_aes_ct_keysched_stddec(ses->ses_dkey,
				    c->cri_key, 24);
				cw0 = C3_CRYPT_CWLO_KEY192;
				break;
			case 256:
				br_aes_ct_keysched_stdenc(ses->ses_ekey,
				    c->cri_key, 32);
				br_aes_ct_keysched_stddec(ses->ses_dkey,
				    c->cri_key, 32);
				cw0 = C3_CRYPT_CWLO_KEY256;
				break;
			default:
				return (EINVAL);
			}
			cw0 |= C3_CRYPT_CWLO_ALG_AES |
			    C3_CRYPT_CWLO_KEYGEN_SW |
			    C3_CRYPT_CWLO_NORMAL;

			ses->ses_klen = c->cri_klen;
			ses->ses_cw0 = cw0;

			/* Convert words to host byte order (???) */
			for (i = 0; i < 4*(AES_256_NROUNDS + 1); i++) {
				ses->ses_ekey[i] = ntohl(ses->ses_ekey[i]);
				ses->ses_dkey[i] = ntohl(ses->ses_dkey[i]);
			}
			break;

		/* Use hashing implementations from the cryptosoft code. */
		case CRYPTO_MD5_HMAC:
			axf = &swcr_auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_MD5_HMAC_96:
			axf = &swcr_auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &swcr_auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA1_HMAC_96:
			axf = &swcr_auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &swcr_auth_hash_hmac_ripemd_160;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC_96:
			axf = &swcr_auth_hash_hmac_ripemd_160_96;
			goto authcommon;
		case CRYPTO_SHA2_HMAC:
			/* Use this descriptor's key length, not the head's. */
			if (c->cri_klen == 256)
				axf = &swcr_auth_hash_hmac_sha2_256;
			else if (c->cri_klen == 384)
				axf = &swcr_auth_hash_hmac_sha2_384;
			else if (c->cri_klen == 512)
				axf = &swcr_auth_hash_hmac_sha2_512;
			else {
				return EINVAL;
			}
		authcommon:
			swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
			    M_NOWAIT|M_ZERO);
			if (swd == NULL) {
				via_padlock_crypto_freesession(sc, sesn);
				return (ENOMEM);
			}
			ses->swd = swd;

			swd->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if (swd->sw_ictx == NULL) {
				via_padlock_crypto_freesession(sc, sesn);
				return (ENOMEM);
			}

			swd->sw_octx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if (swd->sw_octx == NULL) {
				via_padlock_crypto_freesession(sc, sesn);
				return (ENOMEM);
			}

			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= HMAC_IPAD_VAL;

			axf->Init(swd->sw_ictx);
			axf->Update(swd->sw_ictx, c->cri_key, c->cri_klen / 8);
			axf->Update(swd->sw_ictx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (c->cri_klen / 8));

			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= (HMAC_IPAD_VAL ^
				    HMAC_OPAD_VAL);

			axf->Init(swd->sw_octx);
			axf->Update(swd->sw_octx, c->cri_key, c->cri_klen / 8);
			axf->Update(swd->sw_octx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (c->cri_klen / 8));

			for (i = 0; i < c->cri_klen / 8; i++)
				c->cri_key[i] ^= HMAC_OPAD_VAL;

			swd->sw_axf = axf;
			swd->sw_alg = c->cri_alg;

			break;
		default:
			return (EINVAL);
		}
	}

	*sidp = VIAC3_SID(0, sesn);
	return (0);
}

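/*
 * via_padlock_crypto_freesession: zero and free any cryptosoft HMAC
 * contexts attached to the session, then clear the session slot.
 */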
void
via_padlock_crypto_freesession(void *arg, uint64_t tid)
{
	struct via_padlock_softc *sc = arg;
	struct swcr_data *swd;
	const struct swcr_auth_hash *axf;
	int sesn;
	uint32_t sid = ((uint32_t)tid) & 0xffffffff;

	sesn = VIAC3_SESSION(sid);
	KASSERTMSG(sesn >= 0, "sesn=%d", sesn);
	KASSERTMSG(sesn < sc->sc_nsessions, "sesn=%d nsessions=%d",
	    sesn, sc->sc_nsessions);

	if (sc->sc_sessions[sesn].swd) {
		swd = sc->sc_sessions[sesn].swd;
		axf = swd->sw_axf;

		if (swd->sw_ictx) {
			memset(swd->sw_ictx, 0, axf->ctxsize);
			free(swd->sw_ictx, M_CRYPTO_DATA);
		}
		if (swd->sw_octx) {
			memset(swd->sw_octx, 0, axf->ctxsize);
			free(swd->sw_octx, M_CRYPTO_DATA);
		}
		free(swd, M_CRYPTO_DATA);
	}

	memset(&sc->sc_sessions[sesn], 0, sizeof(sc->sc_sessions[sesn]));
}

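/*
 * via_padlock_cbc: issue the PadLock REP XCRYPT-CBC instruction over
 * `rep' 16-byte blocks, at splhigh and with CR0_EM/CR0_TS cleared so
 * the SIMD/FPU path the instruction uses is available.
 */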
static __inline void
via_padlock_cbc(void *cw, void *src, void *dst, void *key, int rep,
    void *iv)
{
	unsigned int cr0;
	int s;

	s = splhigh();

	cr0 = rcr0();		/* Permit access to SIMD/FPU path */
	lcr0(cr0 & ~(CR0_EM|CR0_TS));

	/* Do the deed */
	__asm __volatile("pushf; popf");	/* force key reload */
	__asm __volatile(".byte 0xf3, 0x0f, 0xa7, 0xd0" : /* rep xcrypt-cbc */
	    : "a" (iv), "b" (key), "c" (rep), "d" (cw), "S" (src), "D" (dst)
	    : "memory", "cc");

	lcr0(cr0);

	splx(s);
}

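/*
 * via_padlock_crypto_swauth: hand an authentication descriptor to
 * cryptosoft's swcr_authcompute(), tagging the buffer as mbuf or iovec.
 */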
int
via_padlock_crypto_swauth(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, void *buf)
{
	int type;

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		type = CRYPTO_BUF_MBUF;
	else
		type = CRYPTO_BUF_IOV;

	return (swcr_authcompute(crp, crd, sw, buf, type));
}

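/*
 * via_padlock_crypto_encdec: copy the payload into a contiguous bounce
 * buffer, select the key, control word and IV, run the CBC operation in
 * place, copy the result back, and zero the bounce buffer.
 */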
int
via_padlock_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd,
    struct via_padlock_session *ses, struct via_padlock_softc *sc, void *buf)
{
	uint32_t *key;

	if ((crd->crd_len % 16) != 0)
		return (EINVAL);

	sc->op_buf = malloc(crd->crd_len, M_DEVBUF, M_NOWAIT);
	if (sc->op_buf == NULL)
		return (ENOMEM);

	if (crd->crd_flags & CRD_F_ENCRYPT) {
		sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_ENCRYPT;
		key = ses->ses_ekey;
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(sc->op_iv, crd->crd_iv, 16);
		else
			cprng_fast(sc->op_iv, 16);

		if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, 16, sc->op_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copyback((struct uio *)crp->crp_buf,
				    crd->crd_inject, 16, sc->op_iv);
			else
				memcpy((char *)crp->crp_buf + crd->crd_inject,
				    sc->op_iv, 16);
		}
	} else {
		sc->op_cw[0] = ses->ses_cw0 | C3_CRYPT_CWLO_DECRYPT;
		key = ses->ses_dkey;
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(sc->op_iv, crd->crd_iv, 16);
		else {
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, 16, sc->op_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_inject, 16, sc->op_iv);
			else
				memcpy(sc->op_iv, (char *)crp->crp_buf +
				    crd->crd_inject, 16);
		}
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		m_copydata((struct mbuf *)crp->crp_buf,
		    crd->crd_skip, crd->crd_len, sc->op_buf);
	else if (crp->crp_flags & CRYPTO_F_IOV)
		cuio_copydata((struct uio *)crp->crp_buf,
		    crd->crd_skip, crd->crd_len, sc->op_buf);
	else
		memcpy(sc->op_buf, (char *)crp->crp_buf + crd->crd_skip,
		    crd->crd_len);

	sc->op_cw[1] = sc->op_cw[2] = sc->op_cw[3] = 0;
	via_padlock_cbc(&sc->op_cw, sc->op_buf, sc->op_buf, key,
	    crd->crd_len / 16, sc->op_iv);

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		m_copyback((struct mbuf *)crp->crp_buf,
		    crd->crd_skip, crd->crd_len, sc->op_buf);
	else if (crp->crp_flags & CRYPTO_F_IOV)
		cuio_copyback((struct uio *)crp->crp_buf,
		    crd->crd_skip, crd->crd_len, sc->op_buf);
	else
		memcpy((char *)crp->crp_buf + crd->crd_skip, sc->op_buf,
		    crd->crd_len);

	if (sc->op_buf != NULL) {
		memset(sc->op_buf, 0, crd->crd_len);
		free(sc->op_buf, M_DEVBUF);
		sc->op_buf = NULL;
	}

	return 0;
}

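/*
 * via_padlock_crypto_process: walk the descriptor chain, dispatching
 * AES-CBC to the PadLock unit and the HMAC algorithms to cryptosoft,
 * then complete the request with crypto_done().
 */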
int
via_padlock_crypto_process(void *arg, struct cryptop *crp, int hint)
{
	struct via_padlock_softc *sc = arg;
	struct via_padlock_session *ses;
	struct cryptodesc *crd;
	int sesn, err = 0;

	KASSERT(sc != NULL /*, ("via_padlock_crypto_process: null softc")*/);
	if (crp == NULL || crp->crp_callback == NULL) {
		err = EINVAL;
		goto out;
	}

	sesn = VIAC3_SESSION(crp->crp_sid);
	if (sesn >= sc->sc_nsessions) {
		err = EINVAL;
		goto out;
	}
	ses = &sc->sc_sessions[sesn];

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
			if ((err = via_padlock_crypto_encdec(crp, crd, ses,
			    sc, crp->crp_buf)) != 0)
				goto out;
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_SHA2_HMAC:
			if ((err = via_padlock_crypto_swauth(crp, crd,
			    ses->swd, crp->crp_buf)) != 0)
				goto out;
			break;

		default:
			err = EINVAL;
			goto out;
		}
	}
out:
	crp->crp_etype = err;
	crypto_done(crp);
	return 0;
}

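/*
 * via_padlock_match: attach only to the "padlock" cpufeaturebus node on
 * the primary CPU, and only if CPUID advertises the ACE or RNG units.
 */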
static int
via_padlock_match(device_t parent, cfdata_t cf, void *opaque)
{
	struct cpufeature_attach_args *cfaa = opaque;
	struct cpu_info *ci = cfaa->ci;

	if (strcmp(cfaa->name, "padlock") != 0)
		return 0;
	if ((cpu_feature[4] & (CPUID_VIA_HAS_ACE|CPUID_VIA_HAS_RNG)) == 0)
		return 0;
	if ((ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) == 0)
		return 0;
	return 1;
}

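/*
 * via_padlock_attach: announce the device, register with pmf, and defer
 * the opencrypto registration until interrupts are running.
 */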
static void
via_padlock_attach(device_t parent, device_t self, void *opaque)
{
	struct via_padlock_softc *sc = device_private(self);

	sc->sc_dev = self;

	aprint_naive("\n");
	aprint_normal(": VIA PadLock\n");

	pmf_device_register(self, NULL, NULL);

	config_interrupts(self, via_padlock_attach_intr);
}

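/*
 * via_padlock_attach_intr: deferred attach; report the RNG and ACE
 * units and initialize the ACE if present.
 */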
static void
via_padlock_attach_intr(device_t self)
{
	struct via_padlock_softc *sc = device_private(self);

	aprint_normal("%s:", device_xname(self));
	if (cpu_feature[4] & CPUID_VIA_HAS_RNG) {
		aprint_normal(" RNG");
	}
	if (cpu_feature[4] & CPUID_VIA_HAS_ACE) {
		via_c3_ace_init(sc);
		aprint_normal(" ACE");
	}
	aprint_normal("\n");
}

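/*
 * via_padlock_detach: unregister every algorithm we registered with
 * opencrypto, then deregister from pmf.
 */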
static int
via_padlock_detach(device_t self, int flags)
{
	struct via_padlock_softc *sc = device_private(self);

	if (sc->sc_cid_attached) {
		crypto_unregister(sc->sc_cid, CRYPTO_AES_CBC);
		crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC_96);
		crypto_unregister(sc->sc_cid, CRYPTO_MD5_HMAC);
		crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC_96);
		crypto_unregister(sc->sc_cid, CRYPTO_SHA1_HMAC);
		crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC_96);
		crypto_unregister(sc->sc_cid, CRYPTO_RIPEMD160_HMAC);
		crypto_unregister(sc->sc_cid, CRYPTO_SHA2_HMAC);
		sc->sc_cid_attached = false;
	}

	pmf_device_deregister(self);

	return 0;
}

MODULE(MODULE_CLASS_DRIVER, padlock, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

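/*
 * padlock_modcmd: modular init/fini; when built as a module, hook the
 * autoconfiguration tables up via config_init_component().
 */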
static int
padlock_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = config_init_component(cfdriver_ioconf_padlock,
		    cfattach_ioconf_padlock, cfdata_ioconf_padlock);
#endif
		return error;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = config_fini_component(cfdriver_ioconf_padlock,
		    cfattach_ioconf_padlock, cfdata_ioconf_padlock);
#endif
		return error;
	default:
		return ENOTTY;
	}
}