/*	$NetBSD: nvme_pci.c,v 1.20 2018/04/18 10:05:59 nonaka Exp $	*/
/*	$OpenBSD: nvme_pci.c,v 1.3 2016/04/14 11:18:32 dlg Exp $ */

/*
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Copyright (C) 2016 NONAKA Kimihiro <nonaka@netbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvme_pci.c,v 1.20 2018/04/18 10:05:59 nonaka Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/bitops.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/interrupt.h>
#include <sys/kmem.h>
#include <sys/pmf.h>
#include <sys/module.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/nvmereg.h>
#include <dev/ic/nvmevar.h>

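/*
 * Tunables, settable at compile time or patchable from ddb:
 * nvme_pci_force_intx	skip MSI/MSI-X negotiation, use INTx only
 * nvme_pci_mpsafe	establish MPSAFE handlers and set CPU affinity
 */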
int nvme_pci_force_intx = 0;
int nvme_pci_mpsafe = 1;
int nvme_pci_mq = 1;		/* INTx: ioq=1, MSI/MSI-X: ioq=ncpu */

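/* BAR0: the controller's memory-mapped register space. */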
#define NVME_PCI_BAR		0x10

struct nvme_pci_softc {
	struct nvme_softc	psc_nvme;

	pci_chipset_tag_t	psc_pc;
	pci_intr_handle_t	*psc_intrs;
	int			psc_nintrs;
};

static int	nvme_pci_match(device_t, cfdata_t, void *);
static void	nvme_pci_attach(device_t, device_t, void *);
static int	nvme_pci_detach(device_t, int);
static int	nvme_pci_rescan(device_t, const char *, const int *);

CFATTACH_DECL3_NEW(nvme_pci, sizeof(struct nvme_pci_softc),
    nvme_pci_match, nvme_pci_attach, nvme_pci_detach, NULL, nvme_pci_rescan,
    nvme_childdet, DVF_DETACH_SHUTDOWN);

static int	nvme_pci_intr_establish(struct nvme_softc *,
		    uint16_t, struct nvme_queue *);
static int	nvme_pci_intr_disestablish(struct nvme_softc *, uint16_t);
static int	nvme_pci_setup_intr(struct pci_attach_args *,
		    struct nvme_pci_softc *);

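/*
 * Controllers that need NVME_QUIRK_DELAY_B4_CHK_RDY: delay after
 * toggling CC.EN before polling CSTS.RDY; these devices are reported
 * to wedge otherwise.
 */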
static const struct nvme_pci_quirk {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
	uint32_t		quirks;
} nvme_pci_quirks[] = {
	{ PCI_VENDOR_HGST, PCI_PRODUCT_HGST_SN100,
	    NVME_QUIRK_DELAY_B4_CHK_RDY },
	{ PCI_VENDOR_HGST, PCI_PRODUCT_HGST_SN200,
	    NVME_QUIRK_DELAY_B4_CHK_RDY },
	{ PCI_VENDOR_BEIJING_MEMBLAZE, PCI_PRODUCT_BEIJING_MEMBLAZE_PBLAZE4,
	    NVME_QUIRK_DELAY_B4_CHK_RDY },
	{ PCI_VENDOR_SAMSUNGELEC3, PCI_PRODUCT_SAMSUNGELEC3_172X,
	    NVME_QUIRK_DELAY_B4_CHK_RDY },
	{ PCI_VENDOR_SAMSUNGELEC3, PCI_PRODUCT_SAMSUNGELEC3_172XAB,
	    NVME_QUIRK_DELAY_B4_CHK_RDY },
};

static const struct nvme_pci_quirk *
nvme_pci_lookup_quirk(struct pci_attach_args *pa)
{
	const struct nvme_pci_quirk *q;
	int i;

	for (i = 0; i < __arraycount(nvme_pci_quirks); i++) {
		q = &nvme_pci_quirks[i];

		if (PCI_VENDOR(pa->pa_id) == q->vendor &&
		    PCI_PRODUCT(pa->pa_id) == q->product)
			return q;
	}
	return NULL;
}

static int
nvme_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_NVM &&
	    PCI_INTERFACE(pa->pa_class) == PCI_INTERFACE_NVM_NVME)
		return 1;

	return 0;
}

static void
nvme_pci_attach(device_t parent, device_t self, void *aux)
{
	struct nvme_pci_softc *psc = device_private(self);
	struct nvme_softc *sc = &psc->psc_nvme;
	struct pci_attach_args *pa = aux;
	const struct nvme_pci_quirk *quirk;
	pcireg_t memtype, reg;
	bus_addr_t memaddr;
	int flags, error;
	int msixoff;

	sc->sc_dev = self;
	psc->psc_pc = pa->pa_pc;
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	pci_aprint_devinfo(pa, NULL);

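	/* Enable bus mastering so the controller can DMA. */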
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	if ((reg & PCI_COMMAND_MASTER_ENABLE) == 0) {
		reg |= PCI_COMMAND_MASTER_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, reg);
	}

	/* Map registers */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NVME_PCI_BAR);
	if (PCI_MAPREG_TYPE(memtype) != PCI_MAPREG_TYPE_MEM) {
		aprint_error_dev(self, "invalid type (type=0x%x)\n", memtype);
		return;
	}
	sc->sc_iot = pa->pa_memt;
	error = pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START,
	    memtype, &memaddr, &sc->sc_ios, &flags);
	if (error) {
		aprint_error_dev(self, "can't get map info\n");
		return;
	}

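	/*
	 * If the MSI-X table also lives in BAR0, only map the registers
	 * below it; the MSI-X code maps the table itself, and overlapping
	 * mappings can fail on some ports.
	 */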
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &msixoff,
	    NULL)) {
		pcireg_t msixtbl;
		uint32_t table_offset;
		int bir;

		msixtbl = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    msixoff + PCI_MSIX_TBLOFFSET);
		table_offset = msixtbl & PCI_MSIX_TBLOFFSET_MASK;
		bir = msixtbl & PCI_MSIX_TBLBIR_MASK;
		if (bir == 0) {
			sc->sc_ios = table_offset;
		}
	}

	error = bus_space_map(sc->sc_iot, memaddr, sc->sc_ios, flags,
	    &sc->sc_ioh);
	if (error != 0) {
		aprint_error_dev(self, "can't map mem space (error=%d)\n",
		    error);
		return;
	}

	/* Establish interrupts */
	if (nvme_pci_setup_intr(pa, psc) != 0) {
		aprint_error_dev(self, "unable to allocate interrupt\n");
		goto unmap;
	}
	sc->sc_intr_establish = nvme_pci_intr_establish;
	sc->sc_intr_disestablish = nvme_pci_intr_disestablish;

	sc->sc_ih = kmem_zalloc(sizeof(*sc->sc_ih) * psc->psc_nintrs, KM_SLEEP);
	sc->sc_softih = kmem_zalloc(
	    sizeof(*sc->sc_softih) * psc->psc_nintrs, KM_SLEEP);

	quirk = nvme_pci_lookup_quirk(pa);
	if (quirk != NULL)
		sc->sc_quirks = quirk->quirks;

	if (nvme_attach(sc) != 0) {
		/* error printed by nvme_attach() */
		goto softintr_free;
	}

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	SET(sc->sc_flags, NVME_F_ATTACHED);
	return;

softintr_free:
	kmem_free(sc->sc_softih, sizeof(*sc->sc_softih) * psc->psc_nintrs);
	kmem_free(sc->sc_ih, sizeof(*sc->sc_ih) * psc->psc_nintrs);
	sc->sc_nq = 0;
	pci_intr_release(pa->pa_pc, psc->psc_intrs, psc->psc_nintrs);
	psc->psc_nintrs = 0;
unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}

static int
nvme_pci_rescan(device_t self, const char *attr, const int *flags)
{

	return nvme_rescan(self, attr, flags);
}

static int
nvme_pci_detach(device_t self, int flags)
{
	struct nvme_pci_softc *psc = device_private(self);
	struct nvme_softc *sc = &psc->psc_nvme;
	int error;

	if (!ISSET(sc->sc_flags, NVME_F_ATTACHED))
		return 0;

	error = nvme_detach(sc, flags);
	if (error)
		return error;

	kmem_free(sc->sc_softih, sizeof(*sc->sc_softih) * psc->psc_nintrs);
	sc->sc_softih = NULL;

	kmem_free(sc->sc_ih, sizeof(*sc->sc_ih) * psc->psc_nintrs);
	pci_intr_release(psc->psc_pc, psc->psc_intrs, psc->psc_nintrs);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	return 0;
}

static int
nvme_pci_intr_establish(struct nvme_softc *sc, uint16_t qid,
    struct nvme_queue *q)
{
	struct nvme_pci_softc *psc = (struct nvme_pci_softc *)sc;
	char intr_xname[INTRDEVNAMEBUF];
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr = NULL;
	int (*ih_func)(void *);
	void (*ih_func_soft)(void *);
	void *ih_arg;
	int error;

	KASSERT(sc->sc_use_mq || qid == NVME_ADMIN_Q);
	KASSERT(sc->sc_ih[qid] == NULL);

	if (nvme_pci_mpsafe) {
		pci_intr_setattr(psc->psc_pc, &psc->psc_intrs[qid],
		    PCI_INTR_MPSAFE, true);
	}

	if (!sc->sc_use_mq) {
		snprintf(intr_xname, sizeof(intr_xname), "%s",
		    device_xname(sc->sc_dev));
		ih_arg = sc;
		ih_func = nvme_intr;
		ih_func_soft = nvme_softintr_intx;
	} else {
		if (qid == NVME_ADMIN_Q) {
			snprintf(intr_xname, sizeof(intr_xname), "%s adminq",
			    device_xname(sc->sc_dev));
		} else {
			snprintf(intr_xname, sizeof(intr_xname), "%s ioq%d",
			    device_xname(sc->sc_dev), qid);
		}
		ih_arg = q;
		ih_func = nvme_intr_msi;
		ih_func_soft = nvme_softintr_msi;
	}

	/* establish hardware interrupt */
	sc->sc_ih[qid] = pci_intr_establish_xname(psc->psc_pc,
	    psc->psc_intrs[qid], IPL_BIO, ih_func, ih_arg, intr_xname);
	if (sc->sc_ih[qid] == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "unable to establish %s interrupt\n", intr_xname);
		return 1;
	}

	/* establish also the software interrupt */
	sc->sc_softih[qid] = softint_establish(
	    SOFTINT_BIO|(nvme_pci_mpsafe ? SOFTINT_MPSAFE : 0),
	    ih_func_soft, q);
	if (sc->sc_softih[qid] == NULL) {
		pci_intr_disestablish(psc->psc_pc, sc->sc_ih[qid]);
		sc->sc_ih[qid] = NULL;

		aprint_error_dev(sc->sc_dev,
		    "unable to establish %s soft interrupt\n",
		    intr_xname);
		return 1;
	}

	intrstr = pci_intr_string(psc->psc_pc, psc->psc_intrs[qid], intrbuf,
	    sizeof(intrbuf));
	if (!sc->sc_use_mq) {
		aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
	} else if (qid == NVME_ADMIN_Q) {
		aprint_normal_dev(sc->sc_dev,
		    "for admin queue interrupting at %s\n", intrstr);
	} else if (!nvme_pci_mpsafe) {
		aprint_normal_dev(sc->sc_dev,
		    "for io queue %d interrupting at %s\n", qid, intrstr);
	} else {
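		/* Pin each I/O queue's interrupt to its own CPU, round-robin. */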
		kcpuset_t *affinity;
		cpuid_t affinity_to;

		kcpuset_create(&affinity, true);
		affinity_to = (qid - 1) % ncpu;
		kcpuset_set(affinity, affinity_to);
		error = interrupt_distribute(sc->sc_ih[qid], affinity, NULL);
		kcpuset_destroy(affinity);
		aprint_normal_dev(sc->sc_dev,
		    "for io queue %d interrupting at %s", qid, intrstr);
		if (error == 0)
			aprint_normal(" affinity to cpu%lu", affinity_to);
		aprint_normal("\n");
	}
	return 0;
}

static int
nvme_pci_intr_disestablish(struct nvme_softc *sc, uint16_t qid)
{
	struct nvme_pci_softc *psc = (struct nvme_pci_softc *)sc;

	KASSERT(sc->sc_use_mq || qid == NVME_ADMIN_Q);
	KASSERT(sc->sc_ih[qid] != NULL);

	if (sc->sc_softih) {
		softint_disestablish(sc->sc_softih[qid]);
		sc->sc_softih[qid] = NULL;
	}

	pci_intr_disestablish(psc->psc_pc, sc->sc_ih[qid]);
	sc->sc_ih[qid] = NULL;

	return 0;
}

static int
nvme_pci_setup_intr(struct pci_attach_args *pa, struct nvme_pci_softc *psc)
{
	struct nvme_softc *sc = &psc->psc_nvme;
	int error;
	int counts[PCI_INTR_TYPE_SIZE], alloced_counts[PCI_INTR_TYPE_SIZE];
	pci_intr_handle_t *ihps;
	int max_type, intr_type;

	if (nvme_pci_force_intx) {
		max_type = PCI_INTR_TYPE_INTX;
		goto force_intx;
	}

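	/*
	 * Probe how many MSI-X vectors are really usable: try to allocate
	 * up to ncpu + 1 (admin queue plus one I/O queue per CPU), note
	 * what we got, then release them.  The final allocation, with
	 * fallback, happens below.
	 */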
	/* MSI-X */
	max_type = PCI_INTR_TYPE_MSIX;
	counts[PCI_INTR_TYPE_MSIX] = min(pci_msix_count(pa->pa_pc, pa->pa_tag),
	    ncpu + 1);
	if (counts[PCI_INTR_TYPE_MSIX] > 0) {
		memset(alloced_counts, 0, sizeof(alloced_counts));
		alloced_counts[PCI_INTR_TYPE_MSIX] = counts[PCI_INTR_TYPE_MSIX];
		if (pci_intr_alloc(pa, &ihps, alloced_counts,
		    PCI_INTR_TYPE_MSIX)) {
			counts[PCI_INTR_TYPE_MSIX] = 0;
		} else {
			counts[PCI_INTR_TYPE_MSIX] =
			    alloced_counts[PCI_INTR_TYPE_MSIX];
			pci_intr_release(pa->pa_pc, ihps,
			    alloced_counts[PCI_INTR_TYPE_MSIX]);
		}
	}
	if (counts[PCI_INTR_TYPE_MSIX] < 2) {
		counts[PCI_INTR_TYPE_MSIX] = 0;
		max_type = PCI_INTR_TYPE_MSI;
	} else if (!nvme_pci_mq || !nvme_pci_mpsafe) {
		counts[PCI_INTR_TYPE_MSIX] = 2;	/* adminq + 1 ioq */
	}

retry_msi:
	/* MSI */
	counts[PCI_INTR_TYPE_MSI] = pci_msi_count(pa->pa_pc, pa->pa_tag);
	if (counts[PCI_INTR_TYPE_MSI] > 0) {
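		/*
		 * MSI vector counts come in powers of two; halve toward
		 * ncpu + 1 without going below it.
		 */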
		while (counts[PCI_INTR_TYPE_MSI] > ncpu + 1) {
			if (counts[PCI_INTR_TYPE_MSI] / 2 <= ncpu + 1)
				break;
			counts[PCI_INTR_TYPE_MSI] /= 2;
		}
		memset(alloced_counts, 0, sizeof(alloced_counts));
		alloced_counts[PCI_INTR_TYPE_MSI] = counts[PCI_INTR_TYPE_MSI];
		if (pci_intr_alloc(pa, &ihps, alloced_counts,
		    PCI_INTR_TYPE_MSI)) {
			counts[PCI_INTR_TYPE_MSI] = 0;
		} else {
			counts[PCI_INTR_TYPE_MSI] =
			    alloced_counts[PCI_INTR_TYPE_MSI];
			pci_intr_release(pa->pa_pc, ihps,
			    alloced_counts[PCI_INTR_TYPE_MSI]);
		}
	}
	if (counts[PCI_INTR_TYPE_MSI] < 1) {
		counts[PCI_INTR_TYPE_MSI] = 0;
		if (max_type == PCI_INTR_TYPE_MSI)
			max_type = PCI_INTR_TYPE_INTX;
	} else if (!nvme_pci_mq || !nvme_pci_mpsafe) {
		if (counts[PCI_INTR_TYPE_MSI] > 2)
			counts[PCI_INTR_TYPE_MSI] = 2;	/* adminq + 1 ioq */
	}

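	/*
	 * Final allocation, with fallback: if the preferred type fails or
	 * yields fewer vectors than requested, step down from MSI-X to MSI
	 * to INTx and retry.
	 */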
force_intx:
	/* INTx */
	counts[PCI_INTR_TYPE_INTX] = 1;

	memcpy(alloced_counts, counts, sizeof(counts));
	error = pci_intr_alloc(pa, &ihps, alloced_counts, max_type);
	if (error) {
		if (max_type != PCI_INTR_TYPE_INTX) {
retry:
			memset(counts, 0, sizeof(counts));
			if (max_type == PCI_INTR_TYPE_MSIX) {
				max_type = PCI_INTR_TYPE_MSI;
				goto retry_msi;
			} else {
				max_type = PCI_INTR_TYPE_INTX;
				goto force_intx;
			}
		}
		return error;
	}

	intr_type = pci_intr_type(pa->pa_pc, ihps[0]);
	if (alloced_counts[intr_type] < counts[intr_type]) {
		if (intr_type != PCI_INTR_TYPE_INTX) {
			pci_intr_release(pa->pa_pc, ihps,
			    alloced_counts[intr_type]);
			max_type = intr_type;
			goto retry;
		}
		return EBUSY;
	}

	psc->psc_intrs = ihps;
	psc->psc_nintrs = alloced_counts[intr_type];
	if (intr_type == PCI_INTR_TYPE_MSI) {
		if (alloced_counts[intr_type] > ncpu + 1)
			alloced_counts[intr_type] = ncpu + 1;
	}
	sc->sc_use_mq = alloced_counts[intr_type] > 1;
	sc->sc_nq = sc->sc_use_mq ? alloced_counts[intr_type] - 1 : 1;

	return 0;
}

MODULE(MODULE_CLASS_DRIVER, nvme, "pci,dk_subr");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
nvme_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	devmajor_t cmajor, bmajor;
	extern const struct cdevsw nvme_cdevsw;
#endif
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_nvme_pci,
		    cfattach_ioconf_nvme_pci, cfdata_ioconf_nvme_pci);
		if (error)
			break;

		bmajor = cmajor = NODEVMAJOR;
		error = devsw_attach(nvme_cd.cd_name, NULL, &bmajor,
		    &nvme_cdevsw, &cmajor);
		if (error) {
			aprint_error("%s: unable to register devsw\n",
			    nvme_cd.cd_name);
			/* do not abort, just /dev/nvme* will not work */
		}
		break;
	case MODULE_CMD_FINI:
		devsw_detach(NULL, &nvme_cdevsw);

		error = config_fini_component(cfdriver_ioconf_nvme_pci,
		    cfattach_ioconf_nvme_pci, cfdata_ioconf_nvme_pci);
		break;
	default:
		break;
	}
#endif
	return error;
}
540