xref: /freebsd-src/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c (revision dd21556857e8d40f66bf5ad54754d9d52669ebf7)
1eda14cbcSMatt Macy /*
2eda14cbcSMatt Macy  * CDDL HEADER START
3eda14cbcSMatt Macy  *
4eda14cbcSMatt Macy  * The contents of this file are subject to the terms of the
5eda14cbcSMatt Macy  * Common Development and Distribution License (the "License").
6eda14cbcSMatt Macy  * You may not use this file except in compliance with the License.
7eda14cbcSMatt Macy  *
8eda14cbcSMatt Macy  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9271171e0SMartin Matuska  * or https://opensource.org/licenses/CDDL-1.0.
10eda14cbcSMatt Macy  * See the License for the specific language governing permissions
11eda14cbcSMatt Macy  * and limitations under the License.
12eda14cbcSMatt Macy  *
13eda14cbcSMatt Macy  * When distributing Covered Code, include this CDDL HEADER in each
14eda14cbcSMatt Macy  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15eda14cbcSMatt Macy  * If applicable, add the following below this CDDL HEADER, with the
16eda14cbcSMatt Macy  * fields enclosed by brackets "[]" replaced with your own identifying
17eda14cbcSMatt Macy  * information: Portions Copyright [yyyy] [name of copyright owner]
18eda14cbcSMatt Macy  *
19eda14cbcSMatt Macy  * CDDL HEADER END
20eda14cbcSMatt Macy  */
21eda14cbcSMatt Macy /*
22eda14cbcSMatt Macy  * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
23eda14cbcSMatt Macy  * All rights reserved.
24eda14cbcSMatt Macy  *
25eda14cbcSMatt Macy  * Portions Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>
26eda14cbcSMatt Macy  */
27eda14cbcSMatt Macy 
28eda14cbcSMatt Macy #include <sys/zfs_context.h>
29eda14cbcSMatt Macy #include <sys/param.h>
30eda14cbcSMatt Macy #include <sys/kernel.h>
31eda14cbcSMatt Macy #include <sys/bio.h>
325eb61f6cSMartin Matuska #include <sys/buf.h>
33eda14cbcSMatt Macy #include <sys/file.h>
34eda14cbcSMatt Macy #include <sys/spa.h>
35eda14cbcSMatt Macy #include <sys/spa_impl.h>
36eda14cbcSMatt Macy #include <sys/vdev_impl.h>
37eda14cbcSMatt Macy #include <sys/vdev_os.h>
38eda14cbcSMatt Macy #include <sys/fs/zfs.h>
39eda14cbcSMatt Macy #include <sys/zio.h>
405eb61f6cSMartin Matuska #include <vm/vm_page.h>
41eda14cbcSMatt Macy #include <geom/geom.h>
42eda14cbcSMatt Macy #include <geom/geom_disk.h>
43eda14cbcSMatt Macy #include <geom/geom_int.h>
44eda14cbcSMatt Macy 
45eda14cbcSMatt Macy #ifndef g_topology_locked
46eda14cbcSMatt Macy #define	g_topology_locked()	sx_xlocked(&topology_lock)
47eda14cbcSMatt Macy #endif
48eda14cbcSMatt Macy 
49eda14cbcSMatt Macy /*
50eda14cbcSMatt Macy  * Virtual device vector for GEOM.
51eda14cbcSMatt Macy  */
52eda14cbcSMatt Macy 
static g_attrchanged_t vdev_geom_attrchanged;

/*
 * GEOM class under which all ZFS vdev consumers are created.  The
 * attrchanged hook lets us react to provider attribute changes (e.g.
 * GEOM::physpath) even before a geom of this class exists.
 */
struct g_class zfs_vdev_class = {
	.name = "ZFS::VDEV",
	.version = G_VERSION,
	.attrchanged = vdev_geom_attrchanged,
};

/*
 * One list node per vdev sharing a single g_consumer.  Multiple vdevs may
 * reference the same consumer (e.g. during replacement); the SLIST head is
 * stored directly in g_consumer.private, which the static assert below
 * verifies is large enough.
 */
struct consumer_vdev_elem {
	SLIST_ENTRY(consumer_vdev_elem)	elems;
	vdev_t	*vd;
};

SLIST_HEAD(consumer_priv_t, consumer_vdev_elem);
_Static_assert(
    sizeof (((struct g_consumer *)NULL)->private) ==
    sizeof (struct consumer_priv_t *),
	"consumer_priv_t* can't be stored in g_consumer.private");

DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);

SYSCTL_DECL(_vfs_zfs_vdev);
/* Don't send BIO_FLUSH. */
static int vdev_geom_bio_flush_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RWTUN,
	&vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
/* Don't send BIO_DELETE. */
static int vdev_geom_bio_delete_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RWTUN,
	&vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE");

/* Declare local functions */
static void vdev_geom_detach(struct g_consumer *cp, boolean_t open_for_read);

/*
 * Thread local storage used to indicate when a thread is probing geoms
 * for their guids.  If NULL, this thread is not tasting geoms.  If non NULL,
 * it is looking for a replacement for the vdev_t* that is its value.
 */
uint_t zfs_geom_probe_vdev_key;
92eda14cbcSMatt Macy 
93eda14cbcSMatt Macy static void
94eda14cbcSMatt Macy vdev_geom_set_physpath(vdev_t *vd, struct g_consumer *cp,
95eda14cbcSMatt Macy     boolean_t do_null_update)
96eda14cbcSMatt Macy {
97eda14cbcSMatt Macy 	boolean_t needs_update = B_FALSE;
98eda14cbcSMatt Macy 	char *physpath;
99eda14cbcSMatt Macy 	int error, physpath_len;
100eda14cbcSMatt Macy 
101eda14cbcSMatt Macy 	physpath_len = MAXPATHLEN;
102eda14cbcSMatt Macy 	physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO);
103eda14cbcSMatt Macy 	error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
104eda14cbcSMatt Macy 	if (error == 0) {
105eda14cbcSMatt Macy 		char *old_physpath;
106eda14cbcSMatt Macy 
107eda14cbcSMatt Macy 		/* g_topology lock ensures that vdev has not been closed */
108eda14cbcSMatt Macy 		g_topology_assert();
109eda14cbcSMatt Macy 		old_physpath = vd->vdev_physpath;
110eda14cbcSMatt Macy 		vd->vdev_physpath = spa_strdup(physpath);
111eda14cbcSMatt Macy 
112eda14cbcSMatt Macy 		if (old_physpath != NULL) {
113eda14cbcSMatt Macy 			needs_update = (strcmp(old_physpath,
114eda14cbcSMatt Macy 			    vd->vdev_physpath) != 0);
115eda14cbcSMatt Macy 			spa_strfree(old_physpath);
116eda14cbcSMatt Macy 		} else
117eda14cbcSMatt Macy 			needs_update = do_null_update;
118eda14cbcSMatt Macy 	}
119eda14cbcSMatt Macy 	g_free(physpath);
120eda14cbcSMatt Macy 
121eda14cbcSMatt Macy 	/*
122eda14cbcSMatt Macy 	 * If the physical path changed, update the config.
123eda14cbcSMatt Macy 	 * Only request an update for previously unset physpaths if
124eda14cbcSMatt Macy 	 * requested by the caller.
125eda14cbcSMatt Macy 	 */
126eda14cbcSMatt Macy 	if (needs_update)
127eda14cbcSMatt Macy 		spa_async_request(vd->vdev_spa, SPA_ASYNC_CONFIG_UPDATE);
128eda14cbcSMatt Macy 
129eda14cbcSMatt Macy }
130eda14cbcSMatt Macy 
131eda14cbcSMatt Macy static void
132eda14cbcSMatt Macy vdev_geom_attrchanged(struct g_consumer *cp, const char *attr)
133eda14cbcSMatt Macy {
134eda14cbcSMatt Macy 	struct consumer_priv_t *priv;
135eda14cbcSMatt Macy 	struct consumer_vdev_elem *elem;
136eda14cbcSMatt Macy 
137eda14cbcSMatt Macy 	priv = (struct consumer_priv_t *)&cp->private;
138eda14cbcSMatt Macy 	if (SLIST_EMPTY(priv))
139eda14cbcSMatt Macy 		return;
140eda14cbcSMatt Macy 
141eda14cbcSMatt Macy 	SLIST_FOREACH(elem, priv, elems) {
142eda14cbcSMatt Macy 		vdev_t *vd = elem->vd;
143eda14cbcSMatt Macy 		if (strcmp(attr, "GEOM::physpath") == 0) {
144eda14cbcSMatt Macy 			vdev_geom_set_physpath(vd, cp, /* null_update */B_TRUE);
145eda14cbcSMatt Macy 			return;
146eda14cbcSMatt Macy 		}
147eda14cbcSMatt Macy 	}
148eda14cbcSMatt Macy }
149eda14cbcSMatt Macy 
150eda14cbcSMatt Macy static void
151eda14cbcSMatt Macy vdev_geom_resize(struct g_consumer *cp)
152eda14cbcSMatt Macy {
153eda14cbcSMatt Macy 	struct consumer_priv_t *priv;
154eda14cbcSMatt Macy 	struct consumer_vdev_elem *elem;
155eda14cbcSMatt Macy 	spa_t *spa;
156eda14cbcSMatt Macy 	vdev_t *vd;
157eda14cbcSMatt Macy 
158eda14cbcSMatt Macy 	priv = (struct consumer_priv_t *)&cp->private;
159eda14cbcSMatt Macy 	if (SLIST_EMPTY(priv))
160eda14cbcSMatt Macy 		return;
161eda14cbcSMatt Macy 
162eda14cbcSMatt Macy 	SLIST_FOREACH(elem, priv, elems) {
163eda14cbcSMatt Macy 		vd = elem->vd;
164eda14cbcSMatt Macy 		if (vd->vdev_state != VDEV_STATE_HEALTHY)
165eda14cbcSMatt Macy 			continue;
166eda14cbcSMatt Macy 		spa = vd->vdev_spa;
167eda14cbcSMatt Macy 		if (!spa->spa_autoexpand)
168eda14cbcSMatt Macy 			continue;
169eda14cbcSMatt Macy 		vdev_online(spa, vd->vdev_guid, ZFS_ONLINE_EXPAND, NULL);
170eda14cbcSMatt Macy 	}
171eda14cbcSMatt Macy }
172eda14cbcSMatt Macy 
173eda14cbcSMatt Macy static void
174eda14cbcSMatt Macy vdev_geom_orphan(struct g_consumer *cp)
175eda14cbcSMatt Macy {
176eda14cbcSMatt Macy 	struct consumer_priv_t *priv;
177eda14cbcSMatt Macy 	// cppcheck-suppress uninitvar
178eda14cbcSMatt Macy 	struct consumer_vdev_elem *elem;
179eda14cbcSMatt Macy 
180eda14cbcSMatt Macy 	g_topology_assert();
181eda14cbcSMatt Macy 
182eda14cbcSMatt Macy 	priv = (struct consumer_priv_t *)&cp->private;
183eda14cbcSMatt Macy 	if (SLIST_EMPTY(priv))
184eda14cbcSMatt Macy 		/* Vdev close in progress.  Ignore the event. */
185eda14cbcSMatt Macy 		return;
186eda14cbcSMatt Macy 
187eda14cbcSMatt Macy 	/*
188eda14cbcSMatt Macy 	 * Orphan callbacks occur from the GEOM event thread.
189eda14cbcSMatt Macy 	 * Concurrent with this call, new I/O requests may be
190eda14cbcSMatt Macy 	 * working their way through GEOM about to find out
191eda14cbcSMatt Macy 	 * (only once executed by the g_down thread) that we've
192eda14cbcSMatt Macy 	 * been orphaned from our disk provider.  These I/Os
193eda14cbcSMatt Macy 	 * must be retired before we can detach our consumer.
194eda14cbcSMatt Macy 	 * This is most easily achieved by acquiring the
195eda14cbcSMatt Macy 	 * SPA ZIO configuration lock as a writer, but doing
196eda14cbcSMatt Macy 	 * so with the GEOM topology lock held would cause
197eda14cbcSMatt Macy 	 * a lock order reversal.  Instead, rely on the SPA's
198eda14cbcSMatt Macy 	 * async removal support to invoke a close on this
199eda14cbcSMatt Macy 	 * vdev once it is safe to do so.
200eda14cbcSMatt Macy 	 */
201eda14cbcSMatt Macy 	SLIST_FOREACH(elem, priv, elems) {
202eda14cbcSMatt Macy 		// cppcheck-suppress uninitvar
203eda14cbcSMatt Macy 		vdev_t *vd = elem->vd;
204eda14cbcSMatt Macy 
205eda14cbcSMatt Macy 		vd->vdev_remove_wanted = B_TRUE;
206eda14cbcSMatt Macy 		spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
207eda14cbcSMatt Macy 	}
208eda14cbcSMatt Macy }
209eda14cbcSMatt Macy 
210eda14cbcSMatt Macy static struct g_consumer *
211eda14cbcSMatt Macy vdev_geom_attach(struct g_provider *pp, vdev_t *vd, boolean_t sanity)
212eda14cbcSMatt Macy {
213eda14cbcSMatt Macy 	struct g_geom *gp;
214eda14cbcSMatt Macy 	struct g_consumer *cp;
215eda14cbcSMatt Macy 	int error;
216eda14cbcSMatt Macy 
217eda14cbcSMatt Macy 	g_topology_assert();
218eda14cbcSMatt Macy 
219eda14cbcSMatt Macy 	ZFS_LOG(1, "Attaching to %s.", pp->name);
220eda14cbcSMatt Macy 
221eda14cbcSMatt Macy 	if (sanity) {
222eda14cbcSMatt Macy 		if (pp->sectorsize > VDEV_PAD_SIZE || !ISP2(pp->sectorsize)) {
223eda14cbcSMatt Macy 			ZFS_LOG(1, "Failing attach of %s. "
224eda14cbcSMatt Macy 			    "Incompatible sectorsize %d\n",
225eda14cbcSMatt Macy 			    pp->name, pp->sectorsize);
226eda14cbcSMatt Macy 			return (NULL);
227eda14cbcSMatt Macy 		} else if (pp->mediasize < SPA_MINDEVSIZE) {
228eda14cbcSMatt Macy 			ZFS_LOG(1, "Failing attach of %s. "
229eda14cbcSMatt Macy 			    "Incompatible mediasize %ju\n",
230eda14cbcSMatt Macy 			    pp->name, pp->mediasize);
231eda14cbcSMatt Macy 			return (NULL);
232eda14cbcSMatt Macy 		}
233eda14cbcSMatt Macy 	}
234eda14cbcSMatt Macy 
235eda14cbcSMatt Macy 	/* Do we have geom already? No? Create one. */
236eda14cbcSMatt Macy 	LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
237eda14cbcSMatt Macy 		if (gp->flags & G_GEOM_WITHER)
238eda14cbcSMatt Macy 			continue;
239eda14cbcSMatt Macy 		if (strcmp(gp->name, "zfs::vdev") != 0)
240eda14cbcSMatt Macy 			continue;
241eda14cbcSMatt Macy 		break;
242eda14cbcSMatt Macy 	}
243eda14cbcSMatt Macy 	if (gp == NULL) {
244eda14cbcSMatt Macy 		gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
245eda14cbcSMatt Macy 		gp->orphan = vdev_geom_orphan;
246eda14cbcSMatt Macy 		gp->attrchanged = vdev_geom_attrchanged;
247eda14cbcSMatt Macy 		gp->resize = vdev_geom_resize;
248eda14cbcSMatt Macy 		cp = g_new_consumer(gp);
249eda14cbcSMatt Macy 		error = g_attach(cp, pp);
250eda14cbcSMatt Macy 		if (error != 0) {
251eda14cbcSMatt Macy 			ZFS_LOG(1, "%s(%d): g_attach failed: %d\n", __func__,
252eda14cbcSMatt Macy 			    __LINE__, error);
253eda14cbcSMatt Macy 			vdev_geom_detach(cp, B_FALSE);
254eda14cbcSMatt Macy 			return (NULL);
255eda14cbcSMatt Macy 		}
256eda14cbcSMatt Macy 		error = g_access(cp, 1, 0, 1);
257eda14cbcSMatt Macy 		if (error != 0) {
258eda14cbcSMatt Macy 			ZFS_LOG(1, "%s(%d): g_access failed: %d\n", __func__,
259eda14cbcSMatt Macy 			    __LINE__, error);
260eda14cbcSMatt Macy 			vdev_geom_detach(cp, B_FALSE);
261eda14cbcSMatt Macy 			return (NULL);
262eda14cbcSMatt Macy 		}
263eda14cbcSMatt Macy 		ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
264eda14cbcSMatt Macy 	} else {
265eda14cbcSMatt Macy 		/* Check if we are already connected to this provider. */
266eda14cbcSMatt Macy 		LIST_FOREACH(cp, &gp->consumer, consumer) {
267eda14cbcSMatt Macy 			if (cp->provider == pp) {
268eda14cbcSMatt Macy 				ZFS_LOG(1, "Found consumer for %s.", pp->name);
269eda14cbcSMatt Macy 				break;
270eda14cbcSMatt Macy 			}
271eda14cbcSMatt Macy 		}
272eda14cbcSMatt Macy 		if (cp == NULL) {
273eda14cbcSMatt Macy 			cp = g_new_consumer(gp);
274eda14cbcSMatt Macy 			error = g_attach(cp, pp);
275eda14cbcSMatt Macy 			if (error != 0) {
276eda14cbcSMatt Macy 				ZFS_LOG(1, "%s(%d): g_attach failed: %d\n",
277eda14cbcSMatt Macy 				    __func__, __LINE__, error);
278eda14cbcSMatt Macy 				vdev_geom_detach(cp, B_FALSE);
279eda14cbcSMatt Macy 				return (NULL);
280eda14cbcSMatt Macy 			}
281eda14cbcSMatt Macy 			error = g_access(cp, 1, 0, 1);
282eda14cbcSMatt Macy 			if (error != 0) {
283eda14cbcSMatt Macy 				ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
284eda14cbcSMatt Macy 				    __func__, __LINE__, error);
285eda14cbcSMatt Macy 				vdev_geom_detach(cp, B_FALSE);
286eda14cbcSMatt Macy 				return (NULL);
287eda14cbcSMatt Macy 			}
288eda14cbcSMatt Macy 			ZFS_LOG(1, "Created consumer for %s.", pp->name);
289eda14cbcSMatt Macy 		} else {
290eda14cbcSMatt Macy 			error = g_access(cp, 1, 0, 1);
291eda14cbcSMatt Macy 			if (error != 0) {
292eda14cbcSMatt Macy 				ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
293eda14cbcSMatt Macy 				    __func__, __LINE__, error);
294eda14cbcSMatt Macy 				return (NULL);
295eda14cbcSMatt Macy 			}
296eda14cbcSMatt Macy 			ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
297eda14cbcSMatt Macy 		}
298eda14cbcSMatt Macy 	}
299eda14cbcSMatt Macy 
300eda14cbcSMatt Macy 	if (vd != NULL)
301eda14cbcSMatt Macy 		vd->vdev_tsd = cp;
302eda14cbcSMatt Macy 
303eda14cbcSMatt Macy 	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
304eda14cbcSMatt Macy 	return (cp);
305eda14cbcSMatt Macy }
306eda14cbcSMatt Macy 
307eda14cbcSMatt Macy static void
308eda14cbcSMatt Macy vdev_geom_detach(struct g_consumer *cp, boolean_t open_for_read)
309eda14cbcSMatt Macy {
310eda14cbcSMatt Macy 	struct g_geom *gp;
311eda14cbcSMatt Macy 
312eda14cbcSMatt Macy 	g_topology_assert();
313eda14cbcSMatt Macy 
314eda14cbcSMatt Macy 	ZFS_LOG(1, "Detaching from %s.",
315eda14cbcSMatt Macy 	    cp->provider && cp->provider->name ? cp->provider->name : "NULL");
316eda14cbcSMatt Macy 
317eda14cbcSMatt Macy 	gp = cp->geom;
318eda14cbcSMatt Macy 	if (open_for_read)
319eda14cbcSMatt Macy 		g_access(cp, -1, 0, -1);
320eda14cbcSMatt Macy 	/* Destroy consumer on last close. */
321eda14cbcSMatt Macy 	if (cp->acr == 0 && cp->ace == 0) {
322eda14cbcSMatt Macy 		if (cp->acw > 0)
323eda14cbcSMatt Macy 			g_access(cp, 0, -cp->acw, 0);
324eda14cbcSMatt Macy 		if (cp->provider != NULL) {
325eda14cbcSMatt Macy 			ZFS_LOG(1, "Destroying consumer for %s.",
326eda14cbcSMatt Macy 			    cp->provider->name ? cp->provider->name : "NULL");
327eda14cbcSMatt Macy 			g_detach(cp);
328eda14cbcSMatt Macy 		}
329eda14cbcSMatt Macy 		g_destroy_consumer(cp);
330eda14cbcSMatt Macy 	}
331eda14cbcSMatt Macy 	/* Destroy geom if there are no consumers left. */
332eda14cbcSMatt Macy 	if (LIST_EMPTY(&gp->consumer)) {
333eda14cbcSMatt Macy 		ZFS_LOG(1, "Destroyed geom %s.", gp->name);
334eda14cbcSMatt Macy 		g_wither_geom(gp, ENXIO);
335eda14cbcSMatt Macy 	}
336eda14cbcSMatt Macy }
337eda14cbcSMatt Macy 
338eda14cbcSMatt Macy static void
339eda14cbcSMatt Macy vdev_geom_close_locked(vdev_t *vd)
340eda14cbcSMatt Macy {
341eda14cbcSMatt Macy 	struct g_consumer *cp;
342eda14cbcSMatt Macy 	struct consumer_priv_t *priv;
343eda14cbcSMatt Macy 	struct consumer_vdev_elem *elem, *elem_temp;
344eda14cbcSMatt Macy 
345eda14cbcSMatt Macy 	g_topology_assert();
346eda14cbcSMatt Macy 
347eda14cbcSMatt Macy 	cp = vd->vdev_tsd;
348eda14cbcSMatt Macy 	vd->vdev_delayed_close = B_FALSE;
349eda14cbcSMatt Macy 	if (cp == NULL)
350eda14cbcSMatt Macy 		return;
351eda14cbcSMatt Macy 
352eda14cbcSMatt Macy 	ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
353eda14cbcSMatt Macy 	KASSERT(cp->private != NULL, ("%s: cp->private is NULL", __func__));
354eda14cbcSMatt Macy 	priv = (struct consumer_priv_t *)&cp->private;
355eda14cbcSMatt Macy 	vd->vdev_tsd = NULL;
356eda14cbcSMatt Macy 	SLIST_FOREACH_SAFE(elem, priv, elems, elem_temp) {
357eda14cbcSMatt Macy 		if (elem->vd == vd) {
358eda14cbcSMatt Macy 			SLIST_REMOVE(priv, elem, consumer_vdev_elem, elems);
359eda14cbcSMatt Macy 			g_free(elem);
360eda14cbcSMatt Macy 		}
361eda14cbcSMatt Macy 	}
362eda14cbcSMatt Macy 
363eda14cbcSMatt Macy 	vdev_geom_detach(cp, B_TRUE);
364eda14cbcSMatt Macy }
365eda14cbcSMatt Macy 
/*
 * Issue one or more bios to the vdev in parallel
 * cmds, datas, offsets, errors, and sizes are arrays of length ncmds.  Each IO
 * operation is described by parallel entries from each array.  There may be
 * more bios actually issued than entries in the array
 */
static void
vdev_geom_io(struct g_consumer *cp, int *cmds, void **datas, off_t *offsets,
    off_t *sizes, int *errors, int ncmds)
{
	struct bio **bios;
	uint8_t *p;
	off_t off, maxio, s, end;
	int i, n_bios, j;
	size_t bios_size;

	/*
	 * Largest transfer per bio: maxphys rounded down to a whole number
	 * of provider sectors.  Commands larger than this are split.
	 */
	maxio = maxphys - (maxphys % cp->provider->sectorsize);
	n_bios = 0;

	/* How many bios are required for all commands ? */
	for (i = 0; i < ncmds; i++)
		n_bios += (sizes[i] + maxio - 1) / maxio;

	/* Allocate memory for the bios */
	bios_size = n_bios * sizeof (struct bio *);
	bios = kmem_zalloc(bios_size, KM_SLEEP);

	/* Prepare and issue all of the bios */
	for (i = j = 0; i < ncmds; i++) {
		off = offsets[i];
		p = datas[i];
		s = sizes[i];
		end = off + s;
		/* Offsets and sizes must be sector-aligned. */
		ASSERT0(off % cp->provider->sectorsize);
		ASSERT0(s % cp->provider->sectorsize);

		/* Split command i into maxio-sized chunks, one bio each. */
		for (; off < end; off += maxio, p += maxio, s -= maxio, j++) {
			bios[j] = g_alloc_bio();
			bios[j]->bio_cmd = cmds[i];
			bios[j]->bio_done = NULL;	/* polled via biowait() below */
			bios[j]->bio_offset = off;
			bios[j]->bio_length = MIN(s, maxio);
			bios[j]->bio_data = (caddr_t)p;
			g_io_request(bios[j], cp);
		}
	}
	ASSERT3S(j, ==, n_bios);

	/* Wait for all of the bios to complete, and clean them up */
	for (i = j = 0; i < ncmds; i++) {
		off = offsets[i];
		s = sizes[i];
		end = off + s;

		/*
		 * Walk the same chunking as above so j tracks the bios
		 * belonging to command i; keep the first error seen.
		 */
		for (; off < end; off += maxio, s -= maxio, j++) {
			errors[i] = biowait(bios[j], "vdev_geom_io") ||
			    errors[i];
			g_destroy_bio(bios[j]);
		}
	}
	kmem_free(bios, bios_size);
}
428eda14cbcSMatt Macy 
/*
 * Read the vdev config from a device.  Return the number of valid labels that
 * were found.  The vdev config will be returned in config if and only if at
 * least one valid label was found.
 *
 * All four labels are read in parallel via vdev_geom_io(); *configp receives
 * the last valid config parsed (earlier ones are freed).  Must be called
 * without the GEOM topology lock held.
 */
static int
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **configp)
{
	struct g_provider *pp;
	nvlist_t *config;
	vdev_phys_t *vdev_lists[VDEV_LABELS];
	char *buf;
	size_t buflen;
	uint64_t psize, state, txg;
	off_t offsets[VDEV_LABELS];
	off_t size;
	off_t sizes[VDEV_LABELS];
	int cmds[VDEV_LABELS];
	int errors[VDEV_LABELS];
	int l, nlabels;

	g_topology_assert_not();

	pp = cp->provider;
	ZFS_LOG(1, "Reading config from %s...", pp->name);

	/* Usable device size, truncated to a whole number of labels. */
	psize = pp->mediasize;
	psize = P2ALIGN_TYPED(psize, sizeof (vdev_label_t), uint64_t);

	/* sizeof (vdev_phys_t) rounded up to a multiple of the sector size. */
	size = sizeof (*vdev_lists[0]) + pp->sectorsize -
	    ((sizeof (*vdev_lists[0]) - 1) % pp->sectorsize) - 1;

	buflen = sizeof (vdev_lists[0]->vp_nvlist);

	/* Create all of the IO requests */
	for (l = 0; l < VDEV_LABELS; l++) {
		cmds[l] = BIO_READ;
		vdev_lists[l] = kmem_alloc(size, KM_SLEEP);
		/* Skip the boot block; read only the vdev_phys_t nvlist. */
		offsets[l] = vdev_label_offset(psize, l, 0) + VDEV_SKIP_SIZE;
		sizes[l] = size;
		errors[l] = 0;
		ASSERT0(offsets[l] % pp->sectorsize);
	}

	/* Issue the IO requests */
	vdev_geom_io(cp, cmds, (void**)vdev_lists, offsets, sizes, errors,
	    VDEV_LABELS);

	/* Parse the labels */
	config = *configp = NULL;
	nlabels = 0;
	for (l = 0; l < VDEV_LABELS; l++) {
		if (errors[l] != 0)
			continue;

		buf = vdev_lists[l]->vp_nvlist;

		if (nvlist_unpack(buf, buflen, &config, 0) != 0)
			continue;

		/* Reject labels with a missing or out-of-range pool state. */
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(config);
			continue;
		}

		/*
		 * Labels other than spares and L2ARC devices must carry a
		 * nonzero pool txg to be considered valid.
		 */
		if (state != POOL_STATE_SPARE &&
		    state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(config);
			continue;
		}

		/* Keep only the most recently parsed valid config. */
		if (*configp != NULL)
			nvlist_free(*configp);
		*configp = config;
		nlabels++;
	}

	/* Free the label storage */
	for (l = 0; l < VDEV_LABELS; l++)
		kmem_free(vdev_lists[l], size);

	return (nlabels);
}
515eda14cbcSMatt Macy 
516eda14cbcSMatt Macy static void
517eda14cbcSMatt Macy resize_configs(nvlist_t ***configs, uint64_t *count, uint64_t id)
518eda14cbcSMatt Macy {
519eda14cbcSMatt Macy 	nvlist_t **new_configs;
520eda14cbcSMatt Macy 	uint64_t i;
521eda14cbcSMatt Macy 
522eda14cbcSMatt Macy 	if (id < *count)
523eda14cbcSMatt Macy 		return;
524eda14cbcSMatt Macy 	new_configs = kmem_zalloc((id + 1) * sizeof (nvlist_t *),
525eda14cbcSMatt Macy 	    KM_SLEEP);
526eda14cbcSMatt Macy 	for (i = 0; i < *count; i++)
527eda14cbcSMatt Macy 		new_configs[i] = (*configs)[i];
528eda14cbcSMatt Macy 	if (*configs != NULL)
529eda14cbcSMatt Macy 		kmem_free(*configs, *count * sizeof (void *));
530eda14cbcSMatt Macy 	*configs = new_configs;
531eda14cbcSMatt Macy 	*count = id + 1;
532eda14cbcSMatt Macy }
533eda14cbcSMatt Macy 
/*
 * Merge one vdev label config (cfg) into the per-top-level-vdev configs
 * array for the pool called name.  Takes ownership of cfg: it is either
 * stored in (*configs)[id] or freed.  The first pool guid seen is latched
 * into *known_pool_guid; labels from other pools are discarded.
 */
static void
process_vdev_config(nvlist_t ***configs, uint64_t *count, nvlist_t *cfg,
    const char *name, uint64_t *known_pool_guid)
{
	nvlist_t *vdev_tree;
	uint64_t pool_guid;
	uint64_t vdev_guid;
	uint64_t id, txg, known_txg;
	const char *pname;

	/* Only labels belonging to the requested pool name are considered. */
	if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &pname) != 0 ||
	    strcmp(pname, name) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &pool_guid) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_TOP_GUID, &vdev_guid) != 0)
		goto ignore;

	if (nvlist_lookup_nvlist(cfg, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0)
		goto ignore;

	/* id is the top-level vdev's index within the pool's vdev tree. */
	if (nvlist_lookup_uint64(vdev_tree, ZPOOL_CONFIG_ID, &id) != 0)
		goto ignore;

	txg = fnvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_TXG);

	/* Latch the first pool guid seen; ignore labels from other pools. */
	if (*known_pool_guid != 0) {
		if (pool_guid != *known_pool_guid)
			goto ignore;
	} else
		*known_pool_guid = pool_guid;

	resize_configs(configs, count, id);

	/* Keep only the config with the newest txg for each vdev id. */
	if ((*configs)[id] != NULL) {
		known_txg = fnvlist_lookup_uint64((*configs)[id],
		    ZPOOL_CONFIG_POOL_TXG);
		if (txg <= known_txg)
			goto ignore;
		nvlist_free((*configs)[id]);
	}

	(*configs)[id] = cfg;
	return;

ignore:
	nvlist_free(cfg);
}
584eda14cbcSMatt Macy 
/*
 * Taste every provider in the system (except those belonging to ZFS's own
 * vdev class) looking for labels of the pool called name.  On success the
 * best config per top-level vdev is returned in *configs (length *count)
 * and 0 is returned; ENOENT is returned when no labels were found.
 * Caller is responsible for freeing the returned configs array.
 */
int
vdev_geom_read_pool_label(const char *name,
    nvlist_t ***configs, uint64_t *count)
{
	struct g_class *mp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *zcp;
	nvlist_t *vdev_cfg;
	uint64_t pool_guid;
	int nlabels;

	DROP_GIANT();
	g_topology_lock();

	*configs = NULL;
	*count = 0;
	pool_guid = 0;
	LIST_FOREACH(mp, &g_classes, class) {
		/* Skip ZFS's own class so we don't taste our own consumers. */
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (pp->flags & G_PF_WITHER)
					continue;
				zcp = vdev_geom_attach(pp, NULL, B_TRUE);
				if (zcp == NULL)
					continue;
				/*
				 * Reading the label does I/O, so the
				 * topology lock must be dropped around it.
				 */
				g_topology_unlock();
				nlabels = vdev_geom_read_config(zcp, &vdev_cfg);
				g_topology_lock();
				vdev_geom_detach(zcp, B_TRUE);
				if (nlabels == 0)
					continue;
				ZFS_LOG(1, "successfully read vdev config");

				/* process_vdev_config() consumes vdev_cfg. */
				process_vdev_config(configs, count,
				    vdev_cfg, name, &pool_guid);
			}
		}
	}
	g_topology_unlock();
	PICKUP_GIANT();

	return (*count > 0 ? 0 : ENOENT);
}
633eda14cbcSMatt Macy 
/*
 * Degree to which on-disk labels match an expected vdev, ordered from
 * weakest to strongest.  ZERO_MATCH deliberately shares the value 1 with
 * TOPGUID_MATCH: vdev_attach_ok() returns ZERO_MATCH + nlabels on a
 * vdev-guid match, so ONE_MATCH..FULL_MATCH encode how many labels agreed.
 */
enum match {
	NO_MATCH = 0,		/* No matching labels found */
	TOPGUID_MATCH = 1,	/* Labels match top guid, not vdev guid */
	ZERO_MATCH = 1,		/* Should never be returned */
	ONE_MATCH = 2,		/* 1 label matching the vdev_guid */
	TWO_MATCH = 3,		/* 2 label matching the vdev_guid */
	THREE_MATCH = 4,	/* 3 label matching the vdev_guid */
	FULL_MATCH = 5		/* all labels match the vdev_guid */
};
643eda14cbcSMatt Macy 
/*
 * Taste the provider: attach a temporary consumer, read the vdev labels,
 * and grade how well they match the given vdev.
 *
 * Returns NO_MATCH if the labels are unreadable or belong to a different
 * pool/vdev, TOPGUID_MATCH if only the top-level vdev guid matches, or
 * ZERO_MATCH + nlabels (i.e. ONE_MATCH..FULL_MATCH) on a vdev guid match.
 * Called with the GEOM topology lock held; it is dropped temporarily
 * around the label read.
 */
static enum match
vdev_attach_ok(vdev_t *vd, struct g_provider *pp)
{
	nvlist_t *config;
	uint64_t pool_guid, top_guid, vdev_guid;
	struct g_consumer *cp;
	int nlabels;

	cp = vdev_geom_attach(pp, NULL, B_TRUE);
	if (cp == NULL) {
		ZFS_LOG(1, "Unable to attach tasting instance to %s.",
		    pp->name);
		return (NO_MATCH);
	}
	/* Reading the label config does I/O; drop the topology lock. */
	g_topology_unlock();
	nlabels = vdev_geom_read_config(cp, &config);
	g_topology_lock();
	vdev_geom_detach(cp, B_TRUE);
	if (nlabels == 0) {
		ZFS_LOG(1, "Unable to read config from %s.", pp->name);
		return (NO_MATCH);
	}

	/* Guids absent from the label are left at 0. */
	pool_guid = 0;
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid);
	top_guid = 0;
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID, &top_guid);
	vdev_guid = 0;
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid);
	nvlist_free(config);

	/*
	 * Check that the label's pool guid matches the desired guid.
	 * Inactive spares and L2ARCs do not have any pool guid in the label.
	 */
	if (pool_guid != 0 && pool_guid != spa_guid(vd->vdev_spa)) {
		ZFS_LOG(1, "pool guid mismatch for provider %s: %ju != %ju.",
		    pp->name,
		    (uintmax_t)spa_guid(vd->vdev_spa), (uintmax_t)pool_guid);
		return (NO_MATCH);
	}

	/*
	 * Check that the label's vdev guid matches the desired guid.
	 * The second condition handles possible race on vdev detach, when
	 * remaining vdev receives GUID of destroyed top level mirror vdev.
	 */
	if (vdev_guid == vd->vdev_guid) {
		ZFS_LOG(1, "guids match for provider %s.", pp->name);
		return (ZERO_MATCH + nlabels);
	} else if (top_guid == vd->vdev_guid && vd == vd->vdev_top) {
		ZFS_LOG(1, "top vdev guid match for provider %s.", pp->name);
		return (TOPGUID_MATCH);
	}
	ZFS_LOG(1, "vdev guid mismatch for provider %s: %ju != %ju.",
	    pp->name, (uintmax_t)vd->vdev_guid, (uintmax_t)vdev_guid);
	return (NO_MATCH);
}
702eda14cbcSMatt Macy 
/*
 * Walk every provider of every GEOM class (skipping ZFS's own class and
 * withering geoms) looking for the device whose labels best match this
 * vdev's guids.  Ties are broken in favor of the provider whose name
 * matches the recorded vdev path, and the search stops early once a
 * FULL_MATCH is seen.  Returns the attached consumer, or NULL if no
 * suitable provider was found or the attach failed.
 * Must be called with the GEOM topology lock held.
 */
static struct g_consumer *
vdev_geom_attach_by_guids(vdev_t *vd)
{
	struct g_class *mp;
	struct g_geom *gp;
	struct g_provider *pp, *best_pp;
	struct g_consumer *cp;
	const char *vdpath;
	enum match match, best_match;

	g_topology_assert();

	/* Strip the "/dev/" prefix for comparison with provider names. */
	vdpath = vd->vdev_path + sizeof ("/dev/") - 1;
	cp = NULL;
	best_pp = NULL;
	best_match = NO_MATCH;
	LIST_FOREACH(mp, &g_classes, class) {
		/* Don't taste providers of ZFS's own GEOM class. */
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			/* Skip geoms that are being torn down. */
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				match = vdev_attach_ok(vd, pp);
				if (match > best_match) {
					best_match = match;
					best_pp = pp;
				} else if (match == best_match) {
					/* On a tie, prefer the recorded path. */
					if (strcmp(pp->name, vdpath) == 0) {
						best_pp = pp;
					}
				}
				if (match == FULL_MATCH)
					goto out;
			}
		}
	}

out:
	if (best_pp) {
		cp = vdev_geom_attach(best_pp, vd, B_TRUE);
		if (cp == NULL) {
			printf("ZFS WARNING: Unable to attach to %s.\n",
			    best_pp->name);
		}
	}
	return (cp);
}
751eda14cbcSMatt Macy 
/*
 * Locate and attach to this vdev's device by its guids alone, and on
 * success rewrite vd->vdev_path to the "/dev/<provider>" name that was
 * actually found (the disk may have moved).  Returns the attached
 * consumer, or NULL on failure.
 * Must be called with the GEOM topology lock held.
 */
static struct g_consumer *
vdev_geom_open_by_guids(vdev_t *vd)
{
	struct g_consumer *cp;
	char *buf;
	size_t len;

	g_topology_assert();

	ZFS_LOG(1, "Searching by guids [%ju:%ju].",
	    (uintmax_t)spa_guid(vd->vdev_spa), (uintmax_t)vd->vdev_guid);
	cp = vdev_geom_attach_by_guids(vd);
	if (cp != NULL) {
		/* Record the path of the provider we actually attached to. */
		len = strlen(cp->provider->name) + strlen("/dev/") + 1;
		buf = kmem_alloc(len, KM_SLEEP);

		snprintf(buf, len, "/dev/%s", cp->provider->name);
		spa_strfree(vd->vdev_path);
		vd->vdev_path = buf;

		ZFS_LOG(1, "Attach by guid [%ju:%ju] succeeded, provider %s.",
		    (uintmax_t)spa_guid(vd->vdev_spa),
		    (uintmax_t)vd->vdev_guid, cp->provider->name);
	} else {
		ZFS_LOG(1, "Search by guid [%ju:%ju] failed.",
		    (uintmax_t)spa_guid(vd->vdev_spa),
		    (uintmax_t)vd->vdev_guid);
	}

	return (cp);
}
783eda14cbcSMatt Macy 
784eda14cbcSMatt Macy static struct g_consumer *
785eda14cbcSMatt Macy vdev_geom_open_by_path(vdev_t *vd, int check_guid)
786eda14cbcSMatt Macy {
787eda14cbcSMatt Macy 	struct g_provider *pp;
788eda14cbcSMatt Macy 	struct g_consumer *cp;
789eda14cbcSMatt Macy 
790eda14cbcSMatt Macy 	g_topology_assert();
791eda14cbcSMatt Macy 
792eda14cbcSMatt Macy 	cp = NULL;
793eda14cbcSMatt Macy 	pp = g_provider_by_name(vd->vdev_path + sizeof ("/dev/") - 1);
794eda14cbcSMatt Macy 	if (pp != NULL) {
795eda14cbcSMatt Macy 		ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
796eda14cbcSMatt Macy 		if (!check_guid || vdev_attach_ok(vd, pp) == FULL_MATCH)
797eda14cbcSMatt Macy 			cp = vdev_geom_attach(pp, vd, B_FALSE);
798eda14cbcSMatt Macy 	}
799eda14cbcSMatt Macy 
800eda14cbcSMatt Macy 	return (cp);
801eda14cbcSMatt Macy }
802eda14cbcSMatt Macy 
/*
 * Open the GEOM device backing this vdev (vdev_ops_t open method).
 * On success, reports the device size and logical/physical ashift via
 * the out parameters and returns 0.  On failure, sets
 * vd->vdev_stat.vs_aux and returns an errno.  During a reopen the
 * existing consumer is reused and only the size/ashift are refreshed.
 */
static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	int error, has_trim;
	uint16_t rate;

	/*
	 * Set the TLS to indicate downstack that we
	 * should not access zvols
	 */
	VERIFY0(tsd_set(zfs_geom_probe_vdev_key, vd));

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || strncmp(vd->vdev_path, "/dev/", 5) != 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	/*
	 * Reopen the device if it's not currently open. Otherwise,
	 * just update the physical size of the device.
	 */
	if ((cp = vd->vdev_tsd) != NULL) {
		ASSERT(vd->vdev_reopening);
		goto skip_open;
	}

	DROP_GIANT();
	g_topology_lock();
	error = 0;

	if (vd->vdev_spa->spa_is_splitting ||
	    ((vd->vdev_prevstate == VDEV_STATE_UNKNOWN &&
	    (vd->vdev_spa->spa_load_state == SPA_LOAD_NONE ||
	    vd->vdev_spa->spa_load_state == SPA_LOAD_CREATE)))) {
		/*
		 * We are dealing with a vdev that hasn't been previously
		 * opened (since boot), and we are not loading an
		 * existing pool configuration.  This looks like a
		 * vdev add operation to a new or existing pool.
		 * Assume the user really wants to do this, and find
		 * GEOM provider by its name, ignoring GUID mismatches.
		 *
		 * XXPOLICY: It would be safer to only allow a device
		 *           that is unlabeled or labeled but missing
		 *           GUID information to be opened in this fashion,
		 *           unless we are doing a split, in which case we
		 *           should allow any guid.
		 */
		cp = vdev_geom_open_by_path(vd, 0);
	} else {
		/*
		 * Try using the recorded path for this device, but only
		 * accept it if its label data contains the expected GUIDs.
		 */
		cp = vdev_geom_open_by_path(vd, 1);
		if (cp == NULL) {
			/*
			 * The device at vd->vdev_path doesn't have the
			 * expected GUIDs. The disks might have merely
			 * moved around so try all other GEOM providers
			 * to find one with the right GUIDs.
			 */
			cp = vdev_geom_open_by_guids(vd);
		}
	}

	/* Clear the TLS now that tasting is done */
	VERIFY0(tsd_set(zfs_geom_probe_vdev_key, NULL));

	if (cp == NULL) {
		ZFS_LOG(1, "Vdev %s not found.", vd->vdev_path);
		error = ENOENT;
	} else {
		struct consumer_priv_t *priv;
		struct consumer_vdev_elem *elem;
		int spamode;

		/* Link this vdev onto the consumer's private vdev list. */
		priv = (struct consumer_priv_t *)&cp->private;
		if (cp->private == NULL)
			SLIST_INIT(priv);
		elem = g_malloc(sizeof (*elem), M_WAITOK|M_ZERO);
		elem->vd = vd;
		SLIST_INSERT_HEAD(priv, elem, elems);

		spamode = spa_mode(vd->vdev_spa);
		if (cp->provider->sectorsize > VDEV_PAD_SIZE ||
		    !ISP2(cp->provider->sectorsize)) {
			ZFS_LOG(1, "Provider %s has unsupported sectorsize.",
			    cp->provider->name);

			vdev_geom_close_locked(vd);
			error = EINVAL;
			cp = NULL;
		} else if (cp->acw == 0 && (spamode & FWRITE) != 0) {
			int i;

			/*
			 * Need write access but none is granted yet; retry
			 * up to 5 times, sleeping half a second between
			 * attempts with the topology lock dropped.
			 */
			for (i = 0; i < 5; i++) {
				error = g_access(cp, 0, 1, 0);
				if (error == 0)
					break;
				g_topology_unlock();
				tsleep(vd, 0, "vdev", hz / 2);
				g_topology_lock();
			}
			if (error != 0) {
				printf("ZFS WARNING: Unable to open %s for "
				    "writing (error=%d).\n",
				    cp->provider->name, error);
				vdev_geom_close_locked(vd);
				cp = NULL;
			}
		}
	}

	/* Fetch initial physical path information for this device. */
	if (cp != NULL) {
		vdev_geom_attrchanged(cp, "GEOM::physpath");

		/* Set other GEOM characteristics */
		vdev_geom_set_physpath(vd, cp, /* do_null_update */B_FALSE);
	}

	g_topology_unlock();
	PICKUP_GIANT();
	if (cp == NULL) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		vdev_dbgmsg(vd, "vdev_geom_open: failed to open [error=%d]",
		    error);
		return (error);
	}
skip_open:
	pp = cp->provider;

	/*
	 * Determine the actual size of the device.
	 */
	*max_psize = *psize = pp->mediasize;

	/*
	 * Determine the device's minimum transfer size and preferred
	 * transfer size.
	 */
	*logical_ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;
	*physical_ashift = 0;
	/*
	 * Report the stripe size as the physical sector size only when it
	 * is a power of 2 larger than the logical sector size and the
	 * stripe starts at offset 0.
	 */
	if (pp->stripesize && pp->stripesize > (1 << *logical_ashift) &&
	    ISP2(pp->stripesize) && pp->stripeoffset == 0)
		*physical_ashift = highbit(pp->stripesize) - 1;

	/*
	 * Clear the nowritecache settings, so that on a vdev_reopen()
	 * we will try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	/* Inform the ZIO pipeline that we are non-rotational. */
	error = g_getattr("GEOM::rotation_rate", cp, &rate);
	if (error == 0 && rate == DISK_RR_NON_ROTATING)
		vd->vdev_nonrot = B_TRUE;
	else
		vd->vdev_nonrot = B_FALSE;

	/* Set when device reports it supports TRIM. */
	error = g_getattr("GEOM::candelete", cp, &has_trim);
	vd->vdev_has_trim = (error == 0 && has_trim);

	/* Set when device reports it supports secure TRIM. */
	/* unavailable on FreeBSD */
	vd->vdev_has_securetrim = B_FALSE;

	return (0);
}
980eda14cbcSMatt Macy 
/*
 * Close the vdev's GEOM consumer (vdev_ops_t close method).  During a
 * reopen the consumer is normally kept open, unless it has been
 * orphaned or its provider has a pending error, in which case it is
 * closed so the subsequent open starts fresh.
 */
static void
vdev_geom_close(vdev_t *vd)
{
	struct g_consumer *cp;
	boolean_t locked;

	cp = vd->vdev_tsd;

	/* May be entered with or without the topology lock held. */
	DROP_GIANT();
	locked = g_topology_locked();
	if (!locked)
		g_topology_lock();

	if (!vd->vdev_reopening ||
	    (cp != NULL && ((cp->flags & G_CF_ORPHAN) != 0 ||
	    (cp->provider != NULL && cp->provider->error != 0))))
		vdev_geom_close_locked(vd);

	if (!locked)
		g_topology_unlock();
	PICKUP_GIANT();
}
1003eda14cbcSMatt Macy 
/*
 * GEOM bio completion callback: propagate the bio's status into the
 * owning zio and hand the zio back to the ZIO pipeline.  Runs in
 * GEOM's completion context.
 */
static void
vdev_geom_io_intr(struct bio *bp)
{
	vdev_t *vd;
	zio_t *zio;

	zio = bp->bio_caller1;
	vd = zio->io_vd;
	zio->io_error = bp->bio_error;
	/* A short transfer with no explicit error is still an I/O error. */
	if (zio->io_error == 0 && bp->bio_resid != 0)
		zio->io_error = SET_ERROR(EIO);

	switch (zio->io_error) {
	case ENXIO:
		if (!vd->vdev_remove_wanted) {
			/*
			 * If provider's error is set we assume it is being
			 * removed.
			 */
			if (bp->bio_to->error != 0) {
				vd->vdev_remove_wanted = B_TRUE;
				spa_async_request(zio->io_spa,
				    SPA_ASYNC_REMOVE);
			} else if (!vd->vdev_delayed_close) {
				vd->vdev_delayed_close = B_TRUE;
			}
		}
		break;
	}

	/*
	 * We have to split bio freeing into two parts, because the ABD code
	 * cannot be called in this context and vdev_op_io_done is not called
	 * for ZIO_TYPE_FLUSH zio-s.
	 */
	if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
		g_destroy_bio(bp);
		zio->io_bio = NULL;
	}
	zio_delay_interrupt(zio);
}
1045eda14cbcSMatt Macy 
/* Accumulator state shared across vdev_geom_check_unmapped_cb() calls. */
struct vdev_geom_check_unmapped_cb_state {
	int	pages;	/* Physical pages counted so far */
	uint_t	end;	/* In-page offset where the previous segment ended */
};
10505eb61f6cSMartin Matuska 
10515eb61f6cSMartin Matuska /*
10525eb61f6cSMartin Matuska  * Callback to check the ABD segment size/alignment and count the pages.
10535eb61f6cSMartin Matuska  * GEOM requires data buffer to look virtually contiguous.  It means only
10545eb61f6cSMartin Matuska  * the first page of the buffer may not start and only the last may not
10555eb61f6cSMartin Matuska  * end on a page boundary.  All other physical pages must be full.
10565eb61f6cSMartin Matuska  */
10575eb61f6cSMartin Matuska static int
10585eb61f6cSMartin Matuska vdev_geom_check_unmapped_cb(void *buf, size_t len, void *priv)
10595eb61f6cSMartin Matuska {
10605eb61f6cSMartin Matuska 	struct vdev_geom_check_unmapped_cb_state *s = priv;
10615eb61f6cSMartin Matuska 	vm_offset_t off = (vm_offset_t)buf & PAGE_MASK;
10625eb61f6cSMartin Matuska 
10635eb61f6cSMartin Matuska 	if (s->pages != 0 && off != 0)
10645eb61f6cSMartin Matuska 		return (1);
10655eb61f6cSMartin Matuska 	if (s->end != 0)
10665eb61f6cSMartin Matuska 		return (1);
10675eb61f6cSMartin Matuska 	s->end = (off + len) & PAGE_MASK;
10685eb61f6cSMartin Matuska 	s->pages += (off + len + PAGE_MASK) >> PAGE_SHIFT;
10695eb61f6cSMartin Matuska 	return (0);
10705eb61f6cSMartin Matuska }
10715eb61f6cSMartin Matuska 
10725eb61f6cSMartin Matuska /*
10735eb61f6cSMartin Matuska  * Check whether we can use unmapped I/O for this ZIO on this device to
10745eb61f6cSMartin Matuska  * avoid data copying between scattered and/or gang ABD buffer and linear.
10755eb61f6cSMartin Matuska  */
10765eb61f6cSMartin Matuska static int
10775eb61f6cSMartin Matuska vdev_geom_check_unmapped(zio_t *zio, struct g_consumer *cp)
10785eb61f6cSMartin Matuska {
10795eb61f6cSMartin Matuska 	struct vdev_geom_check_unmapped_cb_state s;
10805eb61f6cSMartin Matuska 
10811f88aa09SMartin Matuska 	/* If unmapped I/O is administratively disabled, respect that. */
10821f88aa09SMartin Matuska 	if (!unmapped_buf_allowed)
10831f88aa09SMartin Matuska 		return (0);
10841f88aa09SMartin Matuska 
10855eb61f6cSMartin Matuska 	/* If the buffer is already linear, then nothing to do here. */
10865eb61f6cSMartin Matuska 	if (abd_is_linear(zio->io_abd))
10875eb61f6cSMartin Matuska 		return (0);
10885eb61f6cSMartin Matuska 
10895eb61f6cSMartin Matuska 	/*
10905eb61f6cSMartin Matuska 	 * If unmapped I/O is not supported by the GEOM provider,
10915eb61f6cSMartin Matuska 	 * then we can't do anything and have to copy the data.
10925eb61f6cSMartin Matuska 	 */
10935eb61f6cSMartin Matuska 	if ((cp->provider->flags & G_PF_ACCEPT_UNMAPPED) == 0)
10945eb61f6cSMartin Matuska 		return (0);
10955eb61f6cSMartin Matuska 
10965eb61f6cSMartin Matuska 	/* Check the buffer chunks sizes/alignments and count pages. */
10975eb61f6cSMartin Matuska 	s.pages = s.end = 0;
10985eb61f6cSMartin Matuska 	if (abd_iterate_func(zio->io_abd, 0, zio->io_size,
10995eb61f6cSMartin Matuska 	    vdev_geom_check_unmapped_cb, &s))
11005eb61f6cSMartin Matuska 		return (0);
11015eb61f6cSMartin Matuska 	return (s.pages);
11025eb61f6cSMartin Matuska }
11035eb61f6cSMartin Matuska 
/*
 * abd_iterate_func() callback: translate one ABD segment into entries
 * of the bio's physical page array (bio_ma).  The first segment also
 * records its offset into the first page (bio_ma_offset); all later
 * segments must start page-aligned, which a prior
 * vdev_geom_check_unmapped() pass has already verified.
 */
static int
vdev_geom_fill_unmap_cb(void *buf, size_t len, void *priv)
{
	struct bio *bp = priv;
	vm_offset_t addr = (vm_offset_t)buf;
	vm_offset_t end = addr + len;

	if (bp->bio_ma_n == 0) {
		/* First segment: remember its offset, round down to page. */
		bp->bio_ma_offset = addr & PAGE_MASK;
		addr &= ~PAGE_MASK;
	} else {
		ASSERT0(P2PHASE(addr, PAGE_SIZE));
	}
	do {
		/* Resolve each kernel virtual page to its vm_page. */
		bp->bio_ma[bp->bio_ma_n++] =
		    PHYS_TO_VM_PAGE(pmap_kextract(addr));
		addr += PAGE_SIZE;
	} while (addr < end);
	return (0);
}
11275eb61f6cSMartin Matuska 
/*
 * Issue a zio to the underlying GEOM consumer (vdev_ops_t io_start
 * method).  Translates the zio into a struct bio and submits it via
 * g_io_request(); completion is delivered asynchronously through
 * vdev_geom_io_intr().  Some flush/trim requests are completed
 * synchronously here when disabled or unsupported.
 */
static void
vdev_geom_io_start(zio_t *zio)
{
	vdev_t *vd;
	struct g_consumer *cp;
	struct bio *bp;

	vd = zio->io_vd;

	if (zio->io_type == ZIO_TYPE_FLUSH) {
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		}

		/* Flushing disabled: complete the zio without doing I/O. */
		if (zfs_nocacheflush || vdev_geom_bio_flush_disable) {
			zio_execute(zio);
			return;
		}

		/* Device previously reported cache flush as unsupported. */
		if (vd->vdev_nowritecache) {
			zio->io_error = SET_ERROR(ENOTSUP);
			zio_execute(zio);
			return;
		}
	} else if (zio->io_type == ZIO_TYPE_TRIM) {
		if (vdev_geom_bio_delete_disable) {
			zio_execute(zio);
			return;
		}
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ ||
	    zio->io_type == ZIO_TYPE_WRITE ||
	    zio->io_type == ZIO_TYPE_TRIM ||
	    zio->io_type == ZIO_TYPE_FLUSH);

	cp = vd->vdev_tsd;
	if (cp == NULL) {
		/* Device is not open; fail the zio immediately. */
		zio->io_error = SET_ERROR(ENXIO);
		zio_interrupt(zio);
		return;
	}
	bp = g_alloc_bio();
	bp->bio_caller1 = zio;
	switch (zio->io_type) {
	case ZIO_TYPE_READ:
	case ZIO_TYPE_WRITE:
		zio->io_target_timestamp = zio_handle_io_delay(zio);
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		if (zio->io_type == ZIO_TYPE_READ)
			bp->bio_cmd = BIO_READ;
		else
			bp->bio_cmd = BIO_WRITE;

		/*
		 * If possible, represent scattered and/or gang ABD buffer to
		 * GEOM as an array of physical pages.  It allows to satisfy
		 * requirement of virtually contiguous buffer without copying.
		 */
		int pgs = vdev_geom_check_unmapped(zio, cp);
		if (pgs > 0) {
			bp->bio_ma = malloc(sizeof (struct vm_page *) * pgs,
			    M_DEVBUF, M_WAITOK);
			bp->bio_ma_n = 0;
			bp->bio_ma_offset = 0;
			abd_iterate_func(zio->io_abd, 0, zio->io_size,
			    vdev_geom_fill_unmap_cb, bp);
			bp->bio_data = unmapped_buf;
			bp->bio_flags |= BIO_UNMAPPED;
		} else {
			/*
			 * Fall back to a linear buffer borrowed from the
			 * ABD (copied in first for writes).
			 */
			if (zio->io_type == ZIO_TYPE_READ) {
				bp->bio_data = abd_borrow_buf(zio->io_abd,
				    zio->io_size);
			} else {
				bp->bio_data = abd_borrow_buf_copy(zio->io_abd,
				    zio->io_size);
			}
		}
		break;
	case ZIO_TYPE_TRIM:
		bp->bio_cmd = BIO_DELETE;
		bp->bio_data = NULL;
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		break;
	case ZIO_TYPE_FLUSH:
		bp->bio_cmd = BIO_FLUSH;
		bp->bio_data = NULL;
		bp->bio_offset = cp->provider->mediasize;
		bp->bio_length = 0;
		break;
	default:
		panic("invalid zio->io_type: %d\n", zio->io_type);
	}
	bp->bio_done = vdev_geom_io_intr;
	zio->io_bio = bp;

	g_io_request(bp, cp);
}
1231eda14cbcSMatt Macy 
/*
 * Completion-side cleanup for the zio (vdev_ops_t io_done method):
 * return the borrowed linear buffer to the ABD (or free the physical
 * page array for unmapped I/O) and destroy the bio.  Non-read/write
 * zios had their bio destroyed already in vdev_geom_io_intr().
 */
static void
vdev_geom_io_done(zio_t *zio)
{
	struct bio *bp = zio->io_bio;

	if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
		ASSERT3P(bp, ==, NULL);
		return;
	}

	if (bp == NULL) {
		/* vdev_geom_io_start() failed before allocating a bio. */
		ASSERT3S(zio->io_error, ==, ENXIO);
		return;
	}

	if (bp->bio_ma != NULL) {
		/* Unmapped I/O: just free the page array. */
		free(bp->bio_ma, M_DEVBUF);
	} else {
		/* Linear bounce buffer: copy back for reads, then return. */
		if (zio->io_type == ZIO_TYPE_READ) {
			abd_return_buf_copy(zio->io_abd, bp->bio_data,
			    zio->io_size);
		} else {
			abd_return_buf(zio->io_abd, bp->bio_data,
			    zio->io_size);
		}
	}

	g_destroy_bio(bp);
	zio->io_bio = NULL;
}
1262eda14cbcSMatt Macy 
/*
 * vdev_ops_t hold method: nothing to do for GEOM-backed vdevs.
 */
static void
vdev_geom_hold(vdev_t *vd)
{
}
1267eda14cbcSMatt Macy 
/*
 * vdev_ops_t rele method: nothing to do for GEOM-backed vdevs.
 */
static void
vdev_geom_rele(vdev_t *vd)
{
}
1272eda14cbcSMatt Macy 
/*
 * Operation table wiring GEOM-backed "disk" leaf vdevs into the generic
 * vdev layer.  Callbacks left NULL are optional and unused here.
 */
vdev_ops_t vdev_disk_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_geom_open,
	.vdev_op_close = vdev_geom_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_geom_io_start,
	.vdev_op_io_done = vdev_geom_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = vdev_geom_hold,
	.vdev_op_rele = vdev_geom_rele,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_DISK,		/* name of this vdev type */
	.vdev_op_leaf = B_TRUE			/* leaf vdev */
};
1297