xref: /netbsd-src/external/cddl/osnet/dist/uts/common/fs/zfs/vdev_queue.c (revision 0762753c60580346994e3024e86804d644503578)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
28  * Copyright (c) 2014 Integros [integros.com]
29  */
30 
31 #include <sys/zfs_context.h>
32 #include <sys/vdev_impl.h>
33 #include <sys/spa_impl.h>
34 #include <sys/zio.h>
35 #include <sys/avl.h>
36 #include <sys/dsl_pool.h>
37 #include <sys/metaslab_impl.h>
38 
39 /*
40  * ZFS I/O Scheduler
41  * -----------------
42  *
43  * ZFS issues I/O operations to leaf vdevs to satisfy and complete zios.  The
44  * I/O scheduler determines when and in what order those operations are
45  * issued.  The I/O scheduler divides operations into six I/O classes
46  * prioritized in the following order: sync read, sync write, async read,
47  * async write, scrub/resilver and trim.  Each queue defines the minimum and
48  * maximum number of concurrent operations that may be issued to the device.
49  * In addition, the device has an aggregate maximum. Note that the sum of the
50  * per-queue minimums must not exceed the aggregate maximum, and if the
51  * aggregate maximum is equal to or greater than the sum of the per-queue
52  * maximums, the per-queue minimums have no effect.
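 * (For illustration: with the default tunables defined below, the per-queue
 * minimums sum to 24 and the per-queue maximums to 99, both well under the
 * default aggregate maximum of 1000, so with stock settings the per-queue
 * minimums have no effect.)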
53  *
54  * For many physical devices, throughput increases with the number of
55  * concurrent operations, but latency typically suffers. Further, physical
56  * devices typically have a limit at which more concurrent operations have no
57  * effect on throughput or can actually cause it to decrease.
58  *
59  * The scheduler selects the next operation to issue by first looking for an
60  * I/O class whose minimum has not been satisfied. Once all are satisfied and
61  * the aggregate maximum has not been hit, the scheduler looks for classes
62  * whose maximum has not been satisfied. Iteration through the I/O classes is
63  * done in the order specified above. No further operations are issued if the
64  * aggregate maximum number of concurrent operations has been hit or if there
65  * are no operations queued for an I/O class that has not hit its maximum.
66  * Every time an I/O is queued or an operation completes, the I/O scheduler
67  * looks for new operations to issue.
68  *
69  * All I/O classes have a fixed maximum number of outstanding operations
70  * except for the async write class. Asynchronous writes represent the data
71  * that is committed to stable storage during the syncing stage for
72  * transaction groups (see txg.c). Transaction groups enter the syncing state
73  * periodically so the number of queued async writes will quickly burst up and
74  * then bleed down to zero. Rather than servicing them as quickly as possible,
75  * the I/O scheduler changes the maximum number of active async write I/Os
76  * according to the amount of dirty data in the pool (see dsl_pool.c). Since
77  * both throughput and latency typically increase with the number of
78  * concurrent operations issued to physical devices, reducing the burstiness
79  * in the number of concurrent operations also stabilizes the response time of
80  * operations from other -- and in particular synchronous -- queues. In broad
81  * strokes, the I/O scheduler will issue more concurrent operations from the
82  * async write queue as there's more dirty data in the pool.
83  *
84  * Async Writes
85  *
86  * The number of concurrent operations issued for the async write I/O class
87  * follows a piece-wise linear function defined by a few adjustable points.
88  *
89  *        |                   o---------| <-- zfs_vdev_async_write_max_active
90  *   ^    |                  /^         |
91  *   |    |                 / |         |
92  * active |                /  |         |
93  *  I/O   |               /   |         |
94  * count  |              /    |         |
95  *        |             /     |         |
96  *        |------------o      |         | <-- zfs_vdev_async_write_min_active
97  *       0|____________^______|_________|
98  *        0%           |      |       100% of zfs_dirty_data_max
99  *                     |      |
100  *                     |      `-- zfs_vdev_async_write_active_max_dirty_percent
101  *                     `--------- zfs_vdev_async_write_active_min_dirty_percent
102  *
103  * Until the amount of dirty data exceeds a minimum percentage of the dirty
104  * data allowed in the pool, the I/O scheduler will limit the number of
105  * concurrent operations to the minimum. As that threshold is crossed, the
106  * number of concurrent operations issued increases linearly to the maximum at
107  * the specified maximum percentage of the dirty data allowed in the pool.
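 *
 * For example (a sketch using the default tunables defined below:
 * min_active = 1, max_active = 10, and the 30%/60% dirty thresholds), a pool
 * that is 45% dirty sits halfway up the slope and is allowed roughly
 * (45 - 30) / (60 - 30) * (10 - 1) + 1 ~= 5 concurrent async writes.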
108  *
109  * Ideally, the amount of dirty data on a busy pool will stay in the sloped
110  * part of the function between zfs_vdev_async_write_active_min_dirty_percent
111  * and zfs_vdev_async_write_active_max_dirty_percent. If it exceeds the
112  * maximum percentage, this indicates that the rate of incoming data is
113  * greater than the rate that the backend storage can handle. In this case, we
114  * must further throttle incoming writes (see dmu_tx_delay() for details).
115  */
116 
117 /*
118  * The maximum number of I/Os active to each device.  Ideally, this will be >=
119  * the sum of each queue's max_active.  It must be at least the sum of each
120  * queue's min_active.
121  */
122 uint32_t zfs_vdev_max_active = 1000;
123 
124 /*
125  * Per-queue limits on the number of I/Os active to each device.  If the
126  * sum of the queues' max_active values exceeds zfs_vdev_max_active, the
127  * min_active comes into play.  We will send min_active from each queue,
128  * and then select from queues in the order defined by zio_priority_t.
129  *
130  * In general, smaller max_active's will lead to lower latency of synchronous
131  * operations.  Larger max_active's may lead to higher overall throughput,
132  * depending on underlying storage.
133  *
134  * The ratio of the queues' max_actives determines the balance of performance
135  * between reads, writes, and scrubs.  E.g., increasing
136  * zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
137  * more quickly, but reads and writes to have higher latency and lower
138  * throughput.
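 *
 * On FreeBSD builds these limits are also exposed as vfs.zfs.vdev.* loader
 * tunables and sysctls (see the knobs declared below); for example, setting
 * vfs.zfs.vdev.scrub_max_active with sysctl(8) adjusts the scrub class limit
 * at runtime.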
139  */
140 uint32_t zfs_vdev_sync_read_min_active = 10;
141 uint32_t zfs_vdev_sync_read_max_active = 10;
142 uint32_t zfs_vdev_sync_write_min_active = 10;
143 uint32_t zfs_vdev_sync_write_max_active = 10;
144 uint32_t zfs_vdev_async_read_min_active = 1;
145 uint32_t zfs_vdev_async_read_max_active = 3;
146 uint32_t zfs_vdev_async_write_min_active = 1;
147 uint32_t zfs_vdev_async_write_max_active = 10;
148 uint32_t zfs_vdev_scrub_min_active = 1;
149 uint32_t zfs_vdev_scrub_max_active = 2;
150 uint32_t zfs_vdev_trim_min_active = 1;
151 /*
152  * TRIM max active is large in comparison to the other values because TRIM
153  * I/Os are coalesced at the device layer. This value is set such that a
154  * typical SSD can process the queued I/Os in a single request.
155  */
156 uint32_t zfs_vdev_trim_max_active = 64;
157 
158 
159 /*
160  * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
161  * dirty data, use zfs_vdev_async_write_min_active.  When it has more than
162  * zfs_vdev_async_write_active_max_dirty_percent, use
163  * zfs_vdev_async_write_max_active. The value is linearly interpolated
164  * between min and max.
165  */
166 int zfs_vdev_async_write_active_min_dirty_percent = 30;
167 int zfs_vdev_async_write_active_max_dirty_percent = 60;
168 
169 /*
170  * To reduce IOPS, we aggregate small adjacent I/Os into one large I/O.
171  * For read I/Os, we also aggregate across small adjacency gaps; for writes
172  * we include spans of optional I/Os to aid aggregation at the disk even when
173  * they aren't able to help us aggregate at this level.
174  */
175 int zfs_vdev_aggregation_limit = SPA_OLD_MAXBLOCKSIZE;
176 int zfs_vdev_read_gap_limit = 32 << 10;
177 int zfs_vdev_write_gap_limit = 4 << 10;
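
/*
 * For example (a sketch using the defaults above): two 8KB reads at offsets
 * 0 and 40KB are separated by a 32KB gap, which is within
 * zfs_vdev_read_gap_limit, so they may be aggregated into a single 48KB
 * read.  Writes, by contrast, are only aggregated when they are adjacent or
 * when optional I/Os bridge the gap (see vdev_queue_aggregate() below).
 */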
178 
179 /*
180  * Define the queue depth percentage for each top-level vdev. This
181  * percentage is used in conjunction with zfs_vdev_async_write_max_active
182  * to determine how many allocations a specific top-level vdev should handle.
183  * Once the queue depth reaches zfs_vdev_queue_depth_pct *
184  * zfs_vdev_async_write_max_active / 100, the allocator will stop allocating
185  * blocks on that top-level device. The default kernel setting is 1000%,
186  * which will yield 100 allocations per device. For userland testing, the
187  * default setting is 300%, which equates to 30 allocations per device.
188  */
189 #ifdef _KERNEL
190 int zfs_vdev_queue_depth_pct = 1000;
191 #else
192 int zfs_vdev_queue_depth_pct = 300;
193 #endif
194 
195 
196 #ifdef __FreeBSD__
197 #ifdef _KERNEL
198 SYSCTL_DECL(_vfs_zfs_vdev);
199 
200 static int sysctl_zfs_async_write_active_min_dirty_percent(SYSCTL_HANDLER_ARGS);
201 SYSCTL_PROC(_vfs_zfs_vdev, OID_AUTO, async_write_active_min_dirty_percent,
202     CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RWTUN, 0, sizeof(int),
203     sysctl_zfs_async_write_active_min_dirty_percent, "I",
204     "Percentage of async write dirty data below which "
205     "async_write_min_active is used.");
206 
207 static int sysctl_zfs_async_write_active_max_dirty_percent(SYSCTL_HANDLER_ARGS);
208 SYSCTL_PROC(_vfs_zfs_vdev, OID_AUTO, async_write_active_max_dirty_percent,
209     CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RWTUN, 0, sizeof(int),
210     sysctl_zfs_async_write_active_max_dirty_percent, "I",
211     "Percentage of async write dirty data above which "
212     "async_write_max_active is used.");
213 
214 SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, max_active, CTLFLAG_RWTUN,
215     &zfs_vdev_max_active, 0,
216     "The maximum number of I/Os of all types active for each device.");
217 
218 #define ZFS_VDEV_QUEUE_KNOB_MIN(name)					\
219 SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, name ## _min_active, CTLFLAG_RWTUN,\
220     &zfs_vdev_ ## name ## _min_active, 0,				\
221     "Initial number of I/O requests of type " #name			\
222     " active for each device");
223 
224 #define ZFS_VDEV_QUEUE_KNOB_MAX(name)					\
225 SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, name ## _max_active, CTLFLAG_RWTUN,\
226     &zfs_vdev_ ## name ## _max_active, 0,				\
227     "Maximum number of I/O requests of type " #name			\
228     " active for each device");
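
/*
 * For example, ZFS_VDEV_QUEUE_KNOB_MAX(scrub) below expands to a SYSCTL_UINT
 * that exposes zfs_vdev_scrub_max_active as vfs.zfs.vdev.scrub_max_active.
 */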
229 
230 ZFS_VDEV_QUEUE_KNOB_MIN(sync_read);
231 ZFS_VDEV_QUEUE_KNOB_MAX(sync_read);
232 ZFS_VDEV_QUEUE_KNOB_MIN(sync_write);
233 ZFS_VDEV_QUEUE_KNOB_MAX(sync_write);
234 ZFS_VDEV_QUEUE_KNOB_MIN(async_read);
235 ZFS_VDEV_QUEUE_KNOB_MAX(async_read);
236 ZFS_VDEV_QUEUE_KNOB_MIN(async_write);
237 ZFS_VDEV_QUEUE_KNOB_MAX(async_write);
238 ZFS_VDEV_QUEUE_KNOB_MIN(scrub);
239 ZFS_VDEV_QUEUE_KNOB_MAX(scrub);
240 ZFS_VDEV_QUEUE_KNOB_MIN(trim);
241 ZFS_VDEV_QUEUE_KNOB_MAX(trim);
242 
243 #undef ZFS_VDEV_QUEUE_KNOB_MIN
#undef ZFS_VDEV_QUEUE_KNOB_MAX
244 
245 SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, aggregation_limit, CTLFLAG_RWTUN,
246     &zfs_vdev_aggregation_limit, 0,
247     "I/O requests are aggregated up to this size");
248 SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, read_gap_limit, CTLFLAG_RWTUN,
249     &zfs_vdev_read_gap_limit, 0,
250     "Acceptable gap between two reads being aggregated");
251 SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, write_gap_limit, CTLFLAG_RWTUN,
252     &zfs_vdev_write_gap_limit, 0,
253     "Acceptable gap between two writes being aggregated");
254 SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, queue_depth_pct, CTLFLAG_RWTUN,
255     &zfs_vdev_queue_depth_pct, 0,
256     "Queue depth percentage for each top-level");
257 
258 static int
259 sysctl_zfs_async_write_active_min_dirty_percent(SYSCTL_HANDLER_ARGS)
260 {
261 	int val, err;
262 
263 	val = zfs_vdev_async_write_active_min_dirty_percent;
264 	err = sysctl_handle_int(oidp, &val, 0, req);
265 	if (err != 0 || req->newptr == NULL)
266 		return (err);
267 
268 	if (val < 0 || val > 100 ||
269 	    val >= zfs_vdev_async_write_active_max_dirty_percent)
270 		return (EINVAL);
271 
272 	zfs_vdev_async_write_active_min_dirty_percent = val;
273 
274 	return (0);
275 }
276 
277 static int
278 sysctl_zfs_async_write_active_max_dirty_percent(SYSCTL_HANDLER_ARGS)
279 {
280 	int val, err;
281 
282 	val = zfs_vdev_async_write_active_max_dirty_percent;
283 	err = sysctl_handle_int(oidp, &val, 0, req);
284 	if (err != 0 || req->newptr == NULL)
285 		return (err);
286 
287 	if (val < 0 || val > 100 ||
288 	    val <= zfs_vdev_async_write_active_min_dirty_percent)
289 		return (EINVAL);
290 
291 	zfs_vdev_async_write_active_max_dirty_percent = val;
292 
293 	return (0);
294 }
295 #endif
296 #endif
297 
298 int
299 vdev_queue_offset_compare(const void *x1, const void *x2)
300 {
301 	const zio_t *z1 = x1;
302 	const zio_t *z2 = x2;
303 
304 	if (z1->io_offset < z2->io_offset)
305 		return (-1);
306 	if (z1->io_offset > z2->io_offset)
307 		return (1);
308 
309 	if (z1 < z2)
310 		return (-1);
311 	if (z1 > z2)
312 		return (1);
313 
314 	return (0);
315 }
316 
317 static inline avl_tree_t *
318 vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
319 {
320 	return (&vq->vq_class[p].vqc_queued_tree);
321 }
322 
323 static inline avl_tree_t *
324 vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
325 {
326 	if (t == ZIO_TYPE_READ)
327 		return (&vq->vq_read_offset_tree);
328 	else if (t == ZIO_TYPE_WRITE)
329 		return (&vq->vq_write_offset_tree);
330 	else
331 		return (NULL);
332 }
333 
334 int
335 vdev_queue_timestamp_compare(const void *x1, const void *x2)
336 {
337 	const zio_t *z1 = x1;
338 	const zio_t *z2 = x2;
339 
340 	if (z1->io_timestamp < z2->io_timestamp)
341 		return (-1);
342 	if (z1->io_timestamp > z2->io_timestamp)
343 		return (1);
344 
345 	if (z1->io_offset < z2->io_offset)
346 		return (-1);
347 	if (z1->io_offset > z2->io_offset)
348 		return (1);
349 
350 	if (z1 < z2)
351 		return (-1);
352 	if (z1 > z2)
353 		return (1);
354 
355 	return (0);
356 }
357 
358 void
359 vdev_queue_init(vdev_t *vd)
360 {
361 	vdev_queue_t *vq = &vd->vdev_queue;
362 
363 	mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
364 	vq->vq_vdev = vd;
365 
366 	avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
367 	    sizeof (zio_t), offsetof(struct zio, io_queue_node));
368 	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
369 	    vdev_queue_offset_compare, sizeof (zio_t),
370 	    offsetof(struct zio, io_offset_node));
371 	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
372 	    vdev_queue_offset_compare, sizeof (zio_t),
373 	    offsetof(struct zio, io_offset_node));
374 
375 	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
376 		int (*compfn) (const void *, const void *);
377 
378 		/*
379 		 * The synchronous i/o queues are dispatched in FIFO rather
380 		 * than LBA order.  This provides more consistent latency for
381 		 * these i/os.
382 		 */
383 		if (p == ZIO_PRIORITY_SYNC_READ || p == ZIO_PRIORITY_SYNC_WRITE)
384 			compfn = vdev_queue_timestamp_compare;
385 		else
386 			compfn = vdev_queue_offset_compare;
387 
388 		avl_create(vdev_queue_class_tree(vq, p), compfn,
389 		    sizeof (zio_t), offsetof(struct zio, io_queue_node));
390 	}
391 
392 	vq->vq_lastoffset = 0;
393 }
394 
395 void
396 vdev_queue_fini(vdev_t *vd)
397 {
398 	vdev_queue_t *vq = &vd->vdev_queue;
399 
400 	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
401 		avl_destroy(vdev_queue_class_tree(vq, p));
402 	avl_destroy(&vq->vq_active_tree);
403 	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
404 	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));
405 
406 	mutex_destroy(&vq->vq_lock);
407 }
408 
409 static void
410 vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
411 {
412 	spa_t *spa = zio->io_spa;
413 	avl_tree_t *qtt;
414 
415 	ASSERT(MUTEX_HELD(&vq->vq_lock));
416 	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
417 	avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
418 	qtt = vdev_queue_type_tree(vq, zio->io_type);
419 	if (qtt)
420 		avl_add(qtt, zio);
421 
422 #ifdef illumos
423 	mutex_enter(&spa->spa_iokstat_lock);
424 	spa->spa_queue_stats[zio->io_priority].spa_queued++;
425 	if (spa->spa_iokstat != NULL)
426 		kstat_waitq_enter(spa->spa_iokstat->ks_data);
427 	mutex_exit(&spa->spa_iokstat_lock);
428 #endif
429 }
430 
431 static void
432 vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
433 {
434 	spa_t *spa = zio->io_spa;
435 	avl_tree_t *qtt;
436 
437 	ASSERT(MUTEX_HELD(&vq->vq_lock));
438 	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
439 	avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
440 	qtt = vdev_queue_type_tree(vq, zio->io_type);
441 	if (qtt)
442 		avl_remove(qtt, zio);
443 
444 #ifdef illumos
445 	mutex_enter(&spa->spa_iokstat_lock);
446 	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
447 	spa->spa_queue_stats[zio->io_priority].spa_queued--;
448 	if (spa->spa_iokstat != NULL)
449 		kstat_waitq_exit(spa->spa_iokstat->ks_data);
450 	mutex_exit(&spa->spa_iokstat_lock);
451 #endif
452 }
453 
454 static void
455 vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
456 {
457 	spa_t *spa = zio->io_spa;
458 	ASSERT(MUTEX_HELD(&vq->vq_lock));
459 	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
460 	vq->vq_class[zio->io_priority].vqc_active++;
461 	avl_add(&vq->vq_active_tree, zio);
462 
463 #ifdef illumos
464 	mutex_enter(&spa->spa_iokstat_lock);
465 	spa->spa_queue_stats[zio->io_priority].spa_active++;
466 	if (spa->spa_iokstat != NULL)
467 		kstat_runq_enter(spa->spa_iokstat->ks_data);
468 	mutex_exit(&spa->spa_iokstat_lock);
469 #endif
470 }
471 
472 static void
473 vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
474 {
475 	spa_t *spa = zio->io_spa;
476 	ASSERT(MUTEX_HELD(&vq->vq_lock));
477 	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
478 	vq->vq_class[zio->io_priority].vqc_active--;
479 	avl_remove(&vq->vq_active_tree, zio);
480 
481 #ifdef illumos
482 	mutex_enter(&spa->spa_iokstat_lock);
483 	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
484 	spa->spa_queue_stats[zio->io_priority].spa_active--;
485 	if (spa->spa_iokstat != NULL) {
486 		kstat_io_t *ksio = spa->spa_iokstat->ks_data;
487 
488 		kstat_runq_exit(spa->spa_iokstat->ks_data);
489 		if (zio->io_type == ZIO_TYPE_READ) {
490 			ksio->reads++;
491 			ksio->nread += zio->io_size;
492 		} else if (zio->io_type == ZIO_TYPE_WRITE) {
493 			ksio->writes++;
494 			ksio->nwritten += zio->io_size;
495 		}
496 	}
497 	mutex_exit(&spa->spa_iokstat_lock);
498 #endif
499 }
500 
501 static void
502 vdev_queue_agg_io_done(zio_t *aio)
503 {
504 	if (aio->io_type == ZIO_TYPE_READ) {
505 		zio_t *pio;
506 		zio_link_t *zl = NULL;
507 		while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
508 			bcopy((char *)aio->io_data + (pio->io_offset -
509 			    aio->io_offset), pio->io_data, pio->io_size);
510 		}
511 	}
512 
513 	zio_buf_free(aio->io_data, aio->io_size);
514 }
515 
516 static int
517 vdev_queue_class_min_active(zio_priority_t p)
518 {
519 	switch (p) {
520 	case ZIO_PRIORITY_SYNC_READ:
521 		return (zfs_vdev_sync_read_min_active);
522 	case ZIO_PRIORITY_SYNC_WRITE:
523 		return (zfs_vdev_sync_write_min_active);
524 	case ZIO_PRIORITY_ASYNC_READ:
525 		return (zfs_vdev_async_read_min_active);
526 	case ZIO_PRIORITY_ASYNC_WRITE:
527 		return (zfs_vdev_async_write_min_active);
528 	case ZIO_PRIORITY_SCRUB:
529 		return (zfs_vdev_scrub_min_active);
530 	case ZIO_PRIORITY_TRIM:
531 		return (zfs_vdev_trim_min_active);
532 	default:
533 		panic("invalid priority %u", p);
534 		return (0);
535 	}
536 }
537 
538 static __noinline int
539 vdev_queue_max_async_writes(spa_t *spa)
540 {
541 	int writes;
542 	uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
543 	uint64_t min_bytes = zfs_dirty_data_max *
544 	    zfs_vdev_async_write_active_min_dirty_percent / 100;
545 	uint64_t max_bytes = zfs_dirty_data_max *
546 	    zfs_vdev_async_write_active_max_dirty_percent / 100;
547 
548 	/*
549 	 * Sync tasks correspond to interactive user actions. To reduce the
550 	 * execution time of those actions, we push data out as fast as possible.
551 	 */
552 	if (spa_has_pending_synctask(spa)) {
553 		return (zfs_vdev_async_write_max_active);
554 	}
555 
556 	if (dirty < min_bytes)
557 		return (zfs_vdev_async_write_min_active);
558 	if (dirty > max_bytes)
559 		return (zfs_vdev_async_write_max_active);
560 
561 	/*
562 	 * linear interpolation:
563 	 * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
564 	 * move right by min_bytes
565 	 * move up by min_writes
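	 *
	 * For example (a sketch with the default tunables): if dirty data sits
	 * at 45% of zfs_dirty_data_max, halfway between min_bytes (30%) and
	 * max_bytes (60%), this works out to
	 * (dirty - min_bytes) * (10 - 1) / (max_bytes - min_bytes) + 1 ~= 5.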
566 	 */
567 	writes = (dirty - min_bytes) *
568 	    (zfs_vdev_async_write_max_active -
569 	    zfs_vdev_async_write_min_active) /
570 	    (max_bytes - min_bytes) +
571 	    zfs_vdev_async_write_min_active;
572 	ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
573 	ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
574 	return (writes);
575 }
576 
577 static int
578 vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
579 {
580 	switch (p) {
581 	case ZIO_PRIORITY_SYNC_READ:
582 		return (zfs_vdev_sync_read_max_active);
583 	case ZIO_PRIORITY_SYNC_WRITE:
584 		return (zfs_vdev_sync_write_max_active);
585 	case ZIO_PRIORITY_ASYNC_READ:
586 		return (zfs_vdev_async_read_max_active);
587 	case ZIO_PRIORITY_ASYNC_WRITE:
588 		return (vdev_queue_max_async_writes(spa));
589 	case ZIO_PRIORITY_SCRUB:
590 		return (zfs_vdev_scrub_max_active);
591 	case ZIO_PRIORITY_TRIM:
592 		return (zfs_vdev_trim_max_active);
593 	default:
594 		panic("invalid priority %u", p);
595 		return (0);
596 	}
597 }
598 
599 /*
600  * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
601  * there is no eligible class.
602  */
603 static zio_priority_t
604 vdev_queue_class_to_issue(vdev_queue_t *vq)
605 {
606 	spa_t *spa = vq->vq_vdev->vdev_spa;
607 	zio_priority_t p;
608 
609 	ASSERT(MUTEX_HELD(&vq->vq_lock));
610 
611 	if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
612 		return (ZIO_PRIORITY_NUM_QUEUEABLE);
613 
614 	/* find a queue that has not reached its minimum # outstanding i/os */
615 	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
616 		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
617 		    vq->vq_class[p].vqc_active <
618 		    vdev_queue_class_min_active(p))
619 			return (p);
620 	}
621 
622 	/*
623 	 * If we haven't found a queue, look for one that hasn't reached its
624 	 * maximum # outstanding i/os.
625 	 */
626 	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
627 		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
628 		    vq->vq_class[p].vqc_active <
629 		    vdev_queue_class_max_active(spa, p))
630 			return (p);
631 	}
632 
633 	/* No eligible queued i/os */
634 	return (ZIO_PRIORITY_NUM_QUEUEABLE);
635 }
636 
637 /*
638  * Compute the range spanned by two i/os, which is the endpoint of the last
639  * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
640  * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
641  * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
642  */
643 #define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
644 #define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
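/*
 * For example, for an 8KB i/o at offset 0 (fio) followed by a 4KB i/o at
 * offset 12KB (lio): IO_SPAN(fio, lio) = 12K + 4K - 0 = 16K and
 * IO_GAP(fio, lio) = 12K - (0 + 8K) = 4K.
 */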
645 
646 static zio_t *
647 vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
648 {
649 	zio_t *first, *last, *aio, *dio, *mandatory, *nio;
650 	void *abuf;
651 	uint64_t maxgap = 0;
652 	uint64_t size;
653 	boolean_t stretch;
654 	avl_tree_t *t;
655 	enum zio_flag flags;
656 
657 	ASSERT(MUTEX_HELD(&vq->vq_lock));
658 
659 	if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE)
660 		return (NULL);
661 
662 	first = last = zio;
663 
664 	if (zio->io_type == ZIO_TYPE_READ)
665 		maxgap = zfs_vdev_read_gap_limit;
666 
667 	/*
668 	 * We can aggregate I/Os that are sufficiently adjacent and of
669 	 * the same flavor, as expressed by the AGG_INHERIT flags.
670 	 * The latter requirement is necessary so that certain
671 	 * attributes of the I/O, such as whether it's a normal I/O
672 	 * or a scrub/resilver, can be preserved in the aggregate.
673 	 * We can include optional I/Os, but don't allow them
674 	 * to begin a range as they add no benefit in that situation.
675 	 */
676 
677 	/*
678 	 * We keep track of the last non-optional I/O.
679 	 */
680 	mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;
681 
682 	/*
683 	 * Walk backwards through sufficiently contiguous I/Os
684 	 * recording the last non-optional I/O.
685 	 */
686 	flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;
687 	t = vdev_queue_type_tree(vq, zio->io_type);
688 	while (t != NULL && (dio = AVL_PREV(t, first)) != NULL &&
689 	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
690 	    IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
691 	    IO_GAP(dio, first) <= maxgap) {
692 		first = dio;
693 		if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
694 			mandatory = first;
695 	}
696 
697 	/*
698 	 * Skip any initial optional I/Os.
699 	 */
700 	while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
701 		first = AVL_NEXT(t, first);
702 		ASSERT(first != NULL);
703 	}
704 
705 	/*
706 	 * Walk forward through sufficiently contiguous I/Os.
707 	 */
708 	while ((dio = AVL_NEXT(t, last)) != NULL &&
709 	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
710 	    IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit &&
711 	    IO_GAP(last, dio) <= maxgap) {
712 		last = dio;
713 		if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
714 			mandatory = last;
715 	}
716 
717 	/*
718 	 * Now that we've established the range of the I/O aggregation
719 	 * we must decide what to do with trailing optional I/Os.
720 	 * For reads, there's nothing to do. While we are unable to
721 	 * aggregate further, it's possible that a trailing optional
722 	 * I/O would allow the underlying device to aggregate with
723 	 * subsequent I/Os. We must therefore determine if the next
724 	 * non-optional I/O is close enough to make aggregation
725 	 * worthwhile.
726 	 */
727 	stretch = B_FALSE;
728 	if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
729 		zio_t *nio = last;
730 		while ((dio = AVL_NEXT(t, nio)) != NULL &&
731 		    IO_GAP(nio, dio) == 0 &&
732 		    IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
733 			nio = dio;
734 			if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
735 				stretch = B_TRUE;
736 				break;
737 			}
738 		}
739 	}
740 
741 	if (stretch) {
742 		/* This may be a no-op. */
743 		dio = AVL_NEXT(t, last);
744 		dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
745 	} else {
746 		while (last != mandatory && last != first) {
747 			ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
748 			last = AVL_PREV(t, last);
749 			ASSERT(last != NULL);
750 		}
751 	}
752 
753 	if (first == last)
754 		return (NULL);
755 
756 	size = IO_SPAN(first, last);
757 	ASSERT3U(size, <=, zfs_vdev_aggregation_limit);
758 
759 	abuf = zio_buf_alloc_nowait(size);
760 	if (abuf == NULL)
761 		return (NULL);
762 
763 	aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
764 	    abuf, size, first->io_type, zio->io_priority,
765 	    flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
766 	    vdev_queue_agg_io_done, NULL);
767 	aio->io_timestamp = first->io_timestamp;
768 
769 	nio = first;
770 	do {
771 		dio = nio;
772 		nio = AVL_NEXT(t, dio);
773 		ASSERT3U(dio->io_type, ==, aio->io_type);
774 
775 		if (dio->io_flags & ZIO_FLAG_NODATA) {
776 			ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
777 			bzero((char *)aio->io_data + (dio->io_offset -
778 			    aio->io_offset), dio->io_size);
779 		} else if (dio->io_type == ZIO_TYPE_WRITE) {
780 			bcopy(dio->io_data, (char *)aio->io_data +
781 			    (dio->io_offset - aio->io_offset),
782 			    dio->io_size);
783 		}
784 
785 		zio_add_child(dio, aio);
786 		vdev_queue_io_remove(vq, dio);
787 		zio_vdev_io_bypass(dio);
788 		zio_execute(dio);
789 	} while (dio != last);
790 
791 	return (aio);
792 }
793 
794 static zio_t *
795 vdev_queue_io_to_issue(vdev_queue_t *vq)
796 {
797 	zio_t *zio, *aio;
798 	zio_priority_t p;
799 	avl_index_t idx;
800 	avl_tree_t *tree;
801 	zio_t *search;
802 
803 again:
804 	ASSERT(MUTEX_HELD(&vq->vq_lock));
805 
806 	p = vdev_queue_class_to_issue(vq);
807 
808 	if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
809 		/* No eligible queued i/os */
810 		return (NULL);
811 	}
812 
813 	/*
814 	 * For LBA-ordered queues (async / scrub), issue the i/o which follows
815 	 * the most recently issued i/o in LBA (offset) order.
816 	 *
817 	 * For FIFO queues (sync), issue the i/o with the lowest timestamp.
818 	 */
819 	tree = vdev_queue_class_tree(vq, p);
820 	search = kmem_zalloc(sizeof (*search), KM_NOSLEEP);
821 	if (search) {
822 		search->io_offset = vq->vq_last_offset + 1;
823 		VERIFY3P(avl_find(tree, search, &idx), ==, NULL);
824 		kmem_free(search, sizeof (*search));
825 		zio = avl_nearest(tree, idx, AVL_AFTER);
826 	} else {
827 		/* Can't find the nearest entry; fall back to the first */
828 		zio = NULL;
829 	}
830 	if (zio == NULL)
831 		zio = avl_first(tree);
832 	ASSERT3U(zio->io_priority, ==, p);
833 
834 	aio = vdev_queue_aggregate(vq, zio);
835 	if (aio != NULL)
836 		zio = aio;
837 	else
838 		vdev_queue_io_remove(vq, zio);
839 
840 	/*
841 	 * If the I/O is or was optional and therefore has no data, we need to
842 	 * simply discard it. We need to drop the vdev queue's lock to avoid a
843 	 * deadlock that we could encounter since this I/O will complete
844 	 * immediately.
845 	 */
846 	if (zio->io_flags & ZIO_FLAG_NODATA) {
847 		mutex_exit(&vq->vq_lock);
848 		zio_vdev_io_bypass(zio);
849 		zio_execute(zio);
850 		mutex_enter(&vq->vq_lock);
851 		goto again;
852 	}
853 
854 	vdev_queue_pending_add(vq, zio);
855 	vq->vq_last_offset = zio->io_offset;
856 
857 	return (zio);
858 }
859 
860 zio_t *
861 vdev_queue_io(zio_t *zio)
862 {
863 	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
864 	zio_t *nio;
865 
866 	if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
867 		return (zio);
868 
869 	/*
870 	 * Children i/os inherit their parent's priority, which might
871 	 * not match the child's i/o type.  Fix it up here.
872 	 */
873 	if (zio->io_type == ZIO_TYPE_READ) {
874 		if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
875 		    zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
876 		    zio->io_priority != ZIO_PRIORITY_SCRUB)
877 			zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
878 	} else if (zio->io_type == ZIO_TYPE_WRITE) {
879 		if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
880 		    zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE)
881 			zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
882 	} else {
883 		ASSERT(zio->io_type == ZIO_TYPE_FREE);
884 		zio->io_priority = ZIO_PRIORITY_TRIM;
885 	}
886 
887 	zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;
888 
889 	mutex_enter(&vq->vq_lock);
890 	zio->io_timestamp = gethrtime();
891 	vdev_queue_io_add(vq, zio);
892 	nio = vdev_queue_io_to_issue(vq);
893 	mutex_exit(&vq->vq_lock);
894 
895 	if (nio == NULL)
896 		return (NULL);
897 
898 	if (nio->io_done == vdev_queue_agg_io_done) {
899 		zio_nowait(nio);
900 		return (NULL);
901 	}
902 
903 	return (nio);
904 }
905 
906 void
907 vdev_queue_io_done(zio_t *zio)
908 {
909 	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
910 	zio_t *nio;
911 
912 	mutex_enter(&vq->vq_lock);
913 
914 	vdev_queue_pending_remove(vq, zio);
915 
916 	vq->vq_io_complete_ts = gethrtime();
917 
918 	while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
919 		mutex_exit(&vq->vq_lock);
920 		if (nio->io_done == vdev_queue_agg_io_done) {
921 			zio_nowait(nio);
922 		} else {
923 			zio_vdev_io_reissue(nio);
924 			zio_execute(nio);
925 		}
926 		mutex_enter(&vq->vq_lock);
927 	}
928 
929 	mutex_exit(&vq->vq_lock);
930 }
931 
932 /*
933  * As these three methods are only used for load calculations, we're not
934  * concerned about getting an incorrect value on 32-bit platforms due to the
935  * lack of vq_lock protection here; instead, we prefer to keep them lock-free
936  * for performance.
936  */
937 int
938 vdev_queue_length(vdev_t *vd)
939 {
940 	return (avl_numnodes(&vd->vdev_queue.vq_active_tree));
941 }
942 
943 uint64_t
944 vdev_queue_lastoffset(vdev_t *vd)
945 {
946 	return (vd->vdev_queue.vq_lastoffset);
947 }
948 
949 void
950 vdev_queue_register_lastoffset(vdev_t *vd, zio_t *zio)
951 {
952 	vd->vdev_queue.vq_lastoffset = zio->io_offset + zio->io_size;
953 }
954