/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/kstat.h>

/*
 * Virtual device read-ahead caching.
 *
 * This file implements a simple LRU read-ahead cache. When the DMU reads
 * a given block, it will often want other, nearby blocks soon thereafter.
 * We take advantage of this by reading a larger disk region and caching
 * the result. In the best case, this can turn 128 back-to-back 512-byte
 * reads into a single 64k read followed by 127 cache hits; this reduces
 * latency dramatically. In the worst case, it can turn an isolated 512-byte
 * read into a 64k read, which doesn't affect latency all that much but is
 * terribly wasteful of bandwidth. A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region. Currently, only
 * metadata I/O is inflated. A further enhancement could take advantage of
 * more semantic information about the I/O. And it could use something
 * faster than an AVL tree; that was chosen solely for convenience.
 *
 * There are five cache operations: allocate, fill, read, write, evict.
 *
 * (1) Allocate. This reserves a cache entry for the specified region.
 *     We separate the allocate and fill operations so that multiple threads
 *     don't generate I/O for the same cache miss.
 *
 * (2) Fill. When the I/O for a cache miss completes, the fill routine
 *     places the data in the previously allocated cache entry.
 *
 * (3) Read. Read data from the cache.
 *
 * (4) Write. Update cache contents after write completion.
 *
 * (5) Evict. When allocating a new entry, we evict the oldest (LRU) entry
 *     if the total cache size exceeds zfs_vdev_cache_size.
 */
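
/*
 * A sketch of how these operations compose on the read path (a summary
 * of the code below, not additional machinery): a cacheable read looks
 * up its VCBS-aligned line in vc_offset_tree. On a miss we allocate a
 * placeholder entry (1), issue a single VCBS-sized child read, and copy
 * the result into the entry when it completes (2); concurrent readers
 * of the same line simply become children of the in-flight fill. Later
 * reads of the line are satisfied by bcopy() out of the cached buffer
 * (3). Writes patch any overlapping lines in place (4), and once the
 * cache exceeds zfs_vdev_cache_size the least-recently-used entry is
 * evicted to make room (5).
 */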

/*
 * These tunables are for performance analysis.
 */
/*
 * All i/os smaller than zfs_vdev_cache_max will be turned into
 * 1<<zfs_vdev_cache_bshift byte reads by the vdev_cache (aka software
 * track buffer). At most zfs_vdev_cache_size bytes will be kept in each
 * vdev's vdev_cache.
 *
 * TODO: Note that with the current ZFS code, it turns out that the
 * vdev cache is not helpful, and in some cases actually harmful. It
 * is better to disable it. Once some time has passed, we should
 * actually remove it to simplify the code. For now we just disable
 * it by setting zfs_vdev_cache_size to zero. Note that Solaris 11
 * has made these same changes.
 */
int zfs_vdev_cache_max = 1<<14;			/* 16KB */
int zfs_vdev_cache_size = 0;
int zfs_vdev_cache_bshift = 16;

#define	VCBS (1 << zfs_vdev_cache_bshift)	/* 64KB */
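
/*
 * For example, with the defaults above a cache line is 1 << 16 = 64KB
 * (VCBS), and any read of at most zfs_vdev_cache_max (16KB) is inflated
 * into one 64KB read of the line containing it. A hypothetical
 * zfs_vdev_cache_size of 10MB would hold up to 160 such lines; the
 * default of 0 disables the cache entirely.
 */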

SYSCTL_DECL(_vfs_zfs_vdev);
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, cache, CTLFLAG_RW, 0, "ZFS VDEV Cache");
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, max, CTLFLAG_RDTUN,
    &zfs_vdev_cache_max, 0,
    "Maximum I/O request size that increases read size");
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, size, CTLFLAG_RDTUN,
    &zfs_vdev_cache_size, 0, "Size of VDEV cache");
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, bshift, CTLFLAG_RDTUN,
    &zfs_vdev_cache_bshift, 0,
    "Turn small requests into reads of 1 << this value bytes");
kstat_t	*vdc_ksp = NULL;

typedef struct vdc_stats {
	kstat_named_t vdc_stat_delegations;
	kstat_named_t vdc_stat_hits;
	kstat_named_t vdc_stat_misses;
} vdc_stats_t;

static vdc_stats_t vdc_stats = {
	{ "delegations",	KSTAT_DATA_UINT64 },
	{ "hits",		KSTAT_DATA_UINT64 },
	{ "misses",		KSTAT_DATA_UINT64 }
};

#define	VDCSTAT_BUMP(stat)	atomic_inc_64(&vdc_stats.stat.value.ui64);

static int
vdev_cache_offset_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_offset < ve2->ve_offset)
		return (-1);
	if (ve1->ve_offset > ve2->ve_offset)
		return (1);
	return (0);
}

static int
vdev_cache_lastused_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_lastused < ve2->ve_lastused)
		return (-1);
	if (ve1->ve_lastused > ve2->ve_lastused)
		return (1);

	/*
	 * Among equally old entries, sort by offset to ensure uniqueness.
	 */
	return (vdev_cache_offset_compare(a1, a2));
}

/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);
	ASSERT(ve->ve_data != NULL);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	zio_buf_free(ve->ve_data, VCBS);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}

/*
 * Allocate an entry in the cache. At this point we don't have the data,
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
	vdev_cache_entry_t *ve;

	ASSERT(MUTEX_HELD(&vc->vc_lock));

	if (zfs_vdev_cache_size == 0)
		return (NULL);

	/*
	 * If adding a new entry would exceed the cache size,
	 * evict the oldest entry (LRU).
	 */
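	/*
	 * Each entry owns one VCBS-sized data buffer, so the node count
	 * shifted by zfs_vdev_cache_bshift approximates the cache's total
	 * resident size in bytes.
	 */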
	if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
	    zfs_vdev_cache_size) {
		ve = avl_first(&vc->vc_lastused_tree);
		if (ve->ve_fill_io != NULL)
			return (NULL);
		ASSERT(ve->ve_hits != 0);
		vdev_cache_evict(vc, ve);
	}

	ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve->ve_offset = offset;
	ve->ve_lastused = ddi_get_lbolt();
	ve->ve_data = zio_buf_alloc(VCBS);

	avl_add(&vc->vc_offset_tree, ve);
	avl_add(&vc->vc_lastused_tree, ve);

	return (ve);
}

static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);

	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);

	if (ve->ve_lastused != ddi_get_lbolt()) {
		avl_remove(&vc->vc_lastused_tree, ve);
		ve->ve_lastused = ddi_get_lbolt();
		avl_add(&vc->vc_lastused_tree, ve);
	}

	ve->ve_hits++;
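	/*
	 * Copy the requested range out of the cached line; cache_phase is
	 * the byte offset of the request within its VCBS-aligned line
	 * (e.g. a read at offset 0x12345200 has phase 0x5200 within the
	 * 64KB line at 0x12340000).
	 */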
	bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size);
}

/*
 * Fill a previously allocated cache entry with data.
 */
static void
vdev_cache_fill(zio_t *fio)
{
	vdev_t *vd = fio->io_vd;
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve = fio->io_private;
	zio_t *pio;

	ASSERT(fio->io_size == VCBS);

	/*
	 * Add data to the cache.
	 */
	mutex_enter(&vc->vc_lock);

	ASSERT(ve->ve_fill_io == fio);
	ASSERT(ve->ve_offset == fio->io_offset);
	ASSERT(ve->ve_data == fio->io_data);

	ve->ve_fill_io = NULL;

	/*
	 * Even if this cache line was invalidated by a missed write update,
	 * any reads that were queued up before the missed update are still
	 * valid, so we can satisfy them from this line before we evict it.
	 */
	zio_link_t *zl = NULL;
	while ((pio = zio_walk_parents(fio, &zl)) != NULL)
		vdev_cache_hit(vc, ve, pio);

	if (fio->io_error || ve->ve_missed_update)
		vdev_cache_evict(vc, ve);

	mutex_exit(&vc->vc_lock);
}

/*
 * Read data from the cache. Returns B_TRUE on a cache hit, B_FALSE on a miss.
 */
boolean_t
vdev_cache_read(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);
	zio_t *fio;

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
		return (B_FALSE);

	if (zio->io_size > zfs_vdev_cache_max)
		return (B_FALSE);

	/*
	 * If the I/O straddles two or more cache blocks, don't cache it.
	 */
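	/*
	 * For example, with 64KB cache lines a 16KB read starting at
	 * offset 56KB would span the lines at 0 and 64KB, so it is
	 * passed through uncached.
	 */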
	if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
		return (B_FALSE);

	ASSERT(cache_phase + zio->io_size <= VCBS);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = cache_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, NULL);

	if (ve != NULL) {
		if (ve->ve_missed_update) {
			mutex_exit(&vc->vc_lock);
			return (B_FALSE);
		}

		if ((fio = ve->ve_fill_io) != NULL) {
			zio_vdev_io_bypass(zio);
			zio_add_child(zio, fio);
			mutex_exit(&vc->vc_lock);
			VDCSTAT_BUMP(vdc_stat_delegations);
			return (B_TRUE);
		}

		vdev_cache_hit(vc, ve, zio);
		zio_vdev_io_bypass(zio);

		mutex_exit(&vc->vc_lock);
		VDCSTAT_BUMP(vdc_stat_hits);
		return (B_TRUE);
	}

	ve = vdev_cache_allocate(zio);

	if (ve == NULL) {
		mutex_exit(&vc->vc_lock);
		return (B_FALSE);
	}

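	/*
	 * Miss: issue one VCBS-sized read to fill the whole line. The
	 * original zio is bypassed and becomes a child of the fill, so it
	 * is satisfied from the cache when vdev_cache_fill() completes.
	 */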
	fio = zio_vdev_delegated_io(zio->io_vd, cache_offset,
	    ve->ve_data, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_NOW,
	    ZIO_FLAG_DONT_CACHE, vdev_cache_fill, ve);

	ve->ve_fill_io = fio;
	zio_vdev_io_bypass(zio);
	zio_add_child(zio, fio);

	mutex_exit(&vc->vc_lock);
	zio_nowait(fio);
	VDCSTAT_BUMP(vdc_stat_misses);

	return (B_TRUE);
}

/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t io_start = zio->io_offset;
	uint64_t io_end = io_start + zio->io_size;
	uint64_t min_offset = P2ALIGN(io_start, VCBS);
	uint64_t max_offset = P2ROUNDUP(io_end, VCBS);
	avl_index_t where;

	ASSERT(zio->io_type == ZIO_TYPE_WRITE);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = min_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

	if (ve == NULL)
		ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);

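	/*
	 * Walk every cached line overlapping [io_start, io_end). Lines
	 * with a fill still in flight are flagged as stale (and evicted
	 * by vdev_cache_fill()); otherwise the overlapping bytes are
	 * patched in place.
	 */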
	while (ve != NULL && ve->ve_offset < max_offset) {
		uint64_t start = MAX(ve->ve_offset, io_start);
		uint64_t end = MIN(ve->ve_offset + VCBS, io_end);

		if (ve->ve_fill_io != NULL) {
			ve->ve_missed_update = 1;
		} else {
			bcopy((char *)zio->io_data + start - io_start,
			    ve->ve_data + start - ve->ve_offset, end - start);
		}
		ve = AVL_NEXT(&vc->vc_offset_tree, ve);
	}
	mutex_exit(&vc->vc_lock);
}

void
vdev_cache_purge(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve;

	mutex_enter(&vc->vc_lock);
	while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
		vdev_cache_evict(vc, ve);
	mutex_exit(&vc->vc_lock);
}

void
vdev_cache_init(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_offset_node));

	avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_lastused_node));
}

void
vdev_cache_fini(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	vdev_cache_purge(vd);

	avl_destroy(&vc->vc_offset_tree);
	avl_destroy(&vc->vc_lastused_tree);

	mutex_destroy(&vc->vc_lock);
}

void
vdev_cache_stat_init(void)
{
	vdc_ksp = kstat_create("zfs", 0, "vdev_cache_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (vdc_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (vdc_ksp != NULL) {
		vdc_ksp->ks_data = &vdc_stats;
		kstat_install(vdc_ksp);
	}
}

void
vdev_cache_stat_fini(void)
{
	if (vdc_ksp != NULL) {
		kstat_delete(vdc_ksp);
		vdc_ksp = NULL;
	}
}