xref: /netbsd-src/external/gpl2/lvm2/dist/lib/device/dev-io.c (revision 4817a0b0b8fe9612e8ebe21a9bf2d97b95038a97)
1 /*	$NetBSD: dev-io.c,v 1.9 2010/12/29 00:14:04 haad Exp $	*/
2 
3 /*
4  * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
5  * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
6  *
7  * This file is part of LVM2.
8  *
9  * This copyrighted material is made available to anyone wishing to use,
10  * modify, copy, or redistribute it subject to the terms and conditions
11  * of the GNU Lesser General Public License v.2.1.
12  *
13  * You should have received a copy of the GNU Lesser General Public License
14  * along with this program; if not, write to the Free Software Foundation,
15  * Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
16  */
17 
18 #include "lib.h"
19 #include "lvm-types.h"
20 #include "device.h"
21 #include "metadata.h"
22 #include "lvmcache.h"
23 #include "memlock.h"
24 #include "locking.h"
25 
26 #include <limits.h>
27 #include <sys/stat.h>
28 #include <fcntl.h>
29 #include <unistd.h>
30 #include <sys/ioctl.h>
31 
32 #ifdef linux
33 #  define u64 uint64_t		/* Missing without __KERNEL__ */
34 #  undef WNOHANG		/* Avoid redefinition */
35 #  undef WUNTRACED		/* Avoid redefinition */
36 #  include <linux/fs.h>		/* For block ioctl definitions */
37 #  define BLKSIZE_SHIFT SECTOR_SHIFT
38 #  ifndef BLKGETSIZE64		/* fs.h out-of-date */
39 #    define BLKGETSIZE64 _IOR(0x12, 114, size_t)
40 #  endif /* BLKGETSIZE64 */
41 #elif __NetBSD__
42 #  include <sys/disk.h>
43 #  include <sys/disklabel.h>
44 #  include <prop/proplib.h>
45 #  include <sys/param.h>
46 #else
47 #  include <sys/disk.h>
48 #  define BLKBSZGET DKIOCGETBLOCKSIZE
49 #  define BLKSSZGET DKIOCGETBLOCKSIZE
50 #  define BLKGETSIZE64 DKIOCGETBLOCKCOUNT
51 #  define BLKFLSBUF DKIOCSYNCHRONIZECACHE
52 #  define BLKSIZE_SHIFT 0
53 #endif
54 
55 #ifdef O_DIRECT_SUPPORT
56 #  ifndef O_DIRECT
57 #    error O_DIRECT support configured but O_DIRECT definition not found in headers
58 #  endif
59 #endif
60 
/* Global list of all currently open devices, linked via dev->open_list. */
static DM_LIST_INIT(_open_devices);
62 
63 /*-----------------------------------------------------------------
64  * The standard io loop that keeps submitting an io until it's
65  * all gone.
66  *---------------------------------------------------------------*/
67 static int _io(struct device_area *where, void *buffer, int should_write)
68 {
69 	int fd = dev_fd(where->dev);
70 	ssize_t n = 0;
71 	size_t total = 0;
72 
73 	if (fd < 0) {
74 		log_error("Attempt to read an unopened device (%s).",
75 			  dev_name(where->dev));
76 		return 0;
77 	}
78 
79 	/*
80 	 * Skip all writes in test mode.
81 	 */
82 	if (should_write && test_mode())
83 		return 1;
84 
85 	if (where->size > SSIZE_MAX) {
86 		log_error("Read size too large: %" PRIu64, where->size);
87 		return 0;
88 	}
89 
90 	if (lseek(fd, (off_t) where->start, SEEK_SET) < 0) {
91 		log_error("%s: lseek %" PRIu64 " failed: %s",
92 			  dev_name(where->dev), (uint64_t) where->start,
93 			  strerror(errno));
94 		return 0;
95 	}
96 
97 	while (total < (size_t) where->size) {
98 		do
99 			n = should_write ?
100 			    write(fd, buffer, (size_t) where->size - total) :
101 			    read(fd, buffer, (size_t) where->size - total);
102 		while ((n < 0) && ((errno == EINTR) || (errno == EAGAIN)));
103 
104 		if (n < 0)
105 			log_error("%s: %s failed after %" PRIu64 " of %" PRIu64
106 				  " at %" PRIu64 ": %s", dev_name(where->dev),
107 				  should_write ? "write" : "read",
108 				  (uint64_t) total,
109 				  (uint64_t) where->size,
110 				  (uint64_t) where->start, strerror(errno));
111 
112 		if (n <= 0)
113 			break;
114 
115 		total += n;
116 		buffer += n;
117 	}
118 
119 	return (total == (size_t) where->size);
120 }
121 
122 /*-----------------------------------------------------------------
123  * LVM2 uses O_DIRECT when performing metadata io, which requires
124  * block size aligned accesses.  If any io is not aligned we have
125  * to perform the io via a bounce buffer, obviously this is quite
126  * inefficient.
127  *---------------------------------------------------------------*/
128 
129 /*
130  * Get the sector size from an _open_ device.
131  */
132 static int _get_block_size(struct device *dev, unsigned int *size)
133 {
134 	const char *name = dev_name(dev);
135 #ifdef __NetBSD__
136 	struct disklabel	lab;
137 	prop_dictionary_t	disk_dict, geom_dict;
138 	uint32_t		secsize;
139 #endif
140 
141 	if ((dev->block_size == -1)) {
142 #ifdef __NetBSD__
143 		if (prop_dictionary_recv_ioctl(dev_fd(dev), DIOCGDISKINFO, &disk_dict)) {
144 			if (ioctl(dev_fd(dev), DIOCGDINFO, &lab) < 0) {
145 				dev->block_size = DEV_BSIZE;
146 			} else
147 				dev->block_size = lab.d_secsize;
148 		} else {
149 			geom_dict = prop_dictionary_get(disk_dict, "geometry");
150 			prop_dictionary_get_uint32(geom_dict, "sector-size", &secsize);
151 			dev->block_size = secsize;
152 		}
153 #else
154 		if (ioctl(dev_fd(dev), BLKBSZGET, &dev->block_size) < 0) {
155 			log_sys_error("ioctl BLKBSZGET", name);
156 			return 0;
157 		}
158 #endif
159 		log_debug("%s: block size is %u bytes", name, dev->block_size);
160 	}
161 
162 	*size = (unsigned int) dev->block_size;
163 
164 	return 1;
165 }
166 
167 /*
168  * Widens a region to be an aligned region.
169  */
170 static void _widen_region(unsigned int block_size, struct device_area *region,
171 			  struct device_area *result)
172 {
173 	uint64_t mask = block_size - 1, delta;
174 	memcpy(result, region, sizeof(*result));
175 
176 	/* adjust the start */
177 	delta = result->start & mask;
178 	if (delta) {
179 		result->start -= delta;
180 		result->size += delta;
181 	}
182 
183 	/* adjust the end */
184 	delta = (result->start + result->size) & mask;
185 	if (delta)
186 		result->size += block_size - delta;
187 }
188 
/*
 * Perform an io on 'where', routing it through a block-size-aligned
 * bounce buffer whenever the region or the caller's buffer is not
 * suitably aligned (required for O_DIRECT).  Returns 1 on success.
 */
static int _aligned_io(struct device_area *where, void *buffer,
		       int should_write)
{
	void *bounce;
	unsigned int block_size = 0;
	uintptr_t mask;
	struct device_area widened;

	/* Regular files need no alignment; real devices are queried. */
	if (!(where->dev->flags & DEV_REGULAR) &&
	    !_get_block_size(where->dev, &block_size))
		return_0;

	/* Fall back to the page size (e.g. for regular files). */
	if (!block_size)
		block_size = lvm_getpagesize();

	_widen_region(block_size, where, &widened);

	/* Do we need to use a bounce buffer? */
	mask = block_size - 1;
	if (!memcmp(where, &widened, sizeof(widened)) &&
	    !((uintptr_t) buffer & mask))
		return _io(where, buffer, should_write);

	/* Allocate a bounce buffer with an extra block */
	/* NOTE(review): alloca() has no detectable failure mode, so this
	 * NULL check is effectively dead; a very large region would
	 * overflow the stack instead - confirm callers bound the size. */
	if (!(bounce = alloca((size_t) widened.size + block_size))) {
		log_error("Bounce buffer alloca failed");
		return 0;
	}

	/*
	 * Realign start of bounce buffer (using the extra sector)
	 */
	if (((uintptr_t) bounce) & mask)
		bounce = (void *) ((((uintptr_t) bounce) + mask) & ~mask);

	/* channel the io through the bounce buffer */
	/* Read-modify-write: fetch the whole widened region first so the
	 * bytes outside the requested range round-trip unchanged. */
	if (!_io(&widened, bounce, 0)) {
		if (!should_write)
			return_0;
		/* FIXME pre-extend the file */
		memset(bounce, '\n', widened.size);
	}

	if (should_write) {
		/* Merge the caller's data at its offset inside the
		 * widened region... */
		memcpy(bounce + (where->start - widened.start), buffer,
		       (size_t) where->size);

		/* ... then we write */
		return _io(&widened, bounce, 1);
	}

	/* Read: hand back only the originally requested sub-region. */
	memcpy(buffer, bounce + (where->start - widened.start),
	       (size_t) where->size);

	return 1;
}
245 
246 static int _dev_get_size_file(const struct device *dev, uint64_t *size)
247 {
248 	const char *name = dev_name(dev);
249 	struct stat info;
250 
251 	if (stat(name, &info)) {
252 		log_sys_error("stat", name);
253 		return 0;
254 	}
255 
256 	*size = info.st_size;
257 	*size >>= SECTOR_SHIFT;	/* Convert to sectors */
258 
259 	log_very_verbose("%s: size is %" PRIu64 " sectors", name, *size);
260 
261 	return 1;
262 }
263 
264 static int _dev_get_size_dev(const struct device *dev, uint64_t *size)
265 {
266 	int fd;
267 	const char *name = dev_name(dev);
268 #ifdef __NetBSD__
269 	struct disklabel	lab;
270 	struct dkwedge_info     dkw;
271 #endif
272 
273 	if ((fd = open(name, O_RDONLY)) < 0) {
274 #ifndef __NetBSD__
275 		log_sys_error("open", name);
276 #endif
277 		return 0;
278 	}
279 
280 #ifdef __NetBSD__
281         /* Get info about partition/wedge */
282 	if (ioctl(fd, DIOCGWEDGEINFO, &dkw) == -1) {
283 		if (ioctl(fd, DIOCGDINFO, &lab) == -1) {
284 			log_debug("Please implement DIOCGWEDGEINFO or "
285 			    "DIOCGDINFO for disk device %s", name);
286 			close(fd);
287 			return 0;
288 		} else
289 			*size = lab.d_nsectors;
290 	} else
291 		*size = dkw.dkw_size;
292 #else
293 	if (ioctl(fd, BLKGETSIZE64, size) < 0) {
294 		log_sys_error("ioctl BLKGETSIZE64", name);
295 		if (close(fd))
296 			log_sys_error("close", name);
297 		return 0;
298 	}
299 
300 	*size >>= BLKSIZE_SHIFT;	/* Convert to sectors */
301 #endif
302 	if (close(fd))
303 		log_sys_error("close", name);
304 
305 	log_very_verbose("%s: size is %" PRIu64 " sectors", name, *size);
306 
307 	return 1;
308 }
309 
/*
 * Determine the device's read-ahead setting in sectors.  Only Linux
 * exposes this (BLKRAGET); elsewhere 0 is reported.  The value is
 * cached in dev->read_ahead.  Returns 1 on success, 0 on failure.
 */
static int _dev_read_ahead_dev(struct device *dev, uint32_t *read_ahead)
{
#ifdef linux
	long read_ahead_long;

	/* Reuse the cached value from an earlier query. */
	if (dev->read_ahead != -1) {
		*read_ahead = (uint32_t) dev->read_ahead;
		return 1;
	}

	if (!dev_open(dev))
		return_0;

	if (ioctl(dev->fd, BLKRAGET, &read_ahead_long) < 0) {
		log_sys_error("ioctl BLKRAGET", dev_name(dev));
		if (!dev_close(dev))
			stack;
		return 0;
	}

	if (!dev_close(dev))
		stack;

	*read_ahead = (uint32_t) read_ahead_long;
	dev->read_ahead = read_ahead_long;

	log_very_verbose("%s: read_ahead is %u sectors",
			 dev_name(dev), *read_ahead);
#else
	/* No way to query read-ahead here: report none rather than
	 * returning success with *read_ahead left uninitialized. */
	*read_ahead = 0;
#endif
	return 1;
}
341 
342 /*-----------------------------------------------------------------
343  * Public functions
344  *---------------------------------------------------------------*/
345 
346 int dev_get_size(const struct device *dev, uint64_t *size)
347 {
348 	if (!dev)
349 		return 0;
350 
351 	if ((dev->flags & DEV_REGULAR))
352 		return _dev_get_size_file(dev, size);
353 	else
354 		return _dev_get_size_dev(dev, size);
355 }
356 
357 int dev_get_read_ahead(struct device *dev, uint32_t *read_ahead)
358 {
359 	if (!dev)
360 		return 0;
361 
362 	if (dev->flags & DEV_REGULAR) {
363 		*read_ahead = 0;
364 		return 1;
365 	}
366 
367 	return _dev_read_ahead_dev(dev, read_ahead);
368 }
369 
370 /* FIXME Unused
371 int dev_get_sectsize(struct device *dev, uint32_t *size)
372 {
373 	int fd;
374 	int s;
375 	const char *name = dev_name(dev);
376 
377 	if ((fd = open(name, O_RDONLY)) < 0) {
378 		log_sys_error("open", name);
379 		return 0;
380 	}
381 
382 	if (ioctl(fd, BLKSSZGET, &s) < 0) {
383 		log_sys_error("ioctl BLKSSZGET", name);
384 		if (close(fd))
385 			log_sys_error("close", name);
386 		return 0;
387 	}
388 
389 	if (close(fd))
390 		log_sys_error("close", name);
391 
392 	*size = (uint32_t) s;
393 
394 	log_very_verbose("%s: sector size is %" PRIu32 " bytes", name, *size);
395 
396 	return 1;
397 }
398 */
399 
400 void dev_flush(struct device *dev)
401 {
402 #ifdef __linux__
403 	if (!(dev->flags & DEV_REGULAR) && ioctl(dev->fd, BLKFLSBUF, 0) >= 0)
404 		return;
405 #endif
406 
407 	if (fsync(dev->fd) >= 0)
408 		return;
409 
410 	sync();
411 }
412 
/*
 * Open 'dev' (or reuse an existing open), tracking references in
 * dev->open_count.  An already-open fd is reused when it satisfies the
 * requested access; otherwise the device is closed and reopened.
 * 'direct' requests O_DIRECT where supported; 'quiet' demotes open
 * failures from error to debug logging.  Returns 1 on success.
 */
int dev_open_flags(struct device *dev, int flags, int direct, int quiet)
{
	struct stat buf;
	const char *name;
	int need_excl = 0, need_rw = 0;

	if ((flags & O_ACCMODE) == O_RDWR)
		need_rw = 1;

	if ((flags & O_EXCL))
		need_excl = 1;

	if (dev->fd >= 0) {
		/* Existing open already good enough? Just add a reference. */
		if (((dev->flags & DEV_OPENED_RW) || !need_rw) &&
		    ((dev->flags & DEV_OPENED_EXCL) || !need_excl)) {
			dev->open_count++;
			return 1;
		}

		if (dev->open_count && !need_excl) {
			/* FIXME Ensure we never get here */
			log_debug("WARNING: %s already opened read-only",
				  dev_name(dev));
			dev->open_count++;
		}

		/* Access must be upgraded: drop the old fd and reopen. */
		dev_close_immediate(dev);
	}

	if (memlock())
		log_error("WARNING: dev_open(%s) called while suspended",
			  dev_name(dev));

	if (dev->flags & DEV_REGULAR)
		name = dev_name(dev);
	else if (!(name = dev_name_confirmed(dev, quiet)))
		return_0;

	if (!(dev->flags & DEV_REGULAR)) {
		/* Guard against the path now naming a different device. */
		if (stat(name, &buf) < 0) {
			log_sys_error("%s: stat failed", name);
			return 0;
		}
		if (buf.st_rdev != dev->dev) {
			log_error("%s: device changed", name);
			return 0;
		}
	}

#ifdef O_DIRECT_SUPPORT
	if (direct) {
		/* Optimistically try O_DIRECT until proven unsupported. */
		if (!(dev->flags & DEV_O_DIRECT_TESTED))
			dev->flags |= DEV_O_DIRECT;

		if ((dev->flags & DEV_O_DIRECT))
			flags |= O_DIRECT;
	}
#endif

#ifdef O_NOATIME
	/* Don't update atime on device inodes */
	if (!(dev->flags & DEV_REGULAR))
		flags |= O_NOATIME;
#endif

	if ((dev->fd = open(name, flags, 0777)) < 0) {
#ifdef O_DIRECT_SUPPORT
		/* O_DIRECT may be why the open failed: retry without it
		 * once, remembering the result for future opens. */
		if (direct && !(dev->flags & DEV_O_DIRECT_TESTED)) {
			flags &= ~O_DIRECT;
			if ((dev->fd = open(name, flags, 0777)) >= 0) {
				dev->flags &= ~DEV_O_DIRECT;
				log_debug("%s: Not using O_DIRECT", name);
				goto opened;
			}
		}
#endif
		if (quiet)
			log_sys_debug("open", name);
		else
			log_sys_error("open", name);

		return 0;
	}

#ifdef O_DIRECT_SUPPORT
      opened:
	if (direct)
		dev->flags |= DEV_O_DIRECT_TESTED;
#endif
	dev->open_count++;
	dev->flags &= ~DEV_ACCESSED_W;

	if (need_rw)
		dev->flags |= DEV_OPENED_RW;
	else
		dev->flags &= ~DEV_OPENED_RW;

	if (need_excl)
		dev->flags |= DEV_OPENED_EXCL;
	else
		dev->flags &= ~DEV_OPENED_EXCL;

	/* Re-verify identity through the open fd to close the race
	 * between the earlier stat() and the open(). */
	if (!(dev->flags & DEV_REGULAR) &&
	    ((fstat(dev->fd, &buf) < 0) || (buf.st_rdev != dev->dev))) {
		log_error("%s: fstat failed: Has device name changed?", name);
		dev_close_immediate(dev);
		return 0;
	}

#ifndef O_DIRECT_SUPPORT
	if (!(dev->flags & DEV_REGULAR))
		dev_flush(dev);
#endif

	/* Appending to an existing file: remember the current end. */
	if ((flags & O_CREAT) && !(flags & O_TRUNC))
		dev->end = lseek(dev->fd, (off_t) 0, SEEK_END);

	dm_list_add(&_open_devices, &dev->open_list);

	log_debug("Opened %s %s%s%s", dev_name(dev),
		  dev->flags & DEV_OPENED_RW ? "RW" : "RO",
		  dev->flags & DEV_OPENED_EXCL ? " O_EXCL" : "",
		  dev->flags & DEV_O_DIRECT ? " O_DIRECT" : "");

	return 1;
}
539 
/*
 * Open 'dev' without logging an open failure as an error; read-write
 * when a VG write lock is held, read-only otherwise.
 */
int dev_open_quiet(struct device *dev)
{
	return dev_open_flags(dev,
			      vg_write_lock_held() ? O_RDWR : O_RDONLY,
			      1, 1);
}
548 
/*
 * Open 'dev', logging failures as errors; read-write when a VG write
 * lock is held, read-only otherwise.
 */
int dev_open(struct device *dev)
{
	return dev_open_flags(dev,
			      vg_write_lock_held() ? O_RDWR : O_RDONLY,
			      1, 0);
}
557 
/*
 * Probe whether 'dev' can be opened exclusively (O_EXCL).  The device
 * is closed again immediately after a successful probe.  Returns the
 * result of the open attempt.
 */
int dev_test_excl(struct device *dev)
{
	int opened;
	int flags = (vg_write_lock_held() ? O_RDWR : O_RDONLY) | O_EXCL;

	opened = dev_open_flags(dev, flags, 1, 1);
	if (opened)
		dev_close_immediate(dev);

	return opened;
}
572 
/*
 * Unconditionally close 'dev': close the fd, invalidate the cached
 * block size, unlink it from the open-devices list, and free it if it
 * was transiently allocated (DEV_ALLOCED).
 */
static void _close(struct device *dev)
{
	if (close(dev->fd))
		log_sys_error("close", dev_name(dev));
	dev->fd = -1;
	dev->block_size = -1;	/* Forces re-query on next open. */
	dm_list_del(&dev->open_list);

	log_debug("Closed %s", dev_name(dev));

	if (dev->flags & DEV_ALLOCED) {
		/* Free the primary alias string, its list node, then the
		 * device structure itself. */
		dm_free((void *) dm_list_item(dev->aliases.n, struct str_list)->
			 str);
		dm_free(dev->aliases.n);
		dm_free(dev);
	}
}
590 
/*
 * Drop one reference to 'dev' and close it when no references remain,
 * unless the device is known to belong to a locked VG (kept open for
 * reuse).  'immediate' forces the close even while still referenced.
 * Returns 1 on success, 0 if the device was not open at all.
 */
static int _dev_close(struct device *dev, int immediate)
{
	struct lvmcache_info *info;

	if (dev->fd < 0) {
		log_error("Attempt to close device '%s' "
			  "which is not open.", dev_name(dev));
		return 0;
	}

#ifndef O_DIRECT_SUPPORT
	/* Without O_DIRECT, written data may still be cached: flush it
	 * before the fd goes away. */
	if (dev->flags & DEV_ACCESSED_W)
		dev_flush(dev);
#endif

	if (dev->open_count > 0)
		dev->open_count--;

	if (immediate && dev->open_count)
		log_debug("%s: Immediate close attempt while still referenced",
			  dev_name(dev));

	/* Close unless device is known to belong to a locked VG */
	if (immediate ||
	    (dev->open_count < 1 &&
	     (!(info = info_from_pvid(dev->pvid, 0)) ||
	      !info->vginfo ||
	      !vgname_is_locked(info->vginfo->vgname))))
		_close(dev);

	return 1;
}
623 
/* Drop one reference to 'dev'; it is closed when no longer needed. */
int dev_close(struct device *dev)
{
	return _dev_close(dev, 0);
}
628 
/* Force 'dev' closed now, even if references remain. */
int dev_close_immediate(struct device *dev)
{
	return _dev_close(dev, 1);
}
633 
/*
 * Close every unreferenced device on the global open list; devices
 * with outstanding references are left open.
 */
void dev_close_all(void)
{
	struct dm_list *doh, *doht;
	struct device *dev;

	/* Safe iteration: _close() unlinks the node being visited. */
	dm_list_iterate_safe(doh, doht, &_open_devices) {
		dev = dm_list_struct_base(doh, struct device, open_list);
		if (dev->open_count < 1)
			_close(dev);
	}
}
645 
646 int dev_read(struct device *dev, uint64_t offset, size_t len, void *buffer)
647 {
648 	struct device_area where;
649 
650 	if (!dev->open_count)
651 		return_0;
652 
653 	where.dev = dev;
654 	where.start = offset;
655 	where.size = len;
656 
657 	return _aligned_io(&where, buffer, 0);
658 }
659 
660 /*
661  * Read from 'dev' into 'buf', possibly in 2 distinct regions, denoted
662  * by (offset,len) and (offset2,len2).  Thus, the total size of
663  * 'buf' should be len+len2.
664  */
/*
 * Read from 'dev' into 'buf', possibly in 2 distinct regions, denoted
 * by (offset,len) and (offset2,len2).  Thus, the total size of
 * 'buf' should be len+len2.  Returns 1 on success, 0 on any failure.
 */
int dev_read_circular(struct device *dev, uint64_t offset, size_t len,
		      uint64_t offset2, size_t len2, void *buf)
{
	if (!dev_read(dev, offset, len, buf)) {
		log_error("Read from %s failed", dev_name(dev));
		return 0;
	}

	/*
	 * The second region is optional, and allows for
	 * a circular buffer on the device.
	 */
	if (!len2)
		return 1;

	/* Cast before offsetting: arithmetic on void * is a GCC
	 * extension, not standard C. */
	if (!dev_read(dev, offset2, len2, (char *) buf + len)) {
		log_error("Circular read from %s failed",
			  dev_name(dev));
		return 0;
	}

	return 1;
}
688 
689 /* FIXME If O_DIRECT can't extend file, dev_extend first; dev_truncate after.
690  *       But fails if concurrent processes writing
691  */
692 
693 /* FIXME pre-extend the file */
694 int dev_append(struct device *dev, size_t len, void *buffer)
695 {
696 	int r;
697 
698 	if (!dev->open_count)
699 		return_0;
700 
701 	r = dev_write(dev, dev->end, len, buffer);
702 	dev->end += (uint64_t) len;
703 
704 #ifndef O_DIRECT_SUPPORT
705 	dev_flush(dev);
706 #endif
707 	return r;
708 }
709 
710 int dev_write(struct device *dev, uint64_t offset, size_t len, void *buffer)
711 {
712 	struct device_area where;
713 
714 	if (!dev->open_count)
715 		return_0;
716 
717 	where.dev = dev;
718 	where.start = offset;
719 	where.size = len;
720 
721 	dev->flags |= DEV_ACCESSED_W;
722 
723 	return _aligned_io(&where, buffer, 1);
724 }
725 
726 int dev_set(struct device *dev, uint64_t offset, size_t len, int value)
727 {
728 	size_t s;
729 	char buffer[4096] __attribute((aligned(8)));
730 
731 	if (!dev_open(dev))
732 		return_0;
733 
734 	if ((offset % SECTOR_SIZE) || (len % SECTOR_SIZE))
735 		log_debug("Wiping %s at %" PRIu64 " length %" PRIsize_t,
736 			  dev_name(dev), offset, len);
737 	else
738 		log_debug("Wiping %s at sector %" PRIu64 " length %" PRIsize_t
739 			  " sectors", dev_name(dev), offset >> SECTOR_SHIFT,
740 			  len >> SECTOR_SHIFT);
741 
742 	memset(buffer, value, sizeof(buffer));
743 	while (1) {
744 		s = len > sizeof(buffer) ? sizeof(buffer) : len;
745 		if (!dev_write(dev, offset, s, buffer))
746 			break;
747 
748 		len -= s;
749 		if (!len)
750 			break;
751 
752 		offset += s;
753 	}
754 
755 	dev->flags |= DEV_ACCESSED_W;
756 
757 	if (!dev_close(dev))
758 		stack;
759 
760 	return (len == 0);
761 }
762