Lines matching full:new in zfs_rangelock.c

From the block comment describing the locking scheme:

  81   * new proxy locks created for non-overlapping ranges.
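For reference, every lock request below is a zfs_locked_range_t node in the rangelock's AVL tree. The following is a minimal sketch of that structure, reconstructed only from the fields these matches touch; the authoritative definition lives in the OpenZFS sys/zfs_rangelock.h header, which also contains condition variables (omitted here) used to block waiters.

    typedef struct zfs_locked_range {
            zfs_rangelock_t *lr_rangelock;   /* rangelock this request belongs to */
            avl_node_t      lr_node;         /* link in the rangelock's AVL tree */
            uint64_t        lr_offset;       /* start of the byte range */
            uint64_t        lr_length;       /* length of the byte range */
            uint_t          lr_count;        /* references; 0 once proxies stand in */
            zfs_rangelock_type_t lr_type;    /* RL_READER, RL_WRITER, or RL_APPEND */
            boolean_t       lr_proxy;        /* acting on behalf of the original range */
            boolean_t       lr_write_wanted; /* a writer is blocked on this range */
            boolean_t       lr_read_wanted;  /* a reader is blocked on this range */
    } zfs_locked_range_t;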
In zfs_rangelock_enter_writer():

 157  zfs_rangelock_enter_writer(zfs_rangelock_t *rl, zfs_locked_range_t *new,
 163      uint64_t orig_off = new->lr_offset;
 164      uint64_t orig_len = new->lr_length;
 165      zfs_rangelock_type_t orig_type = new->lr_type;
 169       * Call the callback, which can modify new->lr_offset, lr_length, lr_type.
 174      rl->rl_cb(new, rl->rl_arg);
 181      ASSERT3U(new->lr_type, ==, RL_WRITER);
 187      avl_add(tree, new);
 194      lr = avl_find(tree, new, &where);
 200          lr->lr_offset < new->lr_offset + new->lr_length)
 205          lr->lr_offset + lr->lr_length > new->lr_offset)
 208      avl_insert(tree, new, where);
 220      new->lr_offset = orig_off;
 221      new->lr_length = orig_len;
 222      new->lr_type = orig_type;
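The comparisons at lines 200 and 205 are the two halves of the standard interval-overlap test, applied to the neighbors on either side of the insertion point. As a standalone illustration (ranges_overlap is a hypothetical helper, not part of the source):

    /*
     * Half-open ranges [a_off, a_off + a_len) and [b_off, b_off + b_len)
     * overlap iff each one starts before the other one ends.
     */
    static boolean_t
    ranges_overlap(uint64_t a_off, uint64_t a_len, uint64_t b_off, uint64_t b_len)
    {
            return (a_off < b_off + b_len && b_off < a_off + a_len);
    }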
From the comment above the proxy-lock constructor:

 291   * Create and add a new proxy range lock for the supplied range.

In zfs_rangelock_add_reader():

 311  zfs_rangelock_add_reader(avl_tree_t *tree, zfs_locked_range_t *new,
 315      uint64_t off = new->lr_offset;
 316      uint64_t len = new->lr_length;
 322       *   range may overlap with the new range
 323       * - null, if there were no ranges starting before the new one
 345      /* No overlaps; use the original new zfs_locked_range_t in the tree. */
 346      avl_insert(tree, new, where);
 355      new->lr_count = 0; /* will use proxies in tree */
 358       * of the new range. For each entry we make it a proxy if it
 360       * gaps between the ranges then we create a new proxy range.
 382      /* new range ends in the middle of this block */
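Line 291's comment belongs to the helper that manufactures those proxies. A sketch of what such a constructor does, assuming the field layout above (in current OpenZFS this helper is zfs_rangelock_new_proxy; treat the body below as an illustration rather than the exact source):

    /* Sketch: allocate a reader proxy covering [off, off + len) and add it. */
    static zfs_locked_range_t *
    zfs_rangelock_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
    {
            zfs_locked_range_t *lr;

            ASSERT(len != 0);
            lr = kmem_alloc(sizeof (zfs_locked_range_t), KM_SLEEP);
            lr->lr_offset = off;
            lr->lr_length = len;
            lr->lr_count = 1;           /* one reader holds this piece */
            lr->lr_type = RL_READER;    /* proxies are always reader locks */
            lr->lr_proxy = B_TRUE;      /* stands in for the original range */
            lr->lr_write_wanted = B_FALSE;
            lr->lr_read_wanted = B_FALSE;
            avl_add(tree, lr);
            return (lr);
    }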
In zfs_rangelock_enter_reader():

 402  zfs_rangelock_enter_reader(zfs_rangelock_t *rl, zfs_locked_range_t *new,
 408      uint64_t off = new->lr_offset;
 409      uint64_t len = new->lr_length;
 415      prev = avl_find(tree, new, &where);
 469      zfs_rangelock_add_reader(tree, new, prev, where);
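The avl_find() at line 415 returns NULL when no existing range starts exactly at new's offset; the where cookie then lets the code reach the neighbors that might overlap. A minimal sketch of that lookup pattern using the illumos/OpenZFS AVL API (variable names are illustrative):

    avl_index_t where;
    zfs_locked_range_t *prev, *next;

    prev = avl_find(tree, new, &where);
    if (prev == NULL) {
            /* No exact match: fall back to the nearest range before new. */
            prev = avl_nearest(tree, where, AVL_BEFORE);
    }
    /* First range at or after new, from which to scan forward for overlaps. */
    next = (prev != NULL) ? AVL_NEXT(tree, prev) :
        avl_nearest(tree, where, AVL_AFTER);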
In zfs_rangelock_enter_impl():

 485      zfs_locked_range_t *new;
 489      new = kmem_alloc(sizeof (zfs_locked_range_t), KM_SLEEP);
 490      new->lr_rangelock = rl;
 491      new->lr_offset = off;
 494      new->lr_length = len;
 495      new->lr_count = 1; /* assume it's going to be in the tree */
 496      new->lr_type = type;
 497      new->lr_proxy = B_FALSE;
 498      new->lr_write_wanted = B_FALSE;
 499      new->lr_read_wanted = B_FALSE;
 507          avl_add(&rl->rl_tree, new);
 508      } else if (!zfs_rangelock_enter_reader(rl, new, nonblock)) {
 509          kmem_free(new, sizeof (*new));
 510          new = NULL;
 512      } else if (!zfs_rangelock_enter_writer(rl, new, nonblock)) {
 513          kmem_free(new, sizeof (*new));
 514          new = NULL;
 517      return (new);
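zfs_rangelock_enter_impl() is the common worker behind the public entry points: zfs_rangelock_enter() passes nonblock = B_FALSE and sleeps until the range is available, while zfs_rangelock_tryenter() passes B_TRUE and returns NULL instead of blocking. A usage sketch as a caller sees it (assuming a znode zp; error handling abbreviated):

    zfs_locked_range_t *lr;

    /* Block until [off, off + len) can be read-locked. */
    lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_READER);
    /* ... read the file range ... */
    zfs_rangelock_exit(lr);

    /* Non-blocking variant: returns NULL rather than sleeping. */
    lr = zfs_rangelock_tryenter(&zp->z_rangelock, off, len, RL_WRITER);
    if (lr == NULL)
            return (SET_ERROR(EAGAIN));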