Lines Matching full:zap (each entry below is one matching source line, prefixed with its line number in the searched file)
34 #include <sys/zap.h>
87 zap_getflags(zap_t *zap)
89 if (zap->zap_ismicro)
91 return (zap_f_phys(zap)->zap_flags);
95 zap_hashbits(zap_t *zap)
97 if (zap_getflags(zap) & ZAP_FLAG_HASH64)
104 zap_maxcd(zap_t *zap)
106 if (zap_getflags(zap) & ZAP_FLAG_HASH64)
115 zap_t *zap = zn->zn_zap;
118 if (zap_getflags(zap) & ZAP_FLAG_PRE_HASHED_KEY) {
119 ASSERT(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY);
122 h = zap->zap_salt;
126 if (zap_getflags(zap) & ZAP_FLAG_UINT64_KEY) {
165 h &= ~((1ULL << (64 - zap_hashbits(zap))) - 1);
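The zap_hash() matches end with a mask that keeps only the upper zap_hashbits(zap) bits of the 64-bit hash and zeroes the rest, apparently so the low bits stay available for other state such as the collision differentiator. A minimal standalone sketch of that masking; the 28-bit width below is a hypothetical value chosen only for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Keep only the top `bits` bits of a 64-bit hash, mirroring the mask in zap_hash(). */
    static uint64_t
    keep_top_bits(uint64_t h, unsigned bits)
    {
        return (h & ~((1ULL << (64 - bits)) - 1));
    }

    int
    main(void)
    {
        uint64_t h = 0x0123456789abcdefULL;
        /* Prints 0123456000000000: only the top 28 bits survive. */
        printf("%016llx\n", (unsigned long long)keep_top_bits(h, 28));
        return (0);
    }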
171 zap_normalize(zap_t *zap, const char *name, char *namenorm, int normflags,
174 ASSERT(!(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY));
253 zap_name_alloc(zap_t *zap, boolean_t longname)
258 zn->zn_zap = zap;
277 zap_t *zap = zn->zn_zap;
288 zn->zn_normflags = zap->zap_normflags;
298 if (zap->zap_normflags) {
303 if (zap_normalize(zap, key, zn->zn_normbuf,
304 zap->zap_normflags, zn->zn_normbuf_len) != 0)
317 if (zap->zap_normflags != zn->zn_normflags) {
322 if (zap_normalize(zap, key, zn->zn_normbuf,
332 zap_name_alloc_str(zap_t *zap, const char *key, matchtype_t mt)
335 zap_name_t *zn = zap_name_alloc(zap, (key_len > ZAP_MAXNAMELEN));
344 zap_name_alloc_uint64(zap_t *zap, const uint64_t *key, int numints)
348 ASSERT(zap->zap_normflags == 0);
349 zn->zn_zap = zap;
403 mze_insert(zap_t *zap, uint16_t chunkid, uint64_t hash)
407 ASSERT(zap->zap_ismicro);
408 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
413 ASSERT3U(MZE_PHYS(zap, &mze)->mze_cd, <=, 0xffff);
414 mze.mze_cd = (uint16_t)MZE_PHYS(zap, &mze)->mze_cd;
415 ASSERT(MZE_PHYS(zap, &mze)->mze_name[0] != 0);
416 zfs_btree_add(&zap->zap_m.zap_tree, &mze);
447 mze_find_unused_cd(zap_t *zap, uint64_t hash)
451 zfs_btree_t *tree = &zap->zap_m.zap_tree;
453 ASSERT(zap->zap_ismicro);
454 ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
487 zap_t *zap = zn->zn_zap;
490 zfs_btree_t *tree = &zap->zap_m.zap_tree;
511 mze_destroy(zap_t *zap)
513 zfs_btree_clear(&zap->zap_m.zap_tree);
514 zfs_btree_destroy(&zap->zap_m.zap_tree);
527 zap_t *zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP);
528 rw_init(&zap->zap_rwlock, NULL, RW_DEFAULT, NULL);
529 rw_enter(&zap->zap_rwlock, RW_WRITER);
530 zap->zap_objset = dmu_buf_get_objset(db);
531 zap->zap_object = db->db_object;
532 zap->zap_dbuf = db;
535 mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, MUTEX_DEFAULT,
537 zap->zap_f.zap_block_shift = highbit64(db->db_size) - 1;
543 zap->zap_ismicro = TRUE;
551 dmu_buf_init_user(&zap->zap_dbu, zap_evict_sync, NULL, &zap->zap_dbuf);
552 winner = dmu_buf_set_user(db, &zap->zap_dbu);
557 if (zap->zap_ismicro) {
558 zap->zap_salt = zap_m_phys(zap)->mz_salt;
559 zap->zap_normflags = zap_m_phys(zap)->mz_normflags;
560 zap->zap_m.zap_num_chunks = db->db_size / MZAP_ENT_LEN - 1;
567 zfs_btree_create_custom(&zap->zap_m.zap_tree, mze_compare,
570 zap_name_t *zn = zap_name_alloc(zap, B_FALSE);
571 for (uint16_t i = 0; i < zap->zap_m.zap_num_chunks; i++) {
573 &zap_m_phys(zap)->mz_chunk[i];
575 zap->zap_m.zap_num_entries++;
577 mze_insert(zap, i, zn->zn_hash);
582 zap->zap_salt = zap_f_phys(zap)->zap_salt;
583 zap->zap_normflags = zap_f_phys(zap)->zap_normflags;
592 ASSERT3P(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0), >,
593 &zap_f_phys(zap)->zap_salt);
599 ASSERT3U((uintptr_t)&ZAP_EMBEDDED_PTRTBL_ENT(zap,
600 1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)) -
601 (uintptr_t)zap_f_phys(zap), ==,
602 zap->zap_dbuf->db_size);
604 rw_exit(&zap->zap_rwlock);
605 return (zap);
608 rw_exit(&zap->zap_rwlock);
609 rw_destroy(&zap->zap_rwlock);
610 if (!zap->zap_ismicro)
611 mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
612 kmem_free(zap, sizeof (zap_t));
635 zap_t *zap = dmu_buf_get_user(db);
636 if (zap == NULL) {
637 zap = mzap_open(db);
638 if (zap == NULL) {
654 krw_t lt = (!zap->zap_ismicro && fatreader) ? RW_READER : lti;
655 rw_enter(&zap->zap_rwlock, lt);
656 if (lt != ((!zap->zap_ismicro && fatreader) ? RW_READER : lti)) {
660 ((!zap->zap_ismicro && fatreader) ? RW_READER : lti));
661 rw_downgrade(&zap->zap_rwlock);
665 zap->zap_objset = os;
666 zap->zap_dnode = dn;
671 ASSERT3P(zap->zap_dbuf, ==, db);
673 ASSERT(!zap->zap_ismicro ||
674 zap->zap_m.zap_num_entries <= zap->zap_m.zap_num_chunks);
675 if (zap->zap_ismicro && tx && adding &&
676 zap->zap_m.zap_num_entries == zap->zap_m.zap_num_chunks) {
680 (u_longlong_t)obj, zap->zap_m.zap_num_entries);
681 *zapp = zap;
684 rw_exit(&zap->zap_rwlock);
688 zap->zap_m.zap_num_chunks =
717 *zapp = zap;
766 zap_unlockdir(zap_t *zap, const void *tag)
768 rw_exit(&zap->zap_rwlock);
769 dnode_rele(zap->zap_dnode, tag);
770 dmu_buf_rele(zap->zap_dbuf, tag);
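Nearly every public entry point matched below wraps its work in the same zap_lockdir()/zap_unlockdir() pattern visible here: acquire the ZAP's rwlock (as a reader for lookups, as a writer with a transaction when adding), operate, then release with the same tag. A condensed sketch of that pattern; it assumes the in-kernel ZAP headers, and the body of the operation is elided:

    /* Sketch of the lock/unlock pattern used by the zap_*() wrappers in these matches. */
    static int
    example_locked_op(objset_t *os, uint64_t zapobj)
    {
        zap_t *zap;
        int err;

        /* RW_READER for reads; RW_WRITER (plus a tx) when adding entries. */
        err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE,
            FTAG, &zap);
        if (err != 0)
            return (err);

        /* ... operate on the locked zap_t here ... */

        zap_unlockdir(zap, FTAG);
        return (0);
    }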
777 zap_t *zap = *zapp;
779 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
781 int sz = zap->zap_dbuf->db_size;
783 memcpy(mzp, zap->zap_dbuf->db_data, sz);
784 int nchunks = zap->zap_m.zap_num_chunks;
787 err = dmu_object_set_blocksize(zap->zap_objset, zap->zap_object,
796 (u_longlong_t)zap->zap_object, nchunks);
798 mze_destroy(zap);
800 fzap_upgrade(zap, tx, flags);
802 zap_name_t *zn = zap_name_alloc(zap, B_FALSE);
813 zap = zn->zn_zap; /* fzap_add_cd() may change zap */
817 *zapp = zap;
853 zap_t *zap;
854 /* Only fat zap supports flags; upgrade immediately. */
857 B_FALSE, B_FALSE, &zap));
858 VERIFY0(mzap_upgrade(&zap, FTAG, tx, flags));
859 zap_unlockdir(zap, FTAG);
993 * Create a zap object and return a pointer to the newly allocated dnode via
1023 zap_t *zap = dbu;
1025 rw_destroy(&zap->zap_rwlock);
1027 if (zap->zap_ismicro)
1028 mze_destroy(zap);
1030 mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
1032 kmem_free(zap, sizeof (zap_t));
1038 zap_t *zap;
1041 zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
1044 if (!zap->zap_ismicro) {
1045 err = fzap_count(zap, count);
1047 *count = zap->zap_m.zap_num_entries;
1049 zap_unlockdir(zap, FTAG);
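zap_count() above locks the object, reads the entry count (directly from zap_m.zap_num_entries for a micro ZAP, via fzap_count() otherwise), and unlocks. A hedged consumer-side sketch; os and zapobj are assumed to be an open objset and an existing ZAP object:

    uint64_t count;
    int err = zap_count(os, zapobj, &count);
    if (err == 0)
        dprintf("zap obj %llu holds %llu entries\n",
            (u_longlong_t)zapobj, (u_longlong_t)count);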
1058 mzap_normalization_conflict(zap_t *zap, zap_name_t *zn, mzap_ent_t *mze,
1065 if (zap->zap_normflags == 0)
1068 for (other = zfs_btree_prev(&zap->zap_m.zap_tree, idx, &oidx);
1070 other = zfs_btree_prev(&zap->zap_m.zap_tree, &oidx, &oidx)) {
1073 zn = zap_name_alloc_str(zap,
1074 MZE_PHYS(zap, mze)->mze_name, MT_NORMALIZE);
1077 if (zap_match(zn, MZE_PHYS(zap, other)->mze_name)) {
1084 for (other = zfs_btree_next(&zap->zap_m.zap_tree, idx, &oidx);
1086 other = zfs_btree_next(&zap->zap_m.zap_tree, &oidx, &oidx)) {
1089 zn = zap_name_alloc_str(zap,
1090 MZE_PHYS(zap, mze)->mze_name, MT_NORMALIZE);
1093 if (zap_match(zn, MZE_PHYS(zap, other)->mze_name)) {
1118 zap_lookup_impl(zap_t *zap, const char *name,
1125 zap_name_t *zn = zap_name_alloc_str(zap, name, mt);
1129 if (!zap->zap_ismicro) {
1144 MZE_PHYS(zap, mze)->mze_value;
1147 MZE_PHYS(zap, mze)->mze_name,
1150 *ncp = mzap_normalization_conflict(zap,
1166 zap_t *zap;
1169 zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
1172 err = zap_lookup_impl(zap, name, integer_size,
1174 zap_unlockdir(zap, FTAG);
1181 zap_t *zap;
1185 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
1188 zn = zap_name_alloc_str(zap, name, 0);
1190 zap_unlockdir(zap, FTAG);
1196 zap_unlockdir(zap, FTAG);
1229 zap_t *zap;
1232 FTAG, &zap);
1235 err = zap_lookup_impl(zap, name, integer_size,
1237 zap_unlockdir(zap, FTAG);
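The zap_lookup()/zap_lookup_impl() matches above form the string-keyed read path: micro ZAP entries are matched in memory through MZE_PHYS(), while larger objects fall through to the fat ZAP lookup. A sketch of the consumer-facing call reading one 64-bit value, under the same assumptions as the earlier sketch; the attribute name "example-attr" is hypothetical:

    uint64_t value;
    int err = zap_lookup(os, zapobj, "example-attr", sizeof (uint64_t), 1,
        &value);
    if (err == 0) {
        /* value now holds the integer stored under "example-attr" */
    } else if (err == ENOENT) {
        /* no entry with that name */
    }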
1242 zap_prefetch_uint64_impl(zap_t *zap, const uint64_t *key, int key_numints)
1244 zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
1246 zap_unlockdir(zap, FTAG);
1252 zap_unlockdir(zap, FTAG);
1260 zap_t *zap;
1263 zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
1266 err = zap_prefetch_uint64_impl(zap, key, key_numints);
1274 zap_t *zap;
1277 zap_lockdir_by_dnode(dn, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
1280 err = zap_prefetch_uint64_impl(zap, key, key_numints);
1286 zap_lookup_uint64_impl(zap_t *zap, const uint64_t *key,
1289 zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
1291 zap_unlockdir(zap, FTAG);
1298 zap_unlockdir(zap, FTAG);
1306 zap_t *zap;
1309 zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
1312 err = zap_lookup_uint64_impl(zap, key, key_numints, integer_size,
1322 zap_t *zap;
1325 zap_lockdir_by_dnode(dn, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
1328 err = zap_lookup_uint64_impl(zap, key, key_numints, integer_size,
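zap_lookup_uint64() and its _by_dnode variant take a binary key (an array of uint64_t words) rather than a string; note the ASSERT in zap_name_alloc_uint64() that such objects carry no normalization flags. A sketch with a hypothetical two-word key, under the same os/zapobj assumptions:

    uint64_t key[2] = { 0x1234, 0x5678 };   /* hypothetical binary key */
    uint64_t value;
    int err = zap_lookup_uint64(os, zapobj, key, 2, sizeof (uint64_t), 1,
        &value);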
1348 zap_t *zap;
1351 zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
1354 zap_name_t *zn = zap_name_alloc_str(zap, name, 0);
1356 zap_unlockdir(zap, FTAG);
1359 if (!zap->zap_ismicro) {
1374 zap_unlockdir(zap, FTAG);
1382 zap_t *zap;
1385 zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
1388 zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
1390 zap_unlockdir(zap, FTAG);
1395 zap_unlockdir(zap, FTAG);
1402 zap_t *zap = zn->zn_zap;
1403 uint16_t start = zap->zap_m.zap_alloc_next;
1405 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
1408 for (int i = 0; i < zap->zap_m.zap_num_chunks; i++) {
1409 mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
1414 uint32_t cd = mze_find_unused_cd(zap, zn->zn_hash);
1416 ASSERT(cd < zap_maxcd(zap));
1419 for (uint16_t i = start; i < zap->zap_m.zap_num_chunks; i++) {
1420 mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
1426 zap->zap_m.zap_num_entries++;
1427 zap->zap_m.zap_alloc_next = i+1;
1428 if (zap->zap_m.zap_alloc_next ==
1429 zap->zap_m.zap_num_chunks)
1430 zap->zap_m.zap_alloc_next = 0;
1431 mze_insert(zap, i, zn->zn_hash);
1443 zap_add_impl(zap_t *zap, const char *key,
1450 zap_name_t *zn = zap_name_alloc_str(zap, key, 0);
1452 zap_unlockdir(zap, tag);
1455 if (!zap->zap_ismicro) {
1457 zap = zn->zn_zap; /* fzap_add() may change zap */
1466 zap = zn->zn_zap; /* fzap_add() may change zap */
1475 ASSERT(zap == zn->zn_zap);
1477 if (zap != NULL) /* may be NULL if fzap_add() failed */
1478 zap_unlockdir(zap, tag);
1487 zap_t *zap;
1490 err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
1493 err = zap_add_impl(zap, key, integer_size, num_integers, val, tx, FTAG);
1503 zap_t *zap;
1506 err = zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
1509 err = zap_add_impl(zap, key, integer_size, num_integers, val, tx, FTAG);
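zap_add() and zap_add_by_dnode() take the directory lock as a writer with adding=TRUE, which lets a full micro ZAP be converted by mzap_upgrade() before the insert. A consumer-side sketch adding one 64-bit value inside an already-assigned transaction tx; the attribute name is hypothetical:

    uint64_t value = 42;
    int err = zap_add(os, zapobj, "example-attr", sizeof (uint64_t), 1,
        &value, tx);
    if (err == EEXIST) {
        /* an entry with that name already exists */
    }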
1515 zap_add_uint64_impl(zap_t *zap, const uint64_t *key,
1521 zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
1523 zap_unlockdir(zap, tag);
1527 zap = zn->zn_zap; /* fzap_add() may change zap */
1529 if (zap != NULL) /* may be NULL if fzap_add() failed */
1530 zap_unlockdir(zap, tag);
1539 zap_t *zap;
1542 zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
1545 err = zap_add_uint64_impl(zap, key, key_numints,
1556 zap_t *zap;
1559 zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
1562 err = zap_add_uint64_impl(zap, key, key_numints,
1572 zap_t *zap;
1576 zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
1579 zap_name_t *zn = zap_name_alloc_str(zap, name, 0);
1581 zap_unlockdir(zap, FTAG);
1584 if (!zap->zap_ismicro) {
1587 zap = zn->zn_zap; /* fzap_update() may change zap */
1598 zap = zn->zn_zap; /* fzap_update() may change zap */
1603 MZE_PHYS(zap, mze)->mze_value = *intval;
1608 ASSERT(zap == zn->zn_zap);
1610 if (zap != NULL) /* may be NULL if fzap_upgrade() failed */
1611 zap_unlockdir(zap, FTAG);
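zap_update() follows the same shape as zap_add() but overwrites an existing entry (for a micro ZAP, by rewriting mze_value in place) or creates it if absent. A sketch under the same assumptions as above:

    uint64_t value = 7;
    int err = zap_update(os, zapobj, "example-attr", sizeof (uint64_t), 1,
        &value, tx);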
1616 zap_update_uint64_impl(zap_t *zap, const uint64_t *key, int key_numints,
1622 zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
1624 zap_unlockdir(zap, tag);
1628 zap = zn->zn_zap; /* fzap_update() may change zap */
1630 if (zap != NULL) /* may be NULL if fzap_upgrade() failed */
1631 zap_unlockdir(zap, tag);
1640 zap_t *zap;
1643 zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
1646 err = zap_update_uint64_impl(zap, key, key_numints,
1656 zap_t *zap;
1659 zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
1662 err = zap_update_uint64_impl(zap, key, key_numints,
1675 zap_remove_impl(zap_t *zap, const char *name,
1680 zap_name_t *zn = zap_name_alloc_str(zap, name, mt);
1683 if (!zap->zap_ismicro) {
1691 zap->zap_m.zap_num_entries--;
1692 memset(MZE_PHYS(zap, mze), 0, sizeof (mzap_ent_phys_t));
1693 zfs_btree_remove_idx(&zap->zap_m.zap_tree, &idx);
1704 zap_t *zap;
1707 err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
1710 err = zap_remove_impl(zap, name, mt, tx);
1711 zap_unlockdir(zap, FTAG);
1718 zap_t *zap;
1721 err = zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
1724 err = zap_remove_impl(zap, name, 0, tx);
1725 zap_unlockdir(zap, FTAG);
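The remove path above clears the matching micro ZAP chunk and drops its b-tree node, or defers to the fat ZAP code for larger objects. A consumer-side sketch, with the same hypothetical name and an assigned transaction tx:

    int err = zap_remove(os, zapobj, "example-attr", tx);
    if (err == ENOENT) {
        /* nothing stored under that name */
    }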
1730 zap_remove_uint64_impl(zap_t *zap, const uint64_t *key, int key_numints,
1735 zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
1737 zap_unlockdir(zap, tag);
1742 zap_unlockdir(zap, tag);
1750 zap_t *zap;
1753 zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
1756 err = zap_remove_uint64_impl(zap, key, key_numints, tx, FTAG);
1765 zap_t *zap;
1768 zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
1771 err = zap_remove_uint64_impl(zap, key, key_numints, tx, FTAG);
1836 * Initialize a cursor at the beginning of the ZAP object. The entire
1837 * ZAP object will be prefetched.
1847 * the entire ZAP object.
1971 zap_t *zap;
1974 zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
1980 if (zap->zap_ismicro) {
1981 zs->zs_blocksize = zap->zap_dbuf->db_size;
1982 zs->zs_num_entries = zap->zap_m.zap_num_entries;
1985 fzap_get_stats(zap, zs);
1987 zap_unlockdir(zap, FTAG);
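zap_get_stats() reports either the trivial micro ZAP numbers seen above (block size and entry count) or the full fzap_get_stats() breakdown. A hedged sketch; only the two zap_stats_t fields visible in these matches are used, and os/zapobj are assumed as before:

    zap_stats_t zs;
    int err = zap_get_stats(os, zapobj, &zs);
    if (err == 0)
        dprintf("blocksize=%llu entries=%llu\n",
            (u_longlong_t)zs.zs_blocksize, (u_longlong_t)zs.zs_num_entries);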
2045 "Maximum micro ZAP size before converting to a fat ZAP, "