/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/vfs.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_fuid.h>
#include <sys/spa.h>
#include <sys/zil.h>
#include <sys/byteorder.h>
#include <sys/stat.h>
#include <sys/acl.h>
#include <sys/atomic.h>
#include <sys/cred.h>
#include <sys/namei.h>

/*
 * Functions to replay ZFS intent log (ZIL) records.
 * The functions are called through a function vector (zfs_replay_vector)
 * which is indexed by the transaction type.
 */

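/*
 * Initialize a vattr_t from the fields carried in a ZIL record.  Ephemeral
 * (FUID) uids/gids are replaced with -1 here; the real ids are rebuilt later
 * from the FUID information logged alongside the record.
 */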
static void
zfs_init_vattr(vattr_t *vap, uint64_t mask, uint64_t mode,
    uint64_t uid, uint64_t gid, uint64_t rdev, uint64_t nodeid)
{
	VATTR_NULL(vap);
	vap->va_mask = (uint_t)mask;
	if (mask & AT_TYPE)
		vap->va_type = IFTOVT(mode);
	if (mask & AT_MODE)
		vap->va_mode = mode & MODEMASK;
	if (mask & AT_UID)
		vap->va_uid = (uid_t)(IS_EPHEMERAL(uid)) ? -1 : uid;
	if (mask & AT_GID)
		vap->va_gid = (gid_t)(IS_EPHEMERAL(gid)) ? -1 : gid;
	vap->va_rdev = zfs_cmpldev(rdev);
	vap->va_nodeid = nodeid;
}

/* ARGSUSED */
static int
zfs_replay_error(zfsvfs_t *zfsvfs, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

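/*
 * Decode the optional attribute (xvattr) portion of a log record: copy the
 * request bitmap into the xvattr and pull the individual XAT_* flags, the
 * create time and the AV scanstamp out of the packed log format.
 */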
static void
zfs_replay_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
{
	xoptattr_t *xoap = NULL;
	uint64_t *attrs;
	uint64_t *crtime;
	uint32_t *bitmap;
	void *scanstamp;
	int i;

	xvap->xva_vattr.va_mask |= AT_XVATTR;
	if ((xoap = xva_getxoptattr(xvap)) == NULL) {
		xvap->xva_vattr.va_mask &= ~AT_XVATTR; /* shouldn't happen */
		return;
	}

	ASSERT(lrattr->lr_attr_masksize == xvap->xva_mapsize);

	bitmap = &lrattr->lr_attr_bitmap;
	for (i = 0; i != lrattr->lr_attr_masksize; i++, bitmap++)
		xvap->xva_reqattrmap[i] = *bitmap;

	attrs = (uint64_t *)(lrattr + lrattr->lr_attr_masksize - 1);
	crtime = attrs + 1;
	scanstamp = (caddr_t)(crtime + 2);

	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN))
		xoap->xoa_hidden = ((*attrs & XAT0_HIDDEN) != 0);
	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM))
		xoap->xoa_system = ((*attrs & XAT0_SYSTEM) != 0);
	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE))
		xoap->xoa_archive = ((*attrs & XAT0_ARCHIVE) != 0);
	if (XVA_ISSET_REQ(xvap, XAT_READONLY))
		xoap->xoa_readonly = ((*attrs & XAT0_READONLY) != 0);
	if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE))
		xoap->xoa_immutable = ((*attrs & XAT0_IMMUTABLE) != 0);
	if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK))
		xoap->xoa_nounlink = ((*attrs & XAT0_NOUNLINK) != 0);
	if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY))
		xoap->xoa_appendonly = ((*attrs & XAT0_APPENDONLY) != 0);
	if (XVA_ISSET_REQ(xvap, XAT_NODUMP))
		xoap->xoa_nodump = ((*attrs & XAT0_NODUMP) != 0);
	if (XVA_ISSET_REQ(xvap, XAT_OPAQUE))
		xoap->xoa_opaque = ((*attrs & XAT0_OPAQUE) != 0);
	if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED))
		xoap->xoa_av_modified = ((*attrs & XAT0_AV_MODIFIED) != 0);
	if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED))
		xoap->xoa_av_quarantined =
		    ((*attrs & XAT0_AV_QUARANTINED) != 0);
	if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
		ZFS_TIME_DECODE(&xoap->xoa_createtime, crtime);
	if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
		bcopy(scanstamp, xoap->xoa_av_scanstamp, AV_SCANSTAMP_SZ);
	if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
		xoap->xoa_reparse = ((*attrs & XAT0_REPARSE) != 0);
	if (XVA_ISSET_REQ(xvap, XAT_OFFLINE))
		xoap->xoa_offline = ((*attrs & XAT0_OFFLINE) != 0);
	if (XVA_ISSET_REQ(xvap, XAT_SPARSE))
		xoap->xoa_sparse = ((*attrs & XAT0_SPARSE) != 0);
}

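/*
 * Return how many FUID domain strings follow the ids in the log record:
 * one if the uid has a FUID domain index, and one more if the gid's domain
 * index is set and differs from the uid's.
 */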
static int
zfs_replay_domain_cnt(uint64_t uid, uint64_t gid)
{
	uint64_t uid_idx;
	uint64_t gid_idx;
	int domcnt = 0;

	uid_idx = FUID_INDEX(uid);
	gid_idx = FUID_INDEX(gid);
	if (uid_idx)
		domcnt++;
	if (gid_idx > 0 && gid_idx != uid_idx)
		domcnt++;

	return (domcnt);
}

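/*
 * Point the fuid_info domain table entries at the domcnt NUL-terminated
 * domain strings packed at "start" and return the address just past the
 * last string.
 */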
static void *
zfs_replay_fuid_domain_common(zfs_fuid_info_t *fuid_infop, void *start,
    int domcnt)
{
	int i;

	for (i = 0; i != domcnt; i++) {
		fuid_infop->z_domain_table[i] = start;
		start = (caddr_t)start + strlen(start) + 1;
	}

	return (start);
}

/*
 * Set the uid/gid in the fuid_info structure.
 */
static void
zfs_replay_fuid_ugid(zfs_fuid_info_t *fuid_infop, uint64_t uid, uint64_t gid)
{
	/*
	 * If owner or group are log-specific FUIDs then slurp up
	 * domain information and build zfs_fuid_info_t.
	 */
	if (IS_EPHEMERAL(uid))
		fuid_infop->z_fuid_owner = uid;

	if (IS_EPHEMERAL(gid))
		fuid_infop->z_fuid_group = gid;
}

/*
 * Load fuid domains into fuid_info_t.
 */
static zfs_fuid_info_t *
zfs_replay_fuid_domain(void *buf, void **end, uint64_t uid, uint64_t gid)
{
	int domcnt;

	zfs_fuid_info_t *fuid_infop;

	fuid_infop = zfs_fuid_info_alloc();

	domcnt = zfs_replay_domain_cnt(uid, gid);

	if (domcnt == 0)
		return (fuid_infop);

	fuid_infop->z_domain_table =
	    kmem_zalloc(domcnt * sizeof (char **), KM_SLEEP);

	zfs_replay_fuid_ugid(fuid_infop, uid, gid);

	fuid_infop->z_domain_cnt = domcnt;
	*end = zfs_replay_fuid_domain_common(fuid_infop, buf, domcnt);
	return (fuid_infop);
}

/*
 * Load zfs_fuid_t's and fuid_domains into fuid_info_t.
 */
static zfs_fuid_info_t *
zfs_replay_fuids(void *start, void **end, int idcnt, int domcnt, uint64_t uid,
    uint64_t gid)
{
	uint64_t *log_fuid = (uint64_t *)start;
	zfs_fuid_info_t *fuid_infop;
	int i;

	fuid_infop = zfs_fuid_info_alloc();
	fuid_infop->z_domain_cnt = domcnt;

	fuid_infop->z_domain_table =
	    kmem_zalloc(domcnt * sizeof (char **), KM_SLEEP);

	for (i = 0; i != idcnt; i++) {
		zfs_fuid_t *zfuid;

		zfuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP);
		zfuid->z_logfuid = *log_fuid;
		zfuid->z_id = -1;
		zfuid->z_domidx = 0;
		list_insert_tail(&fuid_infop->z_fuids, zfuid);
		log_fuid++;
	}

	zfs_replay_fuid_ugid(fuid_infop, uid, gid);

	*end = zfs_replay_fuid_domain_common(fuid_infop, log_fuid, domcnt);
	return (fuid_infop);
}

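/*
 * Byteswap an lr_attr_t logged with the opposite endianness: the fixed
 * header, the variable-length bitmap, and the attribute word plus create
 * time that follow it.
 */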
static void
zfs_replay_swap_attrs(lr_attr_t *lrattr)
{
	/* swap the lr_attr structure */
	byteswap_uint32_array(lrattr, sizeof (*lrattr));
	/* swap the bitmap */
	byteswap_uint32_array(lrattr + 1, (lrattr->lr_attr_masksize - 1) *
	    sizeof (uint32_t));
	/* swap the attributes, create time + 64 bit word for attributes */
	byteswap_uint64_array((caddr_t)(lrattr + 1) + (sizeof (uint32_t) *
	    (lrattr->lr_attr_masksize - 1)), 3 * sizeof (uint64_t));
}

/*
 * Replay file create with optional ACL and xvattr information as well
 * as optional FUID information.
 */
static int
zfs_replay_create_acl(zfsvfs_t *zfsvfs,
    lr_acl_create_t *lracl, boolean_t byteswap)
{
	char *name = NULL;		/* location determined later */
	lr_create_t *lr = (lr_create_t *)lracl;
	znode_t *dzp;
	vnode_t *vp = NULL;
	xvattr_t xva;
	int vflg = 0;
	vsecattr_t vsec = { 0 };
	lr_attr_t *lrattr;
	void *aclstart;
	void *fuidstart;
	size_t xvatlen = 0;
	uint64_t txtype;
	int error;

	txtype = (lr->lr_common.lrc_txtype & ~TX_CI);
	if (byteswap) {
		byteswap_uint64_array(lracl, sizeof (*lracl));
		if (txtype == TX_CREATE_ACL_ATTR ||
		    txtype == TX_MKDIR_ACL_ATTR) {
			lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
			zfs_replay_swap_attrs(lrattr);
			xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
		}

		aclstart = (caddr_t)(lracl + 1) + xvatlen;
		zfs_ace_byteswap(aclstart, lracl->lr_acl_bytes, B_FALSE);
		/* swap fuids */
		if (lracl->lr_fuidcnt) {
			byteswap_uint64_array((caddr_t)aclstart +
			    ZIL_ACE_LENGTH(lracl->lr_acl_bytes),
			    lracl->lr_fuidcnt * sizeof (uint64_t));
		}
	}

	if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
		return (error);

	xva_init(&xva);
	zfs_init_vattr(&xva.xva_vattr, AT_TYPE | AT_MODE | AT_UID | AT_GID,
	    lr->lr_mode, lr->lr_uid, lr->lr_gid, lr->lr_rdev, lr->lr_foid);

	/*
	 * All forms of zfs create (create, mkdir, mkxattrdir, symlink)
	 * eventually end up in zfs_mknode(), which assigns the object's
	 * creation time and generation number.  The generic VOP_CREATE()
	 * doesn't have either concept, so we smuggle the values inside
	 * the vattr's otherwise unused va_ctime and va_nblocks fields.
	 */
	ZFS_TIME_DECODE(&xva.xva_vattr.va_ctime, lr->lr_crtime);
	xva.xva_vattr.va_nblocks = lr->lr_gen;

	error = dmu_object_info(zfsvfs->z_os, lr->lr_foid, NULL);
	if (error != ENOENT)
		goto bail;

	if (lr->lr_common.lrc_txtype & TX_CI)
		vflg |= FIGNORECASE;
	switch (txtype) {
	case TX_CREATE_ACL:
		aclstart = (caddr_t)(lracl + 1);
		fuidstart = (caddr_t)aclstart +
		    ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
		zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart,
		    (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
		    lr->lr_uid, lr->lr_gid);
		/*FALLTHROUGH*/
	case TX_CREATE_ACL_ATTR:
		if (name == NULL) {
			lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
			xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
			xva.xva_vattr.va_mask |= AT_XVATTR;
			zfs_replay_xvattr(lrattr, &xva);
		}
		vsec.vsa_mask = VSA_ACE | VSA_ACE_ACLFLAGS;
		vsec.vsa_aclentp = (caddr_t)(lracl + 1) + xvatlen;
		vsec.vsa_aclcnt = lracl->lr_aclcnt;
		vsec.vsa_aclentsz = lracl->lr_acl_bytes;
		vsec.vsa_aclflags = lracl->lr_acl_flags;
		if (zfsvfs->z_fuid_replay == NULL) {
			fuidstart = (caddr_t)(lracl + 1) + xvatlen +
			    ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
			zfsvfs->z_fuid_replay =
			    zfs_replay_fuids(fuidstart,
			    (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
			    lr->lr_uid, lr->lr_gid);
		}

#ifdef TODO
		error = VOP_CREATE(ZTOV(dzp), name, &xva.xva_vattr,
		    0, 0, &vp, kcred, vflg, NULL, &vsec);
#else
		panic("%s:%u: unsupported condition", __func__, __LINE__);
#endif
		break;
	case TX_MKDIR_ACL:
		aclstart = (caddr_t)(lracl + 1);
		fuidstart = (caddr_t)aclstart +
		    ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
		zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart,
		    (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
		    lr->lr_uid, lr->lr_gid);
		/*FALLTHROUGH*/
	case TX_MKDIR_ACL_ATTR:
		if (name == NULL) {
			lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
			xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
			zfs_replay_xvattr(lrattr, &xva);
		}
		vsec.vsa_mask = VSA_ACE | VSA_ACE_ACLFLAGS;
		vsec.vsa_aclentp = (caddr_t)(lracl + 1) + xvatlen;
		vsec.vsa_aclcnt = lracl->lr_aclcnt;
		vsec.vsa_aclentsz = lracl->lr_acl_bytes;
		vsec.vsa_aclflags = lracl->lr_acl_flags;
		if (zfsvfs->z_fuid_replay == NULL) {
			fuidstart = (caddr_t)(lracl + 1) + xvatlen +
			    ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
			zfsvfs->z_fuid_replay =
			    zfs_replay_fuids(fuidstart,
			    (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
			    lr->lr_uid, lr->lr_gid);
		}
#ifdef TODO
		error = VOP_MKDIR(ZTOV(dzp), name, &xva.xva_vattr,
		    &vp, kcred, NULL, vflg, &vsec);
#else
		panic("%s:%u: unsupported condition", __func__, __LINE__);
#endif
		break;
	default:
		error = SET_ERROR(ENOTSUP);
	}

bail:
	if (error == 0 && vp != NULL)
		VN_RELE(vp);

	VN_RELE(ZTOV(dzp));

	if (zfsvfs->z_fuid_replay)
		zfs_fuid_info_free(zfsvfs->z_fuid_replay);
	zfsvfs->z_fuid_replay = NULL;

	return (error);
}

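/*
 * Replay a plain create: TX_CREATE, TX_MKDIR, TX_MKXATTR and TX_SYMLINK,
 * plus the _ATTR variants that carry extra xvattr and FUID information.
 * The object is recreated through the normal VOP_CREATE/VOP_MKDIR/
 * VOP_SYMLINK paths with the logged mode, owner, creation time and
 * generation number.
 */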
static int
zfs_replay_create(zfsvfs_t *zfsvfs, lr_create_t *lr, boolean_t byteswap)
{
	char *name = NULL;		/* location determined later */
	char *link;			/* symlink content follows name */
	znode_t *dzp;
	vnode_t *vp = NULL;
	xvattr_t xva;
	int vflg = 0;
	size_t lrsize = sizeof (lr_create_t);
	lr_attr_t *lrattr;
	void *start;
	size_t xvatlen;
	uint64_t txtype;
	struct componentname cn;
	int error;

	txtype = (lr->lr_common.lrc_txtype & ~TX_CI);
	if (byteswap) {
		byteswap_uint64_array(lr, sizeof (*lr));
		if (txtype == TX_CREATE_ATTR || txtype == TX_MKDIR_ATTR)
			zfs_replay_swap_attrs((lr_attr_t *)(lr + 1));
	}

	if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
		return (error);

	xva_init(&xva);
	zfs_init_vattr(&xva.xva_vattr, AT_TYPE | AT_MODE | AT_UID | AT_GID,
	    lr->lr_mode, lr->lr_uid, lr->lr_gid, lr->lr_rdev, lr->lr_foid);

	/*
	 * All forms of zfs create (create, mkdir, mkxattrdir, symlink)
	 * eventually end up in zfs_mknode(), which assigns the object's
	 * creation time and generation number.  The generic VOP_CREATE()
	 * doesn't have either concept, so we smuggle the values inside
	 * the vattr's otherwise unused va_ctime and va_nblocks fields.
	 */
	ZFS_TIME_DECODE(&xva.xva_vattr.va_ctime, lr->lr_crtime);
	xva.xva_vattr.va_nblocks = lr->lr_gen;

	error = dmu_object_info(zfsvfs->z_os, lr->lr_foid, NULL);
	if (error != ENOENT)
		goto out;

	if (lr->lr_common.lrc_txtype & TX_CI)
		vflg |= FIGNORECASE;

	/*
	 * Symlinks don't have fuid info, and CIFS never creates
	 * symlinks.
	 *
	 * The _ATTR versions will grab the fuid info in their subcases.
	 */
	if ((int)lr->lr_common.lrc_txtype != TX_SYMLINK &&
	    (int)lr->lr_common.lrc_txtype != TX_MKDIR_ATTR &&
	    (int)lr->lr_common.lrc_txtype != TX_CREATE_ATTR) {
		start = (lr + 1);
		zfsvfs->z_fuid_replay =
		    zfs_replay_fuid_domain(start, &start,
		    lr->lr_uid, lr->lr_gid);
	}

	cn.cn_cred = kcred;
#ifndef __NetBSD__
	cn.cn_thread = curthread;
	cn.cn_flags = SAVENAME;
#endif

	vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY);
	switch (txtype) {
	case TX_CREATE_ATTR:
		lrattr = (lr_attr_t *)(caddr_t)(lr + 1);
		xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
		zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva);
		start = (caddr_t)(lr + 1) + xvatlen;
		zfsvfs->z_fuid_replay =
		    zfs_replay_fuid_domain(start, &start,
		    lr->lr_uid, lr->lr_gid);
		name = (char *)start;

		/*FALLTHROUGH*/
	case TX_CREATE:
		if (name == NULL)
			name = (char *)start;

		cn.cn_nameptr = name;
#ifdef __NetBSD__
		cn.cn_namelen = strlen(name);
#endif
		error = VOP_CREATE(ZTOV(dzp), &vp, &cn, &xva.xva_vattr /*,vflg*/);
		break;
	case TX_MKDIR_ATTR:
		lrattr = (lr_attr_t *)(caddr_t)(lr + 1);
		xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
		zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva);
		start = (caddr_t)(lr + 1) + xvatlen;
		zfsvfs->z_fuid_replay =
		    zfs_replay_fuid_domain(start, &start,
		    lr->lr_uid, lr->lr_gid);
		name = (char *)start;

		/*FALLTHROUGH*/
	case TX_MKDIR:
		if (name == NULL)
			name = (char *)(lr + 1);

		cn.cn_nameptr = name;
#ifdef __NetBSD__
		cn.cn_namelen = strlen(name);
#endif
		error = VOP_MKDIR(ZTOV(dzp), &vp, &cn, &xva.xva_vattr /*,vflg*/);
		break;
	case TX_MKXATTR:
		error = zfs_make_xattrdir(dzp, &xva.xva_vattr, &vp, kcred);
		break;
	case TX_SYMLINK:
		name = (char *)(lr + 1);
		link = name + strlen(name) + 1;
		cn.cn_nameptr = name;
#ifdef __NetBSD__
		cn.cn_namelen = strlen(name);
#endif
		error = VOP_SYMLINK(ZTOV(dzp), &vp, &cn, &xva.xva_vattr, link /*,vflg*/);
		break;
	default:
		error = SET_ERROR(ENOTSUP);
	}
	VOP_UNLOCK(ZTOV(dzp), 0);

out:
	if (error == 0 && vp != NULL)
#ifdef __NetBSD__
		VN_RELE(vp);
#else
		VN_URELE(vp);
#endif

	VN_RELE(ZTOV(dzp));

	if (zfsvfs->z_fuid_replay)
		zfs_fuid_info_free(zfsvfs->z_fuid_replay);
	zfsvfs->z_fuid_replay = NULL;
	return (error);
}

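/*
 * Replay TX_REMOVE and TX_RMDIR: look the name up in the parent directory
 * and remove the entry through VOP_REMOVE/VOP_RMDIR.
 */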
static int
zfs_replay_remove(zfsvfs_t *zfsvfs, lr_remove_t *lr, boolean_t byteswap)
{
	char *name = (char *)(lr + 1);	/* name follows lr_remove_t */
	znode_t *dzp;
	struct componentname cn;
	vnode_t *vp;
	int error;
	int vflg = 0;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
		return (error);

	if (lr->lr_common.lrc_txtype & TX_CI)
		vflg |= FIGNORECASE;
	cn.cn_nameptr = name;
	cn.cn_namelen = strlen(name);
	cn.cn_nameiop = DELETE;
	cn.cn_flags = ISLASTCN | SAVENAME;
	cn.cn_cred = kcred;
#ifndef __NetBSD__
	cn.cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
	cn.cn_thread = curthread;
#endif
	vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY);
	error = VOP_LOOKUP(ZTOV(dzp), &vp, &cn);
	if (error != 0) {
		VOP_UNLOCK(ZTOV(dzp), 0);
		goto fail;
	}
#ifdef __NetBSD__
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#endif

	switch ((int)lr->lr_common.lrc_txtype) {
	case TX_REMOVE:
		error = VOP_REMOVE(ZTOV(dzp), vp, &cn /*,vflg*/);
		break;
	case TX_RMDIR:
		error = VOP_RMDIR(ZTOV(dzp), vp, &cn /*,vflg*/);
		break;
	default:
#ifdef __NetBSD__
		vput(vp);
#endif
		error = SET_ERROR(ENOTSUP);
	}
#ifndef __NetBSD__
	vput(vp);
#endif
	VOP_UNLOCK(ZTOV(dzp), 0);

fail:
	VN_RELE(ZTOV(dzp));

	return (error);
}

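/*
 * Replay TX_LINK: add a new directory entry for an existing object via
 * VOP_LINK.
 */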
static int
zfs_replay_link(zfsvfs_t *zfsvfs, lr_link_t *lr, boolean_t byteswap)
{
	char *name = (char *)(lr + 1);	/* name follows lr_link_t */
	znode_t *dzp, *zp;
	struct componentname cn;
	int error;
	int vflg = 0;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
		return (error);

	if ((error = zfs_zget(zfsvfs, lr->lr_link_obj, &zp)) != 0) {
		VN_RELE(ZTOV(dzp));
		return (error);
	}

	if (lr->lr_common.lrc_txtype & TX_CI)
		vflg |= FIGNORECASE;

	cn.cn_nameptr = name;
#ifdef __NetBSD__
	cn.cn_namelen = strlen(name);
#endif
	cn.cn_cred = kcred;
#ifndef __NetBSD__
	cn.cn_thread = curthread;
	cn.cn_flags = SAVENAME;
#endif

	vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY);
#ifndef __NetBSD__
	vn_lock(ZTOV(zp), LK_EXCLUSIVE | LK_RETRY);
#endif
	error = VOP_LINK(ZTOV(dzp), ZTOV(zp), &cn /*,vflg*/);
#ifndef __NetBSD__
	VOP_UNLOCK(ZTOV(zp), 0);
#endif
	VOP_UNLOCK(ZTOV(dzp), 0);

	VN_RELE(ZTOV(zp));
	VN_RELE(ZTOV(dzp));

	return (error);
}

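/*
 * Replay TX_RENAME: look up the source and (optional) target entries in
 * their directories and redo the rename through VOP_RENAME, which consumes
 * the vnode references.
 */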
static int
zfs_replay_rename(zfsvfs_t *zfsvfs, lr_rename_t *lr, boolean_t byteswap)
{
	char *sname = (char *)(lr + 1);	/* sname and tname follow lr_rename_t */
	char *tname = sname + strlen(sname) + 1;
	znode_t *sdzp, *tdzp;
	struct componentname scn, tcn;
	vnode_t *svp, *tvp;
	kthread_t *td = curthread;
	int error;
	int vflg = 0;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zfsvfs, lr->lr_sdoid, &sdzp)) != 0)
		return (error);

	if ((error = zfs_zget(zfsvfs, lr->lr_tdoid, &tdzp)) != 0) {
		VN_RELE(ZTOV(sdzp));
		return (error);
	}

	if (lr->lr_common.lrc_txtype & TX_CI)
		vflg |= FIGNORECASE;
	svp = tvp = NULL;

	scn.cn_nameptr = sname;
	scn.cn_namelen = strlen(sname);
	scn.cn_nameiop = DELETE;
	scn.cn_flags = ISLASTCN | SAVENAME;
	scn.cn_cred = kcred;
#ifndef __NetBSD__
	scn.cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
	scn.cn_thread = td;
#endif
	vn_lock(ZTOV(sdzp), LK_EXCLUSIVE | LK_RETRY);
	error = VOP_LOOKUP(ZTOV(sdzp), &svp, &scn);
	VOP_UNLOCK(ZTOV(sdzp), 0);
	if (error != 0)
		goto fail;
#ifndef __NetBSD__
	VOP_UNLOCK(svp, 0);
#endif

	tcn.cn_nameptr = tname;
	tcn.cn_namelen = strlen(tname);
	tcn.cn_nameiop = RENAME;
	tcn.cn_flags = ISLASTCN | SAVENAME;
	tcn.cn_cred = kcred;
#ifndef __NetBSD__
	tcn.cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
	tcn.cn_thread = td;
#endif
	vn_lock(ZTOV(tdzp), LK_EXCLUSIVE | LK_RETRY);
	error = VOP_LOOKUP(ZTOV(tdzp), &tvp, &tcn);
	if (error == EJUSTRETURN)
		tvp = NULL;
	else if (error != 0) {
		VOP_UNLOCK(ZTOV(tdzp), 0);
		goto fail;
	}
#ifdef __NetBSD__
	if (tvp != NULL)
		vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY);
#endif

	error = VOP_RENAME(ZTOV(sdzp), svp, &scn, ZTOV(tdzp), tvp, &tcn /*,vflg*/);
	return (error);
fail:
	if (svp != NULL)
		vrele(svp);
	if (tvp != NULL)
		vrele(tvp);
	VN_RELE(ZTOV(tdzp));
	VN_RELE(ZTOV(sdzp));

	return (error);
}

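/*
 * Replay TX_WRITE: rewrite the logged data at the logged offset.  If the
 * record covers a whole dmu_sync() block, the full block is rewritten and
 * the intended end-of-file is passed to zfs_write() via z_replay_eof.
 */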
static int
zfs_replay_write(zfsvfs_t *zfsvfs, lr_write_t *lr, boolean_t byteswap)
{
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	znode_t *zp;
	int error;
	ssize_t resid;
	uint64_t eod, offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) {
		/*
		 * As we can log writes out of order, it's possible the
		 * file has been removed. In this case just drop the write
		 * and return success.
		 */
		if (error == ENOENT)
			error = 0;
		return (error);
	}

	offset = lr->lr_offset;
	length = lr->lr_length;
	eod = offset + length;	/* end of data for this write */

	/*
	 * This may be a write from a dmu_sync() for a whole block,
	 * and may extend beyond the current end of the file.
	 * We can't just replay what was written for this TX_WRITE as
	 * a future TX_WRITE2 may extend the eof and the data for that
	 * write needs to be there. So we write the whole block and
	 * reduce the eof. This needs to be done within the single dmu
	 * transaction created within vn_rdwr -> zfs_write. So a possible
	 * new end of file is passed through in zfsvfs->z_replay_eof.
	 */

	zfsvfs->z_replay_eof = 0; /* 0 means don't change end of file */

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
		if (zp->z_size < eod)
			zfsvfs->z_replay_eof = eod;
	}

	error = vn_rdwr(UIO_WRITE, ZTOV(zp), data, length, offset,
	    UIO_SYSSPACE, 0, RLIM64_INFINITY, kcred, &resid);

	VN_RELE(ZTOV(zp));
	zfsvfs->z_replay_eof = 0;	/* safety */

	return (error);
}

/*
 * TX_WRITE2 records are only generated when dmu_sync() returns EALREADY,
 * meaning the pool block is already being synced. So now that we always
 * write out full blocks, all we have to do is expand the eof if
 * the file is grown.
 */
static int
zfs_replay_write2(zfsvfs_t *zfsvfs, lr_write_t *lr, boolean_t byteswap)
{
	znode_t *zp;
	int error;
	uint64_t end;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
		return (error);

top:
	end = lr->lr_offset + lr->lr_length;
	if (end > zp->z_size) {
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);

		zp->z_size = end;
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			VN_RELE(ZTOV(zp));
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			dmu_tx_abort(tx);
			return (error);
		}
		(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
		    (void *)&zp->z_size, sizeof (uint64_t), tx);

		/* Ensure the replayed seq is updated */
		(void) zil_replaying(zfsvfs->z_log, tx);

		dmu_tx_commit(tx);
	}

	VN_RELE(ZTOV(zp));

	return (error);
}

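/*
 * Replay TX_TRUNCATE: free the logged byte range with VOP_SPACE(F_FREESP).
 * Only the illumos code path is implemented here; other platforms do not
 * expect to see this record type.
 */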
static int
zfs_replay_truncate(zfsvfs_t *zfsvfs, lr_truncate_t *lr, boolean_t byteswap)
{
#ifdef illumos
	znode_t *zp;
	flock64_t fl;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
		return (error);

	bzero(&fl, sizeof (fl));
	fl.l_type = F_WRLCK;
	fl.l_whence = 0;
	fl.l_start = lr->lr_offset;
	fl.l_len = lr->lr_length;

	error = VOP_SPACE(ZTOV(zp), F_FREESP, &fl, FWRITE | FOFFMAX,
	    lr->lr_offset, kcred, NULL);

	VN_RELE(ZTOV(zp));

	return (error);
#else
	ZFS_LOG(0, "Unexpected code path, report to pjd@FreeBSD.org");
	return (EOPNOTSUPP);
#endif
}

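/*
 * Replay TX_SETATTR: rebuild the (x)vattr and any FUID domain information
 * from the record and apply it with VOP_SETATTR.
 */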
static int
zfs_replay_setattr(zfsvfs_t *zfsvfs, lr_setattr_t *lr, boolean_t byteswap)
{
	znode_t *zp;
	xvattr_t xva;
	vattr_t *vap = &xva.xva_vattr;
	vnode_t *vp;
	int error;
	void *start;

	xva_init(&xva);
	if (byteswap) {
		byteswap_uint64_array(lr, sizeof (*lr));

		if ((lr->lr_mask & AT_XVATTR) &&
		    zfsvfs->z_version >= ZPL_VERSION_INITIAL)
			zfs_replay_swap_attrs((lr_attr_t *)(lr + 1));
	}

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
		return (error);

	zfs_init_vattr(vap, lr->lr_mask, lr->lr_mode,
	    lr->lr_uid, lr->lr_gid, 0, lr->lr_foid);

	vap->va_size = lr->lr_size;
	ZFS_TIME_DECODE(&vap->va_atime, lr->lr_atime);
	ZFS_TIME_DECODE(&vap->va_mtime, lr->lr_mtime);

	/*
	 * Fill in xvattr_t portions if necessary.
	 */

	start = (lr_setattr_t *)(lr + 1);
	if (vap->va_mask & AT_XVATTR) {
		zfs_replay_xvattr((lr_attr_t *)start, &xva);
		start = (caddr_t)start +
		    ZIL_XVAT_SIZE(((lr_attr_t *)start)->lr_attr_masksize);
	} else
		xva.xva_vattr.va_mask &= ~AT_XVATTR;

	zfsvfs->z_fuid_replay = zfs_replay_fuid_domain(start, &start,
	    lr->lr_uid, lr->lr_gid);

	vp = ZTOV(zp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_SETATTR(vp, vap, kcred);
	VOP_UNLOCK(vp, 0);

	zfs_fuid_info_free(zfsvfs->z_fuid_replay);
	zfsvfs->z_fuid_replay = NULL;
	VN_RELE(vp);

	return (error);
}

extern int zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
    caller_context_t *ct);

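/*
 * Replay TX_ACL_V0: apply an old-style (pre-FUID) ACE array with
 * zfs_setsecattr().
 */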
static int
zfs_replay_acl_v0(zfsvfs_t *zfsvfs, lr_acl_v0_t *lr, boolean_t byteswap)
{
	ace_t *ace = (ace_t *)(lr + 1);	/* ace array follows lr_acl_t */
	vsecattr_t vsa;
	vnode_t *vp;
	znode_t *zp;
	int error;

	if (byteswap) {
		byteswap_uint64_array(lr, sizeof (*lr));
		zfs_oldace_byteswap(ace, lr->lr_aclcnt);
	}

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
		return (error);

	bzero(&vsa, sizeof (vsa));
	vsa.vsa_mask = VSA_ACE | VSA_ACECNT;
	vsa.vsa_aclcnt = lr->lr_aclcnt;
	vsa.vsa_aclentsz = sizeof (ace_t) * vsa.vsa_aclcnt;
	vsa.vsa_aclflags = 0;
	vsa.vsa_aclentp = ace;

	vp = ZTOV(zp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = zfs_setsecattr(vp, &vsa, 0, kcred, NULL);
	VOP_UNLOCK(vp, 0);

	VN_RELE(vp);

	return (error);
}

/*
 * Replaying ACLs is complicated by FUID support.
 * The log record may contain some optional data
 * to be used for replaying FUIDs. These pieces
 * are the actual FUIDs that were created initially.
 * The FUID table index may no longer be valid and
 * during zfs_create() a new index may be assigned.
 * Because of this the log will contain the original
 * domain+rid in order to create a new FUID.
 *
 * The individual ACEs may contain an ephemeral uid/gid which is no
 * longer valid and will need to be replaced with an actual FUID.
 */
static int
zfs_replay_acl(zfsvfs_t *zfsvfs, lr_acl_t *lr, boolean_t byteswap)
{
	ace_t *ace = (ace_t *)(lr + 1);
	vsecattr_t vsa;
	znode_t *zp;
	vnode_t *vp;
	int error;

	if (byteswap) {
		byteswap_uint64_array(lr, sizeof (*lr));
		zfs_ace_byteswap(ace, lr->lr_acl_bytes, B_FALSE);
		if (lr->lr_fuidcnt) {
			byteswap_uint64_array((caddr_t)ace +
			    ZIL_ACE_LENGTH(lr->lr_acl_bytes),
			    lr->lr_fuidcnt * sizeof (uint64_t));
		}
	}

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
		return (error);

	bzero(&vsa, sizeof (vsa));
	vsa.vsa_mask = VSA_ACE | VSA_ACECNT | VSA_ACE_ACLFLAGS;
	vsa.vsa_aclcnt = lr->lr_aclcnt;
	vsa.vsa_aclentp = ace;
	vsa.vsa_aclentsz = lr->lr_acl_bytes;
	vsa.vsa_aclflags = lr->lr_acl_flags;

	if (lr->lr_fuidcnt) {
		void *fuidstart = (caddr_t)ace +
		    ZIL_ACE_LENGTH(lr->lr_acl_bytes);

		zfsvfs->z_fuid_replay =
		    zfs_replay_fuids(fuidstart, &fuidstart,
		    lr->lr_fuidcnt, lr->lr_domcnt, 0, 0);
	}

	vp = ZTOV(zp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = zfs_setsecattr(vp, &vsa, 0, kcred, NULL);
	VOP_UNLOCK(vp, 0);

	if (zfsvfs->z_fuid_replay)
		zfs_fuid_info_free(zfsvfs->z_fuid_replay);

	zfsvfs->z_fuid_replay = NULL;
	VN_RELE(vp);

	return (error);
}

/*
 * Callback vectors for replaying records
 */
zil_replay_func_t *zfs_replay_vector[TX_MAX_TYPE] = {
	zfs_replay_error,	/* 0 no such transaction type */
	zfs_replay_create,	/* TX_CREATE */
	zfs_replay_create,	/* TX_MKDIR */
	zfs_replay_create,	/* TX_MKXATTR */
	zfs_replay_create,	/* TX_SYMLINK */
	zfs_replay_remove,	/* TX_REMOVE */
	zfs_replay_remove,	/* TX_RMDIR */
	zfs_replay_link,	/* TX_LINK */
	zfs_replay_rename,	/* TX_RENAME */
	zfs_replay_write,	/* TX_WRITE */
	zfs_replay_truncate,	/* TX_TRUNCATE */
	zfs_replay_setattr,	/* TX_SETATTR */
	zfs_replay_acl_v0,	/* TX_ACL_V0 */
	zfs_replay_acl,		/* TX_ACL */
	zfs_replay_create_acl,	/* TX_CREATE_ACL */
	zfs_replay_create,	/* TX_CREATE_ATTR */
	zfs_replay_create_acl,	/* TX_CREATE_ACL_ATTR */
	zfs_replay_create_acl,	/* TX_MKDIR_ACL */
	zfs_replay_create,	/* TX_MKDIR_ATTR */
	zfs_replay_create_acl,	/* TX_MKDIR_ACL_ATTR */
	zfs_replay_write2,	/* TX_WRITE2 */
};