xref: /csrg-svn/bin/pax/buf_subs.c (revision 57193)
1*57193Smuller /*-
2*57193Smuller  * Copyright (c) 1992 Keith Muller.
3*57193Smuller  * Copyright (c) 1992 The Regents of the University of California.
4*57193Smuller  * All rights reserved.
5*57193Smuller  *
6*57193Smuller  * This code is derived from software contributed to Berkeley by
7*57193Smuller  * Keith Muller of the University of California, San Diego.
8*57193Smuller  *
9*57193Smuller  * %sccs.include.redist.c%
10*57193Smuller  */
11*57193Smuller 
12*57193Smuller #ifndef lint
13*57193Smuller static char sccsid[] = "@(#)buf_subs.c	1.1 (Berkeley) 12/17/92";
14*57193Smuller #endif /* not lint */
15*57193Smuller 
16*57193Smuller #include <sys/types.h>
17*57193Smuller #include <sys/time.h>
18*57193Smuller #include <sys/stat.h>
19*57193Smuller #include <sys/param.h>
20*57193Smuller #include <stdio.h>
21*57193Smuller #include <ctype.h>
22*57193Smuller #include <errno.h>
23*57193Smuller #include <unistd.h>
24*57193Smuller #include <stdlib.h>
25*57193Smuller #include <string.h>
26*57193Smuller #include "pax.h"
27*57193Smuller #include "extern.h"
28*57193Smuller 
29*57193Smuller /*
30*57193Smuller  * routines which implement archive and file buffering
31*57193Smuller  */
32*57193Smuller 
#define MAXFBSZ		4096		/* max file block size used when looking for holes */
#define MINFBSZ		512		/* min (fallback) block size for hole detection */
#define MAXFLT          10              /* default limit on consecutive media read errors */

/*
 * shared i/o buffer state. bufmem reserves BLKMULT bytes in front of the
 * normal buffer so pback() can push archive-id header bytes back without
 * overwriting buffered data.
 */
static char bufmem[MAXBLK+BLKMULT];	/* i/o buffer + pushback id space */
static char *buf;			/* normal start of i/o buffer */
static char *bufend;			/* end or last char in i/o buffer */
static char *bufpt;			/* read/write point in i/o buffer */
int blksz = MAXBLK;                    	/* block input/output size in bytes */
int wrblksz;                      	/* user specified output size in bytes (0 = none given) */
int maxflt = MAXFLT;			/* max consecutive media errors before giving up */
int rdblksz;				/* size of first read, the volume blocksize (tapes only) */
off_t wrlimit;				/* # of bytes written per archive vol */
off_t wrcnt;				/* # of bytes written on current vol */
off_t rdcnt;				/* # of bytes read on current vol */
49*57193Smuller /*
50*57193Smuller  * wr_start()
51*57193Smuller  *	set up the buffering system to operate in a write mode
52*57193Smuller  * Return:
53*57193Smuller  *	0 if ok, -1 if the user specified write block size violates pax spec
54*57193Smuller  */
55*57193Smuller 
56*57193Smuller #if __STDC__
57*57193Smuller int
58*57193Smuller wr_start(void)
59*57193Smuller #else
60*57193Smuller int
61*57193Smuller wr_start()
62*57193Smuller #endif
63*57193Smuller {
64*57193Smuller 	buf = &(bufmem[BLKMULT]);
65*57193Smuller 	/*
66*57193Smuller 	 * Check to make sure the write block size meets pax specs. If the user
67*57193Smuller 	 * does not specify a blocksize, we use the format default blocksize.
68*57193Smuller 	 * We must be picky on writes, so we do not allow the user to create an
69*57193Smuller 	 * archive that might be hard to read elsewhere. If all ok, we then
70*57193Smuller 	 * open the first archive volume
71*57193Smuller 	 */
72*57193Smuller 	if (!wrblksz)
73*57193Smuller 		wrblksz = frmt->bsz;
74*57193Smuller 	if (wrblksz > MAXBLK) {
75*57193Smuller 		warn(1, "Write block size of %d too large, maximium is: %d",
76*57193Smuller 			wrblksz, MAXBLK);
77*57193Smuller 		return(-1);
78*57193Smuller 	}
79*57193Smuller 	if (wrblksz % BLKMULT) {
80*57193Smuller 		warn(1, "Write block size of %d is not a %d byte multiple",
81*57193Smuller 		    wrblksz, BLKMULT);
82*57193Smuller 		return(-1);
83*57193Smuller 	}
84*57193Smuller 
85*57193Smuller 	/*
86*57193Smuller 	 * we only allow wrblksz to be used with all archive operations
87*57193Smuller 	 */
88*57193Smuller 	blksz = rdblksz = wrblksz;
89*57193Smuller 	if ((ar_open(arcname) < 0) && (ar_next() < 0))
90*57193Smuller 		return(-1);
91*57193Smuller 	wrcnt = 0;
92*57193Smuller 	bufend = buf + wrblksz;
93*57193Smuller 	bufpt = buf;
94*57193Smuller 	return(0);
95*57193Smuller }
96*57193Smuller 
97*57193Smuller /*
98*57193Smuller  * rd_start()
99*57193Smuller  *	set up buffering system to read an archive
100*57193Smuller  * Return:
101*57193Smuller  *	0 if ok, -1 otherwise
102*57193Smuller  */
103*57193Smuller 
104*57193Smuller #if __STDC__
105*57193Smuller int
106*57193Smuller rd_start(void)
107*57193Smuller #else
108*57193Smuller int
109*57193Smuller rd_start()
110*57193Smuller #endif
111*57193Smuller {
112*57193Smuller 	/*
113*57193Smuller 	 * leave space for the header pushback (see get_arc()). If we are
114*57193Smuller 	 * going to append and user specified a write block size, check it
115*57193Smuller 	 * right away
116*57193Smuller 	 */
117*57193Smuller 	buf = &(bufmem[BLKMULT]);
118*57193Smuller 	if ((act == APPND) && wrblksz) {
119*57193Smuller 		if (wrblksz > MAXBLK) {
120*57193Smuller 			warn(1,"Write block size %d too large, maximium is: %d",
121*57193Smuller 				wrblksz, MAXBLK);
122*57193Smuller 			return(-1);
123*57193Smuller 		}
124*57193Smuller 		if (wrblksz % BLKMULT) {
125*57193Smuller 			warn(1, "Write block size %d is not a %d byte multiple",
126*57193Smuller 		    	wrblksz, BLKMULT);
127*57193Smuller 			return(-1);
128*57193Smuller 		}
129*57193Smuller 	}
130*57193Smuller 
131*57193Smuller 	/*
132*57193Smuller 	 * open the archive
133*57193Smuller 	 */
134*57193Smuller 	if ((ar_open(arcname) < 0) && (ar_next() < 0))
135*57193Smuller 		return(-1);
136*57193Smuller 	bufend = buf + rdblksz;
137*57193Smuller 	bufpt = bufend;
138*57193Smuller 	rdcnt = 0;
139*57193Smuller 	return(0);
140*57193Smuller }
141*57193Smuller 
142*57193Smuller /*
143*57193Smuller  * cp_start()
144*57193Smuller  *	set up buffer system for copying within the file system
145*57193Smuller  */
146*57193Smuller 
147*57193Smuller #if __STDC__
148*57193Smuller void
149*57193Smuller cp_start(void)
150*57193Smuller #else
151*57193Smuller void
152*57193Smuller cp_start()
153*57193Smuller #endif
154*57193Smuller {
155*57193Smuller 	buf = &(bufmem[BLKMULT]);
156*57193Smuller 	rdblksz = blksz = MAXBLK;
157*57193Smuller }
158*57193Smuller 
/*
 * appnd_start()
 *	Set up the buffering system to append new members to an archive that
 *	was just read. The last block(s) of an archive may contain a format
 *	specific trailer. To append a new member, this trailer has to be
 *	removed from the archive. The first byte of the trailer is replaced by
 *	the start of the header of the first file added to the archive. The
 *	format specific end read function tells us how many bytes to move
 *	backwards in the archive to be positioned BEFORE the trailer. Two
 *	different positions have to be adjusted, the O.S. file offset (e.g. the
 *	position of the tape head) and the write point within the data we have
 *	stored in the read (soon to become write) buffer. We may have to move
 *	back several records (the number depends on the size of the archive
 *	record and the size of the format trailer) to read up the record where
 *	the first byte of the trailer is recorded. Trailers may span (and
 *	overlap) record boundaries.
 *	We first calculate which record has the first byte of the trailer. We
 *	move the OS file offset back to the start of this record and read it
 *	up. We set the buffer write pointer to be at this byte (the byte where
 *	the trailer starts). We then move the OS file pointer back to the
 *	start of this record so a flush of this buffer will replace the record
 *	in the archive.
 *	A major problem is rewriting this last record. For archives stored
 *	on disk files, this is trivial. However, many devices are really picky
 *	about the conditions under which they will allow a write to occur.
 *	Often devices restrict the conditions where writes can be made,
 *	so it may not be feasible to append archives stored on all types of
 *	devices.
 * Parameter:
 *	skcnt	bytes to back up to reach the first byte of the trailer
 *		(supplied by the format specific end-read function)
 * Return:
 *	0 for success, -1 for failure
 */

#if __STDC__
int
appnd_start(off_t skcnt)
#else
int
appnd_start(skcnt)
	off_t skcnt;
#endif
{
	register int res;
	off_t cnt;

	/* never append after an error: we would risk corrupting the archive */
	if (exit_val != 0)
		return(-1);
	/*
	 * if the user did not specify a write blocksize, inherit the size used
	 * in the last archive volume read. (If one is set we still use rdblksz
	 * until the next volume, cannot shift sizes within a single volume).
	 */
	if (!wrblksz)
		wrblksz = blksz = rdblksz;
	else
		blksz = rdblksz;

	/*
	 * make sure that this volume allows appends
	 */
	if (ar_app_ok() < 0)
		return(-1);

	/*
	 * Calculate bytes to move back and move in front of record where we
	 * need to start writing from. Remember we have to add in any padding
	 * that might be in the buffer after the trailer in the last block. We
	 * travel skcnt + padding ROUNDED UP to blksize.
	 */
	skcnt += bufend - bufpt;
	if ((cnt = (skcnt/blksz) * blksz) < skcnt)
		cnt += blksz;
	if (ar_rev((off_t)cnt) < 0)
		goto out;

	/*
	 * We may have gone too far if there is valid data in the block we are
	 * now in front of, read up the block and position the pointer after
	 * the valid data.
	 */
	if ((cnt -= skcnt) > 0) {
		/*
		 * watch out for stupid tape drives. ar_rev() will set rdblksz
		 * to be real physical blocksize so we must loop until we get
		 * the old rdblksz (now in blksz). If ar_rev() fouls up the
		 * determination of the physical block size, we will fail.
		 */
		bufpt = buf;
		bufend = buf + blksz;
		while (bufpt < bufend) {
			if ((res = ar_read(bufpt, rdblksz)) <= 0)
				goto out;
			bufpt += res;
		}
		/* back the OS offset up so a flush overwrites this record */
		if (ar_rev((off_t)(bufpt - buf)) < 0)
			goto out;
		/* write point sits on the first trailer byte */
		bufpt = buf + cnt;
		bufend = buf + blksz;
	} else {
		/*
		 * buffer is empty
		 */
		bufend = buf + blksz;
		bufpt = buf;
	}
	rdblksz = blksz;
	/* bytes we backed over were not really consumed from the volume */
	rdcnt -= skcnt;
	wrcnt = 0;

	/*
	 * At this point we are ready to write. If the device requires special
	 * handling to write at a point were previously recorded data resides,
	 * that is handled in ar_set_wr(). From now on we operate under normal
	 * ARCHIVE mode (write) conditions
	 */
	if (ar_set_wr() < 0)
		return(-1);
	act = ARCHIVE;
	return(0);

    out:
	warn(1, "Unable to position at the end of the archive, append failed");
	return(-1);
}
282*57193Smuller 
/*
 * rd_sync()
 *	A read error occurred on this archive volume. Resync the buffer and
 *	try to reset the device (if possible) so we can continue to read. Keep
 *	trying to do this until we get a valid read, or we reach the limit on
 *	consecutive read faults (at which point we give up). The user can
 *	adjust the read error limit through a command line option.
 * Returns:
 *	0 on success, and -1 on failure
 */

#if __STDC__
int
rd_sync(void)
#else
int
rd_sync()
#endif
{
	register int errcnt = 0;	/* consecutive faults on this volume */
	register int res;

	/*
	 * if the user says bail out on first fault, we are out of here...
	 */
	if (maxflt == 0)
		return(-1);
	/* appending past damaged media would produce a corrupt archive */
	if (act == APPND) {
		warn(1, "Unable to append when there are archive read errors.");
		return(-1);
	}

	/*
	 * poke at device and try to get past media error; if that fails,
	 * fall back to asking the user for the next volume
	 */
	if (ar_rdsync() < 0) {
		if (ar_next() < 0)
			return(-1);
		else
			rdcnt = 0;
	}

	for (;;) {
		if ((res = ar_read(buf, blksz)) > 0) {
			/*
			 * All right! got some data, fill that buffer
			 */
			bufpt = buf;
			bufend = buf + res;
			rdcnt += res;
			return(0);
		}

		/*
		 * Oh well, yet another failed read...
		 * if error limit reached, ditch. o.w. poke device to move past
		 * bad media and try again. if media is badly damaged, we ask
		 * the poor (and upset user at this point) for the next archive
		 * volume. remember the goal on reads is to get the most we
		 * can extract out of the archive.
		 * (maxflt < 0 means no limit; error count never trips)
		 */
		if ((maxflt > 0) && (++errcnt > maxflt))
			warn(1,"Archive read error limit (%d) reached",maxflt);
		else if (ar_rdsync() == 0)
			continue;
		if (ar_next() < 0)
			break;
		/* fresh volume: reset per-volume byte and error counts */
		rdcnt = 0;
		errcnt = 0;
	}
	return(-1);
}
355*57193Smuller 
356*57193Smuller /*
357*57193Smuller  * pback()
358*57193Smuller  *	push the data used during the archive id phase back into the I/O
359*57193Smuller  *	buffer. This is required as we cannot be sure that the header does NOT
360*57193Smuller  *	overlap a block boundry (as in the case we are trying to recover a
361*57193Smuller  *	flawed archived). This was not designed to be used for any other
362*57193Smuller  *	purpose. (What software engineering, HA!)
363*57193Smuller  *	WARNING: do not even THINK of pback greater than BLKMULT, unless the
364*57193Smuller  *	pback space is increased.
365*57193Smuller  */
366*57193Smuller 
367*57193Smuller #if __STDC__
368*57193Smuller void
369*57193Smuller pback(char *pt, int cnt)
370*57193Smuller #else
371*57193Smuller void
372*57193Smuller pback(pt, cnt)
373*57193Smuller 	char *pt;
374*57193Smuller 	int cnt;
375*57193Smuller #endif
376*57193Smuller {
377*57193Smuller 	bufpt -= cnt;
378*57193Smuller 	bcopy(pt, bufpt, cnt);
379*57193Smuller 	return;
380*57193Smuller }
381*57193Smuller 
382*57193Smuller /*
383*57193Smuller  * rd_skip()
384*57193Smuller  *	skip foward in the archive during a archive read. Used to get quickly
385*57193Smuller  *	past file data and padding for files the user did NOT select.
386*57193Smuller  * Return:
387*57193Smuller  *	0 if ok, -1 failure, and 1 when EOF on the archive volume was detected.
388*57193Smuller  */
389*57193Smuller 
390*57193Smuller #if __STDC__
391*57193Smuller int
392*57193Smuller rd_skip(off_t skcnt)
393*57193Smuller #else
394*57193Smuller int
395*57193Smuller rd_skip(skcnt)
396*57193Smuller 	off_t skcnt;
397*57193Smuller #endif
398*57193Smuller {
399*57193Smuller 	off_t res;
400*57193Smuller 	off_t cnt;
401*57193Smuller 	off_t skipped = 0;
402*57193Smuller 
403*57193Smuller 	/*
404*57193Smuller 	 * consume what data we have in the buffer. If we have to move foward
405*57193Smuller 	 * whole records, we call the low level skip function to see if we can
406*57193Smuller 	 * move within the archive without doing the expensive reads on data we
407*57193Smuller 	 * do not want.
408*57193Smuller 	 */
409*57193Smuller 	if (skcnt == 0)
410*57193Smuller 		return(0);
411*57193Smuller 	res = MIN((bufend - bufpt), skcnt);
412*57193Smuller 	bufpt += res;
413*57193Smuller 	skcnt -= res;
414*57193Smuller 
415*57193Smuller 	/*
416*57193Smuller 	 * if skcnt is now 0, then no additional i/o is needed
417*57193Smuller 	 */
418*57193Smuller 	if (skcnt == 0)
419*57193Smuller 		return(0);
420*57193Smuller 
421*57193Smuller 	/*
422*57193Smuller 	 * We have to read more, calculate complete and partial record reads
423*57193Smuller 	 * based on rdblksz. we skip over "cnt" complete records
424*57193Smuller 	 */
425*57193Smuller 	res = skcnt%rdblksz;
426*57193Smuller 	cnt = (skcnt/rdblksz) * rdblksz;
427*57193Smuller 
428*57193Smuller 	/*
429*57193Smuller 	 * if the skip fails, we will have to resync. ar_fow will tell us
430*57193Smuller 	 * how much it can skip over. We will have to read the rest.
431*57193Smuller 	 */
432*57193Smuller 	if (ar_fow(cnt, &skipped) < 0)
433*57193Smuller 		return(-1);
434*57193Smuller 	res += cnt - skipped;
435*57193Smuller 	rdcnt += skipped;
436*57193Smuller 
437*57193Smuller 	/*
438*57193Smuller 	 * what is left we have to read (which may be the whole thing if
439*57193Smuller 	 * ar_fow() told us the device can only read to skip records);
440*57193Smuller 	 */
441*57193Smuller 	while (res > 0L) {
442*57193Smuller 		cnt = bufend - bufpt;
443*57193Smuller 		/*
444*57193Smuller 		 * if the read fails, we will have to resync
445*57193Smuller 		 */
446*57193Smuller 		if ((cnt <= 0) && ((cnt = buf_fill()) < 0))
447*57193Smuller 			return(-1);
448*57193Smuller 		if (cnt == 0)
449*57193Smuller 			return(1);
450*57193Smuller 		cnt = MIN(cnt, res);
451*57193Smuller 		bufpt += cnt;
452*57193Smuller 		res -= cnt;
453*57193Smuller 	}
454*57193Smuller 	return(0);
455*57193Smuller }
456*57193Smuller 
457*57193Smuller /*
458*57193Smuller  * wr_fin()
459*57193Smuller  *	flush out any data (and pad if required) the last block. We always pad
460*57193Smuller  *	with zero (even though we do not have to). Padding with 0 makes it a
461*57193Smuller  *	lot easier to recover if the archive is damaged. zero paddding SHOULD
462*57193Smuller  *	BE a requirement....
463*57193Smuller  */
464*57193Smuller 
465*57193Smuller #if __STDC__
466*57193Smuller void
467*57193Smuller wr_fin(void)
468*57193Smuller #else
469*57193Smuller void
470*57193Smuller wr_fin()
471*57193Smuller #endif
472*57193Smuller {
473*57193Smuller 	if (bufpt > buf) {
474*57193Smuller 		bzero(bufpt, bufend - bufpt);
475*57193Smuller 		bufpt = bufend;
476*57193Smuller 		(void)buf_flush(blksz);
477*57193Smuller 	}
478*57193Smuller }
479*57193Smuller 
480*57193Smuller /*
481*57193Smuller  * wr_rdbuf()
482*57193Smuller  *	fill the write buffer from data passed to it in a buffer (usually used
483*57193Smuller  *	by format specific write routines to pass a file header). On failure we
484*57193Smuller  *	punt. We do not allow the user to continue to write flawed archives.
485*57193Smuller  *	We assume these headers are not very large (the memory copy we use is
486*57193Smuller  *	a bit expensive).
487*57193Smuller  * Return:
488*57193Smuller  *	0 if buffer was filled ok, -1 o.w. (buffer flush failure)
489*57193Smuller  */
490*57193Smuller 
491*57193Smuller #if __STDC__
492*57193Smuller int
493*57193Smuller wr_rdbuf(register char *out, register int outcnt)
494*57193Smuller #else
495*57193Smuller int
496*57193Smuller wr_rdbuf(out, outcnt)
497*57193Smuller 	register char *out;
498*57193Smuller 	register int outcnt;
499*57193Smuller #endif
500*57193Smuller {
501*57193Smuller 	register int cnt;
502*57193Smuller 
503*57193Smuller 	/*
504*57193Smuller 	 * while there is data to copy copy into the write buffer. when the
505*57193Smuller 	 * write buffer fills, flush it to the archive and continue
506*57193Smuller 	 */
507*57193Smuller 	while (outcnt > 0) {
508*57193Smuller 		cnt = bufend - bufpt;
509*57193Smuller 		if ((cnt <= 0) && ((cnt = buf_flush(blksz)) < 0))
510*57193Smuller 			return(-1);
511*57193Smuller 		/*
512*57193Smuller 		 * only move what we have space for
513*57193Smuller 		 */
514*57193Smuller 		cnt = MIN(cnt, outcnt);
515*57193Smuller 		bcopy(out, bufpt, cnt);
516*57193Smuller 		bufpt += cnt;
517*57193Smuller 		out += cnt;
518*57193Smuller 		outcnt -= cnt;
519*57193Smuller 	}
520*57193Smuller 	return(0);
521*57193Smuller }
522*57193Smuller 
523*57193Smuller /*
524*57193Smuller  * rd_wrbuf()
525*57193Smuller  *	copy from the read buffer into a supplied buffer a specified number of
526*57193Smuller  *	bytes. If the read buffer is empty fill it and continue to copy.
527*57193Smuller  *	usually used to obtain a file header for processing by a format
528*57193Smuller  *	specific read routine.
529*57193Smuller  * Return
530*57193Smuller  *	number of bytes copied to the buffer, 0 indicates EOF on archive volume,
531*57193Smuller  *	-1 is a read error
532*57193Smuller  */
533*57193Smuller 
534*57193Smuller #if __STDC__
535*57193Smuller int
536*57193Smuller rd_wrbuf(register char *in, register int cpcnt)
537*57193Smuller #else
538*57193Smuller int
539*57193Smuller rd_wrbuf(in, cpcnt)
540*57193Smuller 	register char *in;
541*57193Smuller 	register int cpcnt;
542*57193Smuller #endif
543*57193Smuller {
544*57193Smuller 	register int res;
545*57193Smuller 	register int cnt;
546*57193Smuller 	register int incnt = cpcnt;
547*57193Smuller 
548*57193Smuller 	/*
549*57193Smuller 	 * loop until we fill the buffer with the requested number of bytes
550*57193Smuller 	 */
551*57193Smuller 	while (incnt > 0) {
552*57193Smuller 		cnt = bufend - bufpt;
553*57193Smuller 		if ((cnt <= 0) && ((cnt = buf_fill()) <= 0)) {
554*57193Smuller 			/*
555*57193Smuller 			 * read error, return what we got (or the error if
556*57193Smuller 			 * no data was copied). The caller must know that an
557*57193Smuller 			 * error occured and has the best knowledge what to
558*57193Smuller 			 * do with it
559*57193Smuller 			 */
560*57193Smuller 			if ((res = cpcnt - incnt) > 0)
561*57193Smuller 				return(res);
562*57193Smuller 			return(cnt);
563*57193Smuller 		}
564*57193Smuller 
565*57193Smuller 		/*
566*57193Smuller 		 * calculate how much data to copy based on whats left and
567*57193Smuller 		 * state of buffer
568*57193Smuller 		 */
569*57193Smuller 		cnt = MIN(cnt, incnt);
570*57193Smuller 		bcopy(bufpt, in, cnt);
571*57193Smuller 		bufpt += cnt;
572*57193Smuller 		incnt -= cnt;
573*57193Smuller 		in += cnt;
574*57193Smuller 	}
575*57193Smuller 	return(cpcnt);
576*57193Smuller }
577*57193Smuller 
578*57193Smuller /*
579*57193Smuller  * wr_skip()
580*57193Smuller  *	skip foward during a write. In other words add padding to the file.
581*57193Smuller  *	we add zero filled padding as it makes flawed archives much easier to
582*57193Smuller  *	recover from. the caller tells us how many bytes of padding to add
583*57193Smuller  *	This routine was not designed to add HUGE amount of padding, just small
584*57193Smuller  *	amounts (a few 512 byte blocks at most)
585*57193Smuller  * Return:
586*57193Smuller  *	0 if ok, -1 if there was a buf_flush failure
587*57193Smuller  */
588*57193Smuller 
589*57193Smuller #if __STDC__
590*57193Smuller int
591*57193Smuller wr_skip(off_t skcnt)
592*57193Smuller #else
593*57193Smuller int
594*57193Smuller wr_skip(skcnt)
595*57193Smuller 	off_t skcnt;
596*57193Smuller #endif
597*57193Smuller {
598*57193Smuller 	register int cnt;
599*57193Smuller 
600*57193Smuller 	/*
601*57193Smuller 	 * loop while there is more padding to add
602*57193Smuller 	 */
603*57193Smuller 	while (skcnt > 0L) {
604*57193Smuller 		cnt = bufend - bufpt;
605*57193Smuller 		if ((cnt <= 0) && ((cnt = buf_flush(blksz)) < 0))
606*57193Smuller 			return(-1);
607*57193Smuller 		cnt = MIN(cnt, skcnt);
608*57193Smuller 		bzero(bufpt, cnt);
609*57193Smuller 		bufpt += cnt;
610*57193Smuller 		skcnt -= cnt;
611*57193Smuller 	}
612*57193Smuller 	return(0);
613*57193Smuller }
614*57193Smuller 
615*57193Smuller /*
616*57193Smuller  * wr_rdfile()
617*57193Smuller  *	fill write buffer with the contents of a file. We are passed an	open
618*57193Smuller  *	file descriptor to the file an the archive structure that describes the
619*57193Smuller  *	file we are storing. The variable "left" is modified to contain the
620*57193Smuller  *	number of bytes of the file we were NOT able to write to the archive.
621*57193Smuller  *	it is important that we always write EXACTLY the number of bytes that
622*57193Smuller  *	the format specific write routine told us to. The file can also get
623*57193Smuller  *	bigger, so reading to the end of file would create an improper archive,
624*57193Smuller  *	we just detect this case and warn the user. We never create a bad
625*57193Smuller  *	archive if we can avoid it. Of course trying to archive files that are
626*57193Smuller  *	active is asking for trouble. It we fail, we pass back how much we
627*57193Smuller  *	could NOT copy and let the caller deal with it.
628*57193Smuller  * Return:
629*57193Smuller  *	0 ok, -1 if archive write failure. a short read of the file returns a
630*57193Smuller  *	0, but "left" is set to be greater than zero.
631*57193Smuller  */
632*57193Smuller 
633*57193Smuller #if __STDC__
634*57193Smuller int
635*57193Smuller wr_rdfile(ARCHD *arcn, int ifd, off_t *left)
636*57193Smuller #else
637*57193Smuller int
638*57193Smuller wr_rdfile(arcn, ifd, left)
639*57193Smuller 	ARCHD *arcn;
640*57193Smuller 	int ifd;
641*57193Smuller 	off_t *left;
642*57193Smuller #endif
643*57193Smuller {
644*57193Smuller 	register int cnt;
645*57193Smuller 	register int res = 0;
646*57193Smuller 	register off_t size = arcn->sb.st_size;
647*57193Smuller 	struct stat sb;
648*57193Smuller 
649*57193Smuller 	/*
650*57193Smuller 	 * while there are more bytes to write
651*57193Smuller 	 */
652*57193Smuller 	while (size > 0L) {
653*57193Smuller 		cnt = bufend - bufpt;
654*57193Smuller 		if ((cnt <= 0) && ((cnt = buf_flush(blksz)) < 0)) {
655*57193Smuller 			*left = size;
656*57193Smuller 			return(-1);
657*57193Smuller 		}
658*57193Smuller 		cnt = MIN(cnt, size);
659*57193Smuller 		if ((res = read(ifd, bufpt, cnt)) <= 0)
660*57193Smuller 			break;
661*57193Smuller 		size -= res;
662*57193Smuller 		bufpt += res;
663*57193Smuller 	}
664*57193Smuller 
665*57193Smuller 	/*
666*57193Smuller 	 * better check the file did not change during this operation
667*57193Smuller 	 * or the file read failed.
668*57193Smuller 	 */
669*57193Smuller 	if (res < 0)
670*57193Smuller 		syswarn(1, errno, "Read fault on %s", arcn->org_name);
671*57193Smuller 	else if (size != 0L)
672*57193Smuller 		warn(1, "File changed size during read %s", arcn->org_name);
673*57193Smuller 	else if (fstat(ifd, &sb) < 0)
674*57193Smuller 		syswarn(1, errno, "Failed stat on %s", arcn->org_name);
675*57193Smuller 	else if (arcn->sb.st_mtime != sb.st_mtime)
676*57193Smuller 		warn(1, "File %s was modified during copy to archive",
677*57193Smuller 			arcn->org_name);
678*57193Smuller 	*left = size;
679*57193Smuller 	return(0);
680*57193Smuller }
681*57193Smuller 
/*
 * rd_wrfile()
 *	extract the contents of a file from the archive. If we are unable to
 *	extract the entire file (due to failure to write the file) we return
 *	the numbers of bytes we did NOT process. This way the caller knows how
 *	many bytes to skip past to find the next archive header. If the failure
 *	was due to an archive read, we will catch that when we try to skip. If
 *	the format supplies a file data crc value, we calculate the actual crc
 *	so that it can be compared to the value stored in the header
 * NOTE:
 *	We call a special function to write the file. This function attempts to
 *	restore file holes (blocks of zeros) into the file. When files are
 *	sparse this saves space, and is a LOT faster. For non sparse files
 *	the performance hit is small. As of this writing, no archive supports
 *	information on where the file holes are.
 * Return:
 *	0 ok, -1 if archive read failure. if we cannot write the entire file,
 *	we return a 0 but "left" is set to be the amount unwritten
 */

#if __STDC__
int
rd_wrfile(ARCHD *arcn, int ofd, off_t *left)
#else
int
rd_wrfile(arcn, ofd, left)
	ARCHD *arcn;
	int ofd;
	off_t *left;
#endif
{
	register int cnt = 0;
	register off_t size = arcn->sb.st_size;
	register int res = 0;
	register char *fnm = arcn->name;
	int isem = 1;		/* in/out flag: 1 while the tail of the file is all zeros */
	int rem;		/* bytes remaining in the current hole-detection block */
	int sz = MINFBSZ;	/* hole detection block size */
 	struct stat sb;
	u_long crc = 0L;	/* running crc (simple byte sum) when docrc is set */

	/*
	 * pass the blocksize of the file being written to the write routine,
	 * if an odd size, use the default MINFBSZ
	 */
        if (fstat(ofd, &sb) == 0) {
		if ((sb.st_blksize > 0) && (sb.st_blksize <= MAXFBSZ))
			sz = (int)sb.st_blksize;
        } else
                syswarn(0,errno,"Unable to obtain block size for file %s",fnm);
	rem = sz;
	*left = 0L;

	/*
	 * Copy the archive to the file the number of bytes specified. We have
	 * to assume that we want to recover file holes as none of the archive
	 * formats can record the location of file holes.
	 */
	while (size > 0L) {
		cnt = bufend - bufpt;
		/*
		 * if we get a read error, we do not want to skip, as we may
		 * miss a header, so we do not set left, but if we get a write
		 * error, we do want to skip over the unprocessed data.
		 */
		if ((cnt <= 0) && ((cnt = buf_fill()) <= 0))
			break;
		cnt = MIN(cnt, size);
		if ((res = file_write(ofd,bufpt,cnt,&rem,&isem,sz,fnm)) <= 0) {
			*left = size;
			break;
		}

		if (docrc) {
			/*
			 * update the actual crc value (byte sum of the data
			 * just written, advancing bufpt as we go)
			 */
			cnt = res;
			while (--cnt >= 0)
				crc += *bufpt++ & 0xff;
		} else
			bufpt += res;
		size -= res;
	}

	/*
	 * if the last block has a file hole (all zero), we must make sure this
	 * gets updated in the file. We force the last block of zeros to be
	 * written. just closing with the file offset moved forward may not put
	 * a hole at the end of the file.
	 */
	if (isem && (arcn->sb.st_size > 0L))
		file_flush(ofd, fnm, isem);

	/*
	 * if we failed from archive read, we do not want to skip
	 * (left was only set on a write failure above)
	 */
	if ((size > 0L) && (*left == 0L))
		return(-1);

	/*
	 * some formats record a crc on file data. If so, then we compare the
	 * calculated crc to the crc stored in the archive
	 */
	if (docrc && (size == 0L) && (arcn->crc != crc))
		warn(1,"Actual crc does not match expected crc %s",arcn->name);
	return(0);
}
790*57193Smuller 
791*57193Smuller /*
792*57193Smuller  * cp_file()
793*57193Smuller  *	copy the contents of one file to another. used during -rw phase of pax
794*57193Smuller  *	just as in rd_wrfile() we use a special write function to write the
795*57193Smuller  *	destination file so we can properly copy files with holes.
796*57193Smuller  */
797*57193Smuller 
798*57193Smuller #if __STDC__
799*57193Smuller void
800*57193Smuller cp_file(ARCHD *arcn, int fd1, int fd2)
801*57193Smuller #else
802*57193Smuller void
803*57193Smuller cp_file(arcn, fd1, fd2)
804*57193Smuller 	ARCHD *arcn;
805*57193Smuller 	int fd1;
806*57193Smuller 	int fd2;
807*57193Smuller #endif
808*57193Smuller {
809*57193Smuller 	register int cnt;
810*57193Smuller 	register off_t cpcnt = 0L;
811*57193Smuller 	register int res = 0;
812*57193Smuller 	register char *fnm = arcn->name;
813*57193Smuller 	register int no_hole = 0;
814*57193Smuller 	int isem = 1;
815*57193Smuller 	int rem;
816*57193Smuller 	int sz = MINFBSZ;
817*57193Smuller 	struct stat sb;
818*57193Smuller 
819*57193Smuller 	/*
820*57193Smuller 	 * check for holes in the source file. If none, we will use regular
821*57193Smuller 	 * write instead of file write.
822*57193Smuller 	 */
823*57193Smuller 	 if (((off_t)(arcn->sb.st_blocks * BLKMULT)) >= arcn->sb.st_size)
824*57193Smuller 		++no_hole;
825*57193Smuller 
826*57193Smuller 	/*
827*57193Smuller 	 * pass the blocksize of the file being written to the write routine,
828*57193Smuller 	 * if an odd size, use the default MINFBSZ
829*57193Smuller 	 */
830*57193Smuller         if (fstat(fd2, &sb) == 0) {
831*57193Smuller 		if ((sb.st_blksize > 0) && (sb.st_blksize <= MAXFBSZ))
832*57193Smuller 			sz = sb.st_blksize;
833*57193Smuller         } else
834*57193Smuller                 syswarn(0,errno,"Unable to obtain block size for file %s",fnm);
835*57193Smuller 	rem = sz;
836*57193Smuller 
837*57193Smuller 	/*
838*57193Smuller 	 * read the source file and copy to destination file until EOF
839*57193Smuller 	 */
840*57193Smuller 	for(;;) {
841*57193Smuller 		if ((cnt = read(fd1, buf, blksz)) <= 0)
842*57193Smuller 			break;
843*57193Smuller 		if (no_hole)
844*57193Smuller 			res = write(fd2, buf, cnt);
845*57193Smuller 		else
846*57193Smuller 			res = file_write(fd2, buf, cnt, &rem, &isem, sz, fnm);
847*57193Smuller 		if (res != cnt)
848*57193Smuller 			break;
849*57193Smuller 		cpcnt += cnt;
850*57193Smuller 	}
851*57193Smuller 
852*57193Smuller 	/*
853*57193Smuller 	 * check to make sure the copy is valid.
854*57193Smuller 	 */
855*57193Smuller 	if (res < 0)
856*57193Smuller 		syswarn(1, errno, "Failed write during copy of %s to %s",
857*57193Smuller 			arcn->org_name, arcn->name);
858*57193Smuller 	else if (cpcnt != arcn->sb.st_size)
859*57193Smuller 		warn(1, "File %s changed size during copy to %s",
860*57193Smuller 			arcn->org_name, arcn->name);
861*57193Smuller 	else if (fstat(fd1, &sb) < 0)
862*57193Smuller 		syswarn(1, errno, "Failed stat of %s", arcn->org_name);
863*57193Smuller 	else if (arcn->sb.st_mtime != sb.st_mtime)
864*57193Smuller 		warn(1, "File %s was modified during copy to %s",
865*57193Smuller 			arcn->org_name, arcn->name);
866*57193Smuller 
867*57193Smuller 	/*
868*57193Smuller 	 * if the last block has a file hole (all zero), we must make sure this
869*57193Smuller 	 * gets updated in the file. We force the last block of zeros to be
870*57193Smuller 	 * written. just closing with the file offset moved foward may not put
871*57193Smuller 	 * a hole at the end of the file.
872*57193Smuller 	 */
873*57193Smuller 	if (!no_hole && isem && (arcn->sb.st_size > 0L))
874*57193Smuller 		file_flush(fd2, fnm, isem);
875*57193Smuller 	return;
876*57193Smuller }
877*57193Smuller 
878*57193Smuller /*
879*57193Smuller  * buf_fill()
880*57193Smuller  *	fill the read buffer with the next record (or what we can get) from
881*57193Smuller  *	the archive volume.
882*57193Smuller  * Return:
883*57193Smuller  *	Number of bytes of data in the read buffer, -1 for read error, and
884*57193Smuller  *	0 when finished (user specified termination in ar_next()).
885*57193Smuller  */
886*57193Smuller 
887*57193Smuller #if __STDC__
888*57193Smuller int
889*57193Smuller buf_fill(void)
890*57193Smuller #else
891*57193Smuller int
892*57193Smuller buf_fill()
893*57193Smuller #endif
894*57193Smuller {
895*57193Smuller 	register int cnt;
896*57193Smuller 	static int fini = 0;
897*57193Smuller 
898*57193Smuller 	if (fini)
899*57193Smuller 		return(0);
900*57193Smuller 
901*57193Smuller 	for(;;) {
902*57193Smuller 		/*
903*57193Smuller 		 * try to fill the buffer. on error the next archive volume is
904*57193Smuller 		 * opened and we try again.
905*57193Smuller 		 */
906*57193Smuller 		if ((cnt = ar_read(buf, blksz)) > 0) {
907*57193Smuller 			bufpt = buf;
908*57193Smuller 			bufend = buf + cnt;
909*57193Smuller 			rdcnt += cnt;
910*57193Smuller 			return(cnt);
911*57193Smuller 		}
912*57193Smuller 
913*57193Smuller 		/*
914*57193Smuller 		 * errors require resync, EOF goes to next archive
915*57193Smuller 		 */
916*57193Smuller 		if (cnt < 0)
917*57193Smuller 			break;
918*57193Smuller 		if (ar_next() < 0) {
919*57193Smuller 			fini = 1;
920*57193Smuller 			return(0);
921*57193Smuller 		}
922*57193Smuller 		rdcnt = 0;
923*57193Smuller 	}
924*57193Smuller 	exit_val = 1;
925*57193Smuller 	return(-1);
926*57193Smuller }
927*57193Smuller 
928*57193Smuller /*
929*57193Smuller  * buf_flush()
930*57193Smuller  *	force the write buffer to the archive. We are passed the number of
931*57193Smuller  *	bytes in the buffer at the point of the flush. When we change archives
932*57193Smuller  *	the record size might change. (either larger or smaller).
 * Return:
 *	Number of bytes written to the archive (0 when a volume change left
 *	the buffer with less than a full block), -1 when a write error occurs.
935*57193Smuller  */
936*57193Smuller 
#if __STDC__
int
buf_flush(register int bufcnt)
#else
int
buf_flush(bufcnt)
	register int bufcnt;
#endif
{
	register int cnt;
	register int push = 0;		/* "extra" bytes beyond the (possibly
					 * shrunken) blksz after a volume change;
					 * must be moved to the buffer front */
	register int totcnt = 0;	/* total bytes written by this call */

	/*
	 * if we have reached the user specified byte count for each archive
	 * volume, prompt for the next volume. (The non-standard -R flag).
	 * NOTE: If the wrlimit is smaller than wrcnt, we will always write
	 * at least one record. We always round limit UP to next blocksize.
	 */
	if ((wrlimit > 0) && (wrcnt > wrlimit)) {
		warn(0, "User specified archive volume byte limit reached.");
		if (ar_next() < 0) {
			wrcnt = 0;
			exit_val = 1;
			return(-1);
		}
		wrcnt = 0;

		/*
		 * The new archive volume might have changed the size of the
		 * write blocksize. if so we figure out if we need to write
		 * (one or more times), or if there is now free space left in
		 * the buffer (it is no longer full). bufcnt has the number of
		 * bytes in the buffer, (the blocksize, at the point we were
		 * CALLED). Push has the amount of "extra" data in the buffer
		 * if the block size has shrunk from a volume change.
		 */
		bufend = buf + blksz;
		if (blksz > bufcnt)
			return(0);
		if (blksz < bufcnt)
			push = bufcnt - blksz;
	}

	/*
	 * We have enough data to write at least one archive block
	 */
	for (;;) {
		/*
		 * write a block and check if it all went out ok
		 */
		cnt = ar_write(buf, blksz);
		if (cnt == blksz) {
			/*
			 * the write went ok
			 */
			wrcnt += cnt;
			totcnt += cnt;
			if (push > 0) {
				/* we have extra data to push to the front.
				 * check for more than 1 block of push, and if
				 * so we loop back to write again.
				 * bcopy() is used since the source (past
				 * bufend) and destination (buffer front) can
				 * overlap when push exceeds blksz.
				 */
				bcopy(bufend, buf, push);
				bufpt = buf + push;
				if (push >= blksz) {
					push -= blksz;
					continue;
				}
			} else
				bufpt = buf;
			return(totcnt);
		} else if (cnt > 0) {
			/*
			 * Oh drat we got a partial write!
			 * if format doesn't care about alignment let it go,
			 * we warned the user in ar_write().... but this means
			 * the last record on this volume violates pax spec....
			 * slide the unwritten tail to the buffer front
			 * (overlapping move, so bcopy() again).
			 */
			totcnt += cnt;
			wrcnt += cnt;
			bufpt = buf + cnt;
			cnt = bufcnt - cnt;
			bcopy(bufpt, buf, cnt);
			bufpt = buf + cnt;
			if (!frmt->blkalgn || ((cnt % frmt->blkalgn) == 0))
				return(totcnt);
			break;
		}

		/*
		 * All done, go to next archive
		 */
		wrcnt = 0;
		if (ar_next() < 0)
			break;

		/*
		 * The new archive volume might also have changed the block
		 * size. if so, figure out if we have too much or too little
		 * data for using the new block size
		 */
		bufend = buf + blksz;
		if (blksz > bufcnt)
			return(0);
		if (blksz < bufcnt)
			push = bufcnt - blksz;
	}

	/*
	 * write failed, stop pax. we must not create a bad archive!
	 */
	exit_val = 1;
	return(-1);
}
1052