/*
    libparted
    Copyright (C) 1998, 1999, 2000, 2001, 2007 Free Software Foundation, Inc.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include <config.h>
#include <string.h>

#include "fat.h"

#ifndef DISCOVER_ONLY

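/* Decides whether FRAG must be copied to a freshly allocated cluster:
 * directory fragments always move, file fragments move only when they
 * cannot be remapped in place (fat_op_context_map_static_fragment()
 * returns -1), and free or bad fragments are never copied.
 */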
static int
needs_duplicating (const FatOpContext* ctx, FatFragment frag)
{
	FatSpecific*	old_fs_info = FAT_SPECIFIC (ctx->old_fs);
	FatCluster	cluster = fat_frag_to_cluster (ctx->old_fs, frag);
	FatClusterFlag	flag;

	PED_ASSERT (cluster >= 2 && cluster < old_fs_info->cluster_count + 2,
		    return 0);

	flag = fat_get_fragment_flag (ctx->old_fs, frag);
	switch (flag) {
	case FAT_FLAG_FREE:
		return 0;

	case FAT_FLAG_DIRECTORY:
		return 1;

	case FAT_FLAG_FILE:
		return fat_op_context_map_static_fragment (ctx, frag) == -1;

	case FAT_FLAG_BAD:
		return 0;
	}

	return 0;
}

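/* Advances ctx->buffer_offset to the next fragment that needs
 * duplicating; returns 0 when the whole filesystem has been scanned.
 */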
static int
search_next_fragment (FatOpContext* ctx)
{
	FatSpecific*	fs_info = FAT_SPECIFIC (ctx->old_fs);

	for (; ctx->buffer_offset < fs_info->frag_count; ctx->buffer_offset++) {
		if (needs_duplicating (ctx, ctx->buffer_offset))
			return 1;
	}
	return 0;	/* all done! */
}

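/* Reads LENGTH fragments, starting at ctx->buffer_offset, into the
 * filesystem buffer in one pass.  If the bulk read fails, retries just
 * the marked fragments one at a time, so an I/O error on a fragment we
 * don't need cannot abort the copy.
 */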
static int
read_marked_fragments (FatOpContext* ctx, FatFragment length)
{
	FatSpecific*		fs_info = FAT_SPECIFIC (ctx->old_fs);
	int			status;
	FatFragment		i;

	ped_exception_fetch_all ();
	status = fat_read_fragments (ctx->old_fs, fs_info->buffer,
				     ctx->buffer_offset, length);
	ped_exception_leave_all ();
	if (status)
		return 1;

	ped_exception_catch ();

/* something bad happened, so read fragments one by one.  (The error may
   have occurred on an unused fragment: who cares) */
	for (i = 0; i < length; i++) {
		if (ctx->buffer_map [i] != -1) {
			if (!fat_read_fragment (ctx->old_fs,
			      fs_info->buffer + i * fs_info->frag_size,
			      ctx->buffer_offset + i))
				return 0;
		}
	}

	return 1;
}

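/* Marks, in ctx->buffer_map, every fragment in the current window that
 * needs duplicating (-1 means "leave alone"), then reads the marked
 * span into the buffer.
 */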
static int
fetch_fragments (FatOpContext* ctx)
{
	FatSpecific*	old_fs_info = FAT_SPECIFIC (ctx->old_fs);
	FatFragment	fetch_length = 0;
	FatFragment	frag;

	for (frag = 0; frag < ctx->buffer_frags; frag++)
		ctx->buffer_map [frag] = -1;

	for (frag = 0;
	     frag < ctx->buffer_frags
		&& ctx->buffer_offset + frag < old_fs_info->frag_count;
	     frag++) {
		if (needs_duplicating (ctx, ctx->buffer_offset + frag)) {
			ctx->buffer_map [frag] = 1;
			fetch_length = frag + 1;
		}
	}

	if (!read_marked_fragments (ctx, fetch_length))
		return 0;

	return 1;
}

/*****************************************************************************
 * Here starts the write code.  All of it assumes that ctx->buffer_map [first]
 * and ctx->buffer_map [last] are occupied by fragments that need to be
 * duplicated.
 *****************************************************************************/

/* finds the first fragment that is not going to get overwritten (that needs to
   get read in) */
static FatFragment
get_first_underlay (const FatOpContext* ctx, int first, int last)
{
	int		old;
	FatFragment	new;

	PED_ASSERT (first <= last, return 0);

	new = ctx->buffer_map [first];
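	/* "new" tracks the destination fragment the next duplicated
	 * fragment would occupy if the mapping were gap-free; the first
	 * destination the mapping skips over holds static data. */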
	for (old = first + 1; old <= last; old++) {
		if (ctx->buffer_map [old] == -1)
			continue;
		new++;
		if (ctx->buffer_map [old] != new)
			return new;
	}
	return -1;
}

/* finds the last fragment that is not going to get overwritten (that needs to
   get read in) */
static FatFragment
get_last_underlay (const FatOpContext* ctx, int first, int last)
{
	int		old;
	FatFragment	new;

	PED_ASSERT (first <= last, return 0);

	new = ctx->buffer_map [last];
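	/* mirror image of get_first_underlay(): walk backwards and return
	 * the last destination fragment the mapping skips over */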
	for (old = last - 1; old >= first; old--) {
		if (ctx->buffer_map [old] == -1)
			continue;
		new--;
		if (ctx->buffer_map [old] != new)
			return new;
	}
	return -1;
}

169 /* "underlay" refers to the "static" fragments, that remain unchanged.
170  * when writing large chunks at a time, we don't want to clobber these,
171  * so we read them in, and write them back again.  MUCH quicker that way.
172  */
173 static int
quick_group_write_read_underlay(FatOpContext * ctx,int first,int last)174 quick_group_write_read_underlay (FatOpContext* ctx, int first, int last)
175 {
176 	FatSpecific*	new_fs_info = FAT_SPECIFIC (ctx->new_fs);
177 	FatFragment	first_underlay;
178 	FatFragment	last_underlay;
179 	FatFragment	underlay_length;
180 
181 	PED_ASSERT (first <= last, return 0);
182 
183 	first_underlay = get_first_underlay (ctx, first, last);
184 	if (first_underlay == -1)
185 		return 1;
186 	last_underlay = get_last_underlay (ctx, first, last);
187 
188 	PED_ASSERT (first_underlay <= last_underlay, return 0);
189 
190 	underlay_length = last_underlay - first_underlay + 1;
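	/* read the underlay into the position it will occupy in the buffer
	 * when the whole group is written back, relative to the group's
	 * first destination fragment */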
	if (!fat_read_fragments (ctx->new_fs,
				new_fs_info->buffer
				   + (first_underlay - ctx->buffer_map [first])
					* new_fs_info->frag_size,
				first_underlay,
				underlay_length))
		return 0;
	return 1;
}

/* quick_group_write() makes no attempt to recover from errors - just
 * does things fast.  If there is an error, slow_group_write() is
 * called.
 *    Note: we do syncing writes, to make sure there isn't any
 * error writing out.  It's rather difficult recovering from errors
 * further on.
 */
static int
quick_group_write (FatOpContext* ctx, int first, int last)
{
	FatSpecific*		old_fs_info = FAT_SPECIFIC (ctx->old_fs);
	FatSpecific*		new_fs_info = FAT_SPECIFIC (ctx->new_fs);
	int			active_length;
	int			i;
	int			offset;

	PED_ASSERT (first <= last, return 0);

	ped_exception_fetch_all ();
	if (!quick_group_write_read_underlay (ctx, first, last))
		goto error;

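	/* copy each fragment to its group-relative position in the new
	 * filesystem's buffer; the gaps left by -1 entries still hold the
	 * underlay we just read in */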
	for (i = first; i <= last; i++) {
		if (ctx->buffer_map [i] == -1)
			continue;

		offset = ctx->buffer_map [i] - ctx->buffer_map [first];
		memcpy (new_fs_info->buffer + offset * new_fs_info->frag_size,
			old_fs_info->buffer + i * new_fs_info->frag_size,
			new_fs_info->frag_size);
	}

	active_length = ctx->buffer_map [last] - ctx->buffer_map [first] + 1;
	if (!fat_write_sync_fragments (ctx->new_fs, new_fs_info->buffer,
				       ctx->buffer_map [first], active_length))
		goto error;

	ped_exception_leave_all ();
	return 1;

error:
	ped_exception_catch ();
	ped_exception_leave_all ();
	return 0;
}

/* Writes fragments out, one at a time, avoiding errors on redundant writes
 * on damaged parts of the disk we already know about.  If there's an error
 * on one of the required fragments, it gets marked as bad, and a replacement
 * is found.
 */
static int
slow_group_write (FatOpContext* ctx, int first, int last)
{
	FatSpecific*		old_fs_info = FAT_SPECIFIC (ctx->old_fs);
	FatSpecific*		new_fs_info = FAT_SPECIFIC (ctx->new_fs);
	int			i;

	PED_ASSERT (first <= last, return 0);

	for (i = first; i <= last; i++) {
		if (ctx->buffer_map [i] == -1)
			continue;

		while (!fat_write_sync_fragment (ctx->new_fs,
			      old_fs_info->buffer + i * old_fs_info->frag_size,
			      ctx->buffer_map [i])) {
			fat_table_set_bad (new_fs_info->fat,
					   ctx->buffer_map [i]);
			ctx->buffer_map [i] = fat_table_alloc_cluster
						(new_fs_info->fat);
			if (ctx->buffer_map [i] == 0)
				return 0;
		}
	}
	return 1;
}

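/* Records, in ctx->remap, the final destination of every fragment that
 * was duplicated in this group.
 */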
static int
update_remap (FatOpContext* ctx, int first, int last)
{
	int		i;

	PED_ASSERT (first <= last, return 0);

	for (i = first; i <= last; i++) {
		if (ctx->buffer_map [i] == -1)
			continue;
		ctx->remap [ctx->buffer_offset + i] = ctx->buffer_map [i];
	}

	return 1;
}

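/* Writes a group out, trying the fast path first and falling back to the
 * fragment-at-a-time path on error, then records the new locations.
 */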
static int
group_write (FatOpContext* ctx, int first, int last)
{
	PED_ASSERT (first <= last, return 0);

	if (!quick_group_write (ctx, first, last)) {
		if (!slow_group_write (ctx, first, last))
			return 0;
	}
	if (!update_remap (ctx, first, last))
		return 0;
	return 1;
}

/* assumes fragment size and new_fs's cluster size are equal */
static int
write_fragments (FatOpContext* ctx)
{
	FatSpecific*		old_fs_info = FAT_SPECIFIC (ctx->old_fs);
	FatSpecific*		new_fs_info = FAT_SPECIFIC (ctx->new_fs);
	int			group_start;
	int			group_end = -1;	/* shut gcc up! */
	FatFragment		mapped_length;
	FatFragment		i;
	FatCluster		new_cluster;

	PED_ASSERT (ctx->buffer_offset < old_fs_info->frag_count, return 0);

	group_start = -1;
	for (i = 0; i < ctx->buffer_frags; i++) {
		if (ctx->buffer_map [i] == -1)
			continue;

		ctx->frags_duped++;

		new_cluster = fat_table_alloc_cluster (new_fs_info->fat);
		if (!new_cluster)
			return 0;
		fat_table_set_eof (new_fs_info->fat, new_cluster);
		ctx->buffer_map [i] = fat_cluster_to_frag (ctx->new_fs,
							   new_cluster);

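		/* accumulate runs of newly allocated destination fragments
		 * into groups whose mapped span fits in the buffer, so each
		 * group can be flushed with one syncing write */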
		if (group_start == -1)
			group_start = group_end = i;

		PED_ASSERT (ctx->buffer_map [i]
				>= ctx->buffer_map [group_start],
			    return 0);

		mapped_length = ctx->buffer_map [i]
				- ctx->buffer_map [group_start] + 1;
		if (mapped_length <= ctx->buffer_frags) {
			group_end = i;
		} else {
			/* ran out of room in the buffer, so write this group,
			 * and start a new one...
			 */
			if (!group_write (ctx, group_start, group_end))
				return 0;
			group_start = group_end = i;
		}
	}

	PED_ASSERT (group_start != -1, return 0);

	if (!group_write (ctx, group_start, group_end))
		return 0;
	return 1;
}

/*  default all fragments to unmoved
 */
static void
init_remap (FatOpContext* ctx)
{
	FatSpecific*		old_fs_info = FAT_SPECIFIC (ctx->old_fs);
	FatFragment		i;

	for (i = 0; i < old_fs_info->frag_count; i++)
		ctx->remap[i] = fat_op_context_map_static_fragment (ctx, i);
}

static FatFragment
count_frags_to_dup (FatOpContext* ctx)
{
	FatSpecific*	fs_info = FAT_SPECIFIC (ctx->old_fs);
	FatFragment	i;
	FatFragment	total;

	total = 0;

	for (i = 0; i < fs_info->frag_count; i++) {
		if (needs_duplicating (ctx, i))
			total++;
	}

	return total;
}

/*  duplicates unreachable file clusters, and all directory clusters
 */
int
fat_duplicate_clusters (FatOpContext* ctx, PedTimer* timer)
{
	FatFragment	total_frags_to_dup;

	init_remap (ctx);
	total_frags_to_dup = count_frags_to_dup (ctx);

	ped_timer_reset (timer);
	ped_timer_set_state_name (timer, "moving data");

	ctx->buffer_offset = 0;
	ctx->frags_duped = 0;
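	/* process the filesystem in buffer-sized windows: seek to the next
	 * fragment that needs duplicating, read the window in, then write
	 * the copies out to freshly allocated clusters */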
	while (search_next_fragment (ctx)) {
		ped_timer_update (
			timer, 1.0 * ctx->frags_duped / total_frags_to_dup);

		if (!fetch_fragments (ctx))
			return 0;
		if (!write_fragments (ctx))
			return 0;
		ctx->buffer_offset += ctx->buffer_frags;
	}

	ped_timer_update (timer, 1.0);
	return 1;
}

#endif /* !DISCOVER_ONLY */