#ifndef lint
static char sccsid[] = "@(#)maps.c	1.3 (Berkeley/CCI) 12/23/86";
#endif


#include	"vdfmt.h"


/*
**	Align_buf looks for the bit offset at which the buffer begins with the
**  given sync word.  If the sync word is found, the 512 byte buffer is
**  shifted down by that many bits and true is returned; otherwise false is
**  returned.
*/

boolean align_buf(buf, sync)
unsigned long	*buf;
unsigned long	sync;
{
	register int	i, shift;

	/* find shift amount */
	for(shift=0; shift<32; shift++) {
		if((*buf >> shift ) == sync) {
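			/*
			** Shift the whole 512 byte buffer down by `shift' bits,
			** carrying the low bits of each word into the word that
			** follows it.  Note that the loop also stores into the
			** word just past the 512 byte area, so the caller's
			** buffer is presumably at least one word larger.
			*/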
			for(i=(512/sizeof(long))-1; i >= 0; i--) {
				*(buf+i+1) |= *(buf+i) << (32 - shift);
				*(buf+i) = *(buf+i) >> shift;
			}
			return true;
		}
	}
	return false;
}


/*
**	Read_map looks for two copies of the bad sector map in a row (on
**  successive tracks of the map cylinder) that are the same, and accepts a
**  copy only after its entries pass a sanity check.
*/

boolean
read_map(flags)
short	flags;
{
	register int	trk, i;
	dskadr		dskaddr;

	dskaddr.cylinder = (CURRENT->vc_ncyl - 1) | flags;
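	/*
	** Seed the compare buffer with a pattern no real map will match,
	** presumably so the first track read never counts as a duplicate.
	*/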
	for(i=0; i<100; i++)
		scratch[i] = -1;
	for(trk=0; trk<CURRENT->vc_ntrak; trk++) {
		dskaddr.track = trk;
		dskaddr.sector = 0;
		if(access_dsk((char *)save,&dskaddr,RD,CURRENT->vc_nsec,1)& HRDERR)
			continue;
		if(blkcmp((char *)scratch, (char *)save, bytes_trk) == true) {
			blkcopy((char *)save, (char *)bad_map, bytes_trk);
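			/*
			** Two consecutive tracks hold the same data; sanity
			** check each entry before believing this copy of the
			** map.
			*/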
			if(bad_map->bs_count <= MAX_FLAWS) {
				for(i=0; i<bad_map->bs_count; i++) {
					if(bad_map->list[i].bs_cyl >=
					    CURRENT->vc_ncyl)
						break;
					if(bad_map->list[i].bs_trk >=
					    CURRENT->vc_ntrak)
						break;
					if(bad_map->list[i].bs_offset >=
					    CURRENT->vc_traksize)
						break;
				}
				if(i == bad_map->bs_count) {
					load_free_table();
					return true;
				}
			}
			blkzero(bad_map, bytes_trk);
			bad_map->bs_id = 0;
			bad_map->bs_max = MAX_FLAWS;
		}
		blkcopy((char *)save, (char *)scratch, bytes_trk);
	}
	return false;
}


/*
**	Read_bad_sector_map initializes an empty bad sector map and then tries
**  to recover the existing map from the drive.  It returns true if a valid
**  map was read and false if the map must be reconstructed.
*/

boolean read_bad_sector_map()
{
	dskadr		dskaddr;

	dskaddr.cylinder = CURRENT->vc_ncyl - 1;
	dskaddr.track = 0;
	dskaddr.sector = 0;
	/* start with nothing in map */
	blkzero(bad_map, bytes_trk);
	bad_map->bs_id = 0;
	bad_map->bs_max = MAX_FLAWS;
	if (C_INFO.type == SMD_ECTLR) {
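		/*
		** A raw read of the first sector of the last cylinder is
		** checked for the CDC sync pattern; if it is found the map is
		** rebuilt from the factory flaw data and false is returned,
		** presumably so the caller will write a fresh bad sector map.
		*/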
		access_dsk((char *)save, &dskaddr, RD_RAW, 1, 1);
		if (align_buf((unsigned long *)save, CDCSYNC) == true) {
			read_flaw_map();
			return (false);
		} else if (read_map(NRM) == true) {
			return (true);
		} else {
			get_smde_relocations();
			return false;
		}
	} else {
		if (read_map(WPT) == true)
			return (true);
		else {
			get_relocations_the_hard_way();
			return (false);
		}
	}
}


/*
**	Get_relocations_the_hard_way scans every track in the user data area
**  and records any sector that is already accessed through an alternate.
*/

get_relocations_the_hard_way()
{
	register int	cyl, trk;
	register int	status;
	dskadr		dskaddr;

	dskaddr.sector = 0;
	/* scan each sector to see if it is relocated and take note if it is */
	for(cyl=0; cyl<CURRENT->vc_ncyl-NUMSYS; cyl++) {
		dskaddr.cylinder = cyl;
		for(trk=0; trk<CURRENT->vc_ntrak; trk++) {
			dskaddr.track = trk;
			status=access_dsk((char *)scratch, &dskaddr,
			    RD, CURRENT->vc_nsec, 1);
			if(status & ALTACC)
				get_track_relocations(dskaddr);
		}
	}
	load_free_table();
}


/*
**	Get_track_relocations reads each sector of the given track and adds a
**  bad sector map entry for every sector that is reached through an
**  alternate.
*/

get_track_relocations(dskaddr)
dskadr	dskaddr;
{
	register int	status;
	bs_entry	temp;
	fmt_err		error;

	for(dskaddr.sector=0; dskaddr.sector<CURRENT->vc_nsec; dskaddr.sector++) {
		status = access_dsk((char *)scratch, &dskaddr, RD, 1, 1);
		if(status & ALTACC) {
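			/*
			** The sector is reached through an alternate; code its
			** address into a bad sector map entry marked as an
			** operator relocation.
			*/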
			error.err_adr = dskaddr;
			error.err_stat = DATA_ERROR;
			temp = (*C_INFO.code_pos)(error);
			temp.bs_how = operator;
			add_flaw(&temp);
		}
	}
}


/*
**	Remove_user_relocations takes an operator supplied relocation out of
**  the bad sector map and reformats both the bad spot and its replacement
**  normally.
*/

remove_user_relocations(entry)
bs_entry	entry;
{
	register int	i, j;
	fmt_err		temp;
	fmt_err		error;
	bs_entry	*ptr;

	error = (*C_INFO.decode_pos)(entry);
	if(is_in_map(&error.err_adr) == true) {
		ptr = bad_map->list;
		for(i=0; i<bad_map->bs_count; i++) {
			temp = (*C_INFO.decode_pos)(*ptr);
			if((ptr->bs_how == operator) &&
			    (temp.err_adr.cylinder == error.err_adr.cylinder) &&
			    (temp.err_adr.track == error.err_adr.track) &&
			    (temp.err_adr.sector == error.err_adr.sector)) {
				if(temp.err_stat & HEADER_ERROR)
					remove_track(temp, ptr);
				else
					remove_sector(temp, ptr);
				for(j=i+1; j<bad_map->bs_count; j++)
					bad_map->list[j-1] = bad_map->list[j];
				bad_map->bs_count--;
				return;
			}
			ptr++;
		}
	}
	else {
		indent();
		print("Sector %d is not in bad sector map!\n",
		    to_sector(error.err_adr));
		exdent(1);
	}
}


/*
**	Remove_sector reformats a relocated sector and its replacement as
**  normal sectors.
*/

remove_sector(error, entry)
fmt_err		error;
bs_entry	*entry;
{
	format_sectors(&error.err_adr, &error.err_adr, NRM, 1);
	format_sectors(&entry->bs_alt, &entry->bs_alt, NRM, 1);
}


/*
**	Remove_track reformats a relocated track and its replacement as normal
**  tracks.
*/

remove_track(error, entry)
fmt_err		error;
bs_entry	*entry;
{
	format_sectors(&error.err_adr,&error.err_adr,NRM,(long)CURRENT->vc_nsec);
	format_sectors(&entry->bs_alt,&entry->bs_alt,NRM,(long)CURRENT->vc_nsec);
}


/*
**	Write_bad_sector_map copies the bad sector map image into the scratch
**  buffer one sector at a time and formats each sector of the map cylinder
**  write protected.
*/

write_bad_sector_map()
{
	register int	trk, sec;
	dskadr		dskaddr;

	dskaddr.cylinder = (CURRENT->vc_ncyl - NUMMAP);
	for(trk=0; trk<CURRENT->vc_ntrak; trk++) {
		for(sec = 0; sec < CURRENT->vc_nsec; sec++) {
			blkcopy((char *)bs_map_space + (sec * SECSIZ),
			    (char *)scratch, SECSIZ);
			dskaddr.track = trk;
			dskaddr.sector = sec;
			format_sectors(&dskaddr, &dskaddr, WPT, 1);
		}
	}
}


/*
**	Zero_bad_sector_map clears the replacement address of every entry in
**  the bad sector map and rebuilds the free relocation block table.
*/

zero_bad_sector_map()
{
	bs_map		*bm = bad_map;
	register int	i;
	dskadr		zero;

	zero.cylinder = 0;
	zero.track = 0;
	zero.sector = 0;
	for(i=0; i<bm->bs_count; i++)
		bm->list[i].bs_alt = zero;
	load_free_table();
}


/*
**	Read_flaw_map reads the CDC flaw map from every track on the drive and
**  adds its entries to the bad sector map.
*/

read_flaw_map()
{
	register int	cyl, trk;
	dskadr		dskaddr;
	flaw		buffer;

	dskaddr.sector = 0;
	for  (cyl=0; cyl<CURRENT->vc_ncyl; cyl++) {
		dskaddr.cylinder = cyl;
		for  (trk=0; trk<CURRENT->vc_ntrak; trk++) {
			dskaddr.track = trk;
			access_dsk(&buffer, &dskaddr, RD_RAW, 1, 1);
			if(align_buf(&buffer, CDCSYNC) == true) {
				add_flaw_entries(&buffer);
				continue;
			}
		}
	}
	load_free_table();
}


/*
**	Get_smde_relocations examines the raw sector headers in the relocation
**  area of an SMD-E drive and rebuilds a bad sector map entry for every
**  track or sector found to be serving as an alternate.
*/

get_smde_relocations()
{
	register int	cyl, trk, sec;
	smde_hdr	buffer;
	dskadr		dskaddr;
	fmt_err		bad;
	bs_entry	temp;
	boolean		bad_track;

	/* Read any old drive relocations */
	for(cyl=0; cyl<NUMREL; cyl++) {
		dskaddr.cylinder = CURRENT->vc_ncyl - NUMSYS + cyl;
		for(trk=0; trk<CURRENT->vc_ntrak; trk++) {
			dskaddr.track = trk;
			bad_track = true;
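			/*
			** If every sector on this track carries the alternate
			** sync pattern, the whole track is presumably serving
			** as the replacement for a bad track.
			*/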
			for(sec=0; sec<CURRENT->vc_nsec; sec++) {
				dskaddr.sector = sec;
				access_dsk(&buffer, &dskaddr, RD_RAW, 1, 1);
				if(align_buf(&buffer, SMDE1SYNC) == false) {
					bad_track = false;
					break;
				}
			}
			if(bad_track == true) {
				dskaddr.sector = 0;
				bad.err_adr.cylinder = buffer.alt_cyl;
				bad.err_adr.track = buffer.alt_trk;
				bad.err_adr.sector = 0;
				bad.err_stat = HEADER_ERROR;
				temp = (*C_INFO.code_pos)(bad);
				temp.bs_alt = dskaddr;
				temp.bs_how = scanning;
				add_flaw(&temp);
				continue;
			}
			for(sec=0; sec<CURRENT->vc_nsec; sec++) {
				dskaddr.sector = sec;
				access_dsk(&buffer, &dskaddr, RD_RAW, 1, 1);
				if(align_buf(&buffer, SMDE1SYNC) == true) {
					bad.err_adr.cylinder = buffer.alt_cyl;
					bad.err_adr.track = buffer.alt_trk;
					bad.err_adr.sector = buffer.alt_sec;
					bad.err_stat = DATA_ERROR;
					temp = (*C_INFO.code_pos)(bad);
					temp.bs_alt = dskaddr;
					temp.bs_how = scanning;
					add_flaw(&temp);
				}
			}
		}
	}
	load_free_table();
}


/*
**	Add_flaw_entries converts the entries of one track's CDC flaw map into
**  bad sector map entries.
*/

add_flaw_entries(buffer)
flaw	*buffer;
{
	register int	i;
	bs_entry	temp;

	temp.bs_cyl = buffer->flaw_cyl & 0x7fff; /* clear off bad track bit */
	temp.bs_trk = buffer->flaw_trk;
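	/*
	** A flaw map entry holds up to four flaw positions; a zero length
	** marks an unused slot.
	*/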
	for(i=0; i<4; i++) {
		if(buffer->flaw_pos[i].flaw_length != 0) {
			temp.bs_offset = buffer->flaw_pos[i].flaw_offset;
			temp.bs_length = buffer->flaw_pos[i].flaw_length;
			temp.bs_alt.cylinder = 0;
			temp.bs_alt.track = 0;
			temp.bs_alt.sector = 0;
			temp.bs_how = flaw_map;
			add_flaw(&temp);
		}
	}
}


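/*
**	Cmp_entry orders two bad sector map entries by cylinder, track and
**  byte offset (comparison routine for qsort).
*/
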
cmp_entry(a, b)
bs_entry	*a;
bs_entry	*b;
{
	if(a->bs_cyl == b->bs_cyl) {
		if(a->bs_trk == b->bs_trk) {
			if(a->bs_offset == b->bs_offset)
				return 0;
			else if(a->bs_offset < b->bs_offset)
				return -1;
		}
		else if(a->bs_trk < b->bs_trk)
			return -1;
	}
	else if(a->bs_cyl < b->bs_cyl)
		return -1;
	return 1;
}


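/*
**	Add_flaw enters a new flaw in the bad sector map unless it is already
**  there or its address is impossible, and keeps the map sorted.
*/
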
add_flaw(entry)
bs_entry	*entry;
{
	extern	int	cmp_entry();
	bs_map		*bm = bad_map;
	register int	i;

	if(bm->bs_count > MAX_FLAWS)
		return;
	if (entry->bs_cyl >= CURRENT->vc_ncyl ||
	    entry->bs_trk >= CURRENT->vc_ntrak ||
	    entry->bs_offset >= CURRENT->vc_traksize)
		return;
	for(i=0; i<bm->bs_count; i++) {
		if(((bm->list[i].bs_cyl == entry->bs_cyl)) &&
		    (bm->list[i].bs_trk == entry->bs_trk) &&
		    (bm->list[i].bs_offset == entry->bs_offset)) {
			if((int)bm->list[i].bs_how > (int)entry->bs_how)
				bm->list[i].bs_how = entry->bs_how;
			return;
		}
	}
	bm->list[i] = *entry;
	bm->list[i].bs_alt.cylinder = 0;
	bm->list[i].bs_alt.track = 0;
	bm->list[i].bs_alt.sector = 0;
	bm->bs_count++;
	qsort((char *)&(bm->list[0]), (unsigned)bm->bs_count,
	    sizeof(bs_entry), cmp_entry);
}


/*
**	Is_in_map checks to see if a block is known to be bad already.
*/

boolean is_in_map(dskaddr)
dskadr	*dskaddr;
{
	register int	i;
	fmt_err		temp;

	for(i=0; i<bad_map->bs_count; i++) {
		temp = (*C_INFO.decode_pos)(bad_map->list[i]);
		if((temp.err_adr.cylinder == dskaddr->cylinder) &&
		    (temp.err_adr.track == dskaddr->track) &&
		    (temp.err_adr.sector == dskaddr->sector)) {
			return true;
		}
	}
	return false;
}


/*
**	Print_bad_sector_list prints every entry in the bad sector map along
**  with the replacement sector or track, if one has been assigned.
*/

print_bad_sector_list()
{
	register int	i;
	fmt_err		errloc;

	if(bad_map->bs_count == 0) {
		print("There are no bad sectors in bad sector map.\n");
		return;
	}
	print("The following sector%s known to be bad:\n",
	    (bad_map->bs_count == 1) ? " is" : "s are");
	indent();
	for(i=0; i<bad_map->bs_count; i++) {
		print("cyl %d, head %d, pos %d, len %d ",
			bad_map->list[i].bs_cyl,
			bad_map->list[i].bs_trk,
			bad_map->list[i].bs_offset,
			bad_map->list[i].bs_length);
		errloc = (*C_INFO.decode_pos)(bad_map->list[i]);
		if(errloc.err_stat & HEADER_ERROR) {
			printf("(Track #%d)", to_track(errloc.err_adr));
		}
		else {
			printf("(Sector #%d)", to_sector(errloc.err_adr));
		}
		if((bad_map->list[i].bs_alt.cylinder != 0) ||
		    (bad_map->list[i].bs_alt.track != 0) ||
		    (bad_map->list[i].bs_alt.sector != 0)) {
			indent();
			printf(" -> ");
			if(errloc.err_stat & HEADER_ERROR) {
				printf("Track %d",
				    to_track(bad_map->list[i].bs_alt));
			}
			else {
				printf("Sector %d",
				    to_sector(bad_map->list[i].bs_alt));
			}
			exdent(1);
		}
		printf(".\n");
	}
	exdent(1);
}


/*
**	Load_free_table checks each block in the bad block relocation area
** to see if it is used.  If it is, the free relocation block table is updated.
*/

load_free_table()
{
	register int	i, j;
	fmt_err		temp;

	/* Clear free table before starting */
	for(i = 0; i < (CURRENT->vc_ntrak * NUMREL); i++) {
		for(j=0; j < CURRENT->vc_nsec; j++)
			free_tbl[i][j].free_status = NOTALLOCATED;
	}
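	/*
	** Every entry that already has a replacement assigned marks that
	** replacement as in use.
	*/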
	for(i=0; i<bad_map->bs_count; i++)
		if((bad_map->list[i].bs_alt.cylinder != 0) ||
		    (bad_map->list[i].bs_alt.track != 0) ||
		    (bad_map->list[i].bs_alt.sector != 0)) {
			temp = (*C_INFO.decode_pos)(bad_map->list[i]);
			allocate(&(bad_map->list[i].bs_alt), temp.err_stat);
		}
}


/*
**	allocate marks a replacement sector as used.
*/

allocate(dskaddr, status)
dskadr	*dskaddr;
long	status;
{
	register int	trk, sec;

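	/*
	** free_tbl is indexed by the track's position within the relocation
	** area: (cylinder offset into the area) * tracks per cylinder + track.
	*/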
	trk = dskaddr->cylinder - (CURRENT->vc_ncyl - NUMSYS);
	if((trk < 0) || (trk >= NUMREL))
		return;
	trk *= CURRENT->vc_ntrak;
	trk += dskaddr->track;
	if(status & HEADER_ERROR)
		for(sec=0; sec<CURRENT->vc_nsec; sec++)
			free_tbl[trk][sec].free_status = ALLOCATED;
	else
		free_tbl[trk][dskaddr->sector].free_status = ALLOCATED;
}


/*
**	Mapping_collision returns true if the given entry lies in the
**  relocation area and another bad sector has already been mapped onto that
**  location.
*/

boolean mapping_collision(entry)
bs_entry	*entry;
{
	register int	trk, sec;
	fmt_err		temp;

	trk = entry->bs_cyl - (CURRENT->vc_ncyl - NUMSYS);
	if((trk < 0) || (trk >= NUMREL))
		return false;
	trk *= CURRENT->vc_ntrak;
	trk += entry->bs_trk;
	temp = (*C_INFO.decode_pos)(*entry);
	/* if this relocation should take up the whole track */
	if(temp.err_stat & HEADER_ERROR) {
		for(sec=0; sec<CURRENT->vc_nsec; sec++)
			if(free_tbl[trk][sec].free_status == ALLOCATED)
				return true;
	}
	/* else just check the current sector */
	else {
		if(free_tbl[trk][temp.err_adr.sector].free_status == ALLOCATED)
			return true;
	}
	return false;
}


/*
**	Report_collision warns the operator that a sector in the relocation
**  area already has a bad sector mapped onto it.
*/

report_collision()
{
	indent();
	print("Sector resides in relocation area ");
	printf("but it has a sector mapped to it already.\n");
	print("Please reformat disk with 0 patterns to eliminate problem.\n");
	exdent(1);
}


/*
**	Add_user_relocations adds an operator specified bad sector to the bad
**  sector map if it is not already there.
*/

add_user_relocations(entry)
bs_entry	*entry;
{
	fmt_err		error;

	error = (*C_INFO.decode_pos)(*entry);
	if(is_in_map(&error.err_adr) == false) {
		if(mapping_collision(entry) == true)
			report_collision();
		entry->bs_how = operator;
		add_flaw(entry);
	}
	else {
		indent();
		print("Sector %d is already mapped out!\n",
		    to_sector(error.err_adr));
		exdent(1);
	}
}


/*
** 	New_location allocates a replacement block given a bad block address.
**  The algorithm is fairly simple: it searches for the first free sector
**  that has the same sector number as the bad sector.  If no such sector
**  is found then the drive should be considered bad because of a microcode bug
**  in the controller that forces us to use the same sector number as the bad
**  sector for relocation purposes.  Using different tracks and cylinders is ok
**  of course.
*/

dskadr *new_location(entry)
bs_entry	*entry;
{
	register int	i, sec;
	static fmt_err	temp;
	static dskadr	newaddr;

	newaddr.cylinder = 0;
	newaddr.track = 0;
	newaddr.sector = 0;
	temp = (*C_INFO.decode_pos)(*entry);
	/* If it is outside of the user's data area */
	if(entry->bs_cyl >= CURRENT->vc_ncyl-NUMSYS) {
		/* if it is in the relocation area */
		if(entry->bs_cyl < (CURRENT->vc_ncyl - NUMMAP - NUMMNT)) {
			/* mark space as allocated */
			allocate(&temp.err_adr, temp.err_stat);
			return &temp.err_adr;
		}
		/* if it is in the map area forget about it */
		if(entry->bs_cyl != (CURRENT->vc_ncyl - NUMMAP - NUMMNT))
			return &temp.err_adr;
		/* otherwise treat maintenance cylinder normally */
	}
	if(temp.err_stat & (HEADER_ERROR)) {
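		/*
		** A header error relocates a whole track, so find a track in
		** the relocation area with no sector in use.
		*/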
		for(i = 0; i < (CURRENT->vc_ntrak * NUMREL); i++) {
			for(sec=0; sec < CURRENT->vc_nsec; sec++) {
				if(free_tbl[i][sec].free_status == ALLOCATED)
					break;
			}
			if(sec == CURRENT->vc_nsec) {
				for(sec = 0; sec < CURRENT->vc_nsec; sec++)
					free_tbl[i][sec].free_status=ALLOCATED;
				newaddr.cylinder = i / CURRENT->vc_ntrak +
				    (CURRENT->vc_ncyl - NUMSYS);
				newaddr.track = i % CURRENT->vc_ntrak;
				break;
			}
		}
	}
	else if(C_INFO.type == SMDCTLR) {
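		/*
		** On an SMD controller the replacement must keep the sector
		** number of the bad sector (see the comment above), so look
		** for a track whose copy of that sector is still free.
		*/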
		for(i = 0; i < (CURRENT->vc_ntrak * NUMREL); i++) {
			if(free_tbl[i][temp.err_adr.sector].free_status !=
			    ALLOCATED) {
				free_tbl[i][temp.err_adr.sector].free_status =
				    ALLOCATED;
				newaddr.cylinder = i / CURRENT->vc_ntrak +
				    (CURRENT->vc_ncyl - NUMSYS);
				newaddr.track = i % CURRENT->vc_ntrak;
				newaddr.sector = temp.err_adr.sector;
				break;
			}
		}
	}
	else {
		for(i = 0; i < (CURRENT->vc_ntrak * NUMREL); i++) {
			for(sec=0; sec < CURRENT->vc_nsec; sec++)
				if(free_tbl[i][sec].free_status != ALLOCATED)
					break;
			if(sec < CURRENT->vc_nsec) {
				free_tbl[i][sec].free_status = ALLOCATED;
				newaddr.cylinder = i / CURRENT->vc_ntrak +
				    (CURRENT->vc_ncyl - NUMSYS);
				newaddr.track = i % CURRENT->vc_ntrak;
				newaddr.sector = sec;
				break;
			}
		}
	}
	return &newaddr;
}