xref: /csrg-svn/sys/tahoe/stand/vdformat/maps.c (revision 29981)
#ifndef lint
static char sccsid[] = "@(#)maps.c	1.2 (Berkeley/CCI) 11/04/86";
#endif


#include	"vdfmt.h"


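/*
**	Bad sector map routines for vdformat: reading and writing the
**	bad sector map, scanning a disk for existing relocations, and
**	allocating replacement sectors from the relocation area.
*/
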
/*
**	Align_buf lines a raw-read buffer up on the controller sync word
**	by bit-shifting the whole buffer; returns true if the sync word
**	was found in the first longword, false otherwise.
*/

boolean align_buf(buf, sync)
unsigned long	*buf;
unsigned long	sync;
{
	register int	i, shift;

	/* find shift amount */
	for(shift=0; shift<32; shift++) {
		if((*buf >> shift) == sync) {
			/* shift the whole buffer right, carrying the low
			   bits of each longword into the one that follows */
			for(i=(512/sizeof(long))-1; i >= 0; i--) {
				*(buf+i+1) |= *(buf+i) << (32 - shift);
				*(buf+i) = *(buf+i) >> shift;
			}
			return true;
		}
	}
	return false;
}


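/*
**	Callers read a sector with RD_RAW and pass the controller's sync
**	word (CDCSYNC or SMDE1SYNC below) before looking at header fields.
*/
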
/*
**	Read_map looks for two tracks in a row that hold identical copies
**	of the bad sector map; the map is accepted only if every entry in
**	it lies within the drive's geometry.
*/

boolean
read_map(flags)
short	flags;
{
	register int	trk, i;
	dskadr		dskaddr;

	dskaddr.cylinder = (CURRENT->vc_ncyl - 1) | flags;
	/* prime the compare buffer so the first track can not match */
	for(i=0; i<100; i++)
		scratch[i] = -1;
	for(trk=0; trk<CURRENT->vc_ntrak; trk++) {
		dskaddr.track = trk;
		dskaddr.sector = 0;
		if(access_dsk((char *)save,&dskaddr,RD,CURRENT->vc_nsec,1)& HRDERR)
			continue;
		if(blkcmp((char *)scratch, (char *)save, bytes_trk) == true) {
			/* two identical copies found; sanity check entries */
			blkcopy((char *)save, (char *)bad_map, bytes_trk);
			if(bad_map->bs_count <= MAX_FLAWS) {
				for(i=0; i<bad_map->bs_count; i++) {
					if(bad_map->list[i].bs_cyl >=
					    CURRENT->vc_ncyl)
						break;
					if(bad_map->list[i].bs_trk >=
					    CURRENT->vc_ntrak)
						break;
					if(bad_map->list[i].bs_offset >=
					    CURRENT->vc_traksize)
						break;
				}
				if(i == bad_map->bs_count) {
					load_free_table();
					return true;
				}
				else
					printf("%d: junk, slot %d/%d\n",
					    trk, i, bad_map->bs_count);
			}
			/* copy was damaged; start over with an empty map */
			blkzero(bad_map, bytes_trk);
			bad_map->bs_id = 0;
			bad_map->bs_max = MAX_FLAWS;
		}
		blkcopy((char *)save, (char *)scratch, bytes_trk);
	}
	return false;
}


/*
**	Read_bad_sector_map brings the bad sector map in from the disk.
**	If no valid map can be found the disk is scanned for existing
**	relocations instead; returns true only when a map was read.
*/

boolean read_bad_sector_map()
{
	dskadr		dskaddr;

	dskaddr.cylinder = CURRENT->vc_ncyl - 1;
	dskaddr.track = 0;
	dskaddr.sector = 0;
	/* start with nothing in map */
	blkzero(bad_map, bytes_trk);
	bad_map->bs_id = 0;
	bad_map->bs_max = MAX_FLAWS;
	if (C_INFO.type == SMD_ECTLR) {
		access_dsk((char *)save, &dskaddr, RD_RAW, 1, 1);
		if (align_buf((unsigned long *)save, CDCSYNC) == true) {
			printf("Reading manufacturer's flaw maps...");
			read_flaw_map();
			printf("\n");
			return (false);
		} else if (read_map(NRM) == true) {
			return (true);
		} else {
			printf("Scanning for old relocations...");
			get_smde_relocations();
			printf("\n");
			return false;
		}
	} else {
		if (read_map(WPT) == true)
			return (true);
		else {
			printf("Scanning for old relocations...");
			get_relocations_the_hard_way();
			printf("\n");
			return (false);
		}
	}
}


/*
**	Get_relocations_the_hard_way scans every track of the data area
**	and records any sectors that the controller reports as relocated.
*/

get_relocations_the_hard_way()
{
	register int	cyl, trk;
	register int	status;
	dskadr		dskaddr;

	dskaddr.sector = 0;
	/* scan each sector to see if it is relocated and take note if it is */
	for(cyl=0; cyl<CURRENT->vc_ncyl-NUMSYS; cyl++) {
		dskaddr.cylinder = cyl;
		for(trk=0; trk<CURRENT->vc_ntrak; trk++) {
			dskaddr.track = trk;
			status=access_dsk((char *)scratch, &dskaddr,
			    RD, CURRENT->vc_nsec, 1);
			if(status & ALTACC)
				get_track_relocations(dskaddr);
		}
	}
	load_free_table();
}


/*
**	Get_track_relocations rereads a track sector by sector and adds a
**	flaw entry for each sector that comes back relocated.
*/

get_track_relocations(dskaddr)
dskadr	dskaddr;
{
	register int	status;
	bs_entry	temp;
	fmt_err		error;

	for(dskaddr.sector=0; dskaddr.sector<CURRENT->vc_nsec; dskaddr.sector++) {
		status = access_dsk((char *)scratch, &dskaddr, RD, 1, 1);
		if(status & ALTACC) {
			error.err_adr = dskaddr;
			error.err_stat = DATA_ERROR;
			temp = (*C_INFO.code_pos)(error);
			temp.bs_how = operator;
			add_flaw(&temp);
		}
	}
}


/*
**	Remove_user_relocations takes an operator supplied entry back out
**	of the bad sector map and reformats both the bad and the
**	replacement sectors.
*/

remove_user_relocations(entry)
bs_entry	entry;
{
	register int	i, j;
	fmt_err		temp;
	fmt_err		error;
	bs_entry	*ptr;

	error = (*C_INFO.decode_pos)(entry);
	if(is_in_map(&error.err_adr) == true) {
		ptr = bad_map->list;
		for(i=0; i<bad_map->bs_count; i++) {
			temp = (*C_INFO.decode_pos)(*ptr);
			if((ptr->bs_how == operator) &&
			    (temp.err_adr.cylinder == error.err_adr.cylinder) &&
			    (temp.err_adr.track == error.err_adr.track) &&
			    (temp.err_adr.sector == error.err_adr.sector)) {
				if(temp.err_stat & HEADER_ERROR)
					remove_track(temp, ptr);
				else
					remove_sector(temp, ptr);
				for(j=i+1; j<bad_map->bs_count; j++)
					bad_map->list[j-1] = bad_map->list[j];
				bad_map->bs_count--;
				return;
			}
			ptr++;
		}
	}
	else {
		indent();
		print("Sector %d is not in bad sector map!\n",
		    to_sector(error.err_adr));
		exdent(1);
	}
}


/*
**	Remove_sector reformats a relocated sector and its replacement
**	with normal headers.
*/

remove_sector(error, entry)
fmt_err		error;
bs_entry	*entry;
{
	format_sectors(&error.err_adr, &error.err_adr, NRM, 1);
	format_sectors(&entry->bs_alt, &entry->bs_alt, NRM, 1);
}


/*
**	Remove_track reformats a relocated track and its replacement
**	track with normal headers.
*/

remove_track(error, entry)
fmt_err		error;
bs_entry	*entry;
{
	format_sectors(&error.err_adr,&error.err_adr,NRM,(long)CURRENT->vc_nsec);
	format_sectors(&entry->bs_alt,&entry->bs_alt,NRM,(long)CURRENT->vc_nsec);
}


/*
**	Write_bad_sector_map writes the in-core map image onto every
**	track of the map cylinder.
*/

write_bad_sector_map()
{
	register int	trk, sec;
	dskadr		dskaddr;

	dskaddr.cylinder = (CURRENT->vc_ncyl - NUMMAP);
	for(trk=0; trk<CURRENT->vc_ntrak; trk++) {
		for(sec = 0; sec < CURRENT->vc_nsec; sec++) {
			blkcopy((char *)bs_map_space + (sec * SECSIZ),
			    (char *)scratch, SECSIZ);
			dskaddr.track = trk;
			dskaddr.sector = sec;
			format_sectors(&dskaddr, &dskaddr, WPT, 1);
		}
	}
}


/*
**	Zero_bad_sector_map throws away all replacement assignments but
**	keeps the flaw entries themselves.
*/

zero_bad_sector_map()
{
	bs_map		*bm = bad_map;
	register int	i;
	dskadr		zero;

	zero.cylinder = 0;
	zero.track = 0;
	zero.sector = 0;
	for(i=0; i<bm->bs_count; i++)
		bm->list[i].bs_alt = zero;
	load_free_table();
}


/*
**	Read_flaw_map reads the manufacturer's flaw map from the first
**	sector of every track and enters its flaws in the bad sector map.
*/

read_flaw_map()
{
	register int	cyl, trk;
	dskadr		dskaddr;
	flaw		buffer;

	dskaddr.sector = 0;
	for (cyl=0; cyl<CURRENT->vc_ncyl; cyl++) {
		dskaddr.cylinder = cyl;
		for (trk=0; trk<CURRENT->vc_ntrak; trk++) {
			dskaddr.track = trk;
			access_dsk(&buffer, &dskaddr, RD_RAW, 1, 1);
			if(align_buf(&buffer, CDCSYNC) == true) {
				add_flaw_entries(&buffer);
				continue;
			}
		}
	}
	load_free_table();
}


/*
**	Get_smde_relocations scans the relocation cylinders of an SMD-E
**	drive for sector and track relocations that are already in use
**	and rebuilds bad sector map entries for them.
*/

get_smde_relocations()
{
	register int	cyl, trk, sec;
	smde_hdr	buffer;
	dskadr		dskaddr;
	fmt_err		bad;
	bs_entry	temp;
	boolean		bad_track;

	/* Read any old drive relocations */
	for(cyl=0; cyl<NUMREL; cyl++) {
		dskaddr.cylinder = CURRENT->vc_ncyl - NUMSYS + cyl;
		for(trk=0; trk<CURRENT->vc_ntrak; trk++) {
			dskaddr.track = trk;
			bad_track = true;
			for(sec=0; sec<CURRENT->vc_nsec; sec++) {
				dskaddr.sector = sec;
				access_dsk(&buffer, &dskaddr, RD_RAW, 1, 1);
				if(align_buf(&buffer, SMDE1SYNC) == false) {
					bad_track = false;
					break;
				}
			}
			/* every sector carried a relocation header, so a
			   whole track was relocated here */
			if(bad_track == true) {
				dskaddr.sector = 0;
				bad.err_adr.cylinder = buffer.alt_cyl;
				bad.err_adr.track = buffer.alt_trk;
				bad.err_adr.sector = 0;
				bad.err_stat = HEADER_ERROR;
				temp = (*C_INFO.code_pos)(bad);
				temp.bs_alt = dskaddr;
				temp.bs_how = scanning;
				add_flaw(&temp);
				continue;
			}
			/* otherwise pick up individual sector relocations */
			for(sec=0; sec<CURRENT->vc_nsec; sec++) {
				dskaddr.sector = sec;
				access_dsk(&buffer, &dskaddr, RD_RAW, 1, 1);
				if(align_buf(&buffer, SMDE1SYNC) == true) {
					bad.err_adr.cylinder = buffer.alt_cyl;
					bad.err_adr.track = buffer.alt_trk;
					bad.err_adr.sector = buffer.alt_sec;
					bad.err_stat = DATA_ERROR;
					temp = (*C_INFO.code_pos)(bad);
					temp.bs_alt = dskaddr;
					temp.bs_how = scanning;
					add_flaw(&temp);
				}
			}
		}
	}
	load_free_table();
}


/*
**	Add_flaw_entries converts one manufacturer's flaw map record into
**	bad sector map entries.
*/

add_flaw_entries(buffer)
flaw	*buffer;
{
	register int	i;
	bs_entry	temp;

	temp.bs_cyl = buffer->flaw_cyl & 0x7fff; /* clear off bad track bit */
	temp.bs_trk = buffer->flaw_trk;
	for(i=0; i<4; i++) {
		if(buffer->flaw_pos[i].flaw_length != 0) {
			temp.bs_offset = buffer->flaw_pos[i].flaw_offset;
			temp.bs_length = buffer->flaw_pos[i].flaw_length;
			temp.bs_alt.cylinder = 0;
			temp.bs_alt.track = 0;
			temp.bs_alt.sector = 0;
			temp.bs_how = flaw_map;
			add_flaw(&temp);
		}
	}
}


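/*
**	Cmp_entry orders bad sector map entries by cylinder, then track,
**	then offset; it is the comparison routine handed to qsort in
**	add_flaw.
*/
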
cmp_entry(a, b)
bs_entry	*a;
bs_entry	*b;
{
	if(a->bs_cyl == b->bs_cyl) {
		if(a->bs_trk == b->bs_trk) {
			if(a->bs_offset == b->bs_offset)
				return 0;
			else if(a->bs_offset < b->bs_offset)
				return -1;
		}
		else if(a->bs_trk < b->bs_trk)
			return -1;
	}
	else if(a->bs_cyl < b->bs_cyl)
		return -1;
	return 1;
}


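/*
**	Add_flaw adds an entry to the bad sector map unless it is out of
**	range or already present, then keeps the map sorted by position.
*/
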
add_flaw(entry)
bs_entry	*entry;
{
	extern	int	cmp_entry();
	bs_map		*bm = bad_map;
	register int	i;

	if(bm->bs_count > MAX_FLAWS)
		return;
	/* ignore entries that lie outside the drive's geometry */
	if (entry->bs_cyl >= CURRENT->vc_ncyl ||
	    entry->bs_trk >= CURRENT->vc_ntrak ||
	    entry->bs_offset >= CURRENT->vc_traksize)
		return;
	/* already in the map; just keep the smaller bs_how value */
	for(i=0; i<bm->bs_count; i++) {
		if((bm->list[i].bs_cyl == entry->bs_cyl) &&
		    (bm->list[i].bs_trk == entry->bs_trk) &&
		    (bm->list[i].bs_offset == entry->bs_offset)) {
			if((int)bm->list[i].bs_how > (int)entry->bs_how)
				bm->list[i].bs_how = entry->bs_how;
			return;
		}
	}
	bm->list[i] = *entry;
	bm->list[i].bs_alt.cylinder = 0;
	bm->list[i].bs_alt.track = 0;
	bm->list[i].bs_alt.sector = 0;
	bm->bs_count++;
	qsort((char *)&(bm->list[0]), (unsigned)bm->bs_count,
	    sizeof(bs_entry), cmp_entry);
}


/*
**	Is_in_map checks to see if a block is known to be bad already.
*/

boolean is_in_map(dskaddr)
dskadr	*dskaddr;
{
	register int	i;
	fmt_err		temp;

	for(i=0; i<bad_map->bs_count; i++) {
		temp = (*C_INFO.decode_pos)(bad_map->list[i]);
		if((temp.err_adr.cylinder == dskaddr->cylinder) &&
		    (temp.err_adr.track == dskaddr->track) &&
		    (temp.err_adr.sector == dskaddr->sector)) {
			return true;
		}
	}
	return false;
}


/*
**	Print_bad_sector_list prints every entry in the bad sector map
**	along with its replacement, if one has been assigned.
*/

print_bad_sector_list()
{
	register int	i;
	fmt_err		errloc;

	if(bad_map->bs_count == 0) {
		print("There are no bad sectors in bad sector map.\n");
		return;
	}
	print("The following sector%s known to be bad:\n",
	    (bad_map->bs_count == 1) ? " is" : "s are");
	indent();
	for(i=0; i<bad_map->bs_count; i++) {
		print("cyl %d, head %d, pos %d, len %d ",
			bad_map->list[i].bs_cyl,
			bad_map->list[i].bs_trk,
			bad_map->list[i].bs_offset,
			bad_map->list[i].bs_length);
		errloc = (*C_INFO.decode_pos)(bad_map->list[i]);
		if(errloc.err_stat & HEADER_ERROR) {
			printf("(Track #%d)", to_track(errloc.err_adr));
		}
		else {
			printf("(Sector #%d)", to_sector(errloc.err_adr));
		}
		if((bad_map->list[i].bs_alt.cylinder != 0) ||
		    (bad_map->list[i].bs_alt.track != 0) ||
		    (bad_map->list[i].bs_alt.sector != 0)) {
			indent();
			printf(" -> ");
			if(errloc.err_stat & HEADER_ERROR) {
				printf("Track %d",
				    to_track(bad_map->list[i].bs_alt));
			}
			else {
				printf("Sector %d",
				    to_sector(bad_map->list[i].bs_alt));
			}
			exdent(1);
		}
		printf(".\n");
	}
	exdent(1);
}


/*
**	Load_free_table checks each block in the bad block relocation area
** to see if it is used. If it is, the free relocation block table is updated.
*/

load_free_table()
{
	register int	i, j;
	fmt_err		temp;

	/* Clear free table before starting */
	for(i = 0; i < (CURRENT->vc_ntrak * NUMREL); i++) {
		for(j=0; j < CURRENT->vc_nsec; j++)
			free_tbl[i][j].free_status = NOTALLOCATED;
	}
	for(i=0; i<bad_map->bs_count; i++)
		if((bad_map->list[i].bs_alt.cylinder != 0) ||
		    (bad_map->list[i].bs_alt.track != 0) ||
		    (bad_map->list[i].bs_alt.sector != 0)) {
			temp = (*C_INFO.decode_pos)(bad_map->list[i]);
			allocate(&(bad_map->list[i].bs_alt), temp.err_stat);
		}
}


/*
**	Allocate marks a replacement sector as used.
*/

allocate(dskaddr, status)
dskadr	*dskaddr;
long	status;
{
	register int	trk, sec;

	/* free_tbl is indexed by track, counted from the start of the
	   relocation area */
	trk = dskaddr->cylinder - (CURRENT->vc_ncyl - NUMSYS);
	if((trk < 0) || (trk >= NUMREL))
		return;
	trk *= CURRENT->vc_ntrak;
	trk += dskaddr->track;
	/* a header (track) relocation uses up the whole track */
	if(status & HEADER_ERROR)
		for(sec=0; sec<CURRENT->vc_nsec; sec++)
			free_tbl[trk][sec].free_status = ALLOCATED;
	else
		free_tbl[trk][dskaddr->sector].free_status = ALLOCATED;
}


/*
**	Mapping_collision returns true if a flaw that lies within the
**	relocation area falls on space already handed out as a
**	replacement for some other sector.
*/

boolean mapping_collision(entry)
bs_entry	*entry;
{
	register int	trk, sec;
	fmt_err		temp;

	trk = entry->bs_cyl - (CURRENT->vc_ncyl - NUMSYS);
	if((trk < 0) || (trk >= NUMREL))
		return false;
	trk *= CURRENT->vc_ntrak;
	trk += entry->bs_trk;
	temp = (*C_INFO.decode_pos)(*entry);
	/* if this relocation should take up the whole track */
	if(temp.err_stat & HEADER_ERROR) {
		for(sec=0; sec<CURRENT->vc_nsec; sec++)
			if(free_tbl[trk][sec].free_status == ALLOCATED)
				return true;
	}
	/* else just check the current sector */
	else {
		if(free_tbl[trk][temp.err_adr.sector].free_status == ALLOCATED)
			return true;
	}
	return false;
}


/*
**	Report_collision tells the operator about a relocation collision.
*/

report_collision()
{
	indent();
	print("Sector resides in relocation area ");
	printf("but it has a sector mapped to it already.\n");
	print("Please reformat disk with 0 patterns to eliminate problem.\n");
	exdent(1);
}


/*
**	Add_user_relocations adds an operator supplied flaw to the bad
**	sector map, warning about collisions with existing relocations.
*/

add_user_relocations(entry)
bs_entry	*entry;
{
	fmt_err		error;

	error = (*C_INFO.decode_pos)(*entry);
	if(is_in_map(&error.err_adr) == false) {
		if(mapping_collision(entry) == true)
			report_collision();
		entry->bs_how = operator;
		add_flaw(entry);
	}
	else {
		indent();
		print("Sector %d is already mapped out!\n",
		    to_sector(error.err_adr));
		exdent(1);
	}
}


/*
**	New_location allocates a replacement block given a bad block address.
**  The algorithm is fairly simple: it searches for the first free sector
**  that has the same sector number as the bad sector.  If no sector is
**  found then the drive should be considered bad, because a microcode bug
**  in the controller forces us to use the same sector number as the bad
**  sector for relocation purposes.  Using different tracks and cylinders
**  is ok of course.
*/

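/*
**	The search below handles three cases: a header error takes over an
**  entire spare track, an SMD controller must reuse the bad sector's
**  sector number, and any other flaw takes the first free spare sector.
*/
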
dskadr *new_location(entry)
bs_entry	*entry;
{
	register int	i, sec;
	static fmt_err	temp;
	static dskadr	newaddr;

	newaddr.cylinder = 0;
	newaddr.track = 0;
	newaddr.sector = 0;
	temp = (*C_INFO.decode_pos)(*entry);
	/* If it is outside of the user's data area */
	if(entry->bs_cyl >= CURRENT->vc_ncyl-NUMSYS) {
		/* if it is in the relocation area */
		if(entry->bs_cyl < (CURRENT->vc_ncyl - NUMMAP - NUMMNT)) {
			/* mark space as allocated */
			allocate(&temp.err_adr, temp.err_stat);
			return &temp.err_adr;
		}
		/* if it is in the map area forget about it */
		if(entry->bs_cyl != (CURRENT->vc_ncyl - NUMMAP - NUMMNT))
			return &temp.err_adr;
		/* otherwise treat maintenance cylinder normally */
	}
	if(temp.err_stat & (HEADER_ERROR)) {
		for(i = 0; i < (CURRENT->vc_ntrak * NUMREL); i++) {
			for(sec=0; sec < CURRENT->vc_nsec; sec++) {
				if(free_tbl[i][sec].free_status == ALLOCATED)
					break;
			}
			if(sec == CURRENT->vc_nsec) {
				for(sec = 0; sec < CURRENT->vc_nsec; sec++)
					free_tbl[i][sec].free_status=ALLOCATED;
				newaddr.cylinder = i / CURRENT->vc_ntrak +
				    (CURRENT->vc_ncyl - NUMSYS);
				newaddr.track = i % CURRENT->vc_ntrak;
				break;
			}
		}
	}
	else if(C_INFO.type == SMDCTLR) {
		for(i = 0; i < (CURRENT->vc_ntrak * NUMREL); i++) {
			if(free_tbl[i][temp.err_adr.sector].free_status !=
			    ALLOCATED) {
				free_tbl[i][temp.err_adr.sector].free_status =
				    ALLOCATED;
				newaddr.cylinder = i / CURRENT->vc_ntrak +
				    (CURRENT->vc_ncyl - NUMSYS);
				newaddr.track = i % CURRENT->vc_ntrak;
				newaddr.sector = temp.err_adr.sector;
				break;
			}
		}
	}
	else {
		for(i = 0; i < (CURRENT->vc_ntrak * NUMREL); i++) {
			for(sec=0; sec < CURRENT->vc_nsec; sec++)
				if(free_tbl[i][sec].free_status != ALLOCATED)
					break;
			if(sec < CURRENT->vc_nsec) {
				free_tbl[i][sec].free_status = ALLOCATED;
				newaddr.cylinder = i / CURRENT->vc_ntrak +
				    (CURRENT->vc_ncyl - NUMSYS);
				newaddr.track = i % CURRENT->vc_ntrak;
				newaddr.sector = sec;
				break;
			}
		}
	}
	return &newaddr;
}
