#ifndef lint
static char sccsid[] = "@(#)maps.c 1.1 (Berkeley/CCI) 07/05/86";
#endif


#include "vdfmt.h"


/*
**	Align_buf shifts a raw sector buffer right until its first longword
**	matches the given sync pattern.  Returns true if the sync pattern
**	was found, false otherwise.
*/

boolean align_buf(buf, sync)
unsigned long	*buf;
unsigned long	sync;
{
	register int	i, shift;

	/* find shift amount */
	for(shift=0; shift<32; shift++) {
		if((*buf >> shift) == sync) {
			if(shift == 0)		/* already aligned */
				return true;
			for(i=(512/sizeof(long))-1; i >= 0; i--) {
				*(buf+i+1) |= *(buf+i) << (32 - shift);
				*(buf+i) = *(buf+i) >> shift;
			}
			return true;
		}
	}
	return false;
}


/*
**	Looks for two maps in a row that are the same.
*/

boolean read_map(flags)
short	flags;
{
	register int	trk, i;
	dskadr		dskaddr;

	dskaddr.cylinder = (CURRENT->vc_ncyl - 1) | flags;
	for(i=0; i<100; i++)
		scratch[i] = -1;
	for(trk=0; trk<CURRENT->vc_ntrak; trk++) {
		dskaddr.track = trk;
		dskaddr.sector = 0;
		if(access_dsk((char *)save,&dskaddr,RD,CURRENT->vc_nsec,1) & HRDERR)
			continue;
		if(blkcmp((char *)scratch, (char *)save, bytes_trk) == true) {
			blkcopy((char *)save, (char *)bad_map, bytes_trk);
			if(bad_map->bs_count <= MAX_FLAWS) {
				/* sanity check each entry against the drive geometry */
				for(i=0; i<bad_map->bs_count; i++) {
					if(bad_map->list[i].bs_cyl >=
					    CURRENT->vc_ncyl)
						break;
					if(bad_map->list[i].bs_trk >=
					    CURRENT->vc_ntrak)
						break;
					if(bad_map->list[i].bs_offset >=
					    CURRENT->vc_traksize)
						break;
				}
				if(i == bad_map->bs_count) {
					load_free_table();
					return true;
				}
			}
			/* map was bogus; start over with an empty one */
			blkzero(bad_map, bytes_trk);
			bad_map->bs_id = 0;
			bad_map->bs_max = MAX_FLAWS;
		}
		blkcopy((char *)save, (char *)scratch, bytes_trk);
	}
	return false;
}


/*
**	Read_bad_sector_map loads the bad sector map from the last cylinder.
**	If no valid map can be found, the map is reconstructed from the
**	drive's existing relocations.
*/

boolean read_bad_sector_map()
{
	dskadr	dskaddr;

	dskaddr.cylinder = CURRENT->vc_ncyl - 1;
	dskaddr.track = 0;
	dskaddr.sector = 0;
	/* start with nothing in map */
	blkzero(bad_map, bytes_trk);
	bad_map->bs_id = 0;
	bad_map->bs_max = MAX_FLAWS;
	if(C_INFO.type == SMD_ECTLR) {
		access_dsk((char *)save, &dskaddr, RD_RAW, 1, 1);
		if(align_buf((unsigned long *)save, CDCSYNC) == true) {
			read_flaw_map();
			return false;
		}
		if(read_map(NRM) == false) {
			get_smde_relocations();
			return false;
		}
	}
	else if(read_map(WPT) == false) {
		get_relocations_the_hard_way();
		return false;
	}
	return true;
}


/*
**	Get_relocations_the_hard_way scans every track in the user data area
**	and records each sector that is accessed through an alternate.
*/

get_relocations_the_hard_way()
{
	register int	cyl, trk;
	register int	status;
	dskadr		dskaddr;

	dskaddr.sector = 0;
	/* scan each sector to see if it is relocated and take note if it is */
	for(cyl=0; cyl<CURRENT->vc_ncyl-NUMSYS; cyl++) {
		dskaddr.cylinder = cyl;
		for(trk=0; trk<CURRENT->vc_ntrak; trk++) {
			dskaddr.track = trk;
			status = access_dsk((char *)scratch, &dskaddr,
			    RD, CURRENT->vc_nsec, 1);
			if(status & ALTACC)
				get_track_relocations(dskaddr);
		}
	}
	load_free_table();
}


/*
**	Get_track_relocations checks each sector on a track and adds a flaw
**	map entry for every sector that is accessed through an alternate.
*/

get_track_relocations(dskaddr)
dskadr	dskaddr;
{
	register int	status;
	bs_entry	temp;
	fmt_err		error;

	for(dskaddr.sector=0; dskaddr.sector<CURRENT->vc_nsec; dskaddr.sector++) {
		status = access_dsk((char *)scratch, &dskaddr, RD, 1, 1);
		if(status & ALTACC) {
			error.err_adr = dskaddr;
			error.err_stat = DATA_ERROR;
			temp = (*C_INFO.code_pos)(error);
			temp.bs_how = operator;
			add_flaw(&temp);
		}
	}
}


/*
**	Remove_user_relocations removes an operator-added relocation from the
**	bad sector map and reformats both the bad sector and its alternate
**	for normal use.
*/

remove_user_relocations(entry)
bs_entry	entry;
{
	register int	i, j;
	fmt_err		temp;
	fmt_err		error;
	bs_entry	*ptr;

	error = (*C_INFO.decode_pos)(entry);
	if(is_in_map(&error.err_adr) == true) {
		ptr = bad_map->list;
		for(i=0; i<bad_map->bs_count; i++) {
			temp = (*C_INFO.decode_pos)(*ptr);
			if((ptr->bs_how == operator) &&
			    (temp.err_adr.cylinder == error.err_adr.cylinder) &&
			    (temp.err_adr.track == error.err_adr.track) &&
			    (temp.err_adr.sector == error.err_adr.sector)) {
				if(temp.err_stat & HEADER_ERROR)
					remove_track(temp, ptr);
				else
					remove_sector(temp, ptr);
				/* squeeze the entry out of the map */
				for(j=i+1; j<bad_map->bs_count; j++)
					bad_map->list[j-1] = bad_map->list[j];
				bad_map->bs_count--;
				return;
			}
			ptr++;
		}
	}
	else {
		indent();
		print("Sector %d is not in bad sector map!\n",
		    to_sector(error.err_adr));
		exdent(1);
	}
}


/*
**	Remove_sector reformats a relocated sector and its alternate for
**	normal use again.
*/

remove_sector(error, entry)
fmt_err		error;
bs_entry	*entry;
{
	format_sectors(&error.err_adr, &error.err_adr, NRM, 1);
	format_sectors(&entry->bs_alt, &entry->bs_alt, NRM, 1);
}


/*
**	Remove_track reformats a relocated track and its alternate for
**	normal use again.
*/

remove_track(error, entry)
fmt_err		error;
bs_entry	*entry;
{
	format_sectors(&error.err_adr, &error.err_adr, NRM, (long)CURRENT->vc_nsec);
	format_sectors(&entry->bs_alt, &entry->bs_alt, NRM, (long)CURRENT->vc_nsec);
}


/*
**	Write_bad_sector_map writes the in-core bad sector map out to every
**	track of the map cylinder.
*/

write_bad_sector_map()
{
	register int	trk, sec;
	dskadr		dskaddr;

	dskaddr.cylinder = (CURRENT->vc_ncyl - NUMMAP);
	for(trk=0; trk<CURRENT->vc_ntrak; trk++) {
		for(sec = 0; sec < CURRENT->vc_nsec; sec++) {
			blkcopy((char *)&bs_map_space[sec * SECSIZ],
			    (char *)scratch, SECSIZ);
			dskaddr.track = trk;
			dskaddr.sector = sec;
			format_sectors(&dskaddr, &dskaddr, WPT, 1);
		}
	}
}


/*
**	Zero_bad_sector_map clears the alternate address of every entry in
**	the bad sector map and rebuilds the free replacement block table.
*/

zero_bad_sector_map()
{
	bs_map		*bm = bad_map;
	register int	i;
	dskadr		zero;

	zero.cylinder = 0;
	zero.track = 0;
	zero.sector = 0;
	for(i=0; i<bm->bs_count; i++)
		bm->list[i].bs_alt = zero;
	load_free_table();
}


/*
**	Read_flaw_map reads the raw flaw data recorded on each track (CDC
**	format) and enters the flaws found there in the bad sector map.
*/

read_flaw_map()
{
	register int	cyl, trk;
	dskadr		dskaddr;
	flaw		buffer;

	dskaddr.sector = 0;
	for (cyl=0; cyl<CURRENT->vc_ncyl; cyl++) {
		dskaddr.cylinder = cyl;
		for (trk=0; trk<CURRENT->vc_ntrak; trk++) {
			dskaddr.track = trk;
			access_dsk((char *)&buffer, &dskaddr, RD_RAW, 1, 1);
			if(align_buf((unsigned long *)&buffer, CDCSYNC) == true) {
				add_flaw_entries(&buffer);
				continue;
			}
		}
	}
	load_free_table();
}


/*
**	Get_smde_relocations scans the relocation area for tracks and sectors
**	that are already in use as alternates and rebuilds the bad sector map
**	from the addresses recorded in their headers.
*/

get_smde_relocations()
{
	register int	cyl, trk, sec;
	smde_hdr	buffer;
	dskadr		dskaddr;
	fmt_err		bad;
	bs_entry	temp;
	boolean		bad_track;

	/* Read any old drive relocations */
	for(cyl=0; cyl<NUMREL; cyl++) {
		dskaddr.cylinder = CURRENT->vc_ncyl - NUMSYS + cyl;
		for(trk=0; trk<CURRENT->vc_ntrak; trk++) {
			dskaddr.track = trk;
			bad_track = true;
			for(sec=0; sec<CURRENT->vc_nsec; sec++) {
				dskaddr.sector = sec;
				access_dsk((char *)&buffer, &dskaddr, RD_RAW, 1, 1);
				if(align_buf((unsigned long *)&buffer, SMDE1SYNC) == false) {
					bad_track = false;
					break;
				}
			}
			/* whole track is in use as the alternate for a bad track */
			if(bad_track == true) {
				dskaddr.sector = 0;
				bad.err_adr.cylinder = buffer.alt_cyl;
				bad.err_adr.track = buffer.alt_trk;
				bad.err_adr.sector = 0;
				bad.err_stat = HEADER_ERROR;
				temp = (*C_INFO.code_pos)(bad);
				temp.bs_alt = dskaddr;
				temp.bs_how = scanning;
				add_flaw(&temp);
				continue;
			}
			/* otherwise pick up any individually relocated sectors */
			for(sec=0; sec<CURRENT->vc_nsec; sec++) {
				dskaddr.sector = sec;
				access_dsk((char *)&buffer, &dskaddr, RD_RAW, 1, 1);
				if(align_buf((unsigned long *)&buffer, SMDE1SYNC) == true) {
					bad.err_adr.cylinder = buffer.alt_cyl;
					bad.err_adr.track = buffer.alt_trk;
					bad.err_adr.sector = buffer.alt_sec;
					bad.err_stat = DATA_ERROR;
					temp = (*C_INFO.code_pos)(bad);
					temp.bs_alt = dskaddr;
					temp.bs_how = scanning;
					add_flaw(&temp);
				}
			}
		}
	}
	load_free_table();
}


/*
**	Add_flaw_entries enters each flaw recorded in a track's flaw data
**	into the bad sector map.
*/

add_flaw_entries(buffer)
flaw	*buffer;
{
	register int	i;
	bs_entry	temp;

	temp.bs_cyl = buffer->flaw_cyl & 0x7fff;	/* clear off bad track bit */
	temp.bs_trk = buffer->flaw_trk;
	for(i=0; i<4; i++) {
		if(buffer->flaw_pos[i].flaw_length != 0) {
			temp.bs_offset = buffer->flaw_pos[i].flaw_offset;
			temp.bs_length = buffer->flaw_pos[i].flaw_length;
			temp.bs_alt.cylinder = 0;
			temp.bs_alt.track = 0;
			temp.bs_alt.sector = 0;
			temp.bs_how = flaw_map;
			add_flaw(&temp);
		}
	}
}


/*
**	Cmp_entry orders bad sector map entries by cylinder, track and
**	offset (qsort comparison routine).
*/

cmp_entry(a, b)
bs_entry	*a;
bs_entry	*b;
{
	if(a->bs_cyl == b->bs_cyl) {
		if(a->bs_trk == b->bs_trk) {
			if(a->bs_offset == b->bs_offset)
				return 0;
			else if(a->bs_offset < b->bs_offset)
				return -1;
		}
		else if(a->bs_trk < b->bs_trk)
			return -1;
	}
	else if(a->bs_cyl < b->bs_cyl)
		return -1;
	return 1;
}


/*
**	Add_flaw adds an entry to the bad sector map (unless it is already
**	there) and keeps the map sorted.
*/

add_flaw(entry)
bs_entry	*entry;
{
	extern int	cmp_entry();
	bs_map		*bm = bad_map;
	register int	i;

	if(bm->bs_count > MAX_FLAWS)
		return;
	for(i=0; i<bm->bs_count; i++) {
		if((bm->list[i].bs_cyl == entry->bs_cyl) &&
		    (bm->list[i].bs_trk == entry->bs_trk) &&
		    (bm->list[i].bs_offset == entry->bs_offset)) {
			if((int)bm->list[i].bs_how > (int)entry->bs_how)
				bm->list[i].bs_how = entry->bs_how;
			return;
		}
	}
	bm->list[i] = *entry;
	bm->list[i].bs_alt.cylinder = 0;
	bm->list[i].bs_alt.track = 0;
	bm->list[i].bs_alt.sector = 0;
	bm->bs_count++;
	qsort((char *)&(bm->list[0]), (unsigned)bm->bs_count,
	    sizeof(bs_entry), cmp_entry);
}


/*
**	Is_in_map checks to see if a block is known to be bad already.
*/

boolean is_in_map(dskaddr)
dskadr	*dskaddr;
{
	register int	i;
	fmt_err		temp;

	for(i=0; i<bad_map->bs_count; i++) {
		temp = (*C_INFO.decode_pos)(bad_map->list[i]);
		if((temp.err_adr.cylinder == dskaddr->cylinder) &&
		    (temp.err_adr.track == dskaddr->track) &&
		    (temp.err_adr.sector == dskaddr->sector)) {
			return true;
		}
	}
	return false;
}


/*
**	Print_bad_sector_list prints every entry in the bad sector map,
**	along with its alternate if one has been assigned.
*/

print_bad_sector_list()
{
	register int	i;
	fmt_err		errloc;

	if(bad_map->bs_count == 0) {
		print("There are no bad sectors in bad sector map.\n");
		return;
	}
	print("The following sector%s known to be bad:\n",
	    (bad_map->bs_count == 1) ? " is" : "s are");
	indent();
	for(i=0; i<bad_map->bs_count; i++) {
		print("cyl %d, head %d, pos %d, len %d ",
		    bad_map->list[i].bs_cyl,
		    bad_map->list[i].bs_trk,
		    bad_map->list[i].bs_offset,
		    bad_map->list[i].bs_length);
		errloc = (*C_INFO.decode_pos)(bad_map->list[i]);
		if(errloc.err_stat & HEADER_ERROR) {
			printf("(Track #%d)", to_track(errloc.err_adr));
		}
		else {
			printf("(Sector #%d)", to_sector(errloc.err_adr));
		}
		if((bad_map->list[i].bs_alt.cylinder != 0) ||
		    (bad_map->list[i].bs_alt.track != 0) ||
		    (bad_map->list[i].bs_alt.sector != 0)) {
			indent();
			printf(" -> ");
			if(errloc.err_stat & HEADER_ERROR) {
				printf("Track %d",
				    to_track(bad_map->list[i].bs_alt));
			}
			else {
				printf("Sector %d",
				    to_sector(bad_map->list[i].bs_alt));
			}
			exdent(1);
		}
		printf(".\n");
	}
	exdent(1);
}


/*
**	Load_free_table checks each block in the bad block relocation area
**	to see if it is used.  If it is, the free relocation block table is
**	updated.
*/

load_free_table()
{
	register int	i, j;
	fmt_err		temp;

	/* Clear free table before starting */
	for(i = 0; i < (CURRENT->vc_ntrak * NUMREL); i++) {
		for(j=0; j < CURRENT->vc_nsec; j++)
			free_tbl[i][j].free_status = NOTALLOCATED;
	}
	for(i=0; i<bad_map->bs_count; i++)
		if((bad_map->list[i].bs_alt.cylinder != 0) ||
		    (bad_map->list[i].bs_alt.track != 0) ||
		    (bad_map->list[i].bs_alt.sector != 0)) {
			temp = (*C_INFO.decode_pos)(bad_map->list[i]);
			allocate(&(bad_map->list[i].bs_alt), temp.err_stat);
		}
}


/*
**	Allocate marks a replacement sector as used.
*/

allocate(dskaddr, status)
dskadr	*dskaddr;
long	status;
{
	register int	trk, sec;

	trk = dskaddr->cylinder - (CURRENT->vc_ncyl - NUMSYS);
	if((trk < 0) || (trk >= NUMREL))
		return;
	trk *= CURRENT->vc_ntrak;
	trk += dskaddr->track;
	if(status & HEADER_ERROR)
		for(sec=0; sec<CURRENT->vc_nsec; sec++)
			free_tbl[trk][sec].free_status = ALLOCATED;
	else
		free_tbl[trk][dskaddr->sector].free_status = ALLOCATED;
}


/*
**	Mapping_collision checks whether a bad block that lies in the
**	relocation area already has another sector mapped onto it.
*/

boolean mapping_collision(entry)
bs_entry	*entry;
{
	register int	trk, sec;
	fmt_err		temp;

	trk = entry->bs_cyl - (CURRENT->vc_ncyl - NUMSYS);
	if((trk < 0) || (trk >= NUMREL))
		return false;
	trk *= CURRENT->vc_ntrak;
	trk += entry->bs_trk;
	temp = (*C_INFO.decode_pos)(*entry);
	/* if this relocation should take up the whole track */
	if(temp.err_stat & HEADER_ERROR) {
		for(sec=0; sec<CURRENT->vc_nsec; sec++)
			if(free_tbl[trk][sec].free_status == ALLOCATED)
				return true;
	}
	/* else just check the current sector */
	else {
		if(free_tbl[trk][temp.err_adr.sector].free_status == ALLOCATED)
			return true;
	}
	return false;
}


/*
**	Report_collision tells the operator that a bad sector in the
**	relocation area already has a sector mapped to it.
*/

report_collision()
{
	indent();
	print("Sector resides in relocation area ");
	printf("but it has a sector mapped to it already.\n");
	print("Please reformat disk with 0 patterns to eliminate problem.\n");
	exdent(1);
}


/*
**	Add_user_relocations adds an operator-specified bad sector to the
**	bad sector map, unless it is already mapped out.
*/

add_user_relocations(entry)
bs_entry	*entry;
{
	fmt_err		error;

	error = (*C_INFO.decode_pos)(*entry);
	if(is_in_map(&error.err_adr) == false) {
		if(mapping_collision(entry) == true)
			report_collision();
		entry->bs_how = operator;
		add_flaw(entry);
	}
	else {
		indent();
		print("Sector %d is already mapped out!\n",
		    to_sector(error.err_adr));
		exdent(1);
	}
}


/*
**	New_location allocates a replacement block given a bad block address.
**	The algorithm is fairly simple; it searches for the first free sector
**	that has the same sector number as the bad sector.  If no such sector
**	is found, the drive should be considered bad because a microcode bug
**	in the controller forces us to use the same sector number as the bad
**	sector for relocation purposes.  Using different tracks and cylinders
**	is ok, of course.
*/

dskadr *new_location(entry)
bs_entry	*entry;
{
	register int	i, sec;
	static fmt_err	temp;
	static dskadr	newaddr;

	newaddr.cylinder = 0;
	newaddr.track = 0;
	newaddr.sector = 0;
	temp = (*C_INFO.decode_pos)(*entry);
	/* If it is outside of the user's data area */
	if(entry->bs_cyl >= CURRENT->vc_ncyl-NUMSYS) {
		/* if it is in the relocation area */
		if(entry->bs_cyl < (CURRENT->vc_ncyl - NUMMAP - NUMMNT)) {
			/* mark space as allocated */
			allocate(&temp.err_adr, temp.err_stat);
			return &temp.err_adr;
		}
		/* if it is in the map area forget about it */
		if(entry->bs_cyl != (CURRENT->vc_ncyl - NUMMAP - NUMMNT))
			return &temp.err_adr;
		/* otherwise treat maintenance cylinder normally */
	}
	if(temp.err_stat & HEADER_ERROR) {
		/* bad track: find a track with no sectors allocated yet */
		for(i = 0; i < (CURRENT->vc_ntrak * NUMREL); i++) {
			for(sec=0; sec < CURRENT->vc_nsec; sec++) {
				if(free_tbl[i][sec].free_status == ALLOCATED)
					break;
			}
			if(sec == CURRENT->vc_nsec) {
				for(sec = 0; sec < CURRENT->vc_nsec; sec++)
					free_tbl[i][sec].free_status = ALLOCATED;
				newaddr.cylinder = i / CURRENT->vc_ntrak +
				    (CURRENT->vc_ncyl - NUMSYS);
				newaddr.track = i % CURRENT->vc_ntrak;
				break;
			}
		}
	}
	else if(C_INFO.type == SMDCTLR) {
		/* must keep the same sector number as the bad sector */
		for(i = 0; i < (CURRENT->vc_ntrak * NUMREL); i++) {
			if(free_tbl[i][temp.err_adr.sector].free_status !=
			    ALLOCATED) {
				free_tbl[i][temp.err_adr.sector].free_status =
				    ALLOCATED;
				newaddr.cylinder = i / CURRENT->vc_ntrak +
				    (CURRENT->vc_ncyl - NUMSYS);
				newaddr.track = i % CURRENT->vc_ntrak;
				newaddr.sector = temp.err_adr.sector;
				break;
			}
		}
	}
	else {
		/* any free sector will do */
		for(i = 0; i < (CURRENT->vc_ntrak * NUMREL); i++) {
			for(sec=0; sec < CURRENT->vc_nsec; sec++)
				if(free_tbl[i][sec].free_status != ALLOCATED)
					break;
			if(sec < CURRENT->vc_nsec) {
				free_tbl[i][sec].free_status = ALLOCATED;
				newaddr.cylinder = i / CURRENT->vc_ntrak +
				    (CURRENT->vc_ncyl - NUMSYS);
				newaddr.track = i % CURRENT->vc_ntrak;
				newaddr.sector = sec;
				break;
			}
		}
	}
	return &newaddr;
}