#include "stdinc.h"
#include "dat.h"
#include "fns.h"
#include "error.h"

/*
 * Lock watcher. Check that blocks are always locked in the proper
 * top-down order: a parent before its children.
 *
 * This is REALLY slow, and it won't work when the blocks aren't
 * arranged in a tree (e.g., after the first snapshot). But it's great
 * for debugging.
 */
enum
{
	MaxLock = 16,
	HashSize = 1009,
};

/*
 * Thread-specific watch state.
 */
typedef struct WThread WThread;
struct WThread
{
	Block *b[MaxLock];	/* blocks currently held */
	uint nb;
	uint pid;
};

typedef struct WMap WMap;
typedef struct WEntry WEntry;

struct WEntry
{
	uchar c[VtScoreSize];	/* child score */
	uchar p[VtScoreSize];	/* parent score */
	int off;		/* offset of c within p */

	WEntry *cprev;	/* doubly linked hash chain keyed on c */
	WEntry *cnext;
	WEntry *pprev;	/* doubly linked hash chain keyed on p */
	WEntry *pnext;
};

struct WMap
{
	VtLock *lk;

	WEntry *hchild[HashSize];
	WEntry *hparent[HashSize];
};

static WMap map;
static void **wp;
static uint blockSize;
static WEntry *pool;
uint bwatchDisabled;

static uint
hash(uchar score[VtScoreSize])
{
	uint i, h;

	h = 0;
	for(i=0; i<VtScoreSize; i++)
		h = h*37 + score[i];
	return h%HashSize;
}

static void
freeWEntry(WEntry *e)
{
	memset(e, 0, sizeof(WEntry));
	e->pnext = pool;	/* pnext doubles as the free-list link */
	pool = e;
}

static WEntry*
allocWEntry(void)
{
	int i;
	WEntry *w;

	w = pool;
	if(w == nil){
		w = vtMemAllocZ(1024*sizeof(WEntry));
		for(i=0; i<1024; i++)
			freeWEntry(&w[i]);
		w = pool;
	}
	pool = w->pnext;
	memset(w, 0, sizeof(WEntry));
	return w;
}

/*
 * remove all dependencies with score as a parent
 */
static void
_bwatchResetParent(uchar *score)
{
	WEntry *w, *next;
	uint h;

	h = hash(score);
	for(w=map.hparent[h]; w; w=next){
		next = w->pnext;
		if(memcmp(w->p, score, VtScoreSize) == 0){
			if(w->pnext)
				w->pnext->pprev = w->pprev;
			if(w->pprev)
				w->pprev->pnext = w->pnext;
			else
				map.hparent[h] = w->pnext;
			if(w->cnext)
				w->cnext->cprev = w->cprev;
			if(w->cprev)
				w->cprev->cnext = w->cnext;
			else
				map.hchild[hash(w->c)] = w->cnext;
			freeWEntry(w);
		}
	}
}

/*
 * remove all dependencies with score as a child
 */
static void
_bwatchResetChild(uchar *score)
{
	WEntry *w, *next;
	uint h;

	h = hash(score);
	for(w=map.hchild[h]; w; w=next){
		next = w->cnext;
		if(memcmp(w->c, score, VtScoreSize) == 0){
			if(w->pnext)
				w->pnext->pprev = w->pprev;
			if(w->pprev)
				w->pprev->pnext = w->pnext;
			else
				map.hparent[hash(w->p)] = w->pnext;
			if(w->cnext)
				w->cnext->cprev = w->cprev;
			if(w->cprev)
				w->cprev->cnext = w->cnext;
			else
				map.hchild[h] = w->cnext;
			freeWEntry(w);
		}
	}
}

static uchar*
parent(uchar c[VtScoreSize], int *off)
{
	WEntry *w;
	uint h;

	h = hash(c);
	for(w=map.hchild[h]; w; w=w->cnext)
		if(memcmp(w->c, c, VtScoreSize) == 0){
			*off = w->off;
			return w->p;
		}
	return nil;
}

static void
addChild(uchar p[VtScoreSize], uchar c[VtScoreSize], int off)
{
	uint h;
	WEntry *w;

	w = allocWEntry();
	memmove(w->p, p, VtScoreSize);
	memmove(w->c, c, VtScoreSize);
	w->off = off;

	h = hash(p);
	w->pnext = map.hparent[h];
	if(w->pnext)
		w->pnext->pprev = w;
	map.hparent[h] = w;

	h = hash(c);
	w->cnext = map.hchild[h];
	if(w->cnext)
		w->cnext->cprev = w;
	map.hchild[h] = w;
}
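
/*
 * Illustrative only, and compiled out: walk the recorded parent
 * chain for a score using parent() above and print the path to the
 * root. This is a hypothetical debugging aid, not part of the
 * original interface; like the internal routines above, it assumes
 * the caller holds map.lk.
 */
#ifdef notdef
static void
dumpAncestors(uchar score[VtScoreSize])
{
	int off;
	uchar *s, *p;

	fprint(2, "%V", score);
	for(s=score; (p = parent(s, &off)) != nil; s=p)
		fprint(2, " <- %V[%d]", p, off);	/* parent score and offset of s within it */
	fprint(2, "\n");
}
#endif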

void
bwatchReset(uchar score[VtScoreSize])
{
	vtLock(map.lk);
	_bwatchResetParent(score);
	_bwatchResetChild(score);
	vtUnlock(map.lk);
}

void
bwatchInit(void)
{
	map.lk = vtLockAlloc();
	wp = privalloc();
	*wp = nil;
}

void
bwatchSetBlockSize(uint bs)
{
	blockSize = bs;
}

static WThread*
getWThread(void)
{
	WThread *w;

	w = *wp;
	if(w == nil || w->pid != getpid()){
		w = vtMemAllocZ(sizeof(WThread));
		*wp = w;
		w->pid = getpid();
	}
	return w;
}

/*
 * Derive dependencies from the contents of b.
 */
void
bwatchDependency(Block *b)
{
	int i, epb, ppb;
	Entry e;

	if(bwatchDisabled)
		return;

	vtLock(map.lk);
	_bwatchResetParent(b->score);

	switch(b->l.type){
	case BtData:
		break;

	case BtDir:
		epb = blockSize / VtEntrySize;
		for(i=0; i<epb; i++){
			entryUnpack(&e, b->data, i);
			if(!(e.flags & VtEntryActive))
				continue;
			addChild(b->score, e.score, i);
		}
		break;

	default:
		ppb = blockSize / VtScoreSize;
		for(i=0; i<ppb; i++)
			addChild(b->score, b->data+i*VtScoreSize, i);
		break;
	}
	vtUnlock(map.lk);
}

static int
depth(uchar *s)
{
	int d, x;

	d = -1;
	while(s){
		d++;
		s = parent(s, &x);
	}
	return d;
}

static int
lockConflicts(uchar xhave[VtScoreSize], uchar xwant[VtScoreSize])
{
	uchar *have, *want;
	int havedepth, wantdepth, havepos, wantpos;

	havedepth = depth(xhave);
	wantdepth = depth(xwant);

	/*
	 * walk one or the other up until they're both
	 * at the same level.
	 */
	havepos = -1;
	wantpos = -1;
	have = xhave;
	want = xwant;
	while(wantdepth > havedepth){
		wantdepth--;
		want = parent(want, &wantpos);
	}
	while(havedepth > wantdepth){
		havedepth--;
		have = parent(have, &havepos);
	}

	/*
	 * walk them up simultaneously until we reach
	 * a common ancestor.
	 */
	while(have && want && memcmp(have, want, VtScoreSize) != 0){
		have = parent(have, &havepos);
		want = parent(want, &wantpos);
	}

	/*
	 * not part of same tree. happens mainly with
	 * newly allocated blocks.
	 */
	if(!have || !want)
		return 0;

	/*
	 * never walked want: means we want to lock
	 * an ancestor of have. no no.
	 */
	if(wantpos == -1)
		return 1;

	/*
	 * never walked have: means we want to lock a
	 * descendant of have. that's okay.
	 */
	if(havepos == -1)
		return 0;

	/*
	 * walked both: they're from different places in the tree.
	 * require that the left one be locked before the right one,
	 * so holding a block to the right of the one being requested
	 * is a conflict.
	 * (this is questionable, but it puts a total order on the block tree.)
	 */
	return havepos > wantpos;
}

static void
stop(void)
{
	int fd;
	char buf[32];

	snprint(buf, sizeof buf, "#p/%d/ctl", getpid());
	fd = open(buf, OWRITE);
	write(fd, "stop", 4);
	close(fd);
}
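
/*
 * A minimal sketch (compiled out) of the ordering lockConflicts
 * enforces, using fabricated scores: for a parent P with children
 * A at offset 0 and B at offset 1, locking an ancestor of a held
 * block conflicts, locking a descendant doesn't, and siblings must
 * be taken left to right. Hypothetical test code, not part of the
 * original interface.
 */
#ifdef notdef
static void
lockConflictsExample(void)
{
	uchar p[VtScoreSize], a[VtScoreSize], b[VtScoreSize];

	memset(p, 0x01, VtScoreSize);
	memset(a, 0x02, VtScoreSize);
	memset(b, 0x03, VtScoreSize);
	addChild(p, a, 0);	/* A is P's child at offset 0 */
	addChild(p, b, 1);	/* B is P's child at offset 1 */

	assert(lockConflicts(a, p) == 1);	/* hold child, want ancestor: bad */
	assert(lockConflicts(p, a) == 0);	/* hold ancestor, want child: fine */
	assert(lockConflicts(b, a) == 1);	/* hold right sibling, want left: bad */
	assert(lockConflicts(a, b) == 0);	/* hold left sibling, want right: fine */

	bwatchReset(p);	/* drop the fabricated entries */
}
#endif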

/*
 * Check that the calling thread can validly lock b: that is, that
 * it doesn't already hold a lock that must be taken after b
 * (a descendant of b, or a block to b's right in the tree order).
 */
void
bwatchLock(Block *b)
{
	int i;
	WThread *w;

	if(bwatchDisabled)
		return;

	if(b->part != PartData)
		return;

	vtLock(map.lk);
	w = getWThread();
	for(i=0; i<w->nb; i++){
		if(lockConflicts(w->b[i]->score, b->score)){
			fprint(2, "%d: have block %V; shouldn't lock %V\n",
				w->pid, w->b[i]->score, b->score);
			stop();
		}
	}
	vtUnlock(map.lk);
	if(w->nb >= MaxLock){
		fprint(2, "%d: too many blocks held\n", w->pid);
		stop();
	}else
		w->b[w->nb++] = b;
}

/*
 * Note that the calling thread is about to unlock b.
 */
void
bwatchUnlock(Block *b)
{
	int i;
	WThread *w;

	if(bwatchDisabled)
		return;

	if(b->part != PartData)
		return;

	w = getWThread();
	for(i=0; i<w->nb; i++)
		if(w->b[i] == b)
			break;
	if(i >= w->nb){
		fprint(2, "%d: unlock of unlocked block %V\n", w->pid, b->score);
		stop();
	}else
		w->b[i] = w->b[--w->nb];
}
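
/*
 * A minimal sketch (compiled out) of the intended call sequence,
 * with a hypothetical caller standing in for fossil's block cache.
 * The real calls are made from the cache layer; the exact points at
 * which it invokes these routines are assumptions here, as is the
 * 8192-byte block size.
 */
#ifdef notdef
static void
bwatchExample(Block *b)
{
	/* once, at file-system startup: */
	bwatchInit();
	bwatchSetBlockSize(8192);	/* must match the cache's block size */

	/* per block, bracketing the block's own lock: */
	bwatchLock(b);		/* before acquiring b's lock */
	bwatchDependency(b);	/* when b's contents load or its pointers change */
	bwatchUnlock(b);	/* after releasing b's lock */
}
#endif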