/*	$NetBSD: dict_cache.c,v 1.2 2017/02/14 01:16:49 christos Exp $	*/

/*++
/* NAME
/*	dict_cache 3
/* SUMMARY
/*	External cache manager
/* SYNOPSIS
/*	#include <dict_cache.h>
/*
/*	DICT_CACHE *dict_cache_open(dbname, open_flags, dict_flags)
/*	const char *dbname;
/*	int	open_flags;
/*	int	dict_flags;
/*
/*	void	dict_cache_close(cache)
/*	DICT_CACHE *cache;
/*
/*	const char *dict_cache_lookup(cache, cache_key)
/*	DICT_CACHE *cache;
/*	const char *cache_key;
/*
/*	int	dict_cache_update(cache, cache_key, cache_val)
/*	DICT_CACHE *cache;
/*	const char *cache_key;
/*	const char *cache_val;
/*
/*	int	dict_cache_delete(cache, cache_key)
/*	DICT_CACHE *cache;
/*	const char *cache_key;
/*
/*	int	dict_cache_sequence(cache, first_next, cache_key, cache_val)
/*	DICT_CACHE *cache;
/*	int	first_next;
/*	const char **cache_key;
/*	const char **cache_val;
/* AUXILIARY FUNCTIONS
/*	void	dict_cache_control(cache, name, value, ...)
/*	DICT_CACHE *cache;
/*	int	name;
/*
/*	typedef int (*DICT_CACHE_VALIDATOR_FN) (const char *cache_key,
/*		const char *cache_val, void *context);
/*
/*	const char *dict_cache_name(cache)
/*	DICT_CACHE *cache;
/* DESCRIPTION
/*	This module maintains external cache files with support
/*	for expiration. The underlying table must implement the
/*	"lookup", "update", "delete" and "sequence" operations.
/*
/*	Although this API is similar to the one documented in
/*	dict_open(3), there are subtle differences in the interaction
/*	between the iterators that access all cache elements, and
/*	other operations that access individual cache elements.
/*
/*	In particular, when a "sequence" or "cleanup" operation is
/*	in progress the cache intercepts requests to delete the
/*	"current" entry, as this would cause some databases to
/*	mis-behave. Instead, the cache implements a "delete behind"
/*	strategy, and deletes such an entry after the "sequence"
/*	or "cleanup" operation moves on to the next cache element.
/*	The "delete behind" strategy also affects the cache lookup
/*	and update operations as detailed below.
/*
/*	dict_cache_open() is a wrapper around the dict_open()
/*	function. It opens the specified cache and returns a handle
/*	that must be used for subsequent access. This function does
/*	not return in case of error.
/*
/*	dict_cache_close() closes the specified cache, releases
/*	memory that was allocated by dict_cache_open(), and terminates
/*	any thread that was started with dict_cache_control().
/*
/*	dict_cache_lookup() looks up the specified cache entry.
/*	The result value is a null pointer when the cache entry was
/*	not found, or when the entry is scheduled for "delete
/*	behind".
/*
/*	dict_cache_update() updates the specified cache entry. If
/*	the entry is scheduled for "delete behind", the delete
/*	operation is canceled (because of this, the cache must be
/*	opened with DICT_FLAG_DUP_REPLACE). This function does not
/*	return in case of error.
/*
/*	dict_cache_delete() removes the specified cache entry. If
/*	this is the "current" entry of a "sequence" operation, the
/*	entry is scheduled for "delete behind". The result value
/*	is zero when the entry was found.
/*
/*	dict_cache_sequence() iterates over the specified cache and
/*	returns each entry in an implementation-defined order. The
/*	result value is zero when a cache entry was found.
/*
/*	Important: programs must not use both dict_cache_sequence()
/*	and the built-in cache cleanup feature.
/*
/*	dict_cache_control() provides control over the built-in
/*	cache cleanup feature and logging. The arguments are a list
/*	of macros with zero or more arguments, terminated with
/*	CA_DICT_CACHE_CTL_END which has none. The following lists
/*	the macros and corresponding argument types.
/* .IP "CA_DICT_CACHE_CTL_FLAGS(int flags)"
/*	The arguments to this command are the bit-wise OR of zero
/*	or more of the following:
/* .RS
/* .IP CA_DICT_CACHE_CTL_FLAG_VERBOSE
/*	Enable verbose logging of cache activity.
/* .IP CA_DICT_CACHE_CTL_FLAG_EXP_SUMMARY
/*	Log cache statistics after each cache cleanup run.
/* .RE
/* .IP "CA_DICT_CACHE_CTL_INTERVAL(int interval)"
/*	The interval between cache cleanup runs. Specify a null
/*	validator or interval to stop cache cleanup.
/* .IP "CA_DICT_CACHE_CTL_VALIDATOR(DICT_CACHE_VALIDATOR_FN validator)"
/*	An application call-back routine that returns non-zero when
/*	a cache entry should be kept. The call-back function should
/*	not make changes to the cache. Specify a null validator or
/*	interval to stop cache cleanup.
/* .IP "CA_DICT_CACHE_CTL_CONTEXT(void *context)"
/*	Application context that is passed to the validator function.
/* .RE
/* .PP
/*	dict_cache_name() returns the name of the specified cache.
/*
/*	Arguments:
/* .IP "dbname, open_flags, dict_flags"
/*	These are passed unchanged to dict_open(). The cache must
/*	be opened with DICT_FLAG_DUP_REPLACE.
/* .IP cache
/*	Cache handle created with dict_cache_open().
/* .IP cache_key
/*	Cache lookup key.
/* .IP cache_val
/*	Information that is stored under a cache lookup key.
/* .IP first_next
/*	One of DICT_SEQ_FUN_FIRST (first cache element) or
/*	DICT_SEQ_FUN_NEXT (next cache element).
/* .sp
/*	Note: there is no "stop" request. To ensure that the "delete
/*	behind" strategy does not interfere with database access,
/*	allow dict_cache_sequence() to run to completion.
/* .IP table
/*	A bare dictionary handle.
/* DIAGNOSTICS
/*	When a request is satisfied, the lookup routine returns
/*	non-null, and the update, delete and sequence routines
/*	return zero. The cache->error value is zero when a request
/*	could not be satisfied because an item did not exist (delete,
/*	sequence) or if it could not be updated. The cache->error
/*	value is non-zero only when a request could not be satisfied,
/*	and the cause was a database error.
/*
/*	Cache access errors are logged with a warning message. To
/*	avoid spamming the log, each type of operation logs no more
/*	than one cache access error per second, per cache. Specify
/*	the DICT_CACHE_FLAG_VERBOSE flag (see above) to log all
/*	warnings.
/* BUGS
/*	There should be a way to suspend automatic program suicide
/*	until a cache cleanup run is completed. Some entries may
/*	never be removed when the process max_idle time is less
/*	than the time needed to make a full pass over the cache.
/*
/*	The delete-behind strategy assumes that all updates are
/*	made by a single process. Otherwise, delete-behind may
/*	remove an entry that was updated after it was scheduled for
/*	deletion.
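/* EXAMPLES
/* .ad
/* .fi
/*	The sketch below is for illustration only and is not part of
/*	the original interface description; the database name, the
/*	one-hour cleanup interval, and the validator policy (keep
/*	entries whose value is non-empty) are hypothetical.
/* .nf
/*
/*	static int keep_entry(const char *cache_key, const char *cache_val,
/*	                              void *context)
/*	{
/*	    return (cache_val != 0 && *cache_val != 0);
/*	}
/*
/*	DICT_CACHE *cache;
/*
/*	cache = dict_cache_open("btree:/var/lib/example_cache",
/*	                        O_CREAT | O_RDWR, DICT_FLAG_DUP_REPLACE);
/*	dict_cache_control(cache,
/*	                   CA_DICT_CACHE_CTL_INTERVAL(3600),
/*	                   CA_DICT_CACHE_CTL_VALIDATOR(keep_entry),
/*	                   CA_DICT_CACHE_CTL_CONTEXT((void *) 0),
/*	                   CA_DICT_CACHE_CTL_END);
/*	dict_cache_update(cache, "cache_key", "cache_value");
/*	(void) dict_cache_lookup(cache, "cache_key");
/*	dict_cache_close(cache);
/* .fi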
/* LICENSE
/* .ad
/* .fi
/*	The Secure Mailer license must be distributed with this software.
/* HISTORY
/* .ad
/* .fi
/*	A predecessor of this code was first written for the Postfix
/*	tlsmgr(8) daemon.
/* AUTHOR(S)
/*	Wietse Venema
/*	IBM T.J. Watson Research
/*	P.O. Box 704
/*	Yorktown Heights, NY 10598, USA
/*--*/

/* System library. */

#include <sys_defs.h>
#include <string.h>
#include <stdlib.h>

/* Utility library. */

#include <msg.h>
#include <dict.h>
#include <mymalloc.h>
#include <events.h>
#include <dict_cache.h>

/* Application-specific. */

 /*
  * XXX Deleting entries while enumerating a map can be tricky. Some map
  * types have a concept of cursor and support a "delete the current element"
  * operation. Some map types without cursors don't behave well when the
  * current first/next entry is deleted (example: with Berkeley DB < 2, the
  * "next" operation produces garbage). To avoid trouble, we delete an entry
  * after advancing the current first/next position beyond it; we use the
  * same strategy with application requests to delete the current entry.
  */

 /*
  * Opaque data structure. Use dict_cache_name() to access the name of the
  * underlying database.
  */
struct DICT_CACHE {
    char   *name;                       /* full name including proxy: */
    int     cache_flags;                /* see below */
    int     user_flags;                 /* logging */
    DICT   *db;                         /* database handle */
    int     error;                      /* last operation only */

    /* Delete-behind support. */
    char   *saved_curr_key;             /* "current" cache lookup key */
    char   *saved_curr_val;             /* "current" cache lookup result */

    /* Cleanup support. */
    int     exp_interval;               /* time between cleanup runs */
    DICT_CACHE_VALIDATOR_FN exp_validator;	/* expiration call-back */
    void   *exp_context;                /* call-back context */
    int     retained;                   /* entries retained in cleanup run */
    int     dropped;                    /* entries removed in cleanup run */

    /* Rate-limited logging support. */
    int     log_delay;
    time_t  upd_log_stamp;              /* last update warning */
    time_t  get_log_stamp;              /* last lookup warning */
    time_t  del_log_stamp;              /* last delete warning */
    time_t  seq_log_stamp;              /* last sequence warning */
};

#define DC_FLAG_DEL_SAVED_CURRENT_KEY	(1<<0)	/* delete-behind is scheduled */

 /*
  * Don't log cache access errors more than once per second.
  */
#define DC_DEF_LOG_DELAY	1

 /*
  * Macros to make obscure code more readable.
  */
#define DC_SCHEDULE_FOR_DELETE_BEHIND(cp) \
	((cp)->cache_flags |= DC_FLAG_DEL_SAVED_CURRENT_KEY)

#define DC_MATCH_SAVED_CURRENT_KEY(cp, cache_key) \
	((cp)->saved_curr_key && strcmp((cp)->saved_curr_key, (cache_key)) == 0)

#define DC_IS_SCHEDULED_FOR_DELETE_BEHIND(cp) \
	(/* NOT: (cp)->saved_curr_key && */ \
	((cp)->cache_flags & DC_FLAG_DEL_SAVED_CURRENT_KEY) != 0)

#define DC_CANCEL_DELETE_BEHIND(cp) \
	((cp)->cache_flags &= ~DC_FLAG_DEL_SAVED_CURRENT_KEY)

 /*
  * Special key to store the time of the last cache cleanup run completion.
  */
#define DC_LAST_CACHE_CLEANUP_COMPLETED	"_LAST_CACHE_CLEANUP_COMPLETED_"

/* dict_cache_lookup - load entry from cache */

const char *dict_cache_lookup(DICT_CACHE *cp, const char *cache_key)
{
    const char *myname = "dict_cache_lookup";
    const char *cache_val;
    DICT   *db = cp->db;

    /*
     * Search for the cache entry. Don't return an entry that is scheduled
     * for delete-behind.
     */
    if (DC_IS_SCHEDULED_FOR_DELETE_BEHIND(cp)
        && DC_MATCH_SAVED_CURRENT_KEY(cp, cache_key)) {
        if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
            msg_info("%s: key=%s (pretend not found - scheduled for deletion)",
                     myname, cache_key);
        DICT_ERR_VAL_RETURN(cp, DICT_ERR_NONE, (char *) 0);
    } else {
        cache_val = dict_get(db, cache_key);
        if (cache_val == 0 && db->error != 0)
            msg_rate_delay(&cp->get_log_stamp, cp->log_delay, msg_warn,
                           "%s: cache lookup for '%s' failed due to error",
                           cp->name, cache_key);
        if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
            msg_info("%s: key=%s value=%s", myname, cache_key,
                     cache_val ? cache_val : db->error ?
                     "error" : "(not found)");
        DICT_ERR_VAL_RETURN(cp, db->error, cache_val);
    }
}

/* dict_cache_update - save entry to cache */

int     dict_cache_update(DICT_CACHE *cp, const char *cache_key,
                                  const char *cache_val)
{
    const char *myname = "dict_cache_update";
    DICT   *db = cp->db;
    int     put_res;

    /*
     * Store the cache entry and cancel the delete-behind operation.
     */
    if (DC_IS_SCHEDULED_FOR_DELETE_BEHIND(cp)
        && DC_MATCH_SAVED_CURRENT_KEY(cp, cache_key)) {
        if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
            msg_info("%s: cancel delete-behind for key=%s", myname, cache_key);
        DC_CANCEL_DELETE_BEHIND(cp);
    }
    if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
        msg_info("%s: key=%s value=%s", myname, cache_key, cache_val);
    put_res = dict_put(db, cache_key, cache_val);
    if (put_res != 0)
        msg_rate_delay(&cp->upd_log_stamp, cp->log_delay, msg_warn,
                       "%s: could not update entry for %s", cp->name, cache_key);
    DICT_ERR_VAL_RETURN(cp, db->error, put_res);
}

/* dict_cache_delete - delete entry from cache */

int     dict_cache_delete(DICT_CACHE *cp, const char *cache_key)
{
    const char *myname = "dict_cache_delete";
    int     del_res;
    DICT   *db = cp->db;

    /*
     * Delete the entry, unless we would delete the current first/next entry.
     * In that case, schedule the "current" entry for delete-behind to avoid
     * mis-behavior by some databases.
     */
    if (DC_MATCH_SAVED_CURRENT_KEY(cp, cache_key)) {
        DC_SCHEDULE_FOR_DELETE_BEHIND(cp);
        if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
            msg_info("%s: key=%s (current entry - schedule for delete-behind)",
                     myname, cache_key);
        DICT_ERR_VAL_RETURN(cp, DICT_ERR_NONE, DICT_STAT_SUCCESS);
    } else {
        del_res = dict_del(db, cache_key);
        if (del_res != 0)
            msg_rate_delay(&cp->del_log_stamp, cp->log_delay, msg_warn,
                           "%s: could not delete entry for %s", cp->name, cache_key);
        if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
            msg_info("%s: key=%s (%s)", myname, cache_key,
                     del_res == 0 ? "found" :
                     db->error ? "error" : "not found");
        DICT_ERR_VAL_RETURN(cp, db->error, del_res);
    }
}

/* dict_cache_sequence - look up the first/next cache entry */

int     dict_cache_sequence(DICT_CACHE *cp, int first_next,
                                    const char **cache_key,
                                    const char **cache_val)
{
    const char *myname = "dict_cache_sequence";
    int     seq_res;
    const char *raw_cache_key;
    const char *raw_cache_val;
    char   *previous_curr_key;
    char   *previous_curr_val;
    DICT   *db = cp->db;

    /*
     * Find the first or next database entry. Hide the record with the cache
     * cleanup completion time stamp.
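     * (That record, DC_LAST_CACHE_CLEANUP_COMPLETED, is private bookkeeping
     * for the cleanup scheduler, not application data, so it is skipped here
     * rather than handed to the caller.)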
     */
    seq_res = dict_seq(db, first_next, &raw_cache_key, &raw_cache_val);
    if (seq_res == 0
        && strcmp(raw_cache_key, DC_LAST_CACHE_CLEANUP_COMPLETED) == 0)
        seq_res =
            dict_seq(db, DICT_SEQ_FUN_NEXT, &raw_cache_key, &raw_cache_val);
    if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
        msg_info("%s: key=%s value=%s", myname,
                 seq_res == 0 ? raw_cache_key : db->error ?
                 "(error)" : "(not found)",
                 seq_res == 0 ? raw_cache_val : db->error ?
                 "(error)" : "(not found)");
    if (db->error)
        msg_rate_delay(&cp->seq_log_stamp, cp->log_delay, msg_warn,
                       "%s: sequence error", cp->name);

    /*
     * Save the current cache_key and cache_val before they are clobbered by
     * our own delete operation below. This also prevents surprises when the
     * application accesses the database after this function returns.
     *
     * We also use the saved cache_key to protect the current entry against
     * application delete requests.
     */
    previous_curr_key = cp->saved_curr_key;
    previous_curr_val = cp->saved_curr_val;
    if (seq_res == 0) {
        cp->saved_curr_key = mystrdup(raw_cache_key);
        cp->saved_curr_val = mystrdup(raw_cache_val);
    } else {
        cp->saved_curr_key = 0;
        cp->saved_curr_val = 0;
    }

    /*
     * Delete behind.
     */
    if (db->error == 0 && DC_IS_SCHEDULED_FOR_DELETE_BEHIND(cp)) {
        DC_CANCEL_DELETE_BEHIND(cp);
        if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
            msg_info("%s: delete-behind key=%s value=%s",
                     myname, previous_curr_key, previous_curr_val);
        if (dict_del(db, previous_curr_key) != 0)
            msg_rate_delay(&cp->del_log_stamp, cp->log_delay, msg_warn,
                           "%s: could not delete entry for %s",
                           cp->name, previous_curr_key);
    }

    /*
     * Clean up previous iteration key and value.
     */
    if (previous_curr_key)
        myfree(previous_curr_key);
    if (previous_curr_val)
        myfree(previous_curr_val);

    /*
     * Return the result.
     */
    *cache_key = (cp)->saved_curr_key;
    *cache_val = (cp)->saved_curr_val;
    DICT_ERR_VAL_RETURN(cp, db->error, seq_res);
}

/* dict_cache_delete_behind_reset - reset "delete behind" state */

static void dict_cache_delete_behind_reset(DICT_CACHE *cp)
{
#define FREE_AND_WIPE(s) do { if (s) { myfree(s); (s) = 0; } } while (0)

    DC_CANCEL_DELETE_BEHIND(cp);
    FREE_AND_WIPE(cp->saved_curr_key);
    FREE_AND_WIPE(cp->saved_curr_val);
}

/* dict_cache_clean_stat_log_reset - log and reset cache cleanup statistics */

static void dict_cache_clean_stat_log_reset(DICT_CACHE *cp,
                                            const char *full_partial)
{
    if (cp->user_flags & DICT_CACHE_FLAG_STATISTICS)
        msg_info("cache %s %s cleanup: retained=%d dropped=%d entries",
                 cp->name, full_partial, cp->retained, cp->dropped);
    cp->retained = cp->dropped = 0;
}

/* dict_cache_clean_event - examine one cache entry */

static void dict_cache_clean_event(int unused_event, void *cache_context)
{
    const char *myname = "dict_cache_clean_event";
    DICT_CACHE *cp = (DICT_CACHE *) cache_context;
    const char *cache_key;
    const char *cache_val;
    int     next_interval;
    VSTRING *stamp_buf;
    int     first_next;

    /*
     * We interleave cache cleanup with other processing, so that the
     * application's service remains available, with perhaps increased
     * latency.
     */

    /*
     * Start a new cache cleanup run.
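     * A null saved_curr_key means that no scan is in progress (either this
     * is the first run, or the previous run reached the end of the cache),
     * so we start from the first entry and reset the statistics; otherwise
     * we continue from the position saved by the previous timer event.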
     */
    if (cp->saved_curr_key == 0) {
        cp->retained = cp->dropped = 0;
        first_next = DICT_SEQ_FUN_FIRST;
        if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
            msg_info("%s: start %s cache cleanup", myname, cp->name);
    }

    /*
     * Continue a cache cleanup run in progress.
     */
    else {
        first_next = DICT_SEQ_FUN_NEXT;
    }

    /*
     * Examine one cache entry.
     */
    if (dict_cache_sequence(cp, first_next, &cache_key, &cache_val) == 0) {
        if (cp->exp_validator(cache_key, cache_val, cp->exp_context) == 0) {
            DC_SCHEDULE_FOR_DELETE_BEHIND(cp);
            cp->dropped++;
            if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
                msg_info("%s: drop %s cache entry for %s",
                         myname, cp->name, cache_key);
        } else {
            cp->retained++;
            if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
                msg_info("%s: keep %s cache entry for %s",
                         myname, cp->name, cache_key);
        }
        next_interval = 0;
    }

    /*
     * Cache cleanup completed. Report vital statistics.
     */
    else if (cp->error != 0) {
        msg_warn("%s: cache cleanup scan terminated due to error", cp->name);
        dict_cache_clean_stat_log_reset(cp, "partial");
        next_interval = cp->exp_interval;
    } else {
        if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
            msg_info("%s: done %s cache cleanup scan", myname, cp->name);
        dict_cache_clean_stat_log_reset(cp, "full");
        stamp_buf = vstring_alloc(100);
        vstring_sprintf(stamp_buf, "%ld", (long) event_time());
        dict_put(cp->db, DC_LAST_CACHE_CLEANUP_COMPLETED,
                 vstring_str(stamp_buf));
        vstring_free(stamp_buf);
        next_interval = cp->exp_interval;
    }
    event_request_timer(dict_cache_clean_event, cache_context, next_interval);
}

/* dict_cache_control - schedule or stop the cache cleanup thread */

void    dict_cache_control(DICT_CACHE *cp,...)
{
    const char *myname = "dict_cache_control";
    const char *last_done;
    time_t  next_interval;
    int     cache_cleanup_is_active = (cp->exp_validator && cp->exp_interval);
    va_list ap;
    int     name;

    /*
     * Update the control settings.
     */
    va_start(ap, cp);
    while ((name = va_arg(ap, int)) > 0) {
        switch (name) {
        case DICT_CACHE_CTL_END:
            break;
        case DICT_CACHE_CTL_FLAGS:
            cp->user_flags = va_arg(ap, int);
            cp->log_delay = (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) ?
                0 : DC_DEF_LOG_DELAY;
            break;
        case DICT_CACHE_CTL_INTERVAL:
            cp->exp_interval = va_arg(ap, int);
            if (cp->exp_interval < 0)
                msg_panic("%s: bad %s cache cleanup interval %d",
                          myname, cp->name, cp->exp_interval);
            break;
        case DICT_CACHE_CTL_VALIDATOR:
            cp->exp_validator = va_arg(ap, DICT_CACHE_VALIDATOR_FN);
            break;
        case DICT_CACHE_CTL_CONTEXT:
            cp->exp_context = va_arg(ap, void *);
            break;
        default:
            msg_panic("%s: bad command: %d", myname, name);
        }
    }
    va_end(ap);

    /*
     * Schedule the cache cleanup thread.
     */
    if (cp->exp_interval && cp->exp_validator) {

        /*
         * Sanity checks.
         */
        if (cache_cleanup_is_active)
            msg_panic("%s: %s cache cleanup is already scheduled",
                      myname, cp->name);

        /*
         * The next start time depends on the last completion time.
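         * As a worked example (numbers are illustrative only): with
         * exp_interval = 3600 and a stored completion stamp of 1000000,
         * NEXT_START() yields 1003600; if NOW is 1003000, the first run is
         * scheduled 600 seconds from now. A missing stamp, or one that is
         * more than exp_interval seconds old, results in an immediate
         * start, and the delay is capped at exp_interval in case the stamp
         * lies in the future.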
         */
#define NEXT_START(last, delta) ((delta) + (unsigned long) atol(last))
#define NOW	(time((time_t *) 0))	/* NOT: event_time() */

        if ((last_done = dict_get(cp->db, DC_LAST_CACHE_CLEANUP_COMPLETED)) == 0
            || (next_interval = (NEXT_START(last_done, cp->exp_interval) - NOW)) < 0)
            next_interval = 0;
        if (next_interval > cp->exp_interval)
            next_interval = cp->exp_interval;
        if ((cp->user_flags & DICT_CACHE_FLAG_VERBOSE) && next_interval > 0)
            msg_info("%s cache cleanup will start after %ds",
                     cp->name, (int) next_interval);
        event_request_timer(dict_cache_clean_event, (void *) cp,
                            (int) next_interval);
    }

    /*
     * Cancel the cache cleanup thread.
     */
    else if (cache_cleanup_is_active) {
        if (cp->retained || cp->dropped)
            dict_cache_clean_stat_log_reset(cp, "partial");
        dict_cache_delete_behind_reset(cp);
        event_cancel_timer(dict_cache_clean_event, (void *) cp);
    }
}

/* dict_cache_open - open cache file */

DICT_CACHE *dict_cache_open(const char *dbname, int open_flags, int dict_flags)
{
    DICT_CACHE *cp;
    DICT   *dict;

    /*
     * Open the database as requested. Don't attempt to second-guess the
     * application.
     */
    dict = dict_open(dbname, open_flags, dict_flags);

    /*
     * Create the DICT_CACHE object.
     */
    cp = (DICT_CACHE *) mymalloc(sizeof(*cp));
    cp->name = mystrdup(dbname);
    cp->cache_flags = 0;
    cp->user_flags = 0;
    cp->db = dict;
    cp->saved_curr_key = 0;
    cp->saved_curr_val = 0;
    cp->exp_interval = 0;
    cp->exp_validator = 0;
    cp->exp_context = 0;
    cp->retained = 0;
    cp->dropped = 0;
    cp->log_delay = DC_DEF_LOG_DELAY;
    cp->upd_log_stamp = cp->get_log_stamp =
        cp->del_log_stamp = cp->seq_log_stamp = 0;

    return (cp);
}

/* dict_cache_close - close cache file */

void    dict_cache_close(DICT_CACHE *cp)
{

    /*
     * Destroy the DICT_CACHE object.
     */
    myfree(cp->name);
    dict_cache_control(cp, DICT_CACHE_CTL_INTERVAL, 0, DICT_CACHE_CTL_END);
    dict_close(cp->db);
    if (cp->saved_curr_key)
        myfree(cp->saved_curr_key);
    if (cp->saved_curr_val)
        myfree(cp->saved_curr_val);
    myfree((void *) cp);
}

/* dict_cache_name - get the cache name */

const char *dict_cache_name(DICT_CACHE *cp)
{

    /*
     * This is used for verbose logging or warning messages, so the cost of
     * a call is only incurred where needed (well, sort of - code that does
     * not execute still presents overhead for the processor pipeline,
     * processor cache, etc).
     */
    return (cp->name);
}

 /*
  * Test driver with support for interleaved access. First, enter a number of
  * requests to look up, update or delete a sequence of cache entries, then
  * interleave those sequences with the "run" command.
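  *
  * For example, an illustrative session (the btree pathname is arbitrary):
  *
  *	cache btree:/tmp/test_cache
  *	update x 10
  *	query x 10
  *	delete x 10
  *	run
  *
  * queues ten update, ten query and ten delete requests for the keys
  * "0-x" through "9-x", and then executes them in interleaved order.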
  */
#ifdef TEST
#include <msg_vstream.h>
#include <vstring_vstream.h>
#include <argv.h>
#include <stringops.h>

#define DELIMS	" "
#define USAGE	"\n\tTo manage settings:" \
	"\n\tverbose <level> (verbosity level)" \
	"\n\telapsed <level> (0=don't show elapsed time)" \
	"\n\tlmdb_map_size <limit> (initial LMDB size limit)" \
	"\n\tcache <type>:<name> (switch to named database)" \
	"\n\tstatus (show map size, cache, pending requests)" \
	"\n\n\tTo manage pending requests:" \
	"\n\treset (discard pending requests)" \
	"\n\trun (execute pending requests in interleaved order)" \
	"\n\n\tTo add a pending request:" \
	"\n\tquery <key-suffix> <count> (negative to reverse order)" \
	"\n\tupdate <key-suffix> <count> (negative to reverse order)" \
	"\n\tdelete <key-suffix> <count> (negative to reverse order)" \
	"\n\tpurge <key-suffix>" \
	"\n\tcount <key-suffix>"

 /*
  * For realism, open the cache with the same flags as postscreen(8) and
  * verify(8).
  */
#define DICT_CACHE_OPEN_FLAGS (DICT_FLAG_DUP_REPLACE | DICT_FLAG_SYNC_UPDATE | \
	DICT_FLAG_OPEN_LOCK)

 /*
  * Storage for one request to access a sequence of cache entries.
  */
typedef struct DICT_CACHE_SREQ {
    int     flags;                      /* per-request: reverse, purge */
    char   *cmd;                        /* command for status report */
    void    (*action) (struct DICT_CACHE_SREQ *, DICT_CACHE *, VSTRING *);
    char   *suffix;                     /* key suffix */
    int     done;                       /* progress indicator */
    int     todo;                       /* number of entries to process */
    int     first_next;                 /* first/next */
} DICT_CACHE_SREQ;

#define DICT_CACHE_SREQ_FLAG_PURGE	(1<<1)	/* purge instead of count */
#define DICT_CACHE_SREQ_FLAG_REVERSE	(1<<2)	/* reverse instead of forward */

#define DICT_CACHE_SREQ_LIMIT	10

 /*
  * All test requests combined.
  */
typedef struct DICT_CACHE_TEST {
    int     flags;                      /* exclusion flags */
    int     size;                       /* allocated slots */
    int     used;                       /* used slots */
    DICT_CACHE_SREQ job_list[1];        /* actually, a bunch */
} DICT_CACHE_TEST;

#define DICT_CACHE_TEST_FLAG_ITER	(1<<0)	/* count or purge */

#define STR(x)	vstring_str(x)

int     show_elapsed = 1;               /* show elapsed time */

#ifdef HAS_LMDB
extern size_t dict_lmdb_map_size;       /* LMDB-specific */

#endif

/* usage - command-line usage message */

static NORETURN usage(const char *progname)
{
    msg_fatal("usage: %s (no argument)", progname);
}

/* make_tagged_key - make tagged search key */

static void make_tagged_key(VSTRING *bp, DICT_CACHE_SREQ *cp)
{
    if (cp->done < 0)
        msg_panic("make_tagged_key: bad done count: %d", cp->done);
    if (cp->todo < 1)
        msg_panic("make_tagged_key: bad todo count: %d", cp->todo);
    vstring_sprintf(bp, "%d-%s",
                    (cp->flags & DICT_CACHE_SREQ_FLAG_REVERSE) ?
                    cp->todo - cp->done - 1 : cp->done, cp->suffix);
}

/* create_requests - create request list */

static DICT_CACHE_TEST *create_requests(int count)
{
    DICT_CACHE_TEST *tp;
    DICT_CACHE_SREQ *cp;

    tp = (DICT_CACHE_TEST *) mymalloc(sizeof(DICT_CACHE_TEST) +
                                      (count - 1) * sizeof(DICT_CACHE_SREQ));
    tp->flags = 0;
    tp->size = count;
    tp->used = 0;
    for (cp = tp->job_list; cp < tp->job_list + count; cp++) {
        cp->flags = 0;
        cp->cmd = 0;
        cp->action = 0;
        cp->suffix = 0;
        cp->todo = 0;
        cp->first_next = DICT_SEQ_FUN_FIRST;
    }
    return (tp);
}

/* reset_requests - reset request list */

static void reset_requests(DICT_CACHE_TEST *tp)
{
    DICT_CACHE_SREQ *cp;

    tp->flags = 0;
    tp->used = 0;
    for (cp = tp->job_list; cp < tp->job_list + tp->size; cp++) {
        cp->flags = 0;
        if (cp->cmd) {
            myfree(cp->cmd);
            cp->cmd = 0;
        }
        cp->action = 0;
        if (cp->suffix) {
            myfree(cp->suffix);
            cp->suffix = 0;
        }
        cp->todo = 0;
        cp->first_next = DICT_SEQ_FUN_FIRST;
    }
}

/* free_requests - destroy request list */

static void free_requests(DICT_CACHE_TEST *tp)
{
    reset_requests(tp);
    myfree((void *) tp);
}

/* run_requests - execute pending requests in interleaved order */

static void run_requests(DICT_CACHE_TEST *tp, DICT_CACHE *dp, VSTRING *bp)
{
    DICT_CACHE_SREQ *cp;
    int     todo;
    struct timeval start;
    struct timeval finish;
    struct timeval elapsed;

    if (dp == 0) {
        msg_warn("no cache");
        return;
    }
    GETTIMEOFDAY(&start);
    do {
        todo = 0;
        for (cp = tp->job_list; cp < tp->job_list + tp->used; cp++) {
            if (cp->done < cp->todo) {
                todo = 1;
                cp->action(cp, dp, bp);
            }
        }
    } while (todo);
    GETTIMEOFDAY(&finish);
    timersub(&finish, &start, &elapsed);
    if (show_elapsed)
        vstream_printf("Elapsed: %g\n",
                       elapsed.tv_sec + elapsed.tv_usec / 1000000.0);

    reset_requests(tp);
}

/* show_status - show settings and pending requests */

static void show_status(DICT_CACHE_TEST *tp, DICT_CACHE *dp)
{
    DICT_CACHE_SREQ *cp;

#ifdef HAS_LMDB
    vstream_printf("lmdb_map_size\t%ld\n", (long) dict_lmdb_map_size);
#endif
    vstream_printf("cache\t%s\n", dp ? dp->name : "(none)");

    if (tp->used == 0)
        vstream_printf("No pending requests\n");
    else
        vstream_printf("%s\t%s\t%s\t%s\t%s\t%s\n",
                       "cmd", "dir", "suffix", "count", "done", "first/next");

    for (cp = tp->job_list; cp < tp->job_list + tp->used; cp++)
        if (cp->todo > 0)
            vstream_printf("%s\t%s\t%s\t%d\t%d\t%d\n",
                           cp->cmd,
                           (cp->flags & DICT_CACHE_SREQ_FLAG_REVERSE) ?
                           "reverse" : "forward",
                           cp->suffix ? cp->suffix : "(null)", cp->todo,
                           cp->done, cp->first_next);
}

/* query_action - lookup cache entry */

static void query_action(DICT_CACHE_SREQ *cp, DICT_CACHE *dp, VSTRING *bp)
{
    const char *lookup;

    make_tagged_key(bp, cp);
    if ((lookup = dict_cache_lookup(dp, STR(bp))) == 0) {
        if (dp->error)
            msg_warn("query_action: query failed: %s: %m", STR(bp));
        else
            msg_warn("query_action: query failed: %s", STR(bp));
    } else if (strcmp(STR(bp), lookup) != 0) {
        msg_warn("lookup result \"%s\" differs from key \"%s\"",
                 lookup, STR(bp));
    }
    cp->done += 1;
}

/* update_action - update cache entry */

static void update_action(DICT_CACHE_SREQ *cp, DICT_CACHE *dp, VSTRING *bp)
{
    make_tagged_key(bp, cp);
    if (dict_cache_update(dp, STR(bp), STR(bp)) != 0) {
        if (dp->error)
            msg_warn("update_action: update failed: %s: %m", STR(bp));
        else
            msg_warn("update_action: update failed: %s", STR(bp));
    }
    cp->done += 1;
}

/* delete_action - delete cache entry */

static void delete_action(DICT_CACHE_SREQ *cp, DICT_CACHE *dp, VSTRING *bp)
{
    make_tagged_key(bp, cp);
    if (dict_cache_delete(dp, STR(bp)) != 0) {
        if (dp->error)
            msg_warn("delete_action: delete failed: %s: %m", STR(bp));
        else
            msg_warn("delete_action: delete failed: %s", STR(bp));
    }
    cp->done += 1;
}

/* iter_action - iterate over cache and act on entries with given suffix */

static void iter_action(DICT_CACHE_SREQ *cp, DICT_CACHE *dp, VSTRING *bp)
{
    const char *cache_key;
    const char *cache_val;
    const char *what;
    const char *suffix;

    if (dict_cache_sequence(dp, cp->first_next, &cache_key, &cache_val) == 0) {
        if (strcmp(cache_key, cache_val) != 0)
            msg_warn("value \"%s\" differs from key \"%s\"",
                     cache_val, cache_key);
        suffix = cache_key + strspn(cache_key, "0123456789");
        if (suffix[0] == '-' && strcmp(suffix + 1, cp->suffix) == 0) {
            cp->done += 1;
            cp->todo = cp->done + 1;            /* XXX */
            if ((cp->flags & DICT_CACHE_SREQ_FLAG_PURGE)
                && dict_cache_delete(dp, cache_key) != 0) {
                if (dp->error)
                    msg_warn("purge_action: delete failed: %s: %m", STR(bp));
                else
                    msg_warn("purge_action: delete failed: %s", STR(bp));
            }
        }
        cp->first_next = DICT_SEQ_FUN_NEXT;
    } else {
        what = (cp->flags & DICT_CACHE_SREQ_FLAG_PURGE) ? "purge" : "count";
        if (dp->error)
            msg_warn("%s error after %d: %m", what, cp->done);
        else
            vstream_printf("suffix=%s %s=%d\n", cp->suffix, what, cp->done);
        cp->todo = 0;
    }
}

 /*
  * Table-driven support.
  */
typedef struct DICT_CACHE_SREQ_INFO {
    const char *name;
    int     argc;
    void    (*action) (DICT_CACHE_SREQ *, DICT_CACHE *, VSTRING *);
    int     test_flags;
    int     req_flags;
} DICT_CACHE_SREQ_INFO;

static DICT_CACHE_SREQ_INFO req_info[] = {
    {"query", 3, query_action},
    {"update", 3, update_action},
    {"delete", 3, delete_action},
    {"count", 2, iter_action, DICT_CACHE_TEST_FLAG_ITER},
    {"purge", 2, iter_action, DICT_CACHE_TEST_FLAG_ITER, DICT_CACHE_SREQ_FLAG_PURGE},
    0,
};

/* add_request - add a request to the list */

static void add_request(DICT_CACHE_TEST *tp, ARGV *argv)
{
    DICT_CACHE_SREQ_INFO *rp;
    DICT_CACHE_SREQ *cp;
    int     req_flags;
    int     count;
    char   *cmd = argv->argv[0];
    char   *suffix = (argv->argc > 1 ? argv->argv[1] : 0);
    char   *todo = (argv->argc > 2 ? argv->argv[2] : "1");	/* XXX */

    if (tp->used >= tp->size) {
        msg_warn("%s: request list is full", cmd);
        return;
    }
    for (rp = req_info; /* See below */ ; rp++) {
        if (rp->name == 0) {
            vstream_printf("usage: %s\n", USAGE);
            return;
        }
        if (strcmp(rp->name, argv->argv[0]) == 0
            && rp->argc == argv->argc)
            break;
    }
    req_flags = rp->req_flags;
    if (todo[0] == '-') {
        req_flags |= DICT_CACHE_SREQ_FLAG_REVERSE;
        todo += 1;
    }
    if (!alldig(todo) || (count = atoi(todo)) == 0) {
        msg_warn("%s: bad count: %s", cmd, todo);
        return;
    }
    if (tp->flags & rp->test_flags) {
        msg_warn("%s: command conflicts with other command", cmd);
        return;
    }
    tp->flags |= rp->test_flags;
    cp = tp->job_list + tp->used;
    cp->cmd = mystrdup(cmd);
    cp->action = rp->action;
    if (suffix)
        cp->suffix = mystrdup(suffix);
    cp->done = 0;
    cp->flags = req_flags;
    cp->todo = count;
    tp->used += 1;
}

/* main - main program */

int     main(int argc, char **argv)
{
    DICT_CACHE_TEST *test_job;
    VSTRING *inbuf = vstring_alloc(100);
    char   *bufp;
    ARGV   *args;
    DICT_CACHE *cache = 0;
    int     stdin_is_tty;

    msg_vstream_init(argv[0], VSTREAM_ERR);
    if (argc != 1)
        usage(argv[0]);

    test_job = create_requests(DICT_CACHE_SREQ_LIMIT);

    stdin_is_tty = isatty(0);

    for (;;) {
        if (stdin_is_tty) {
            vstream_printf("> ");
            vstream_fflush(VSTREAM_OUT);
        }
        if (vstring_fgets_nonl(inbuf, VSTREAM_IN) == 0)
            break;
        bufp = vstring_str(inbuf);
        if (!stdin_is_tty) {
            vstream_printf("> %s\n", bufp);
            vstream_fflush(VSTREAM_OUT);
        }
        if (*bufp == '#')
            continue;
        args = argv_split(bufp, DELIMS);
        if (args->argc == 0) {
            vstream_printf("usage: %s\n", USAGE);
            vstream_fflush(VSTREAM_OUT);
            continue;
        }
        if (strcmp(args->argv[0], "verbose") == 0 && args->argc == 2) {
            msg_verbose = atoi(args->argv[1]);
        } else if (strcmp(args->argv[0], "elapsed") == 0 && args->argc == 2) {
            show_elapsed = atoi(args->argv[1]);
#ifdef HAS_LMDB
        } else if (strcmp(args->argv[0], "lmdb_map_size") == 0 && args->argc == 2) {
            dict_lmdb_map_size = atol(args->argv[1]);
#endif
        } else if (strcmp(args->argv[0], "cache") == 0 && args->argc == 2) {
            if (cache)
                dict_cache_close(cache);
            cache = dict_cache_open(args->argv[1], O_CREAT | O_RDWR,
                                    DICT_CACHE_OPEN_FLAGS);
        } else if (strcmp(args->argv[0], "reset") == 0 && args->argc == 1) {
            reset_requests(test_job);
        } else if (strcmp(args->argv[0], "run") == 0 && args->argc == 1) {
            run_requests(test_job, cache, inbuf);
        } else if (strcmp(args->argv[0], "status") == 0 && args->argc == 1) {
            show_status(test_job, cache);
        } else {
            add_request(test_job, args);
        }
        vstream_fflush(VSTREAM_OUT);
        argv_free(args);
    }

    vstring_free(inbuf);
    free_requests(test_job);
    if (cache)
        dict_cache_close(cache);
    return (0);
}

#endif