/*	$NetBSD: dict_cache.c,v 1.4 2023/12/23 20:30:46 christos Exp $	*/

/*++
/* NAME
/*	dict_cache 3
/* SUMMARY
/*	External cache manager
/* SYNOPSIS
/*	#include <dict_cache.h>
/*
/*	DICT_CACHE *dict_cache_open(dbname, open_flags, dict_flags)
/*	const char *dbname;
/*	int	open_flags;
/*	int	dict_flags;
/*
/*	void	dict_cache_close(cache)
/*	DICT_CACHE *cache;
/*
/*	const char *dict_cache_lookup(cache, cache_key)
/*	DICT_CACHE *cache;
/*	const char *cache_key;
/*
/*	int	dict_cache_update(cache, cache_key, cache_val)
/*	DICT_CACHE *cache;
/*	const char *cache_key;
/*	const char *cache_val;
/*
/*	int	dict_cache_delete(cache, cache_key)
/*	DICT_CACHE *cache;
/*	const char *cache_key;
/*
/*	int	dict_cache_sequence(cache, first_next, cache_key, cache_val)
/*	DICT_CACHE *cache;
/*	int	first_next;
/*	const char **cache_key;
/*	const char **cache_val;
/* AUXILIARY FUNCTIONS
/*	void	dict_cache_control(cache, name, value, ...)
/*	DICT_CACHE *cache;
/*	int	name;
/*
/*	typedef int (*DICT_CACHE_VALIDATOR_FN) (const char *cache_key,
/*	const char *cache_val, void *context);
/*
/*	const char *dict_cache_name(cache)
/*	DICT_CACHE *cache;
/* DESCRIPTION
/*	This module maintains external cache files with support
/*	for expiration. The underlying table must implement the
/*	"lookup", "update", "delete" and "sequence" operations.
/*
/*	Although this API is similar to the one documented in
/*	dict_open(3), there are subtle differences in the interaction
/*	between the iterators that access all cache elements, and
/*	other operations that access individual cache elements.
/*
/*	In particular, when a "sequence" or "cleanup" operation is
/*	in progress the cache intercepts requests to delete the
/*	"current" entry, as this would cause some databases to
/*	mis-behave. Instead, the cache implements a "delete behind"
/*	strategy, and deletes such an entry after the "sequence"
/*	or "cleanup" operation moves on to the next cache element.
/*	The "delete behind" strategy also affects the cache lookup
/*	and update operations as detailed below.
/*
/*	dict_cache_open() is a wrapper around the dict_open()
/*	function. It opens the specified cache and returns a handle
/*	that must be used for subsequent access. This function does
/*	not return in case of error.
/*
/*	dict_cache_close() closes the specified cache and releases
/*	memory that was allocated by dict_cache_open(), and terminates
/*	any thread that was started with dict_cache_control().
/*
/*	dict_cache_lookup() looks up the specified cache entry.
/*	The result value is a null pointer when the cache entry was
/*	not found, or when the entry is scheduled for "delete
/*	behind".
/*
/*	dict_cache_update() updates the specified cache entry. If
/*	the entry is scheduled for "delete behind", the delete
/*	operation is canceled (because of this, the cache must be
/*	opened with DICT_FLAG_DUP_REPLACE). This function does not
/*	return in case of error.
/*
/*	dict_cache_delete() removes the specified cache entry. If
/*	this is the "current" entry of a "sequence" operation, the
/*	entry is scheduled for "delete behind". The result value
/*	is zero when the entry was found.
/*
/*	dict_cache_sequence() iterates over the specified cache and
/*	returns each entry in an implementation-defined order. The
/*	result value is zero when a cache entry was found.
/*
/*	Important: programs must not use both dict_cache_sequence()
/*	and the built-in cache cleanup feature.
/*
/*	dict_cache_control() provides control over the built-in
/*	cache cleanup feature and logging. The arguments are a list
/*	of macros with zero or more arguments, terminated with
/*	CA_DICT_CACHE_CTL_END which has none. The following lists
/*	the macros and corresponding argument types.
/* .IP "CA_DICT_CACHE_CTL_FLAGS(int flags)"
/*	The arguments to this command are the bit-wise OR of zero
/*	or more of the following:
/* .RS
/* .IP CA_DICT_CACHE_CTL_FLAG_VERBOSE
/*	Enable verbose logging of cache activity.
/* .IP CA_DICT_CACHE_CTL_FLAG_EXP_SUMMARY
/*	Log cache statistics after each cache cleanup run.
/* .RE
/* .IP "CA_DICT_CACHE_CTL_INTERVAL(int interval)"
/*	The interval between cache cleanup runs. Specify a null
/*	validator or interval to stop cache cleanup and log cache
/*	statistics if a cleanup run was in progress.
/* .IP "CA_DICT_CACHE_CTL_VALIDATOR(DICT_CACHE_VALIDATOR_FN validator)"
/*	An application call-back routine that returns non-zero when
/*	a cache entry should be kept. The call-back function should
/*	not make changes to the cache. Specify a null validator or
/*	interval to stop cache cleanup.
/* .IP "CA_DICT_CACHE_CTL_CONTEXT(void *context)"
/*	Application context that is passed to the validator function.
/* .RE
/* .PP
/*	dict_cache_name() returns the name of the specified cache.
/*
/*	Arguments:
/* .IP "dbname, open_flags, dict_flags"
/*	These are passed unchanged to dict_open(). The cache must
/*	be opened with DICT_FLAG_DUP_REPLACE.
/* .IP cache
/*	Cache handle created with dict_cache_open().
/* .IP cache_key
/*	Cache lookup key.
/* .IP cache_val
/*	Information that is stored under a cache lookup key.
/* .IP first_next
/*	One of DICT_SEQ_FUN_FIRST (first cache element) or
/*	DICT_SEQ_FUN_NEXT (next cache element).
/* .sp
/*	Note: there is no "stop" request. To ensure that the "delete
/*	behind" strategy does not interfere with database access,
/*	allow dict_cache_sequence() to run to completion.
/* .IP table
/*	A bare dictionary handle.
/* DIAGNOSTICS
/*	When a request is satisfied, the lookup routine returns
/*	non-null, and the update, delete and sequence routines
/*	return zero. The cache->error value is zero when a request
/*	could not be satisfied because an item did not exist (delete,
/*	sequence) or if it could not be updated. The cache->error
/*	value is non-zero only when a request could not be satisfied,
/*	and the cause was a database error.
/*
/*	Cache access errors are logged with a warning message. To
/*	avoid spamming the log, each type of operation logs no more
/*	than one cache access error per second, per cache. Specify
/*	the DICT_CACHE_FLAG_VERBOSE flag (see above) to log all
/*	warnings.
/* BUGS
/*	There should be a way to suspend automatic program suicide
/*	until a cache cleanup run is completed. Some entries may
/*	never be removed when the process max_idle time is less
/*	than the time needed to make a full pass over the cache.
/*
/*	The delete-behind strategy assumes that all updates are
/*	made by a single process.
/*	Otherwise, delete-behind may
/*	remove an entry that was updated after it was scheduled for
/*	deletion.
/* LICENSE
/* .ad
/* .fi
/*	The Secure Mailer license must be distributed with this software.
/* HISTORY
/* .ad
/* .fi
/*	A predecessor of this code was written first for the Postfix
/*	tlsmgr(8) daemon.
/* AUTHOR(S)
/*	Wietse Venema
/*	IBM T.J. Watson Research
/*	P.O. Box 704
/*	Yorktown Heights, NY 10598, USA
/*--*/

/* System library. */

#include <sys_defs.h>
#include <string.h>
#include <stdlib.h>

/* Utility library. */

#include <msg.h>
#include <dict.h>
#include <mymalloc.h>
#include <events.h>
#include <dict_cache.h>

/* Application-specific. */

/*
 * XXX Deleting entries while enumerating a map can be tricky. Some map
 * types have a concept of cursor and support a "delete the current element"
 * operation. Some map types without cursors don't behave well when the
 * current first/next entry is deleted (example: with Berkeley DB < 2, the
 * "next" operation produces garbage). To avoid trouble, we delete an entry
 * after advancing the current first/next position beyond it; we use the
 * same strategy with application requests to delete the current entry.
 */

/*
 * Opaque data structure. Use dict_cache_name() to access the name of the
 * underlying database.
 */
struct DICT_CACHE {
    char   *name;			/* full name including proxy: */
    int     cache_flags;		/* see below */
    int     user_flags;			/* logging */
    DICT   *db;				/* database handle */
    int     error;			/* last operation only */

    /* Delete-behind support. */
    char   *saved_curr_key;		/* "current" cache lookup key */
    char   *saved_curr_val;		/* "current" cache lookup result */

    /* Cleanup support. */
    int     exp_interval;		/* time between cleanup runs */
    DICT_CACHE_VALIDATOR_FN exp_validator;	/* expiration call-back */
    void   *exp_context;		/* call-back context */
    int     retained;			/* entries retained in cleanup run */
    int     dropped;			/* entries removed in cleanup run */

    /* Rate-limited logging support. */
    int     log_delay;
    time_t  upd_log_stamp;		/* last update warning */
    time_t  get_log_stamp;		/* last lookup warning */
    time_t  del_log_stamp;		/* last delete warning */
    time_t  seq_log_stamp;		/* last sequence warning */
};

#define DC_FLAG_DEL_SAVED_CURRENT_KEY	(1<<0)	/* delete-behind is scheduled */

/*
 * Don't log cache access errors more than once per second.
 */
#define DC_DEF_LOG_DELAY	1

/*
 * Macros to make obscure code more readable.
 */
#define DC_SCHEDULE_FOR_DELETE_BEHIND(cp) \
    ((cp)->cache_flags |= DC_FLAG_DEL_SAVED_CURRENT_KEY)

#define DC_MATCH_SAVED_CURRENT_KEY(cp, cache_key) \
    ((cp)->saved_curr_key && strcmp((cp)->saved_curr_key, (cache_key)) == 0)

#define DC_IS_SCHEDULED_FOR_DELETE_BEHIND(cp) \
    (/* NOT: (cp)->saved_curr_key && */ \
     ((cp)->cache_flags & DC_FLAG_DEL_SAVED_CURRENT_KEY) != 0)

#define DC_CANCEL_DELETE_BEHIND(cp) \
    ((cp)->cache_flags &= ~DC_FLAG_DEL_SAVED_CURRENT_KEY)

/*
 * Special key to store the time of the last cache cleanup run completion.
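 *
 * The value stored under this key is the completion time in seconds since
 * the epoch, formatted as a decimal string by dict_cache_clean_event()
 * below; dict_cache_control() reads it back with atol() to decide how soon
 * the next cleanup run should start, and dict_cache_sequence() hides the
 * record from iterators. For example, a run that completed at time
 * 1700000000 is stored as the string "1700000000".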
267 */ 268 #define DC_LAST_CACHE_CLEANUP_COMPLETED "_LAST_CACHE_CLEANUP_COMPLETED_" 269 270 /* dict_cache_lookup - load entry from cache */ 271 272 const char *dict_cache_lookup(DICT_CACHE *cp, const char *cache_key) 273 { 274 const char *myname = "dict_cache_lookup"; 275 const char *cache_val; 276 DICT *db = cp->db; 277 278 /* 279 * Search for the cache entry. Don't return an entry that is scheduled 280 * for delete-behind. 281 */ 282 if (DC_IS_SCHEDULED_FOR_DELETE_BEHIND(cp) 283 && DC_MATCH_SAVED_CURRENT_KEY(cp, cache_key)) { 284 if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) 285 msg_info("%s: key=%s (pretend not found - scheduled for deletion)", 286 myname, cache_key); 287 DICT_ERR_VAL_RETURN(cp, DICT_ERR_NONE, (char *) 0); 288 } else { 289 cache_val = dict_get(db, cache_key); 290 if (cache_val == 0 && db->error != 0) 291 msg_rate_delay(&cp->get_log_stamp, cp->log_delay, msg_warn, 292 "%s: cache lookup for '%s' failed due to error", 293 cp->name, cache_key); 294 if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) 295 msg_info("%s: key=%s value=%s", myname, cache_key, 296 cache_val ? cache_val : db->error ? 297 "error" : "(not found)"); 298 DICT_ERR_VAL_RETURN(cp, db->error, cache_val); 299 } 300 } 301 302 /* dict_cache_update - save entry to cache */ 303 304 int dict_cache_update(DICT_CACHE *cp, const char *cache_key, 305 const char *cache_val) 306 { 307 const char *myname = "dict_cache_update"; 308 DICT *db = cp->db; 309 int put_res; 310 311 /* 312 * Store the cache entry and cancel the delete-behind operation. 313 */ 314 if (DC_IS_SCHEDULED_FOR_DELETE_BEHIND(cp) 315 && DC_MATCH_SAVED_CURRENT_KEY(cp, cache_key)) { 316 if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) 317 msg_info("%s: cancel delete-behind for key=%s", myname, cache_key); 318 DC_CANCEL_DELETE_BEHIND(cp); 319 } 320 if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) 321 msg_info("%s: key=%s value=%s", myname, cache_key, cache_val); 322 put_res = dict_put(db, cache_key, cache_val); 323 if (put_res != 0) 324 msg_rate_delay(&cp->upd_log_stamp, cp->log_delay, msg_warn, 325 "%s: could not update entry for %s", cp->name, cache_key); 326 DICT_ERR_VAL_RETURN(cp, db->error, put_res); 327 } 328 329 /* dict_cache_delete - delete entry from cache */ 330 331 int dict_cache_delete(DICT_CACHE *cp, const char *cache_key) 332 { 333 const char *myname = "dict_cache_delete"; 334 int del_res; 335 DICT *db = cp->db; 336 337 /* 338 * Delete the entry, unless we would delete the current first/next entry. 339 * In that case, schedule the "current" entry for delete-behind to avoid 340 * mis-behavior by some databases. 341 */ 342 if (DC_MATCH_SAVED_CURRENT_KEY(cp, cache_key)) { 343 DC_SCHEDULE_FOR_DELETE_BEHIND(cp); 344 if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) 345 msg_info("%s: key=%s (current entry - schedule for delete-behind)", 346 myname, cache_key); 347 DICT_ERR_VAL_RETURN(cp, DICT_ERR_NONE, DICT_STAT_SUCCESS); 348 } else { 349 del_res = dict_del(db, cache_key); 350 if (del_res != 0) 351 msg_rate_delay(&cp->del_log_stamp, cp->log_delay, msg_warn, 352 "%s: could not delete entry for %s", cp->name, cache_key); 353 if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) 354 msg_info("%s: key=%s (%s)", myname, cache_key, 355 del_res == 0 ? "found" : 356 db->error ? 
"error" : "not found"); 357 DICT_ERR_VAL_RETURN(cp, db->error, del_res); 358 } 359 } 360 361 /* dict_cache_sequence - look up the first/next cache entry */ 362 363 int dict_cache_sequence(DICT_CACHE *cp, int first_next, 364 const char **cache_key, 365 const char **cache_val) 366 { 367 const char *myname = "dict_cache_sequence"; 368 int seq_res; 369 const char *raw_cache_key; 370 const char *raw_cache_val; 371 char *previous_curr_key; 372 char *previous_curr_val; 373 DICT *db = cp->db; 374 375 /* 376 * Find the first or next database entry. Hide the record with the cache 377 * cleanup completion time stamp. 378 */ 379 seq_res = dict_seq(db, first_next, &raw_cache_key, &raw_cache_val); 380 if (seq_res == 0 381 && strcmp(raw_cache_key, DC_LAST_CACHE_CLEANUP_COMPLETED) == 0) 382 seq_res = 383 dict_seq(db, DICT_SEQ_FUN_NEXT, &raw_cache_key, &raw_cache_val); 384 if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) 385 msg_info("%s: key=%s value=%s", myname, 386 seq_res == 0 ? raw_cache_key : db->error ? 387 "(error)" : "(not found)", 388 seq_res == 0 ? raw_cache_val : db->error ? 389 "(error)" : "(not found)"); 390 if (db->error) 391 msg_rate_delay(&cp->seq_log_stamp, cp->log_delay, msg_warn, 392 "%s: sequence error", cp->name); 393 394 /* 395 * Save the current cache_key and cache_val before they are clobbered by 396 * our own delete operation below. This also prevents surprises when the 397 * application accesses the database after this function returns. 398 * 399 * We also use the saved cache_key to protect the current entry against 400 * application delete requests. 401 */ 402 previous_curr_key = cp->saved_curr_key; 403 previous_curr_val = cp->saved_curr_val; 404 if (seq_res == 0) { 405 cp->saved_curr_key = mystrdup(raw_cache_key); 406 cp->saved_curr_val = mystrdup(raw_cache_val); 407 } else { 408 cp->saved_curr_key = 0; 409 cp->saved_curr_val = 0; 410 } 411 412 /* 413 * Delete behind. 414 */ 415 if (db->error == 0 && DC_IS_SCHEDULED_FOR_DELETE_BEHIND(cp)) { 416 DC_CANCEL_DELETE_BEHIND(cp); 417 if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) 418 msg_info("%s: delete-behind key=%s value=%s", 419 myname, previous_curr_key, previous_curr_val); 420 if (dict_del(db, previous_curr_key) != 0) 421 msg_rate_delay(&cp->del_log_stamp, cp->log_delay, msg_warn, 422 "%s: could not delete entry for %s", 423 cp->name, previous_curr_key); 424 } 425 426 /* 427 * Clean up previous iteration key and value. 428 */ 429 if (previous_curr_key) 430 myfree(previous_curr_key); 431 if (previous_curr_val) 432 myfree(previous_curr_val); 433 434 /* 435 * Return the result. 
436 */ 437 *cache_key = (cp)->saved_curr_key; 438 *cache_val = (cp)->saved_curr_val; 439 DICT_ERR_VAL_RETURN(cp, db->error, seq_res); 440 } 441 442 /* dict_cache_delete_behind_reset - reset "delete behind" state */ 443 444 static void dict_cache_delete_behind_reset(DICT_CACHE *cp) 445 { 446 #define FREE_AND_WIPE(s) do { if (s) { myfree(s); (s) = 0; } } while (0) 447 448 DC_CANCEL_DELETE_BEHIND(cp); 449 FREE_AND_WIPE(cp->saved_curr_key); 450 FREE_AND_WIPE(cp->saved_curr_val); 451 } 452 453 /* dict_cache_clean_stat_log_reset - log and reset cache cleanup statistics */ 454 455 static void dict_cache_clean_stat_log_reset(DICT_CACHE *cp, 456 const char *full_partial) 457 { 458 if (cp->user_flags & DICT_CACHE_FLAG_STATISTICS) 459 msg_info("cache %s %s cleanup: retained=%d dropped=%d entries", 460 cp->name, full_partial, cp->retained, cp->dropped); 461 cp->retained = cp->dropped = 0; 462 } 463 464 /* dict_cache_clean_event - examine one cache entry */ 465 466 static void dict_cache_clean_event(int unused_event, void *cache_context) 467 { 468 const char *myname = "dict_cache_clean_event"; 469 DICT_CACHE *cp = (DICT_CACHE *) cache_context; 470 const char *cache_key; 471 const char *cache_val; 472 int next_interval; 473 VSTRING *stamp_buf; 474 int first_next; 475 476 /* 477 * We interleave cache cleanup with other processing, so that the 478 * application's service remains available, with perhaps increased 479 * latency. 480 */ 481 482 /* 483 * Start a new cache cleanup run. 484 */ 485 if (cp->saved_curr_key == 0) { 486 cp->retained = cp->dropped = 0; 487 first_next = DICT_SEQ_FUN_FIRST; 488 if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) 489 msg_info("%s: start %s cache cleanup", myname, cp->name); 490 } 491 492 /* 493 * Continue a cache cleanup run in progress. 494 */ 495 else { 496 first_next = DICT_SEQ_FUN_NEXT; 497 } 498 499 /* 500 * Examine one cache entry. 501 */ 502 if (dict_cache_sequence(cp, first_next, &cache_key, &cache_val) == 0) { 503 if (cp->exp_validator(cache_key, cache_val, cp->exp_context) == 0) { 504 DC_SCHEDULE_FOR_DELETE_BEHIND(cp); 505 cp->dropped++; 506 if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) 507 msg_info("%s: drop %s cache entry for %s", 508 myname, cp->name, cache_key); 509 } else { 510 cp->retained++; 511 if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) 512 msg_info("%s: keep %s cache entry for %s", 513 myname, cp->name, cache_key); 514 } 515 next_interval = 0; 516 } 517 518 /* 519 * Cache cleanup completed. Report vital statistics. 520 */ 521 else if (cp->error != 0) { 522 msg_warn("%s: cache cleanup scan terminated due to error", cp->name); 523 dict_cache_clean_stat_log_reset(cp, "partial"); 524 next_interval = cp->exp_interval; 525 } else { 526 if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) 527 msg_info("%s: done %s cache cleanup scan", myname, cp->name); 528 dict_cache_clean_stat_log_reset(cp, "full"); 529 stamp_buf = vstring_alloc(100); 530 vstring_sprintf(stamp_buf, "%ld", (long) event_time()); 531 dict_put(cp->db, DC_LAST_CACHE_CLEANUP_COMPLETED, 532 vstring_str(stamp_buf)); 533 vstring_free(stamp_buf); 534 next_interval = cp->exp_interval; 535 } 536 event_request_timer(dict_cache_clean_event, cache_context, next_interval); 537 } 538 539 /* dict_cache_control - schedule or stop the cache cleanup thread */ 540 541 void dict_cache_control(DICT_CACHE *cp,...) 
542 { 543 const char *myname = "dict_cache_control"; 544 const char *last_done; 545 time_t next_interval; 546 int cache_cleanup_is_active = (cp->exp_validator && cp->exp_interval); 547 va_list ap; 548 int name; 549 550 /* 551 * Update the control settings. 552 */ 553 va_start(ap, cp); 554 while ((name = va_arg(ap, int)) > 0) { 555 switch (name) { 556 case DICT_CACHE_CTL_END: 557 break; 558 case DICT_CACHE_CTL_FLAGS: 559 cp->user_flags = va_arg(ap, int); 560 cp->log_delay = (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) ? 561 0 : DC_DEF_LOG_DELAY; 562 break; 563 case DICT_CACHE_CTL_INTERVAL: 564 cp->exp_interval = va_arg(ap, int); 565 if (cp->exp_interval < 0) 566 msg_panic("%s: bad %s cache cleanup interval %d", 567 myname, cp->name, cp->exp_interval); 568 break; 569 case DICT_CACHE_CTL_VALIDATOR: 570 cp->exp_validator = va_arg(ap, DICT_CACHE_VALIDATOR_FN); 571 break; 572 case DICT_CACHE_CTL_CONTEXT: 573 cp->exp_context = va_arg(ap, void *); 574 break; 575 default: 576 msg_panic("%s: bad command: %d", myname, name); 577 } 578 } 579 va_end(ap); 580 581 /* 582 * Schedule the cache cleanup thread. 583 */ 584 if (cp->exp_interval && cp->exp_validator) { 585 586 /* 587 * Sanity checks. 588 */ 589 if (cache_cleanup_is_active) 590 msg_panic("%s: %s cache cleanup is already scheduled", 591 myname, cp->name); 592 593 /* 594 * The next start time depends on the last completion time. 595 */ 596 #define NEXT_START(last, delta) ((delta) + (unsigned long) atol(last)) 597 #define NOW (time((time_t *) 0)) /* NOT: event_time() */ 598 599 if ((last_done = dict_get(cp->db, DC_LAST_CACHE_CLEANUP_COMPLETED)) == 0 600 || (next_interval = (NEXT_START(last_done, cp->exp_interval) - NOW)) < 0) 601 next_interval = 0; 602 if (next_interval > cp->exp_interval) 603 next_interval = cp->exp_interval; 604 if ((cp->user_flags & DICT_CACHE_FLAG_VERBOSE) && next_interval > 0) 605 msg_info("%s cache cleanup will start after %ds", 606 cp->name, (int) next_interval); 607 event_request_timer(dict_cache_clean_event, (void *) cp, 608 (int) next_interval); 609 } 610 611 /* 612 * Cancel the cache cleanup thread. 613 */ 614 else if (cache_cleanup_is_active) { 615 if (cp->retained || cp->dropped) 616 dict_cache_clean_stat_log_reset(cp, "partial"); 617 dict_cache_delete_behind_reset(cp); 618 event_cancel_timer(dict_cache_clean_event, (void *) cp); 619 } 620 } 621 622 /* dict_cache_open - open cache file */ 623 624 DICT_CACHE *dict_cache_open(const char *dbname, int open_flags, int dict_flags) 625 { 626 DICT_CACHE *cp; 627 DICT *dict; 628 629 /* 630 * Open the database as requested. Don't attempt to second-guess the 631 * application. 632 */ 633 dict = dict_open(dbname, open_flags, dict_flags); 634 635 /* 636 * Create the DICT_CACHE object. 637 */ 638 cp = (DICT_CACHE *) mymalloc(sizeof(*cp)); 639 cp->name = mystrdup(dbname); 640 cp->cache_flags = 0; 641 cp->user_flags = 0; 642 cp->db = dict; 643 cp->saved_curr_key = 0; 644 cp->saved_curr_val = 0; 645 cp->exp_interval = 0; 646 cp->exp_validator = 0; 647 cp->exp_context = 0; 648 cp->retained = 0; 649 cp->dropped = 0; 650 cp->log_delay = DC_DEF_LOG_DELAY; 651 cp->upd_log_stamp = cp->get_log_stamp = 652 cp->del_log_stamp = cp->seq_log_stamp = 0; 653 654 return (cp); 655 } 656 657 /* dict_cache_close - close cache file */ 658 659 void dict_cache_close(DICT_CACHE *cp) 660 { 661 662 /* 663 * Cancel the cache cleanup thread. This also logs (and resets) 664 * statistics for a scan that is in progress. 
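     * Passing a zero interval (without changing the validator) is the
     * documented way to stop the cleanup thread; see the
     * CA_DICT_CACHE_CTL_INTERVAL description in the header comment above.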
     */
    dict_cache_control(cp, DICT_CACHE_CTL_INTERVAL, 0, DICT_CACHE_CTL_END);

    /*
     * Destroy the DICT_CACHE object.
     */
    myfree(cp->name);
    dict_close(cp->db);
    if (cp->saved_curr_key)
	myfree(cp->saved_curr_key);
    if (cp->saved_curr_val)
	myfree(cp->saved_curr_val);
    myfree((void *) cp);
}

/* dict_cache_name - get the cache name */

const char *dict_cache_name(DICT_CACHE *cp)
{

    /*
     * This is used for verbose logging or warning messages, so the cost of
     * the call is incurred only where needed (well, sort of - code that
     * does not execute still presents overhead for the processor pipeline,
     * processor cache, etc).
     */
    return (cp->name);
}

/*
 * Test driver with support for interleaved access. First, enter a number of
 * requests to look up, update or delete a sequence of cache entries, then
 * interleave those sequences with the "run" command.
 */
#ifdef TEST
#include <msg_vstream.h>
#include <vstring_vstream.h>
#include <argv.h>
#include <stringops.h>

#define DELIMS	" "
#define USAGE	"\n\tTo manage settings:" \
	"\n\tverbose <level> (verbosity level)" \
	"\n\telapsed <level> (0=don't show elapsed time)" \
	"\n\tlmdb_map_size <limit> (initial LMDB size limit)" \
	"\n\tcache <type>:<name> (switch to named database)" \
	"\n\tstatus (show map size, cache, pending requests)" \
	"\n\n\tTo manage pending requests:" \
	"\n\treset (discard pending requests)" \
	"\n\trun (execute pending requests in interleaved order)" \
	"\n\n\tTo add a pending request:" \
	"\n\tquery <key-suffix> <count> (negative to reverse order)" \
	"\n\tupdate <key-suffix> <count> (negative to reverse order)" \
	"\n\tdelete <key-suffix> <count> (negative to reverse order)" \
	"\n\tpurge <key-suffix>" \
	"\n\tcount <key-suffix>"

/*
 * For realism, open the cache with the same flags as postscreen(8) and
 * verify(8).
 */
#define DICT_CACHE_OPEN_FLAGS (DICT_FLAG_DUP_REPLACE | DICT_FLAG_SYNC_UPDATE | \
	DICT_FLAG_OPEN_LOCK)

/*
 * Storage for one request to access a sequence of cache entries.
 */
typedef struct DICT_CACHE_SREQ {
    int     flags;			/* per-request: reverse, purge */
    char   *cmd;			/* command for status report */
    void    (*action) (struct DICT_CACHE_SREQ *, DICT_CACHE *, VSTRING *);
    char   *suffix;			/* key suffix */
    int     done;			/* progress indicator */
    int     todo;			/* number of entries to process */
    int     first_next;			/* first/next */
} DICT_CACHE_SREQ;

#define DICT_CACHE_SREQ_FLAG_PURGE	(1<<1)	/* purge instead of count */
#define DICT_CACHE_SREQ_FLAG_REVERSE	(1<<2)	/* reverse instead of forward */

#define DICT_CACHE_SREQ_LIMIT	10

/*
 * All test requests combined.
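 * The job_list[] member below is declared with one element; create_requests()
 * over-allocates the structure so that job_list[] effectively holds "size"
 * slots (a pre-C99 substitute for a flexible array member).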
749 */ 750 typedef struct DICT_CACHE_TEST { 751 int flags; /* exclusion flags */ 752 int size; /* allocated slots */ 753 int used; /* used slots */ 754 DICT_CACHE_SREQ job_list[1]; /* actually, a bunch */ 755 } DICT_CACHE_TEST; 756 757 #define DICT_CACHE_TEST_FLAG_ITER (1<<0) /* count or purge */ 758 759 #define STR(x) vstring_str(x) 760 761 int show_elapsed = 1; /* show elapsed time */ 762 763 #ifdef HAS_LMDB 764 extern size_t dict_lmdb_map_size; /* LMDB-specific */ 765 766 #endif 767 768 /* usage - command-line usage message */ 769 770 static NORETURN usage(const char *progname) 771 { 772 msg_fatal("usage: %s (no argument)", progname); 773 } 774 775 /* make_tagged_key - make tagged search key */ 776 777 static void make_tagged_key(VSTRING *bp, DICT_CACHE_SREQ *cp) 778 { 779 if (cp->done < 0) 780 msg_panic("make_tagged_key: bad done count: %d", cp->done); 781 if (cp->todo < 1) 782 msg_panic("make_tagged_key: bad todo count: %d", cp->todo); 783 vstring_sprintf(bp, "%d-%s", 784 (cp->flags & DICT_CACHE_SREQ_FLAG_REVERSE) ? 785 cp->todo - cp->done - 1 : cp->done, cp->suffix); 786 } 787 788 /* create_requests - create request list */ 789 790 static DICT_CACHE_TEST *create_requests(int count) 791 { 792 DICT_CACHE_TEST *tp; 793 DICT_CACHE_SREQ *cp; 794 795 tp = (DICT_CACHE_TEST *) mymalloc(sizeof(DICT_CACHE_TEST) + 796 (count - 1) *sizeof(DICT_CACHE_SREQ)); 797 tp->flags = 0; 798 tp->size = count; 799 tp->used = 0; 800 for (cp = tp->job_list; cp < tp->job_list + count; cp++) { 801 cp->flags = 0; 802 cp->cmd = 0; 803 cp->action = 0; 804 cp->suffix = 0; 805 cp->todo = 0; 806 cp->first_next = DICT_SEQ_FUN_FIRST; 807 } 808 return (tp); 809 } 810 811 /* reset_requests - reset request list */ 812 813 static void reset_requests(DICT_CACHE_TEST *tp) 814 { 815 DICT_CACHE_SREQ *cp; 816 817 tp->flags = 0; 818 tp->used = 0; 819 for (cp = tp->job_list; cp < tp->job_list + tp->size; cp++) { 820 cp->flags = 0; 821 if (cp->cmd) { 822 myfree(cp->cmd); 823 cp->cmd = 0; 824 } 825 cp->action = 0; 826 if (cp->suffix) { 827 myfree(cp->suffix); 828 cp->suffix = 0; 829 } 830 cp->todo = 0; 831 cp->first_next = DICT_SEQ_FUN_FIRST; 832 } 833 } 834 835 /* free_requests - destroy request list */ 836 837 static void free_requests(DICT_CACHE_TEST *tp) 838 { 839 reset_requests(tp); 840 myfree((void *) tp); 841 } 842 843 /* run_requests - execute pending requests in interleaved order */ 844 845 static void run_requests(DICT_CACHE_TEST *tp, DICT_CACHE *dp, VSTRING *bp) 846 { 847 DICT_CACHE_SREQ *cp; 848 int todo; 849 struct timeval start; 850 struct timeval finish; 851 struct timeval elapsed; 852 853 if (dp == 0) { 854 msg_warn("no cache"); 855 return; 856 } 857 GETTIMEOFDAY(&start); 858 do { 859 todo = 0; 860 for (cp = tp->job_list; cp < tp->job_list + tp->used; cp++) { 861 if (cp->done < cp->todo) { 862 todo = 1; 863 cp->action(cp, dp, bp); 864 } 865 } 866 } while (todo); 867 GETTIMEOFDAY(&finish); 868 timersub(&finish, &start, &elapsed); 869 if (show_elapsed) 870 vstream_printf("Elapsed: %g\n", 871 elapsed.tv_sec + elapsed.tv_usec / 1000000.0); 872 873 reset_requests(tp); 874 } 875 876 /* show_status - show settings and pending requests */ 877 878 static void show_status(DICT_CACHE_TEST *tp, DICT_CACHE *dp) 879 { 880 DICT_CACHE_SREQ *cp; 881 882 #ifdef HAS_LMDB 883 vstream_printf("lmdb_map_size\t%ld\n", (long) dict_lmdb_map_size); 884 #endif 885 vstream_printf("cache\t%s\n", dp ? 
dp->name : "(none)"); 886 887 if (tp->used == 0) 888 vstream_printf("No pending requests\n"); 889 else 890 vstream_printf("%s\t%s\t%s\t%s\t%s\t%s\n", 891 "cmd", "dir", "suffix", "count", "done", "first/next"); 892 893 for (cp = tp->job_list; cp < tp->job_list + tp->used; cp++) 894 if (cp->todo > 0) 895 vstream_printf("%s\t%s\t%s\t%d\t%d\t%d\n", 896 cp->cmd, 897 (cp->flags & DICT_CACHE_SREQ_FLAG_REVERSE) ? 898 "reverse" : "forward", 899 cp->suffix ? cp->suffix : "(null)", cp->todo, 900 cp->done, cp->first_next); 901 } 902 903 /* query_action - lookup cache entry */ 904 905 static void query_action(DICT_CACHE_SREQ *cp, DICT_CACHE *dp, VSTRING *bp) 906 { 907 const char *lookup; 908 909 make_tagged_key(bp, cp); 910 if ((lookup = dict_cache_lookup(dp, STR(bp))) == 0) { 911 if (dp->error) 912 msg_warn("query_action: query failed: %s: %m", STR(bp)); 913 else 914 msg_warn("query_action: query failed: %s", STR(bp)); 915 } else if (strcmp(STR(bp), lookup) != 0) { 916 msg_warn("lookup result \"%s\" differs from key \"%s\"", 917 lookup, STR(bp)); 918 } 919 cp->done += 1; 920 } 921 922 /* update_action - update cache entry */ 923 924 static void update_action(DICT_CACHE_SREQ *cp, DICT_CACHE *dp, VSTRING *bp) 925 { 926 make_tagged_key(bp, cp); 927 if (dict_cache_update(dp, STR(bp), STR(bp)) != 0) { 928 if (dp->error) 929 msg_warn("update_action: update failed: %s: %m", STR(bp)); 930 else 931 msg_warn("update_action: update failed: %s", STR(bp)); 932 } 933 cp->done += 1; 934 } 935 936 /* delete_action - delete cache entry */ 937 938 static void delete_action(DICT_CACHE_SREQ *cp, DICT_CACHE *dp, VSTRING *bp) 939 { 940 make_tagged_key(bp, cp); 941 if (dict_cache_delete(dp, STR(bp)) != 0) { 942 if (dp->error) 943 msg_warn("delete_action: delete failed: %s: %m", STR(bp)); 944 else 945 msg_warn("delete_action: delete failed: %s", STR(bp)); 946 } 947 cp->done += 1; 948 } 949 950 /* iter_action - iterate over cache and act on entries with given suffix */ 951 952 static void iter_action(DICT_CACHE_SREQ *cp, DICT_CACHE *dp, VSTRING *bp) 953 { 954 const char *cache_key; 955 const char *cache_val; 956 const char *what; 957 const char *suffix; 958 959 if (dict_cache_sequence(dp, cp->first_next, &cache_key, &cache_val) == 0) { 960 if (strcmp(cache_key, cache_val) != 0) 961 msg_warn("value \"%s\" differs from key \"%s\"", 962 cache_val, cache_key); 963 suffix = cache_key + strspn(cache_key, "0123456789"); 964 if (suffix[0] == '-' && strcmp(suffix + 1, cp->suffix) == 0) { 965 cp->done += 1; 966 cp->todo = cp->done + 1; /* XXX */ 967 if ((cp->flags & DICT_CACHE_SREQ_FLAG_PURGE) 968 && dict_cache_delete(dp, cache_key) != 0) { 969 if (dp->error) 970 msg_warn("purge_action: delete failed: %s: %m", STR(bp)); 971 else 972 msg_warn("purge_action: delete failed: %s", STR(bp)); 973 } 974 } 975 cp->first_next = DICT_SEQ_FUN_NEXT; 976 } else { 977 what = (cp->flags & DICT_CACHE_SREQ_FLAG_PURGE) ? "purge" : "count"; 978 if (dp->error) 979 msg_warn("%s error after %d: %m", what, cp->done); 980 else 981 vstream_printf("suffix=%s %s=%d\n", cp->suffix, what, cp->done); 982 cp->todo = 0; 983 } 984 } 985 986 /* 987 * Table-driven support. 
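 * Each entry in the req_info[] table below names a command, the number of
 * input fields it expects (command word included), the action routine that
 * executes one step of the request, and optional exclusion and per-request
 * flags. add_request() scans this table to translate a parsed input line
 * into a DICT_CACHE_SREQ slot.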
988 */ 989 typedef struct DICT_CACHE_SREQ_INFO { 990 const char *name; 991 int argc; 992 void (*action) (DICT_CACHE_SREQ *, DICT_CACHE *, VSTRING *); 993 int test_flags; 994 int req_flags; 995 } DICT_CACHE_SREQ_INFO; 996 997 static DICT_CACHE_SREQ_INFO req_info[] = { 998 {"query", 3, query_action}, 999 {"update", 3, update_action}, 1000 {"delete", 3, delete_action}, 1001 {"count", 2, iter_action, DICT_CACHE_TEST_FLAG_ITER}, 1002 {"purge", 2, iter_action, DICT_CACHE_TEST_FLAG_ITER, DICT_CACHE_SREQ_FLAG_PURGE}, 1003 0, 1004 }; 1005 1006 /* add_request - add a request to the list */ 1007 1008 static void add_request(DICT_CACHE_TEST *tp, ARGV *argv) 1009 { 1010 DICT_CACHE_SREQ_INFO *rp; 1011 DICT_CACHE_SREQ *cp; 1012 int req_flags; 1013 int count; 1014 char *cmd = argv->argv[0]; 1015 char *suffix = (argv->argc > 1 ? argv->argv[1] : 0); 1016 char *todo = (argv->argc > 2 ? argv->argv[2] : "1"); /* XXX */ 1017 1018 if (tp->used >= tp->size) { 1019 msg_warn("%s: request list is full", cmd); 1020 return; 1021 } 1022 for (rp = req_info; /* See below */ ; rp++) { 1023 if (rp->name == 0) { 1024 vstream_printf("usage: %s\n", USAGE); 1025 return; 1026 } 1027 if (strcmp(rp->name, argv->argv[0]) == 0 1028 && rp->argc == argv->argc) 1029 break; 1030 } 1031 req_flags = rp->req_flags; 1032 if (todo[0] == '-') { 1033 req_flags |= DICT_CACHE_SREQ_FLAG_REVERSE; 1034 todo += 1; 1035 } 1036 if (!alldig(todo) || (count = atoi(todo)) == 0) { 1037 msg_warn("%s: bad count: %s", cmd, todo); 1038 return; 1039 } 1040 if (tp->flags & rp->test_flags) { 1041 msg_warn("%s: command conflicts with other command", cmd); 1042 return; 1043 } 1044 tp->flags |= rp->test_flags; 1045 cp = tp->job_list + tp->used; 1046 cp->cmd = mystrdup(cmd); 1047 cp->action = rp->action; 1048 if (suffix) 1049 cp->suffix = mystrdup(suffix); 1050 cp->done = 0; 1051 cp->flags = req_flags; 1052 cp->todo = count; 1053 tp->used += 1; 1054 } 1055 1056 /* main - main program */ 1057 1058 int main(int argc, char **argv) 1059 { 1060 DICT_CACHE_TEST *test_job; 1061 VSTRING *inbuf = vstring_alloc(100); 1062 char *bufp; 1063 ARGV *args; 1064 DICT_CACHE *cache = 0; 1065 int stdin_is_tty; 1066 1067 msg_vstream_init(argv[0], VSTREAM_ERR); 1068 if (argc != 1) 1069 usage(argv[0]); 1070 1071 1072 test_job = create_requests(DICT_CACHE_SREQ_LIMIT); 1073 1074 stdin_is_tty = isatty(0); 1075 1076 for (;;) { 1077 if (stdin_is_tty) { 1078 vstream_printf("> "); 1079 vstream_fflush(VSTREAM_OUT); 1080 } 1081 if (vstring_fgets_nonl(inbuf, VSTREAM_IN) == 0) 1082 break; 1083 bufp = vstring_str(inbuf); 1084 if (!stdin_is_tty) { 1085 vstream_printf("> %s\n", bufp); 1086 vstream_fflush(VSTREAM_OUT); 1087 } 1088 if (*bufp == '#') 1089 continue; 1090 args = argv_split(bufp, DELIMS); 1091 if (argc == 0) { 1092 vstream_printf("usage: %s\n", USAGE); 1093 vstream_fflush(VSTREAM_OUT); 1094 continue; 1095 } 1096 if (strcmp(args->argv[0], "verbose") == 0 && args->argc == 2) { 1097 msg_verbose = atoi(args->argv[1]); 1098 } else if (strcmp(args->argv[0], "elapsed") == 0 && args->argc == 2) { 1099 show_elapsed = atoi(args->argv[1]); 1100 #ifdef HAS_LMDB 1101 } else if (strcmp(args->argv[0], "lmdb_map_size") == 0 && args->argc == 2) { 1102 dict_lmdb_map_size = atol(args->argv[1]); 1103 #endif 1104 } else if (strcmp(args->argv[0], "cache") == 0 && args->argc == 2) { 1105 if (cache) 1106 dict_cache_close(cache); 1107 cache = dict_cache_open(args->argv[1], O_CREAT | O_RDWR, 1108 DICT_CACHE_OPEN_FLAGS); 1109 } else if (strcmp(args->argv[0], "reset") == 0 && args->argc == 1) { 1110 
	    reset_requests(test_job);
	} else if (strcmp(args->argv[0], "run") == 0 && args->argc == 1) {
	    run_requests(test_job, cache, inbuf);
	} else if (strcmp(args->argv[0], "status") == 0 && args->argc == 1) {
	    show_status(test_job, cache);
	} else {
	    add_request(test_job, args);
	}
	vstream_fflush(VSTREAM_OUT);
	argv_free(args);
    }

    vstring_free(inbuf);
    free_requests(test_job);
    if (cache)
	dict_cache_close(cache);
    return (0);
}

#endif
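/*
 * Illustrative usage sketch (not part of the original module). It shows one
 * plausible way a long-running program could combine dict_cache_open(),
 * dict_cache_control() and a DICT_CACHE_VALIDATOR_FN, using only the
 * interfaces documented in the header comment above. The database name, the
 * "example_*" identifiers and the time intervals are hypothetical; error
 * handling and the events(3) loop that a real daemon would run (so that the
 * cleanup timer can fire) are omitted.
 */
#if 0

/* example_validator - keep entries whose stored time stamp is still fresh */

static int example_validator(const char *cache_key, const char *cache_val,
			             void *context)
{
    time_t *time_to_live = (time_t *) context;

    /* Values are assumed to be decimal time stamps, as written below. */
    return (time((time_t *) 0) - (time_t) atol(cache_val) < *time_to_live);
}

/* example - open a cache, enable periodic cleanup, store and look up a key */

static void example(void)
{
    static time_t time_to_live = 3600;		/* hypothetical: one hour */
    DICT_CACHE *cache;
    VSTRING *buf = vstring_alloc(100);

    /* The cache must be opened with DICT_FLAG_DUP_REPLACE (see above). */
    cache = dict_cache_open("btree:/tmp/example_cache", O_CREAT | O_RDWR,
			    DICT_FLAG_DUP_REPLACE | DICT_FLAG_SYNC_UPDATE);

    /* Run cleanup every 600s; entries are kept while the validator says so. */
    dict_cache_control(cache,
		       DICT_CACHE_CTL_FLAGS, DICT_CACHE_FLAG_STATISTICS,
		       DICT_CACHE_CTL_INTERVAL, 600,
		       DICT_CACHE_CTL_VALIDATOR, example_validator,
		       DICT_CACHE_CTL_CONTEXT, (void *) &time_to_live,
		       DICT_CACHE_CTL_END);

    /* Store one entry whose value is the current time stamp, then read it. */
    vstring_sprintf(buf, "%ld", (long) time((time_t *) 0));
    dict_cache_update(cache, "example_key", vstring_str(buf));
    if (dict_cache_lookup(cache, "example_key") == 0 && cache->error != 0)
	msg_warn("lookup error for cache %s", dict_cache_name(cache));

    /* A real program would enter the event loop here; this sketch stops. */
    dict_cache_close(cache);
    vstring_free(buf);
}

#endif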