/*	$NetBSD: cache.c,v 1.1.1.6 2018/02/06 01:53:16 christos Exp $	*/

/* cache.c - routines to maintain an in-core cache of entries */
/* $OpenLDAP$ */
/* This work is part of OpenLDAP Software <http://www.openldap.org/>.
 *
 * Copyright 2001-2017 The OpenLDAP Foundation.
 * Portions Copyright 2001-2003 Pierangelo Masarati.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted only as authorized by the OpenLDAP
 * Public License.
 *
 * A copy of this license is available in file LICENSE in the
 * top-level directory of the distribution or, alternatively, at
 * <http://www.OpenLDAP.org/license.html>.
 */
/* ACKNOWLEDGEMENTS:
 * This work was initially developed by Pierangelo Masarati for inclusion
 * in OpenLDAP Software.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: cache.c,v 1.1.1.6 2018/02/06 01:53:16 christos Exp $");

#include "portable.h"

#include <stdio.h>
#include "ac/string.h"

#include "slap.h"

#include "back-monitor.h"

/*
 * The cache maps DNs to Entries.
 * Each entry, in turn, holds the list of its children in the e_private
 * field.  This is used by the search operation to perform onelevel and
 * subtree candidate selection.
 */
typedef struct monitor_cache_t {
	struct berval		mc_ndn;
	Entry			*mc_e;
} monitor_cache_t;

/*
 * compares entries based on the dn
 */
int
monitor_cache_cmp(
	const void	*c1,
	const void	*c2 )
{
	monitor_cache_t	*cc1 = ( monitor_cache_t * )c1;
	monitor_cache_t	*cc2 = ( monitor_cache_t * )c2;

	/*
	 * case sensitive, because the dn MUST be normalized
	 */
	return ber_bvcmp( &cc1->mc_ndn, &cc2->mc_ndn );
}

/*
 * checks for duplicate entries
 */
int
monitor_cache_dup(
	void	*c1,
	void	*c2 )
{
	monitor_cache_t	*cc1 = ( monitor_cache_t * )c1;
	monitor_cache_t	*cc2 = ( monitor_cache_t * )c2;

	/*
	 * case sensitive, because the dn MUST be normalized
	 */
	return ber_bvcmp( &cc1->mc_ndn, &cc2->mc_ndn ) == 0 ? -1 : 0;
}
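
/*
 * Illustrative sketch (not compiled): how the children list described in
 * the header comment above is laid out.  Each cached entry's e_private
 * points to a monitor_entry_t whose mp_children field holds the first
 * child Entry; siblings are chained through each child's own
 * monitor_entry_t via mp_next.  The example_walk_children() name is
 * hypothetical and exists only for this sketch; a real caller would also
 * hold the appropriate locks (see monitor_cache_lock() below).
 */
#if 0
static void
example_walk_children( Entry *parent )
{
	monitor_entry_t	*pmp = (monitor_entry_t *)parent->e_private;
	Entry		*child;

	for ( child = pmp->mp_children; child != NULL; ) {
		monitor_entry_t	*cmp = (monitor_entry_t *)child->e_private;

		/* ... look at child, e.g. for onelevel candidate selection ... */

		child = cmp->mp_next;
	}
}
#endif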

/*
 * adds an entry to the cache
 */
int
monitor_cache_add(
	monitor_info_t	*mi,
	Entry		*e )
{
	monitor_cache_t	*mc;
	monitor_entry_t	*mp;
	int		rc;

	assert( mi != NULL );
	assert( e != NULL );

	mp = ( monitor_entry_t *)e->e_private;

	mc = ( monitor_cache_t * )ch_malloc( sizeof( monitor_cache_t ) );
	mc->mc_ndn = e->e_nname;
	mc->mc_e = e;
	ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );
	rc = avl_insert( &mi->mi_cache, ( caddr_t )mc,
		monitor_cache_cmp, monitor_cache_dup );
	ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );

	return rc;
}

/*
 * locks the entry (no r/w)
 */
int
monitor_cache_lock(
	Entry		*e )
{
	monitor_entry_t	*mp;

	assert( e != NULL );
	assert( e->e_private != NULL );

	mp = ( monitor_entry_t * )e->e_private;
	ldap_pvt_thread_mutex_lock( &mp->mp_mutex );

	return( 0 );
}

/*
 * tries to lock the entry (no r/w)
 */
int
monitor_cache_trylock(
	Entry		*e )
{
	monitor_entry_t	*mp;

	assert( e != NULL );
	assert( e->e_private != NULL );

	mp = ( monitor_entry_t * )e->e_private;
	return ldap_pvt_thread_mutex_trylock( &mp->mp_mutex );
}

/*
 * gets an entry from the cache based on the normalized dn;
 * the entry is returned with its mutex locked
 */
int
monitor_cache_get(
	monitor_info_t	*mi,
	struct berval	*ndn,
	Entry		**ep )
{
	monitor_cache_t	tmp_mc, *mc;

	assert( mi != NULL );
	assert( ndn != NULL );
	assert( ep != NULL );

	*ep = NULL;

	tmp_mc.mc_ndn = *ndn;
retry:;
	ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );
	mc = ( monitor_cache_t * )avl_find( mi->mi_cache,
			( caddr_t )&tmp_mc, monitor_cache_cmp );

	if ( mc != NULL ) {
		/* entry is returned with mutex locked */
		if ( monitor_cache_trylock( mc->mc_e ) ) {
			ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
			ldap_pvt_thread_yield();
			goto retry;
		}
		*ep = mc->mc_e;
	}

	ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );

	return ( *ep == NULL ? -1 : 0 );
}
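
/*
 * Illustrative sketch (not compiled): the intended calling pattern for
 * monitor_cache_get().  On success the entry comes back with its mutex
 * held, so the caller must eventually hand it back through
 * monitor_cache_release() (defined below).  The example_peek() name is
 * hypothetical and exists only for this sketch.
 */
#if 0
static int
example_peek( monitor_info_t *mi, struct berval *ndn )
{
	Entry	*e = NULL;

	if ( monitor_cache_get( mi, ndn, &e ) != 0 || e == NULL ) {
		return -1;	/* no such entry in the cache */
	}

	/* ... read or update the entry while its mutex is held ... */

	monitor_cache_release( mi, e );	/* unlocks (and frees volatile entries) */

	return 0;
}
#endif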

/*
 * removes the entry with the given normalized dn from the cache;
 * the entry is detached from its parent's list of children and
 * returned to the caller, which becomes responsible for freeing it
 */
int
monitor_cache_remove(
	monitor_info_t	*mi,
	struct berval	*ndn,
	Entry		**ep )
{
	monitor_cache_t	tmp_mc, *mc;
	struct berval	pndn;

	assert( mi != NULL );
	assert( ndn != NULL );
	assert( ep != NULL );

	*ep = NULL;

	dnParent( ndn, &pndn );

retry:;
	ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );

	tmp_mc.mc_ndn = *ndn;
	mc = ( monitor_cache_t * )avl_find( mi->mi_cache,
			( caddr_t )&tmp_mc, monitor_cache_cmp );

	if ( mc != NULL ) {
		monitor_cache_t	*pmc;

		if ( monitor_cache_trylock( mc->mc_e ) ) {
			ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
			goto retry;
		}

		tmp_mc.mc_ndn = pndn;
		pmc = ( monitor_cache_t * )avl_find( mi->mi_cache,
				( caddr_t )&tmp_mc, monitor_cache_cmp );
		if ( pmc != NULL ) {
			monitor_entry_t	*mp = (monitor_entry_t *)mc->mc_e->e_private,
					*pmp = (monitor_entry_t *)pmc->mc_e->e_private;
			Entry		**entryp;

			if ( monitor_cache_trylock( pmc->mc_e ) ) {
				monitor_cache_release( mi, mc->mc_e );
				ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
				goto retry;
			}

			for ( entryp = &pmp->mp_children; *entryp != NULL; ) {
				monitor_entry_t	*next = (monitor_entry_t *)(*entryp)->e_private;
				if ( next == mp ) {
					*entryp = next->mp_next;
					entryp = NULL;
					break;
				}

				entryp = &next->mp_next;
			}

			if ( entryp != NULL ) {
				Debug( LDAP_DEBUG_ANY,
					"monitor_cache_remove(\"%s\"): "
					"not in parent's list\n",
					ndn->bv_val, 0, 0 );
			}

			/* either succeeded, and the entry is no longer
			 * in its parent's list, or failed, and the
			 * entry is neither mucked with nor returned */
			monitor_cache_release( mi, pmc->mc_e );

			if ( entryp == NULL ) {
				monitor_cache_t	*tmpmc;

				tmp_mc.mc_ndn = *ndn;
				tmpmc = avl_delete( &mi->mi_cache,
					( caddr_t )&tmp_mc, monitor_cache_cmp );
				assert( tmpmc == mc );

				*ep = mc->mc_e;
				ch_free( mc );
				mc = NULL;

				/* NOTE: we destroy the mutex, but otherwise
				 * leave the private data around; specifically,
				 * callbacks need to be freed by someone else */

				ldap_pvt_thread_mutex_destroy( &mp->mp_mutex );
				mp->mp_next = NULL;
				mp->mp_children = NULL;
			}

		}

		if ( mc ) {
			monitor_cache_release( mi, mc->mc_e );
		}
	}

	ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );

	return ( *ep == NULL ? -1 : 0 );
}
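
/*
 * Illustrative sketch (not compiled): what a caller of
 * monitor_cache_remove() is expected to do with the entry it gets back.
 * The entry has been unlinked from the AVL tree and from its parent's
 * children list and its mutex has been destroyed, but e_private (and any
 * registered callbacks) is left in place and must be freed by the caller.
 * The example_unregister() name is hypothetical; actual cleanup may vary.
 */
#if 0
static void
example_unregister( monitor_info_t *mi, struct berval *ndn )
{
	Entry	*e = NULL;

	if ( monitor_cache_remove( mi, ndn, &e ) == 0 && e != NULL ) {
		monitor_entry_t		*mp = (monitor_entry_t *)e->e_private;
		monitor_callback_t	*cb, *next;

		/* free the callbacks the cache deliberately left around */
		for ( cb = mp->mp_cb; cb != NULL; cb = next ) {
			next = cb->mc_next;
			if ( cb->mc_free ) {
				(void)cb->mc_free( e, &cb->mc_private );
			}
			ch_free( cb );
		}

		ch_free( mp );
		e->e_private = NULL;
		entry_free( e );
	}
}
#endif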

/*
 * If the entry exists in the cache, it is returned locked; otherwise,
 * if its parent exists and may generate volatile descendants, an
 * attempt is made to generate the required entry and, if successful,
 * the entry is returned
 */
int
monitor_cache_dn2entry(
	Operation		*op,
	SlapReply		*rs,
	struct berval		*ndn,
	Entry			**ep,
	Entry			**matched )
{
	monitor_info_t	*mi = (monitor_info_t *)op->o_bd->be_private;
	int		rc;
	struct berval	p_ndn = BER_BVNULL;
	Entry		*e_parent;
	monitor_entry_t	*mp;

	assert( mi != NULL );
	assert( ndn != NULL );
	assert( ep != NULL );
	assert( matched != NULL );

	*matched = NULL;

	if ( !dnIsSuffix( ndn, &op->o_bd->be_nsuffix[ 0 ] ) ) {
		return( -1 );
	}

	rc = monitor_cache_get( mi, ndn, ep );
	if ( !rc && *ep != NULL ) {
		return( 0 );
	}

	/* try with parent/ancestors */
	if ( BER_BVISNULL( ndn ) ) {
		BER_BVSTR( &p_ndn, "" );

	} else {
		dnParent( ndn, &p_ndn );
	}

	rc = monitor_cache_dn2entry( op, rs, &p_ndn, &e_parent, matched );
	if ( rc || e_parent == NULL ) {
		return( -1 );
	}

	mp = ( monitor_entry_t * )e_parent->e_private;
	rc = -1;
	if ( mp->mp_flags & MONITOR_F_VOLATILE_CH ) {
		/* parent entry generates volatile children */
		rc = monitor_entry_create( op, rs, ndn, e_parent, ep );
	}

	if ( !rc ) {
		monitor_cache_lock( *ep );
		monitor_cache_release( mi, e_parent );

	} else {
		*matched = e_parent;
	}

	return( rc );
}
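
/*
 * Illustrative sketch (not compiled): how monitor_cache_dn2entry() is
 * typically consumed by an operation handler.  On success *e is returned
 * locked; on failure *matched, when not NULL, points to the closest
 * locked ancestor, which is useful for a noSuchObject response.  Either
 * entry must be handed back through monitor_cache_release().  The
 * example_resolve() name is hypothetical and exists only for this sketch.
 */
#if 0
static int
example_resolve( Operation *op, SlapReply *rs, struct berval *ndn )
{
	monitor_info_t	*mi = (monitor_info_t *)op->o_bd->be_private;
	Entry		*e = NULL, *matched = NULL;

	if ( monitor_cache_dn2entry( op, rs, ndn, &e, &matched ) != 0 ) {
		if ( matched != NULL ) {
			/* ... report the matched DN ... */
			monitor_cache_release( mi, matched );
		}
		return LDAP_NO_SUCH_OBJECT;
	}

	/* ... operate on e while it is locked ... */

	monitor_cache_release( mi, e );

	return LDAP_SUCCESS;
}
#endif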

/*
 * releases the lock of the entry; if it is marked as volatile, it is
 * destroyed.
 */
int
monitor_cache_release(
	monitor_info_t	*mi,
	Entry		*e )
{
	monitor_entry_t	*mp;

	assert( mi != NULL );
	assert( e != NULL );
	assert( e->e_private != NULL );

	mp = ( monitor_entry_t * )e->e_private;

	if ( mp->mp_flags & MONITOR_F_VOLATILE ) {
		monitor_cache_t	*mc, tmp_mc;

		/* volatile entries do not return to cache */
		ldap_pvt_thread_mutex_lock( &mi->mi_cache_mutex );
		tmp_mc.mc_ndn = e->e_nname;
		mc = avl_delete( &mi->mi_cache,
				( caddr_t )&tmp_mc, monitor_cache_cmp );
		ldap_pvt_thread_mutex_unlock( &mi->mi_cache_mutex );
		if ( mc != NULL ) {
			ch_free( mc );
		}

		ldap_pvt_thread_mutex_unlock( &mp->mp_mutex );
		ldap_pvt_thread_mutex_destroy( &mp->mp_mutex );
		ch_free( mp );
		e->e_private = NULL;
		entry_free( e );

		return( 0 );
	}

	ldap_pvt_thread_mutex_unlock( &mp->mp_mutex );

	return( 0 );
}

static void
monitor_entry_destroy( void *v_mc )
{
	monitor_cache_t	*mc = (monitor_cache_t *)v_mc;

	if ( mc->mc_e != NULL ) {
		monitor_entry_t	*mp;

		assert( mc->mc_e->e_private != NULL );

		mp = ( monitor_entry_t * )mc->mc_e->e_private;

		if ( mp->mp_cb ) {
			monitor_callback_t	*cb;

			for ( cb = mp->mp_cb; cb != NULL; ) {
				monitor_callback_t	*next = cb->mc_next;

				if ( cb->mc_free ) {
					(void)cb->mc_free( mc->mc_e, &cb->mc_private );
				}
				/* free the callback being visited, not the
				 * list head, to avoid freeing it twice */
				ch_free( cb );

				cb = next;
			}
		}

		ldap_pvt_thread_mutex_destroy( &mp->mp_mutex );

		ch_free( mp );
		mc->mc_e->e_private = NULL;
		entry_free( mc->mc_e );
	}

	ch_free( mc );
}

int
monitor_cache_destroy(
	monitor_info_t	*mi )
{
	if ( mi->mi_cache ) {
		avl_free( mi->mi_cache, monitor_entry_destroy );
	}

	return 0;
}

/*
 * releases the entry on behalf of the frontend; the rw flag is ignored
 * because monitor entries are protected by a single (non-r/w) mutex
 */
int monitor_back_release(
	Operation	*op,
	Entry		*e,
	int		rw )
{
	monitor_info_t	*mi = ( monitor_info_t * )op->o_bd->be_private;
	return monitor_cache_release( mi, e );
}