/*
 * Copyright 2015-2018 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright 2004-2014, Akamai Technologies. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file is in two halves. The first half implements the public API
 * to be used by external consumers, and to be used by OpenSSL to store
 * data in a "secure arena." The second half implements the secure arena.
 * For details on that implementation, see below (look for uppercase
 * "SECURE HEAP IMPLEMENTATION").
 */
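/*
 * Illustrative sketch only (not part of this file): one way a consumer
 * might exercise the public half below. The sizes are example values
 * chosen for this comment, and __FILE__/__LINE__ are passed explicitly
 * because the sketch calls the CRYPTO_* functions directly; applications
 * would more typically go through the OPENSSL_secure_* convenience
 * macros in <openssl/crypto.h>.
 *
 *     if (CRYPTO_secure_malloc_init(32768, 16) == 0) {
 *         // secure arena unavailable; the calls below fall back to
 *         // the normal CRYPTO_malloc() heap
 *     }
 *
 *     unsigned char *key = CRYPTO_secure_malloc(32, __FILE__, __LINE__);
 *     if (key != NULL) {
 *         // ... fill and use the 32-byte buffer for key material ...
 *         CRYPTO_secure_clear_free(key, 32, __FILE__, __LINE__);
 *     }
 *
 *     CRYPTO_secure_malloc_done();   // returns 1 only once no secure
 *                                    // allocations remain outstanding
 */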
#include "e_os.h"
#include <openssl/crypto.h>

#include <string.h>

/* e_os.h includes unistd.h, which defines _POSIX_VERSION */
#if !defined(OPENSSL_NO_SECURE_MEMORY) && defined(OPENSSL_SYS_UNIX) \
    && ( (defined(_POSIX_VERSION) && _POSIX_VERSION >= 200112L)      \
         || defined(__sun) || defined(__hpux) || defined(__sgi)      \
         || defined(__osf__) )
# define IMPLEMENTED
# include <stdlib.h>
# include <assert.h>
# include <unistd.h>
# include <sys/types.h>
# include <sys/mman.h>
# if defined(OPENSSL_SYS_LINUX)
#  include <sys/syscall.h>
#  if defined(SYS_mlock2)
#   include <linux/mman.h>
#   include <errno.h>
#  endif
# endif
# include <sys/param.h>
# include <sys/stat.h>
# include <fcntl.h>
#endif

#define CLEAR(p, s) OPENSSL_cleanse(p, s)
#ifndef PAGE_SIZE
# define PAGE_SIZE 4096
#endif
#if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
# define MAP_ANON MAP_ANONYMOUS
#endif

#ifdef IMPLEMENTED
static size_t secure_mem_used;

static int secure_mem_initialized;

static CRYPTO_RWLOCK *sec_malloc_lock = NULL;

/*
 * These are the functions that must be implemented by a secure heap (sh).
 */
static int sh_init(size_t size, int minsize);
static void *sh_malloc(size_t size);
static void sh_free(void *ptr);
static void sh_done(void);
static size_t sh_actual_size(char *ptr);
static int sh_allocated(const char *ptr);
#endif

int CRYPTO_secure_malloc_init(size_t size, int minsize)
{
#ifdef IMPLEMENTED
    int ret = 0;

    if (!secure_mem_initialized) {
        sec_malloc_lock = CRYPTO_THREAD_lock_new();
        if (sec_malloc_lock == NULL)
            return 0;
        if ((ret = sh_init(size, minsize)) != 0) {
            secure_mem_initialized = 1;
        } else {
            CRYPTO_THREAD_lock_free(sec_malloc_lock);
            sec_malloc_lock = NULL;
        }
    }

    return ret;
#else
    return 0;
#endif /* IMPLEMENTED */
}

int CRYPTO_secure_malloc_done(void)
{
#ifdef IMPLEMENTED
    if (secure_mem_used == 0) {
        sh_done();
        secure_mem_initialized = 0;
        CRYPTO_THREAD_lock_free(sec_malloc_lock);
        sec_malloc_lock = NULL;
        return 1;
    }
#endif /* IMPLEMENTED */
    return 0;
}

int CRYPTO_secure_malloc_initialized(void)
{
#ifdef IMPLEMENTED
    return secure_mem_initialized;
#else
    return 0;
#endif /* IMPLEMENTED */
}

void *CRYPTO_secure_malloc(size_t num, const char *file, int line)
{
#ifdef IMPLEMENTED
    void *ret;
    size_t actual_size;

    if (!secure_mem_initialized) {
        return CRYPTO_malloc(num, file, line);
    }
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    ret = sh_malloc(num);
    actual_size = ret ? sh_actual_size(ret) : 0;
    secure_mem_used += actual_size;
    CRYPTO_THREAD_unlock(sec_malloc_lock);
    return ret;
#else
    return CRYPTO_malloc(num, file, line);
#endif /* IMPLEMENTED */
}

void *CRYPTO_secure_zalloc(size_t num, const char *file, int line)
{
#ifdef IMPLEMENTED
    if (secure_mem_initialized)
        /* CRYPTO_secure_malloc() zeroes allocations when it is implemented */
        return CRYPTO_secure_malloc(num, file, line);
#endif
    return CRYPTO_zalloc(num, file, line);
}

void CRYPTO_secure_free(void *ptr, const char *file, int line)
{
#ifdef IMPLEMENTED
    size_t actual_size;

    if (ptr == NULL)
        return;
    if (!CRYPTO_secure_allocated(ptr)) {
        CRYPTO_free(ptr, file, line);
        return;
    }
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    actual_size = sh_actual_size(ptr);
    CLEAR(ptr, actual_size);
    secure_mem_used -= actual_size;
    sh_free(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
#else
    CRYPTO_free(ptr, file, line);
#endif /* IMPLEMENTED */
}

void CRYPTO_secure_clear_free(void *ptr, size_t num,
                              const char *file, int line)
{
#ifdef IMPLEMENTED
    size_t actual_size;

    if (ptr == NULL)
        return;
    if (!CRYPTO_secure_allocated(ptr)) {
        OPENSSL_cleanse(ptr, num);
        CRYPTO_free(ptr, file, line);
        return;
    }
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    actual_size = sh_actual_size(ptr);
    CLEAR(ptr, actual_size);
    secure_mem_used -= actual_size;
    sh_free(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
#else
    if (ptr == NULL)
        return;
    OPENSSL_cleanse(ptr, num);
    CRYPTO_free(ptr, file, line);
#endif /* IMPLEMENTED */
}

int CRYPTO_secure_allocated(const void *ptr)
{
#ifdef IMPLEMENTED
    int ret;

    if (!secure_mem_initialized)
        return 0;
    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    ret = sh_allocated(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
    return ret;
#else
    return 0;
#endif /* IMPLEMENTED */
}

size_t CRYPTO_secure_used(void)
{
#ifdef IMPLEMENTED
    return secure_mem_used;
#else
    return 0;
#endif /* IMPLEMENTED */
}

size_t CRYPTO_secure_actual_size(void *ptr)
{
#ifdef IMPLEMENTED
    size_t actual_size;

    CRYPTO_THREAD_write_lock(sec_malloc_lock);
    actual_size = sh_actual_size(ptr);
    CRYPTO_THREAD_unlock(sec_malloc_lock);
    return actual_size;
#else
    return 0;
#endif
}
/* END OF PAGE ...

   ... START OF PAGE */

/*
 * SECURE HEAP IMPLEMENTATION
 */
#ifdef IMPLEMENTED


/*
 * The implementation provided here uses a fixed-sized mmap() heap,
 * which is locked into memory, not written to core files, and protected
 * on either side by an unmapped page, which will catch pointer overruns
 * (or underruns) and an attempt to read data out of the secure heap.
 * Free'd memory is zero'd or otherwise cleansed.
 *
 * This is a pretty standard buddy allocator. We keep areas in a multiple
 * of "sh.minsize" units. The freelist and bitmaps are kept separately,
 * so all (and only) data is kept in the mmap'd heap.
 *
 * This code assumes eight-bit bytes. The numbers 3 and 7 are all over the
 * place.
 */
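/*
 * Worked example (illustrative only; the numbers are chosen for this
 * comment). After sh_init(32768, 16) the arena is 2^15 bytes and the
 * smallest chunk is 16 bytes, so:
 *
 *     sh.bittable_size = (32768 / 16) * 2 = 4096 bits
 *     sh.freelist_size = log2(4096)       = 12   (lists 0..11)
 *
 * List n holds chunks of (arena_size >> n) bytes: list 0 is the whole
 * 32768-byte arena, list 11 holds the 16-byte minimum chunks. A chunk
 * that starts "off" bytes into the arena on list n occupies bit
 *
 *     (ONE << n) + off / (sh.arena_size >> n)
 *
 * of the binary tree kept in sh.bittable, so the root is bit 1 and a
 * chunk's buddy is always at (bit ^ 1). A request for, say, 100 bytes
 * walks the list selection in sh_malloc() from list 11 down to list 8
 * and is served from a 128-byte chunk (32768 >> 8).
 */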
#define ONE ((size_t)1)

# define TESTBIT(t, b)  (t[(b) >> 3] & (ONE << ((b) & 7)))
# define SETBIT(t, b)   (t[(b) >> 3] |= (ONE << ((b) & 7)))
# define CLEARBIT(t, b) (t[(b) >> 3] &= (0xFF & ~(ONE << ((b) & 7))))

#define WITHIN_ARENA(p) \
    ((char*)(p) >= sh.arena && (char*)(p) < &sh.arena[sh.arena_size])
#define WITHIN_FREELIST(p) \
    ((char*)(p) >= (char*)sh.freelist && (char*)(p) < (char*)&sh.freelist[sh.freelist_size])


typedef struct sh_list_st
{
    struct sh_list_st *next;
    struct sh_list_st **p_next;
} SH_LIST;

typedef struct sh_st
{
    char* map_result;
    size_t map_size;
    char *arena;
    size_t arena_size;
    char **freelist;
    ossl_ssize_t freelist_size;
    size_t minsize;
    unsigned char *bittable;
    unsigned char *bitmalloc;
    size_t bittable_size; /* size in bits */
} SH;

static SH sh;

static size_t sh_getlist(char *ptr)
{
    ossl_ssize_t list = sh.freelist_size - 1;
    size_t bit = (sh.arena_size + ptr - sh.arena) / sh.minsize;

    for (; bit; bit >>= 1, list--) {
        if (TESTBIT(sh.bittable, bit))
            break;
        OPENSSL_assert((bit & 1) == 0);
    }

    return list;
}


static int sh_testbit(char *ptr, int list, unsigned char *table)
{
    size_t bit;

    OPENSSL_assert(list >= 0 && list < sh.freelist_size);
    OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
    bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
    OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
    return TESTBIT(table, bit);
}

static void sh_clearbit(char *ptr, int list, unsigned char *table)
{
    size_t bit;

    OPENSSL_assert(list >= 0 && list < sh.freelist_size);
    OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
    bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
    OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
    OPENSSL_assert(TESTBIT(table, bit));
    CLEARBIT(table, bit);
}

static void sh_setbit(char *ptr, int list, unsigned char *table)
{
    size_t bit;

    OPENSSL_assert(list >= 0 && list < sh.freelist_size);
    OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
    bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
    OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
    OPENSSL_assert(!TESTBIT(table, bit));
    SETBIT(table, bit);
}

static void sh_add_to_list(char **list, char *ptr)
{
    SH_LIST *temp;

    OPENSSL_assert(WITHIN_FREELIST(list));
    OPENSSL_assert(WITHIN_ARENA(ptr));

    temp = (SH_LIST *)ptr;
    temp->next = *(SH_LIST **)list;
    OPENSSL_assert(temp->next == NULL || WITHIN_ARENA(temp->next));
    temp->p_next = (SH_LIST **)list;

    if (temp->next != NULL) {
        OPENSSL_assert((char **)temp->next->p_next == list);
        temp->next->p_next = &(temp->next);
    }

    *list = ptr;
}

static void sh_remove_from_list(char *ptr)
{
    SH_LIST *temp, *temp2;

    temp = (SH_LIST *)ptr;
    if (temp->next != NULL)
        temp->next->p_next = temp->p_next;
    *temp->p_next = temp->next;
    if (temp->next == NULL)
        return;

    temp2 = temp->next;
    OPENSSL_assert(WITHIN_FREELIST(temp2->p_next) || WITHIN_ARENA(temp2->p_next));
}

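/*
 * Rough picture of the mapping sh_init() builds below, assuming the
 * common case where sh.arena_size is a multiple of the page size
 * (pgsize); the exact end-guard offset is computed in "aligned":
 *
 *     sh.map_result
 *     |<- pgsize ->|<------ sh.arena_size ------>|<- pgsize ->|
 *     [ guard page ][           arena            ][ guard page ]
 *                   ^ sh.arena: mlock()'d and, where available,
 *                     marked MADV_DONTDUMP
 *
 * Both guards are mprotect()'d PROT_NONE, so an overrun or underrun of
 * the arena faults rather than silently touching other memory.
 */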
static int sh_init(size_t size, int minsize)
{
    int ret;
    size_t i;
    size_t pgsize;
    size_t aligned;

    memset(&sh, 0, sizeof(sh));

    /* make sure size and minsize are powers of 2 */
    OPENSSL_assert(size > 0);
    OPENSSL_assert((size & (size - 1)) == 0);
    OPENSSL_assert(minsize > 0);
    OPENSSL_assert((minsize & (minsize - 1)) == 0);
    if (size <= 0 || (size & (size - 1)) != 0)
        goto err;
    if (minsize <= 0 || (minsize & (minsize - 1)) != 0)
        goto err;

    while (minsize < (int)sizeof(SH_LIST))
        minsize *= 2;

    sh.arena_size = size;
    sh.minsize = minsize;
    sh.bittable_size = (sh.arena_size / sh.minsize) * 2;

    /* Prevent allocations of size 0 later on */
    if (sh.bittable_size >> 3 == 0)
        goto err;

    sh.freelist_size = -1;
    for (i = sh.bittable_size; i; i >>= 1)
        sh.freelist_size++;

    sh.freelist = OPENSSL_zalloc(sh.freelist_size * sizeof(char *));
    OPENSSL_assert(sh.freelist != NULL);
    if (sh.freelist == NULL)
        goto err;

    sh.bittable = OPENSSL_zalloc(sh.bittable_size >> 3);
    OPENSSL_assert(sh.bittable != NULL);
    if (sh.bittable == NULL)
        goto err;

    sh.bitmalloc = OPENSSL_zalloc(sh.bittable_size >> 3);
    OPENSSL_assert(sh.bitmalloc != NULL);
    if (sh.bitmalloc == NULL)
        goto err;

    /* Allocate space for heap, and two extra pages as guards */
#if defined(_SC_PAGE_SIZE) || defined (_SC_PAGESIZE)
    {
# if defined(_SC_PAGE_SIZE)
        long tmppgsize = sysconf(_SC_PAGE_SIZE);
# else
        long tmppgsize = sysconf(_SC_PAGESIZE);
# endif
        if (tmppgsize < 1)
            pgsize = PAGE_SIZE;
        else
            pgsize = (size_t)tmppgsize;
    }
#else
    pgsize = PAGE_SIZE;
#endif
    sh.map_size = pgsize + sh.arena_size + pgsize;
    if (1) {
#ifdef MAP_ANON
        sh.map_result = mmap(NULL, sh.map_size,
                             PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
    } else {
#endif
        int fd;

        sh.map_result = MAP_FAILED;
        if ((fd = open("/dev/zero", O_RDWR)) >= 0) {
            sh.map_result = mmap(NULL, sh.map_size,
                                 PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
            close(fd);
        }
    }
    if (sh.map_result == MAP_FAILED)
        goto err;
    sh.arena = (char *)(sh.map_result + pgsize);
    sh_setbit(sh.arena, 0, sh.bittable);
    sh_add_to_list(&sh.freelist[0], sh.arena);

    /* Now try to add guard pages and lock into memory. */
    ret = 1;

    /* Starting guard is already aligned from mmap. */
    if (mprotect(sh.map_result, pgsize, PROT_NONE) < 0)
        ret = 2;

    /* Ending guard page - need to round up to page boundary */
    aligned = (pgsize + sh.arena_size + (pgsize - 1)) & ~(pgsize - 1);
    if (mprotect(sh.map_result + aligned, pgsize, PROT_NONE) < 0)
        ret = 2;

#if defined(OPENSSL_SYS_LINUX) && defined(MLOCK_ONFAULT) && defined(SYS_mlock2)
    if (syscall(SYS_mlock2, sh.arena, sh.arena_size, MLOCK_ONFAULT) < 0) {
        if (errno == ENOSYS) {
            if (mlock(sh.arena, sh.arena_size) < 0)
                ret = 2;
        } else {
            ret = 2;
        }
    }
#else
    if (mlock(sh.arena, sh.arena_size) < 0)
        ret = 2;
#endif
#ifdef MADV_DONTDUMP
    if (madvise(sh.arena, sh.arena_size, MADV_DONTDUMP) < 0)
        ret = 2;
#endif

    return ret;

 err:
    sh_done();
    return 0;
}

static void sh_done(void)
{
    OPENSSL_free(sh.freelist);
    OPENSSL_free(sh.bittable);
    OPENSSL_free(sh.bitmalloc);
    if (sh.map_result != NULL && sh.map_size)
        munmap(sh.map_result, sh.map_size);
    memset(&sh, 0, sizeof(sh));
}
static int sh_allocated(const char *ptr)
{
    return WITHIN_ARENA(ptr) ? 1 : 0;
}

static char *sh_find_my_buddy(char *ptr, int list)
{
    size_t bit;
    char *chunk = NULL;

    bit = (ONE << list) + (ptr - sh.arena) / (sh.arena_size >> list);
    bit ^= 1;

    if (TESTBIT(sh.bittable, bit) && !TESTBIT(sh.bitmalloc, bit))
        chunk = sh.arena + ((bit & ((ONE << list) - 1)) * (sh.arena_size >> list));

    return chunk;
}

static void *sh_malloc(size_t size)
{
    ossl_ssize_t list, slist;
    size_t i;
    char *chunk;

    if (size > sh.arena_size)
        return NULL;

    list = sh.freelist_size - 1;
    for (i = sh.minsize; i < size; i <<= 1)
        list--;
    if (list < 0)
        return NULL;

    /* try to find a larger entry to split */
    for (slist = list; slist >= 0; slist--)
        if (sh.freelist[slist] != NULL)
            break;
    if (slist < 0)
        return NULL;

    /* split larger entry */
    while (slist != list) {
        char *temp = sh.freelist[slist];

        /* remove from bigger list */
        OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
        sh_clearbit(temp, slist, sh.bittable);
        sh_remove_from_list(temp);
        OPENSSL_assert(temp != sh.freelist[slist]);

        /* done with bigger list */
        slist++;

        /* add to smaller list */
        OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
        sh_setbit(temp, slist, sh.bittable);
        sh_add_to_list(&sh.freelist[slist], temp);
        OPENSSL_assert(sh.freelist[slist] == temp);

        /* split in 2 */
        temp += sh.arena_size >> slist;
        OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
        sh_setbit(temp, slist, sh.bittable);
        sh_add_to_list(&sh.freelist[slist], temp);
        OPENSSL_assert(sh.freelist[slist] == temp);

        OPENSSL_assert(temp-(sh.arena_size >> slist) == sh_find_my_buddy(temp, slist));
    }

    /* peel off memory to hand back */
    chunk = sh.freelist[list];
    OPENSSL_assert(sh_testbit(chunk, list, sh.bittable));
    sh_setbit(chunk, list, sh.bitmalloc);
    sh_remove_from_list(chunk);

    OPENSSL_assert(WITHIN_ARENA(chunk));

    /* zero the free list header as a precaution against information leakage */
    memset(chunk, 0, sizeof(SH_LIST));

    return chunk;
}
static void sh_free(void *ptr)
{
    size_t list;
    void *buddy;

    if (ptr == NULL)
        return;
    OPENSSL_assert(WITHIN_ARENA(ptr));
    if (!WITHIN_ARENA(ptr))
        return;

    list = sh_getlist(ptr);
    OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
    sh_clearbit(ptr, list, sh.bitmalloc);
    sh_add_to_list(&sh.freelist[list], ptr);

    /* Try to coalesce two adjacent free areas. */
    while ((buddy = sh_find_my_buddy(ptr, list)) != NULL) {
        OPENSSL_assert(ptr == sh_find_my_buddy(buddy, list));
        OPENSSL_assert(ptr != NULL);
        OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
        sh_clearbit(ptr, list, sh.bittable);
        sh_remove_from_list(ptr);
        OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
        sh_clearbit(buddy, list, sh.bittable);
        sh_remove_from_list(buddy);

        list--;

        /* Zero the higher addressed block's free list pointers */
        memset(ptr > buddy ? ptr : buddy, 0, sizeof(SH_LIST));
        if (ptr > buddy)
            ptr = buddy;

        OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
        sh_setbit(ptr, list, sh.bittable);
        sh_add_to_list(&sh.freelist[list], ptr);
        OPENSSL_assert(sh.freelist[list] == ptr);
    }
}

static size_t sh_actual_size(char *ptr)
{
    int list;

    OPENSSL_assert(WITHIN_ARENA(ptr));
    if (!WITHIN_ARENA(ptr))
        return 0;
    list = sh_getlist(ptr);
    OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
    return sh.arena_size / (ONE << list);
}
#endif /* IMPLEMENTED */