/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __LIBOCF_ENV_H__
#define __LIBOCF_ENV_H__

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifndef __USE_GNU
#define __USE_GNU
#endif

#include <linux/limits.h>
#include <linux/stddef.h>

#include "spdk/stdinc.h"
#include "spdk/likely.h"
#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/log.h"

#include "ocf_env_list.h"
#include "ocf/ocf_err.h"

#include "mpool.h"

typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

typedef uint64_t sector_t;

#define __packed __attribute__((packed))
#define __aligned(x) __attribute__((aligned(x)))

/* linux sector 512-bytes */
#define ENV_SECTOR_SHIFT 9
#define ENV_SECTOR_SIZE (1 << ENV_SECTOR_SHIFT)
#define BYTES_TO_SECTOR(x) ((x) >> ENV_SECTOR_SHIFT)

/* *** MEMORY MANAGEMENT *** */

#define ENV_MEM_NORMAL 0
#define ENV_MEM_NOIO 0
#define ENV_MEM_ATOMIC 0

#define likely spdk_likely
#define unlikely spdk_unlikely

#define min(x, y) MIN(x, y)
#ifndef MIN
#define MIN(x, y) spdk_min(x, y)
#endif

#define ARRAY_SIZE(x) SPDK_COUNTOF(x)

/* LOGGING */
#define ENV_PRIu64 PRIu64
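/*
 * Illustrative usage of the logging/assertion helpers defined below (not part
 * of the OCF env API; the variables req and rc are hypothetical):
 *
 *	ENV_BUG_ON(req == NULL);                 aborts when the condition holds
 *	ENV_WARN(rc != 0, ": rc=%d\n", rc);      logs a notice and continues
 *	ENV_BUILD_BUG_ON(sizeof(u32) != 4);      fails at compile time
 */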
#define ENV_WARN(cond, fmt, args...) ({ \
	if (spdk_unlikely((uintptr_t)(cond))) \
		SPDK_NOTICELOG("WARNING" fmt, ##args); \
})

#define ENV_WARN_ON(cond) ({ \
	if (spdk_unlikely((uintptr_t)(cond))) \
		SPDK_NOTICELOG("WARNING\n"); \
})

#define ENV_BUG() ({ \
	SPDK_ERRLOG("BUG\n"); \
	assert(0); \
	abort(); \
})

#define ENV_BUG_ON(cond) ({ \
	if (spdk_unlikely((uintptr_t)(cond))) { \
		SPDK_ERRLOG("BUG\n"); \
		assert(0); \
		abort(); \
	} \
})

#define ENV_BUILD_BUG_ON(cond) _Static_assert(!(cond), "static " \
		"assertion failure")

#define container_of(ptr, type, member) SPDK_CONTAINEROF(ptr, type, member)

static inline void *env_malloc(size_t size, int flags)
{
	return spdk_malloc(size, 0, NULL, SPDK_ENV_LCORE_ID_ANY,
			   SPDK_MALLOC_DMA);
}

static inline void *env_zalloc(size_t size, int flags)
{
	return spdk_zmalloc(size, 0, NULL, SPDK_ENV_LCORE_ID_ANY,
			    SPDK_MALLOC_DMA);
}

static inline void env_free(const void *ptr)
{
	return spdk_free((void *)ptr);
}

static inline void *env_vmalloc(size_t size)
{
	return spdk_malloc(size, 0, NULL, SPDK_ENV_LCORE_ID_ANY,
			   SPDK_MALLOC_DMA);
}

static inline void *env_vzalloc(size_t size)
{
	/* TODO: raw_ram init can request huge amount of memory to store
	 * hashtable in it. need to ensure that allocation succeeds */
	return spdk_zmalloc(size, 0, NULL, SPDK_ENV_LCORE_ID_ANY,
			    SPDK_MALLOC_DMA);
}

static inline void *env_vzalloc_flags(size_t size, int flags)
{
	return env_vzalloc(size);
}

static inline void *env_secure_alloc(size_t size)
{
	return spdk_zmalloc(size, 0, NULL, SPDK_ENV_LCORE_ID_ANY,
			    SPDK_MALLOC_DMA);
}

static inline void env_secure_free(const void *ptr, size_t size)
{
	return spdk_free((void *)ptr);
}

static inline void env_vfree(const void *ptr)
{
	return spdk_free((void *)ptr);
}

static inline uint64_t env_get_free_memory(void)
{
	return -1;
}

/* *** ALLOCATOR *** */

#define OCF_ALLOCATOR_NAME_MAX 24

typedef struct {
	struct spdk_mempool *mempool;
	size_t element_size;
	size_t element_count;
	bool zero;
} env_allocator;

env_allocator *env_allocator_create_extended(uint32_t size, const char *name, int limit, bool zero);

env_allocator *env_allocator_create(uint32_t size, const char *name, bool zero);

void env_allocator_destroy(env_allocator *allocator);

void *env_allocator_new(env_allocator *allocator);

void env_allocator_del(env_allocator *allocator, void *item);

uint32_t env_allocator_item_count(env_allocator *allocator);
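/*
 * Illustrative sketch of the allocator API declared above (prototypes only
 * here; the implementation sits outside this header, built on the
 * spdk_mempool referenced in the struct). The structure struct my_item and
 * the pool name are hypothetical:
 *
 *	env_allocator *alloc = env_allocator_create(sizeof(struct my_item),
 *						    "my_items", true);
 *	struct my_item *item = env_allocator_new(alloc);
 *	...
 *	env_allocator_del(alloc, item);
 *	env_allocator_destroy(alloc);
 */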
/* *** MUTEX *** */

typedef struct {
	pthread_mutex_t m;
} env_mutex;

static inline int env_mutex_init(env_mutex *mutex)
{
	return !!pthread_mutex_init(&mutex->m, NULL);
}

static inline void env_mutex_lock(env_mutex *mutex)
{
	ENV_BUG_ON(pthread_mutex_lock(&mutex->m));
}

static inline int env_mutex_lock_interruptible(env_mutex *mutex)
{
	env_mutex_lock(mutex);
	return 0;
}

static inline int env_mutex_trylock(env_mutex *mutex)
{
	return pthread_mutex_trylock(&mutex->m) ? -OCF_ERR_NO_LOCK : 0;
}

static inline void env_mutex_unlock(env_mutex *mutex)
{
	ENV_BUG_ON(pthread_mutex_unlock(&mutex->m));
}

static inline int env_mutex_is_locked(env_mutex *mutex)
{
	if (env_mutex_trylock(mutex) == 0) {
		env_mutex_unlock(mutex);
		return 0;
	}

	return 1;
}

static inline int env_mutex_destroy(env_mutex *mutex)
{
	if (pthread_mutex_destroy(&mutex->m)) {
		return 1;
	}

	return 0;
}

/* *** RECURSIVE MUTEX *** */

typedef env_mutex env_rmutex;

static inline int env_rmutex_init(env_rmutex *rmutex)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&rmutex->m, &attr);

	return 0;
}

static inline void env_rmutex_lock(env_rmutex *rmutex)
{
	env_mutex_lock(rmutex);
}

static inline int env_rmutex_lock_interruptible(env_rmutex *rmutex)
{
	return env_mutex_lock_interruptible(rmutex);
}

static inline int env_rmutex_trylock(env_rmutex *rmutex)
{
	return env_mutex_trylock(rmutex);
}

static inline void env_rmutex_unlock(env_rmutex *rmutex)
{
	env_mutex_unlock(rmutex);
}

static inline int env_rmutex_is_locked(env_rmutex *rmutex)
{
	return env_mutex_is_locked(rmutex);
}

static inline int env_rmutex_destroy(env_rmutex *rmutex)
{
	return env_mutex_destroy(rmutex);
}

/* *** RW SEMAPHORE *** */
typedef struct {
	pthread_rwlock_t lock;
} env_rwsem;

static inline int env_rwsem_init(env_rwsem *s)
{
	return !!pthread_rwlock_init(&s->lock, NULL);
}

static inline void env_rwsem_up_read(env_rwsem *s)
{
	ENV_BUG_ON(pthread_rwlock_unlock(&s->lock));
}

static inline void env_rwsem_down_read(env_rwsem *s)
{
	ENV_BUG_ON(pthread_rwlock_rdlock(&s->lock));
}

static inline int env_rwsem_down_read_trylock(env_rwsem *s)
{
	return pthread_rwlock_tryrdlock(&s->lock) ? -OCF_ERR_NO_LOCK : 0;
}

static inline void env_rwsem_up_write(env_rwsem *s)
{
	ENV_BUG_ON(pthread_rwlock_unlock(&s->lock));
}

static inline void env_rwsem_down_write(env_rwsem *s)
{
	ENV_BUG_ON(pthread_rwlock_wrlock(&s->lock));
}

static inline int env_rwsem_down_write_trylock(env_rwsem *s)
{
	return pthread_rwlock_trywrlock(&s->lock) ? -OCF_ERR_NO_LOCK : 0;
}

static inline int env_rwsem_is_locked(env_rwsem *s)
{
	if (env_rwsem_down_read_trylock(s) == 0) {
		env_rwsem_up_read(s);
		return 0;
	}

	return 1;
}

static inline int env_rwsem_down_read_interruptible(env_rwsem *s)
{
	return pthread_rwlock_rdlock(&s->lock);
}

static inline int env_rwsem_down_write_interruptible(env_rwsem *s)
{
	return pthread_rwlock_wrlock(&s->lock);
}

static inline int env_rwsem_destroy(env_rwsem *s)
{
	return pthread_rwlock_destroy(&s->lock);
}
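/*
 * Illustrative note (not part of the OCF env API): the trylock wrappers above
 * return 0 on success and -OCF_ERR_NO_LOCK when the lock is busy, so callers
 * can back off without blocking. The variable name below is hypothetical:
 *
 *	env_rwsem lock;
 *
 *	env_rwsem_init(&lock);
 *	if (env_rwsem_down_write_trylock(&lock) == 0) {
 *		... modify shared state ...
 *		env_rwsem_up_write(&lock);
 *	}
 *	env_rwsem_destroy(&lock);
 */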
/* *** ATOMIC VARIABLES *** */

typedef int env_atomic;

typedef long env_atomic64;

#ifndef atomic_read
#define atomic_read(ptr) (*(__typeof__(*ptr) *volatile) (ptr))
#endif

#ifndef atomic_set
#define atomic_set(ptr, i) ((*(__typeof__(*ptr) *volatile) (ptr)) = (i))
#endif

#define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n))

#define atomic_cmpxchg __sync_val_compare_and_swap

static inline int env_atomic_read(const env_atomic *a)
{
	return atomic_read(a);
}

static inline void env_atomic_set(env_atomic *a, int i)
{
	atomic_set(a, i);
}

static inline void env_atomic_add(int i, env_atomic *a)
{
	atomic_add(a, i);
}

static inline void env_atomic_sub(int i, env_atomic *a)
{
	atomic_sub(a, i);
}

static inline bool env_atomic_sub_and_test(int i, env_atomic *a)
{
	return __sync_sub_and_fetch(a, i) == 0;
}

static inline void env_atomic_inc(env_atomic *a)
{
	atomic_inc(a);
}

static inline void env_atomic_dec(env_atomic *a)
{
	atomic_dec(a);
}

static inline bool env_atomic_dec_and_test(env_atomic *a)
{
	return __sync_sub_and_fetch(a, 1) == 0;
}

static inline bool env_atomic_inc_and_test(env_atomic *a)
{
	return __sync_add_and_fetch(a, 1) == 0;
}

static inline int env_atomic_add_return(int i, env_atomic *a)
{
	return __sync_add_and_fetch(a, i);
}

static inline int env_atomic_sub_return(int i, env_atomic *a)
{
	return __sync_sub_and_fetch(a, i);
}

static inline int env_atomic_inc_return(env_atomic *a)
{
	return env_atomic_add_return(1, a);
}

static inline int env_atomic_dec_return(env_atomic *a)
{
	return env_atomic_sub_return(1, a);
}

static inline int env_atomic_cmpxchg(env_atomic *a, int old, int new_value)
{
	return atomic_cmpxchg(a, old, new_value);
}

static inline int env_atomic_add_unless(env_atomic *a, int i, int u)
{
	int c, old;
	c = env_atomic_read(a);
	for (;;) {
		if (spdk_unlikely(c == (u))) {
			break;
		}
		old = env_atomic_cmpxchg((a), c, c + (i));
		if (spdk_likely(old == c)) {
			break;
		}
		c = old;
	}
	return c != (u);
}
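/*
 * Illustrative note (hedged, not from the original sources): like the Linux
 * kernel helper of the same name, env_atomic_add_unless() adds i to the
 * counter only while its current value differs from u, and returns non-zero
 * when the addition actually happened. A hypothetical reference-count use:
 *
 *	if (!env_atomic_add_unless(&obj->refcnt, 1, 0)) {
 *		... counter was already zero, do not take a new reference ...
 *	}
 */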
static inline long env_atomic64_read(const env_atomic64 *a)
{
	return atomic_read(a);
}

static inline void env_atomic64_set(env_atomic64 *a, long i)
{
	atomic_set(a, i);
}

static inline void env_atomic64_add(long i, env_atomic64 *a)
{
	atomic_add(a, i);
}

static inline void env_atomic64_sub(long i, env_atomic64 *a)
{
	atomic_sub(a, i);
}

static inline void env_atomic64_inc(env_atomic64 *a)
{
	atomic_inc(a);
}

static inline void env_atomic64_dec(env_atomic64 *a)
{
	atomic_dec(a);
}

static inline long env_atomic64_add_return(long i, env_atomic64 *a)
{
	return __sync_add_and_fetch(a, i);
}

static inline long env_atomic64_sub_return(long i, env_atomic64 *a)
{
	return __sync_sub_and_fetch(a, i);
}

static inline long env_atomic64_inc_return(env_atomic64 *a)
{
	return env_atomic64_add_return(1, a);
}

static inline long env_atomic64_dec_return(env_atomic64 *a)
{
	return env_atomic64_sub_return(1, a);
}

static inline long env_atomic64_cmpxchg(env_atomic64 *a, long old, long new)
{
	return atomic_cmpxchg(a, old, new);
}

/* *** COMPLETION *** */
typedef struct completion {
	sem_t sem;
} env_completion;

static inline void env_completion_init(env_completion *completion)
{
	sem_init(&completion->sem, 0, 0);
}

static inline void env_completion_wait(env_completion *completion)
{
	sem_wait(&completion->sem);
}

static inline void env_completion_complete(env_completion *completion)
{
	sem_post(&completion->sem);
}

static inline void env_completion_destroy(env_completion *completion)
{
	sem_destroy(&completion->sem);
}

/* *** SPIN LOCKS *** */

typedef struct {
	pthread_spinlock_t lock;
} env_spinlock;

static inline int env_spinlock_init(env_spinlock *l)
{
	return pthread_spin_init(&l->lock, 0);
}

static inline int env_spinlock_trylock(env_spinlock *l)
{
	return pthread_spin_trylock(&l->lock) ? -OCF_ERR_NO_LOCK : 0;
}

static inline void env_spinlock_lock(env_spinlock *l)
{
	ENV_BUG_ON(pthread_spin_lock(&l->lock));
}

static inline void env_spinlock_unlock(env_spinlock *l)
{
	ENV_BUG_ON(pthread_spin_unlock(&l->lock));
}

#define env_spinlock_lock_irqsave(l, flags) \
	(void)flags; \
	env_spinlock_lock(l)

#define env_spinlock_unlock_irqrestore(l, flags) \
	(void)flags; \
	env_spinlock_unlock(l)

static inline void env_spinlock_destroy(env_spinlock *l)
{
	ENV_BUG_ON(pthread_spin_destroy(&l->lock));
}

/* *** RW LOCKS *** */

typedef struct {
	pthread_rwlock_t lock;
} env_rwlock;

static inline void env_rwlock_init(env_rwlock *l)
{
	ENV_BUG_ON(pthread_rwlock_init(&l->lock, NULL));
}

static inline void env_rwlock_read_lock(env_rwlock *l)
{
	ENV_BUG_ON(pthread_rwlock_rdlock(&l->lock));
}

static inline void env_rwlock_read_unlock(env_rwlock *l)
{
	ENV_BUG_ON(pthread_rwlock_unlock(&l->lock));
}

static inline void env_rwlock_write_lock(env_rwlock *l)
{
	ENV_BUG_ON(pthread_rwlock_wrlock(&l->lock));
}

static inline void env_rwlock_write_unlock(env_rwlock *l)
{
	ENV_BUG_ON(pthread_rwlock_unlock(&l->lock));
}

static inline void env_rwlock_destroy(env_rwlock *l)
{
	ENV_BUG_ON(pthread_rwlock_destroy(&l->lock));
}
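/*
 * Illustrative note: the bit helpers below address bit nr counting from the
 * first byte of addr (byte granularity) and update it with GCC __sync
 * builtins, so set/clear are atomic with respect to each other. Hypothetical
 * example:
 *
 *	unsigned long flags = 0;
 *
 *	env_bit_set(3, &flags);
 *	if (env_bit_test(3, &flags)) {
 *		env_bit_clear(3, &flags);
 *	}
 */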
static inline void env_bit_set(int nr, volatile void *addr)
{
	char *byte = (char *)addr + (nr >> 3);
	char mask = 1 << (nr & 7);

	__sync_or_and_fetch(byte, mask);
}

static inline void env_bit_clear(int nr, volatile void *addr)
{
	char *byte = (char *)addr + (nr >> 3);
	char mask = 1 << (nr & 7);

	__sync_and_and_fetch(byte, ~mask);
}

static inline bool env_bit_test(int nr, const volatile unsigned long *addr)
{
	const char *byte = (char *)addr + (nr >> 3);
	char mask = 1 << (nr & 7);

	return !!(*byte & mask);
}

/* *** WAITQUEUE *** */

typedef struct {
	sem_t sem;
} env_waitqueue;

static inline void env_waitqueue_init(env_waitqueue *w)
{
	sem_init(&w->sem, 0, 0);
}

static inline void env_waitqueue_wake_up(env_waitqueue *w)
{
	sem_post(&w->sem);
}

#define env_waitqueue_wait(w, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		sem_wait(&w.sem); \
	__ret = __ret; \
})

/* *** SCHEDULING *** */

/* CAS does not need this while in user-space */
static inline void env_schedule(void)
{
}

#define env_cond_resched env_schedule

static inline int env_in_interrupt(void)
{
	return 0;
}

static inline uint64_t env_get_tick_count(void)
{
	return spdk_get_ticks();
}

static inline uint64_t env_ticks_to_secs(uint64_t j)
{
	return j / spdk_get_ticks_hz();
}

static inline uint64_t env_ticks_to_msecs(uint64_t j)
{
	return env_ticks_to_secs(j) * 1000;
}

static inline uint64_t env_ticks_to_nsecs(uint64_t j)
{
	return env_ticks_to_secs(j) * 1000 * 1000 * 1000;
}

static inline uint64_t env_ticks_to_usecs(uint64_t j)
{
	return env_ticks_to_secs(j) * 1000 * 1000;
}

static inline uint64_t env_secs_to_ticks(uint64_t j)
{
	return j * spdk_get_ticks_hz();
}

/* *** STRING OPERATIONS *** */

/* 512 KB is a sufficient amount of memory for OCF operations */
#define ENV_MAX_MEM (512 * 1024)

static inline int env_memset(void *dest, size_t len, uint8_t value)
{
	if (dest == NULL || len == 0) {
		return 1;
	}

	memset(dest, value, len);
	return 0;
}

static inline int env_memcpy(void *dest, size_t dmax, const void *src, size_t len)
{
	if (dest == NULL || src == NULL) {
		return 1;
	}
	if (dmax == 0 || dmax > ENV_MAX_MEM) {
		return 1;
	}
	if (len == 0 || len > dmax) {
		return 1;
	}

	memcpy(dest, src, len);
	return 0;
}

static inline int env_memcmp(const void *aptr, size_t dmax, const void *bptr, size_t len,
			     int *diff)
{
	if (diff == NULL || aptr == NULL || bptr == NULL) {
		return 1;
	}
	if (dmax == 0 || dmax > ENV_MAX_MEM) {
		return 1;
	}
	if (len == 0 || len > dmax) {
		return 1;
	}

	*diff = memcmp(aptr, bptr, len);
	return 0;
}

/* 4096 is a sufficient max length for any OCF operation on a string */
#define ENV_MAX_STR (4 * 1024)

static inline size_t env_strnlen(const char *src, size_t dmax)
{
	return strnlen(src, dmax);
}

static inline int env_strncpy(char *dest, size_t dmax, const char *src, size_t len)
{
	if (dest == NULL || src == NULL) {
		return 1;
	}
	if (dmax == 0 || dmax > ENV_MAX_STR) {
		return 1;
	}
	if (len == 0) {
		return 1;
	}
	/* Just copy as many characters as we can instead of returning failure */
	len = min(len, dmax);

	strncpy(dest, src, len);
	return 0;
}

#define env_strncmp(s1, slen1, s2, slen2) strncmp(s1, s2, min(slen1, slen2))
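/*
 * Illustrative note: unlike the libc routines they wrap, the bounded
 * memory/string helpers above return 0 on success and 1 when validation fails
 * (NULL pointers, zero sizes, or sizes above the ENV_MAX_* caps); they do not
 * return the destination pointer. Hypothetical example:
 *
 *	char name[OCF_ALLOCATOR_NAME_MAX];
 *
 *	if (env_strncpy(name, sizeof(name), "ocf_req", sizeof("ocf_req"))) {
 *		... handle failure ...
 *	}
 */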
static inline char *env_strdup(const char *src, int flags)
{
	int len;
	char *ret;

	if (src == NULL) {
		return NULL;
	}

	len = env_strnlen(src, ENV_MAX_STR) + 1;
	ret = env_malloc(len, flags);

	if (env_strncpy(ret, ENV_MAX_STR, src, len)) {
		return NULL;
	} else {
		return ret;
	}
}

/* *** SORTING *** */

static inline void env_sort(void *base, size_t num, size_t size,
			    int (*cmp_fn)(const void *, const void *),
			    void (*swap_fn)(void *, void *, int size))
{
	qsort(base, num, size, cmp_fn);
}

static inline void env_msleep(uint64_t n)
{
	usleep(n * 1000);
}

static inline void env_touch_softlockup_wd(void)
{
}

/* *** CRC *** */

uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len);

/* EXECUTION CONTEXTS */
unsigned env_get_execution_context(void);
void env_put_execution_context(unsigned ctx);
unsigned env_get_execution_context_count(void);

#endif /* __LIBOCF_ENV_H__ */