/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 */


#ifndef __LIBOCF_ENV_H__
#define __LIBOCF_ENV_H__

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifndef __USE_GNU
#define __USE_GNU
#endif

#include <linux/limits.h>
#include <linux/stddef.h>

#include "spdk/stdinc.h"
#include "spdk/likely.h"
#include "spdk/env.h"
#include "spdk/util.h"
#include "spdk/log.h"

#include "ocf_env_list.h"
#include "ocf/ocf_err.h"

#include "mpool.h"

typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

typedef uint64_t sector_t;

#define __packed __attribute__((packed))
#define __aligned(x) __attribute__((aligned(x)))

/* Linux sectors are 512 bytes */
#define ENV_SECTOR_SHIFT	9
#define ENV_SECTOR_SIZE (1<<ENV_SECTOR_SHIFT)
#define BYTES_TO_SECTOR(x)	((x) >> ENV_SECTOR_SHIFT)

/* *** MEMORY MANAGEMENT *** */

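/* This is a user-space environment: the kernel-style allocation contexts
 * below are all equivalent, and the flags argument of the allocation
 * helpers further down is ignored.
 */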
#define ENV_MEM_NORMAL	0
#define ENV_MEM_NOIO	0
#define ENV_MEM_ATOMIC	0

#define likely spdk_likely
#define unlikely spdk_unlikely

#define min(x, y) MIN(x, y)
#ifndef MIN
#define MIN(x, y) spdk_min(x, y)
#endif

#define ARRAY_SIZE(x) SPDK_COUNTOF(x)

/* LOGGING */
#define ENV_PRIu64 PRIu64

#define ENV_WARN(cond, fmt, args...) ({ \
		if (spdk_unlikely((uintptr_t)(cond))) \
			SPDK_WARNLOG(fmt, ##args); \
	})

#define ENV_WARN_ON(cond) ({ \
	if (spdk_unlikely((uintptr_t)(cond))) \
		SPDK_WARNLOG("\n"); \
	})

#define ENV_BUG() ({ \
		SPDK_ERRLOG("BUG\n"); \
		assert(0); \
		abort(); \
	})

#define ENV_BUG_ON(cond) ({ \
		if (spdk_unlikely((uintptr_t)(cond))) { \
			SPDK_ERRLOG("BUG\n"); \
			assert(0); \
			abort(); \
		} \
	})

#define ENV_BUILD_BUG_ON(cond)		_Static_assert(!(cond), "static "\
					"assertion failure")

#define container_of(ptr, type, member) SPDK_CONTAINEROF(ptr, type, member)

static inline void *
env_malloc(size_t size, int flags)
{
	return spdk_malloc(size, 0, NULL, SPDK_ENV_LCORE_ID_ANY,
			   SPDK_MALLOC_DMA);
}

static inline void *
env_zalloc(size_t size, int flags)
{
	return spdk_zmalloc(size, 0, NULL, SPDK_ENV_LCORE_ID_ANY,
			    SPDK_MALLOC_DMA);
}

static inline void
env_free(const void *ptr)
{
	return spdk_free((void *)ptr);
}

static inline void *
env_vmalloc(size_t size)
{
	return spdk_malloc(size, 0, NULL, SPDK_ENV_LCORE_ID_ANY,
			   SPDK_MALLOC_DMA);
}

static inline void *
env_vzalloc(size_t size)
{
	/* TODO: raw_ram init can request a huge amount of memory to store
	 * its hashtable in; we need to ensure that this allocation succeeds. */
	return spdk_zmalloc(size, 0, NULL, SPDK_ENV_LCORE_ID_ANY,
			    SPDK_MALLOC_DMA);
}

static inline void *
env_vzalloc_flags(size_t size, int flags)
{
	return env_vzalloc(size);
}

static inline void *
env_secure_alloc(size_t size)
{
	return spdk_zmalloc(size, 0, NULL, SPDK_ENV_LCORE_ID_ANY,
			    SPDK_MALLOC_DMA);
}

static inline void
env_secure_free(const void *ptr, size_t size)
{
	return spdk_free((void *)ptr);
}

static inline void
env_vfree(const void *ptr)
{
	return spdk_free((void *)ptr);
}

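/* Free memory is not tracked in this environment, so report an effectively
 * unlimited amount.
 */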
static inline uint64_t
env_get_free_memory(void)
{
	return -1;
}

/* *** ALLOCATOR *** */

#define OCF_ALLOCATOR_NAME_MAX 24

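/* Fixed-size object allocator backed by an spdk_mempool. Illustrative usage
 * (struct foo is a placeholder type):
 *
 *	env_allocator *a = env_allocator_create(sizeof(struct foo), "foo", true);
 *	struct foo *f = env_allocator_new(a);
 *	...
 *	env_allocator_del(a, f);
 *	env_allocator_destroy(a);
 */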
typedef struct {
	struct spdk_mempool *mempool;
	size_t element_size;
	size_t element_count;
	bool zero;
} env_allocator;

env_allocator *env_allocator_create_extended(uint32_t size, const char *name, int limit, bool zero);

env_allocator *env_allocator_create(uint32_t size, const char *name, bool zero);

void env_allocator_destroy(env_allocator *allocator);

void *env_allocator_new(env_allocator *allocator);

void env_allocator_del(env_allocator *allocator, void *item);

uint32_t env_allocator_item_count(env_allocator *allocator);

/* *** MUTEX *** */

typedef struct {
	pthread_mutex_t m;
} env_mutex;

static inline int
env_mutex_init(env_mutex *mutex)
{
	return !!pthread_mutex_init(&mutex->m, NULL);
}

static inline void
env_mutex_lock(env_mutex *mutex)
{
	ENV_BUG_ON(pthread_mutex_lock(&mutex->m));
}

static inline int
env_mutex_lock_interruptible(env_mutex *mutex)
{
	env_mutex_lock(mutex);
	return 0;
}

static inline int
env_mutex_trylock(env_mutex *mutex)
{
	return pthread_mutex_trylock(&mutex->m) ? -OCF_ERR_NO_LOCK : 0;
}

static inline void
env_mutex_unlock(env_mutex *mutex)
{
	ENV_BUG_ON(pthread_mutex_unlock(&mutex->m));
}

static inline int
env_mutex_is_locked(env_mutex *mutex)
{
	if (env_mutex_trylock(mutex) == 0) {
		env_mutex_unlock(mutex);
		return 0;
	}

	return 1;
}

static inline int
env_mutex_destroy(env_mutex *mutex)
{
	if (pthread_mutex_destroy(&mutex->m)) {
		return 1;
	}

	return 0;
}

/* *** RECURSIVE MUTEX *** */

typedef env_mutex env_rmutex;

static inline int
env_rmutex_init(env_rmutex *rmutex)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&rmutex->m, &attr);

	return 0;
}

static inline void
env_rmutex_lock(env_rmutex *rmutex)
{
	env_mutex_lock(rmutex);
}

static inline int
env_rmutex_lock_interruptible(env_rmutex *rmutex)
{
	return env_mutex_lock_interruptible(rmutex);
}

static inline int
env_rmutex_trylock(env_rmutex *rmutex)
{
	return env_mutex_trylock(rmutex);
}

static inline void
env_rmutex_unlock(env_rmutex *rmutex)
{
	env_mutex_unlock(rmutex);
}

static inline int
env_rmutex_is_locked(env_rmutex *rmutex)
{
	return env_mutex_is_locked(rmutex);
}

static inline int
env_rmutex_destroy(env_rmutex *rmutex)
{
	return env_mutex_destroy(rmutex);
}

/* *** RW SEMAPHORE *** */

typedef struct {
	pthread_rwlock_t lock;
} env_rwsem;

static inline int
env_rwsem_init(env_rwsem *s)
{
	return !!pthread_rwlock_init(&s->lock, NULL);
}

static inline void
env_rwsem_up_read(env_rwsem *s)
{
	ENV_BUG_ON(pthread_rwlock_unlock(&s->lock));
}

static inline void
env_rwsem_down_read(env_rwsem *s)
{
	ENV_BUG_ON(pthread_rwlock_rdlock(&s->lock));
}

static inline int
env_rwsem_down_read_trylock(env_rwsem *s)
{
	return pthread_rwlock_tryrdlock(&s->lock) ? -OCF_ERR_NO_LOCK : 0;
}

static inline void
env_rwsem_up_write(env_rwsem *s)
{
	ENV_BUG_ON(pthread_rwlock_unlock(&s->lock));
}

static inline void
env_rwsem_down_write(env_rwsem *s)
{
	ENV_BUG_ON(pthread_rwlock_wrlock(&s->lock));
}

static inline int
env_rwsem_down_write_trylock(env_rwsem *s)
{
	return pthread_rwlock_trywrlock(&s->lock) ? -OCF_ERR_NO_LOCK : 0;
}

static inline int
env_rwsem_is_locked(env_rwsem *s)
{
	if (env_rwsem_down_read_trylock(s) == 0) {
		env_rwsem_up_read(s);
		return 0;
	}

	return 1;
}

static inline int
env_rwsem_down_read_interruptible(env_rwsem *s)
{
	return pthread_rwlock_rdlock(&s->lock);
}

static inline int
env_rwsem_down_write_interruptible(env_rwsem *s)
{
	return pthread_rwlock_wrlock(&s->lock);
}

static inline int
env_rwsem_destroy(env_rwsem *s)
{
	return pthread_rwlock_destroy(&s->lock);
}

/* *** ATOMIC VARIABLES *** */

typedef int env_atomic;

typedef long env_atomic64;

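/* The atomic helpers below are thin wrappers around the GCC/Clang __sync
 * builtins, which imply full memory barriers.
 */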
#ifndef atomic_read
#define atomic_read(ptr)       (*(__typeof__(*ptr) *volatile) (ptr))
#endif

#ifndef atomic_set
#define atomic_set(ptr, i)     ((*(__typeof__(*ptr) *volatile) (ptr)) = (i))
#endif

#define atomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))

#define atomic_cmpxchg         __sync_val_compare_and_swap

static inline int
env_atomic_read(const env_atomic *a)
{
	return atomic_read(a);
}

static inline void
env_atomic_set(env_atomic *a, int i)
{
	atomic_set(a, i);
}

static inline void
env_atomic_add(int i, env_atomic *a)
{
	atomic_add(a, i);
}

static inline void
env_atomic_sub(int i, env_atomic *a)
{
	atomic_sub(a, i);
}

static inline bool
env_atomic_sub_and_test(int i, env_atomic *a)
{
	return __sync_sub_and_fetch(a, i) == 0;
}

static inline void
env_atomic_inc(env_atomic *a)
{
	atomic_inc(a);
}

static inline void
env_atomic_dec(env_atomic *a)
{
	atomic_dec(a);
}

static inline bool
env_atomic_dec_and_test(env_atomic *a)
{
	return __sync_sub_and_fetch(a, 1) == 0;
}

static inline bool
env_atomic_inc_and_test(env_atomic *a)
{
	return __sync_add_and_fetch(a, 1) == 0;
}

static inline int
env_atomic_add_return(int i, env_atomic *a)
{
	return __sync_add_and_fetch(a, i);
}

static inline int
env_atomic_sub_return(int i, env_atomic *a)
{
	return __sync_sub_and_fetch(a, i);
}

static inline int
env_atomic_inc_return(env_atomic *a)
{
	return env_atomic_add_return(1, a);
}

static inline int
env_atomic_dec_return(env_atomic *a)
{
	return env_atomic_sub_return(1, a);
}

static inline int
env_atomic_cmpxchg(env_atomic *a, int old, int new_value)
{
	return atomic_cmpxchg(a, old, new_value);
}

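/* Atomically add @i to @a unless its current value is @u; returns non-zero
 * if the addition was performed.
 */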
static inline int
env_atomic_add_unless(env_atomic *a, int i, int u)
{
	int c, old;
	c = env_atomic_read(a);
	for (;;) {
		if (spdk_unlikely(c == (u))) {
			break;
		}
		old = env_atomic_cmpxchg((a), c, c + (i));
		if (spdk_likely(old == c)) {
			break;
		}
		c = old;
	}
	return c != (u);
}

static inline long
env_atomic64_read(const env_atomic64 *a)
{
	return atomic_read(a);
}

static inline void
env_atomic64_set(env_atomic64 *a, long i)
{
	atomic_set(a, i);
}

static inline void
env_atomic64_add(long i, env_atomic64 *a)
{
	atomic_add(a, i);
}

static inline void
env_atomic64_sub(long i, env_atomic64 *a)
{
	atomic_sub(a, i);
}

static inline void
env_atomic64_inc(env_atomic64 *a)
{
	atomic_inc(a);
}

static inline void
env_atomic64_dec(env_atomic64 *a)
{
	atomic_dec(a);
}

static inline long
env_atomic64_add_return(long i, env_atomic64 *a)
{
	return __sync_add_and_fetch(a, i);
}

static inline long
env_atomic64_sub_return(long i, env_atomic64 *a)
{
	return __sync_sub_and_fetch(a, i);
}

static inline long
env_atomic64_inc_return(env_atomic64 *a)
{
	return env_atomic64_add_return(1, a);
}

static inline long
env_atomic64_dec_return(env_atomic64 *a)
{
	return env_atomic64_sub_return(1, a);
}

static inline long
env_atomic64_cmpxchg(env_atomic64 *a, long old, long new)
{
	return atomic_cmpxchg(a, old, new);
}

/* *** COMPLETION *** */
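/* Completion built on an unnamed POSIX semaphore: each call to
 * env_completion_complete() releases one env_completion_wait().
 */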
typedef struct completion {
	sem_t sem;
} env_completion;

static inline void
env_completion_init(env_completion *completion)
{
	sem_init(&completion->sem, 0, 0);
}

static inline void
env_completion_wait(env_completion *completion)
{
	sem_wait(&completion->sem);
}

static inline void
env_completion_complete(env_completion *completion)
{
	sem_post(&completion->sem);
}

static inline void
env_completion_destroy(env_completion *completion)
{
	sem_destroy(&completion->sem);
}

/* *** SPIN LOCKS *** */

typedef struct {
	pthread_spinlock_t lock;
} env_spinlock;

static inline int
env_spinlock_init(env_spinlock *l)
{
	return pthread_spin_init(&l->lock, 0);
}

static inline int
env_spinlock_trylock(env_spinlock *l)
{
	return pthread_spin_trylock(&l->lock) ? -OCF_ERR_NO_LOCK : 0;
}

static inline void
env_spinlock_lock(env_spinlock *l)
{
	ENV_BUG_ON(pthread_spin_lock(&l->lock));
}

static inline void
env_spinlock_unlock(env_spinlock *l)
{
	ENV_BUG_ON(pthread_spin_unlock(&l->lock));
}

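/* Interrupts do not apply in user space, so the flags argument is unused. */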
#define env_spinlock_lock_irqsave(l, flags) \
	do { \
		(void)flags; \
		env_spinlock_lock(l); \
	} while (0)

#define env_spinlock_unlock_irqrestore(l, flags) \
	do { \
		(void)flags; \
		env_spinlock_unlock(l); \
	} while (0)

static inline void
env_spinlock_destroy(env_spinlock *l)
{
	ENV_BUG_ON(pthread_spin_destroy(&l->lock));
}

/* *** RW LOCKS *** */

typedef struct {
	pthread_rwlock_t lock;
} env_rwlock;

static inline void
env_rwlock_init(env_rwlock *l)
{
	ENV_BUG_ON(pthread_rwlock_init(&l->lock, NULL));
}

static inline void
env_rwlock_read_lock(env_rwlock *l)
{
	ENV_BUG_ON(pthread_rwlock_rdlock(&l->lock));
}

static inline void
env_rwlock_read_unlock(env_rwlock *l)
{
	ENV_BUG_ON(pthread_rwlock_unlock(&l->lock));
}

static inline void
env_rwlock_write_lock(env_rwlock *l)
{
	ENV_BUG_ON(pthread_rwlock_wrlock(&l->lock));
}

static inline void
env_rwlock_write_unlock(env_rwlock *l)
{
	ENV_BUG_ON(pthread_rwlock_unlock(&l->lock));
}

static inline void
env_rwlock_destroy(env_rwlock *l)
{
	ENV_BUG_ON(pthread_rwlock_destroy(&l->lock));
}

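/* Bit operations on a memory buffer: bit @nr is addressed byte-wise from
 * @addr (byte nr / 8, bit nr % 8); set and clear are atomic.
 */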
static inline void
env_bit_set(int nr, volatile void *addr)
{
	char *byte = (char *)addr + (nr >> 3);
	char mask = 1 << (nr & 7);

	__sync_or_and_fetch(byte, mask);
}

static inline void
env_bit_clear(int nr, volatile void *addr)
{
	char *byte = (char *)addr + (nr >> 3);
	char mask = 1 << (nr & 7);

	__sync_and_and_fetch(byte, ~mask);
}

static inline bool
env_bit_test(int nr, const volatile unsigned long *addr)
{
	const char *byte = (char *)addr + (nr >> 3);
	char mask = 1 << (nr & 7);

	return !!(*byte & mask);
}

/* *** WAITQUEUE *** */

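/* Wake-ups are counted by the underlying semaphore, so a wake-up issued
 * before the corresponding wait is not lost.
 */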
typedef struct {
	sem_t sem;
} env_waitqueue;

static inline void
env_waitqueue_init(env_waitqueue *w)
{
	sem_init(&w->sem, 0, 0);
}

static inline void
env_waitqueue_wake_up(env_waitqueue *w)
{
	sem_post(&w->sem);
}

#define env_waitqueue_wait(w, condition)	\
({						\
	int __ret = 0;				\
	if (!(condition))			\
		sem_wait(&w.sem);		\
	__ret = __ret;				\
})

/* *** SCHEDULING *** */

/* CAS does not need this in user space */
static inline void
env_schedule(void)
{
}

#define env_cond_resched	env_schedule

static inline int
env_in_interrupt(void)
{
	return 0;
}

static inline uint64_t
env_get_tick_count(void)
{
	return spdk_get_ticks();
}

static inline uint64_t
env_ticks_to_secs(uint64_t j)
{
	return j / spdk_get_ticks_hz();
}

/**
 * @brief Convert ticks to milliseconds.
 *
 * Dividing ticks_hz by 1000 first is better than multiplying j by 1000:
 * multiplying j by 1000 would leave only about 54 usable bits in j
 * (the factor of 1000 costs roughly 10 bits), whereas this implementation
 * handles the full 64-bit range of j. The only assumption is that ticks_hz
 * is evenly divisible by 1000, which is reasonable since CPU frequencies
 * are on the MHz/GHz scale.
 *
 * @param[in] j ticks count
 */
static inline uint64_t
env_ticks_to_msecs(uint64_t j)
{
	return j / (spdk_get_ticks_hz() / 1000);
}
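/* Illustrative example, assuming a hypothetical 3 GHz tick rate:
 * j = 3,000,000 ticks -> 3,000,000 / (3,000,000,000 / 1000) = 1 ms.
 */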

/**
 * @brief Convert ticks to microseconds.
 *
 * As in the milliseconds case, we divide ticks_hz (here by 1000 * 1000),
 * so all 64 bits of j can be used. We assume that ticks_hz is evenly
 * divisible by 1000 * 1000, i.e. that the CPU frequency is a multiple
 * of 1 MHz.
 *
 * @param[in] j ticks count
 */
static inline uint64_t
env_ticks_to_usecs(uint64_t j)
{
	return j / (spdk_get_ticks_hz() / (1000 * 1000));
}

/**
 * @brief Convert ticks to nanoseconds.
 *
 * We cannot divide ticks_hz by 10^9, because the CPU frequency is not
 * necessarily perfectly divisible by 10^9 (there are, for example, 2.8 GHz
 * and 3.3 GHz CPUs). Instead we multiply j by 1000, which means only about
 * 54 bits of j are handled correctly.
 *
 * @param[in] j ticks count
 */
static inline uint64_t
env_ticks_to_nsecs(uint64_t j)
{
	return (j * 1000) / (spdk_get_ticks_hz() / (1000 * 1000));
}

static inline uint64_t
env_secs_to_ticks(uint64_t j)
{
	return j * spdk_get_ticks_hz();
}

/* *** STRING OPERATIONS *** */

/* 512 KB is a sufficient amount of memory for OCF operations */
#define ENV_MAX_MEM (512 * 1024)

static inline int
env_memset(void *dest, size_t len, uint8_t value)
{
	if (dest == NULL || len == 0) {
		return 1;
	}

	memset(dest, value, len);
	return 0;
}

static inline int
env_memcpy(void *dest, size_t dmax, const void *src, size_t len)
{
	if (dest == NULL || src == NULL) {
		return 1;
	}
	if (dmax == 0 || dmax > ENV_MAX_MEM) {
		return 1;
	}
	if (len == 0 || len > dmax) {
		return 1;
	}

	memcpy(dest, src, len);
	return 0;
}

static inline int
env_memcmp(const void *aptr, size_t dmax, const void *bptr, size_t len,
	   int *diff)
{
	if (diff == NULL || aptr == NULL || bptr == NULL) {
		return 1;
	}
	if (dmax == 0 || dmax > ENV_MAX_MEM) {
		return 1;
	}
	if (len == 0 || len > dmax) {
		return 1;
	}

	*diff = memcmp(aptr, bptr, len);
	return 0;
}

/* 4096 bytes is a sufficient maximum length for any OCF string operation */
#define ENV_MAX_STR (4 * 1024)

static inline size_t
env_strnlen(const char *src, size_t dmax)
{
	return strnlen(src, dmax);
}

static inline int
env_strncpy(char *dest, size_t dmax, const char *src, size_t len)
{
	if (dest == NULL || src == NULL) {
		return 1;
	}
	if (dmax == 0 || dmax > ENV_MAX_STR) {
		return 1;
	}
	if (len == 0) {
		return 1;
	}
	/* Just copy as many characters as we can instead of returning failure */
	len = min(len, dmax);

	strncpy(dest, src, len);
	return 0;
}

#define env_strncmp(s1, slen1, s2, slen2) strncmp(s1, s2, min(slen1, slen2))

static inline char *
env_strdup(const char *src, int flags)
{
	int len;
	char *ret;

	if (src == NULL) {
		return NULL;
	}

	len = env_strnlen(src, ENV_MAX_STR) + 1;
	ret = env_malloc(len, flags);

	if (env_strncpy(ret, ENV_MAX_STR, src, len)) {
		return NULL;
	} else {
		return ret;
	}
}

/* *** SORTING *** */

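/* Note: swap_fn is accepted for API compatibility but unused; qsort()
 * performs its own element swapping.
 */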
static inline void
env_sort(void *base, size_t num, size_t size,
	 int (*cmp_fn)(const void *, const void *),
	 void (*swap_fn)(void *, void *, int size))
{
	qsort(base, num, size, cmp_fn);
}

static inline void
env_msleep(uint64_t n)
{
	usleep(n * 1000);
}

static inline void
env_touch_softlockup_wd(void)
{
}

/* *** CRC *** */

uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len);

/* EXECUTION CONTEXTS */
unsigned env_get_execution_context(void);
void env_put_execution_context(unsigned ctx);
unsigned env_get_execution_context_count(void);

#endif /* __LIBOCF_ENV_H__ */