1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017-2018 Intel Corporation
3 */
4
5 #include <inttypes.h>
6 #include <limits.h>
7 #include <stdint.h>
8 #include <errno.h>
9 #include <string.h>
10 #include <unistd.h>
11
12 #include <rte_common.h>
13 #include <rte_eal_paging.h>
14 #include <rte_errno.h>
15 #include <rte_log.h>
16 #include <rte_spinlock.h>
17
18 #include "eal_filesystem.h"
19 #include "eal_private.h"
20
21 #include "rte_fbarray.h"
22
23 #define MASK_SHIFT 6ULL
24 #define MASK_ALIGN (1ULL << MASK_SHIFT)
25 #define MASK_LEN_TO_IDX(x) ((x) >> MASK_SHIFT)
26 #define MASK_LEN_TO_MOD(x) ((x) - RTE_ALIGN_FLOOR(x, MASK_ALIGN))
27 #define MASK_GET_IDX(idx, mod) ((idx << MASK_SHIFT) + mod)
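/*
 * A quick example of the index math above (MASK_SHIFT == 6, so each mask word
 * covers 64 entries): element index 200 lives in mask word
 * MASK_LEN_TO_IDX(200) == 3 at bit MASK_LEN_TO_MOD(200) == 8, and
 * MASK_GET_IDX(3, 8) maps it back to 200.
 */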
28
29 /*
30 * We use this to keep track of created/attached memory areas to prevent user
31 * errors in API usage.
32 */
33 struct mem_area {
34 TAILQ_ENTRY(mem_area) next;
35 void *addr;
36 size_t len;
37 int fd;
38 };
39 TAILQ_HEAD(mem_area_head, mem_area);
40 /* local per-process tailq */
41 static struct mem_area_head mem_area_tailq =
42 TAILQ_HEAD_INITIALIZER(mem_area_tailq);
43 static rte_spinlock_t mem_area_lock = RTE_SPINLOCK_INITIALIZER;
44
45 /*
46 * This is a mask that is always stored at the end of the array, to provide a
47 * fast way of finding free/used spots without looping through each element.
48 */
49
50 struct used_mask {
51 unsigned int n_masks;
52 uint64_t data[];
53 };
54
55 static size_t
56 calc_mask_size(unsigned int len)
57 {
58 /* mask must be a multiple of MASK_ALIGN, even though the length of the
59 * array itself may not be aligned on that boundary.
60 */
61 len = RTE_ALIGN_CEIL(len, MASK_ALIGN);
62 return sizeof(struct used_mask) +
63 sizeof(uint64_t) * MASK_LEN_TO_IDX(len);
64 }
65
66 static size_t
67 calc_data_size(size_t page_sz, unsigned int elt_sz, unsigned int len)
68 {
69 size_t data_sz = elt_sz * len;
70 size_t msk_sz = calc_mask_size(len);
71 return RTE_ALIGN_CEIL(data_sz + msk_sz, page_sz);
72 }
73
74 static struct used_mask *
75 get_used_mask(void *data, unsigned int elt_sz, unsigned int len)
76 {
77 return (struct used_mask *) RTE_PTR_ADD(data, elt_sz * len);
78 }
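/*
 * Layout of the area sized by calc_data_size() and addressed by
 * get_used_mask(), for an array of 'len' elements of 'elt_sz' bytes:
 *
 *   arr->data                         get_used_mask(data, elt_sz, len)
 *   |                                 |
 *   v                                 v
 *   +---------------------------------+------------------------+---------+
 *   | len * elt_sz bytes of elements  | struct used_mask       | padding |
 *   |                                 | (n_masks + mask words) |         |
 *   +---------------------------------+------------------------+---------+
 *   |<------------- total rounded up to a multiple of page_sz ---------->|
 */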
79
80 static int
81 resize_and_map(int fd, const char *path, void *addr, size_t len)
82 {
83 void *map_addr;
84
85 if (eal_file_truncate(fd, len)) {
86 EAL_LOG(ERR, "Cannot truncate %s", path);
87 return -1;
88 }
89
90 map_addr = rte_mem_map(addr, len, RTE_PROT_READ | RTE_PROT_WRITE,
91 RTE_MAP_SHARED | RTE_MAP_FORCE_ADDRESS, fd, 0);
92 if (map_addr != addr) {
93 return -1;
94 }
95 return 0;
96 }
97
98 static int
99 overlap(const struct mem_area *ma, const void *start, size_t len)
100 {
101 const void *end = RTE_PTR_ADD(start, len);
102 const void *ma_start = ma->addr;
103 const void *ma_end = RTE_PTR_ADD(ma->addr, ma->len);
104
105 /* start overlap? */
106 if (start >= ma_start && start < ma_end)
107 return 1;
108 /* end overlap? */
109 if (end > ma_start && end < ma_end)
110 return 1;
111 return 0;
112 }
113
114 static int
115 find_next_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n,
116 bool used)
117 {
118 const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
119 arr->len);
120 unsigned int msk_idx, lookahead_idx, first, first_mod;
121 unsigned int last, last_mod;
122 uint64_t last_msk, ignore_msk;
123
124 /*
125 * mask only has granularity of MASK_ALIGN, but start may not be aligned
126 * on that boundary, so construct a special mask to exclude anything we
127 * don't want to see to avoid confusing ctz.
128 */
129 first = MASK_LEN_TO_IDX(start);
130 first_mod = MASK_LEN_TO_MOD(start);
131 ignore_msk = ~((1ULL << first_mod) - 1);
132
133 /* array length may not be aligned, so calculate ignore mask for last
134 * mask index.
135 */
136 last = MASK_LEN_TO_IDX(arr->len);
137 last_mod = MASK_LEN_TO_MOD(arr->len);
138 last_msk = ~(UINT64_MAX << last_mod);
139
140 for (msk_idx = first; msk_idx < msk->n_masks; msk_idx++) {
141 uint64_t cur_msk, lookahead_msk;
142 unsigned int run_start, clz, left;
143 bool found = false;
144 /*
145 * The process of getting n consecutive bits for arbitrary n is
146 * a bit involved, but here it is in a nutshell:
147 *
148 * 1. let n be the number of consecutive bits we're looking for
149 * 2. check if n can fit in one mask, and if so, do n-1
150 * rshift-ands to see if there is an appropriate run inside
151 * our current mask
152 * 2a. if we found a run, bail out early
153 * 2b. if we didn't find a run, proceed
154 * 3. invert the mask and count leading zeroes (that is, count
155 * how many consecutive set bits we had starting from the
156 * end of current mask) as k
157 * 3a. if k is 0, continue to next mask
158 * 3b. if k is not 0, we have a potential run
159 * 4. to satisfy our requirements, next mask must have n-k
160 * consecutive set bits right at the start, so we will do
161 * (n-k-1) rshift-ands and check if first bit is set.
162 *
163 * Step 4 will need to be repeated if (n-k) > MASK_ALIGN until
164 * we either run out of masks, lose the run, or find what we
165 * were looking for.
166 */
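/*
 * Worked example of the above (free-space search, so the masks are
 * already inverted): n == 10, the top four bits of cur_msk are set and
 * bit 59 is clear. Then ~cur_msk has exactly four leading zeroes, so
 * clz == 4, run_start == 60 and left == 6. The lookahead word must then
 * supply six set bits at its bottom: five rshift-ands leave bit 0 set
 * only if bits 0-5 were all set, in which case left drops to 0 and we
 * return MASK_GET_IDX(msk_idx, 60).
 */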
167 cur_msk = msk->data[msk_idx];
168 left = n;
169
170 /* if we're looking for free spaces, invert the mask */
171 if (!used)
172 cur_msk = ~cur_msk;
173
174 /* combine current ignore mask with last index ignore mask */
175 if (msk_idx == last)
176 ignore_msk &= last_msk;
177
178 /* if we have an ignore mask, ignore once */
179 if (ignore_msk) {
180 cur_msk &= ignore_msk;
181 ignore_msk = 0;
182 }
183
184 /* if n can fit within a single mask, do a search */
185 if (n <= MASK_ALIGN) {
186 uint64_t tmp_msk = cur_msk;
187 unsigned int s_idx;
188 for (s_idx = 0; s_idx < n - 1; s_idx++)
189 tmp_msk &= tmp_msk >> 1ULL;
190 /* we found what we were looking for */
191 if (tmp_msk != 0) {
192 run_start = rte_ctz64(tmp_msk);
193 return MASK_GET_IDX(msk_idx, run_start);
194 }
195 }
196
197 /*
198 * we didn't find our run within the mask, or n > MASK_ALIGN,
199 * so we're going for plan B.
200 */
201
202 /* count leading zeroes on inverted mask */
203 if (~cur_msk == 0)
204 clz = sizeof(cur_msk) * 8;
205 else
206 clz = rte_clz64(~cur_msk);
207
208 /* if there aren't any runs at the end either, just continue */
209 if (clz == 0)
210 continue;
211
212 /* we have a partial run at the end, so try looking ahead */
213 run_start = MASK_ALIGN - clz;
214 left -= clz;
215
216 for (lookahead_idx = msk_idx + 1; lookahead_idx < msk->n_masks;
217 lookahead_idx++) {
218 unsigned int s_idx, need;
219 uint64_t first_bit = 1;
220
221 lookahead_msk = msk->data[lookahead_idx];
222
223 /* if we're looking for free space, invert the mask */
224 if (!used)
225 lookahead_msk = ~lookahead_msk;
226
227 /* figure out how many consecutive bits we need here */
228 need = RTE_MIN(left, MASK_ALIGN);
229
230 /* count number of shifts we performed */
231 for (s_idx = 0; s_idx < need - 1; s_idx++) {
232 lookahead_msk &= lookahead_msk >> 1ULL;
233 /* did we lose the run yet? */
234 if ((lookahead_msk & first_bit) == 0)
235 break;
236 }
237
238 /* if first bit is not set, we've lost the run */
239 if ((lookahead_msk & first_bit) == 0) {
240 /*
241 * we've scanned this far, so we know there are
242 * no runs in the space we've lookahead-scanned
243 * as well, so skip that on next iteration.
244 */
245 ignore_msk = ~((1ULL << (s_idx + 1)) - 1);
246 /* outer loop will increment msk_idx so add 1 */
247 msk_idx = lookahead_idx - 1;
248 break;
249 }
250
251 left -= need;
252
253 /* check if we've found what we were looking for */
254 if (left == 0) {
255 found = true;
256 break;
257 }
258 }
259
260 /* we didn't find anything, so continue */
261 if (!found)
262 continue;
263
264 return MASK_GET_IDX(msk_idx, run_start);
265 }
266 /* we didn't find anything */
267 rte_errno = used ? ENOENT : ENOSPC;
268 return -1;
269 }
270
271 static int
272 find_next(const struct rte_fbarray *arr, unsigned int start, bool used)
273 {
274 const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
275 arr->len);
276 unsigned int idx, first, first_mod;
277 unsigned int last, last_mod;
278 uint64_t last_msk, ignore_msk;
279
280 /*
281 * mask only has granularity of MASK_ALIGN, but start may not be aligned
282 * on that boundary, so construct a special mask to exclude anything we
283 * don't want to see to avoid confusing ctz.
284 */
285 first = MASK_LEN_TO_IDX(start);
286 first_mod = MASK_LEN_TO_MOD(start);
287 ignore_msk = ~((1ULL << first_mod) - 1ULL);
288
289 /* array length may not be aligned, so calculate ignore mask for last
290 * mask index.
291 */
292 last = MASK_LEN_TO_IDX(arr->len);
293 last_mod = MASK_LEN_TO_MOD(arr->len);
294 last_msk = ~(-(1ULL) << last_mod);
295
296 for (idx = first; idx < msk->n_masks; idx++) {
297 uint64_t cur = msk->data[idx];
298 int found;
299
300 /* if we're looking for free entries, invert mask */
301 if (!used)
302 cur = ~cur;
303
304 if (idx == last)
305 cur &= last_msk;
306
307 /* ignore everything before start on first iteration */
308 if (idx == first)
309 cur &= ignore_msk;
310
311 /* check if we have any entries */
312 if (cur == 0)
313 continue;
314
315 /*
316 * find first set bit - that will correspond to whatever it is
317 * that we're looking for.
318 */
319 found = rte_ctz64(cur);
320 return MASK_GET_IDX(idx, found);
321 }
322 /* we didn't find anything */
323 rte_errno = used ? ENOENT : ENOSPC;
324 return -1;
325 }
326
327 static int
328 find_contig(const struct rte_fbarray *arr, unsigned int start, bool used)
329 {
330 const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
331 arr->len);
332 unsigned int idx, first, first_mod;
333 unsigned int last, last_mod;
334 uint64_t last_msk;
335 unsigned int need_len, result = 0;
336
337 /* array length may not be aligned, so calculate ignore mask for last
338 * mask index.
339 */
340 last = MASK_LEN_TO_IDX(arr->len);
341 last_mod = MASK_LEN_TO_MOD(arr->len);
342 last_msk = ~(-(1ULL) << last_mod);
343
344 first = MASK_LEN_TO_IDX(start);
345 first_mod = MASK_LEN_TO_MOD(start);
346 for (idx = first; idx < msk->n_masks; idx++, result += need_len) {
347 uint64_t cur = msk->data[idx];
348 unsigned int run_len;
349
350 need_len = MASK_ALIGN;
351
352 /* if we're looking for free entries, invert mask */
353 if (!used)
354 cur = ~cur;
355
356 /* if this is last mask, ignore everything after last bit */
357 if (idx == last)
358 cur &= last_msk;
359
360 /* ignore everything before start on first iteration */
361 if (idx == first) {
362 cur >>= first_mod;
363 /* at the start, we don't need the full mask len */
364 need_len -= first_mod;
365 }
366
367 /* we will be looking for zeroes, so invert the mask */
368 cur = ~cur;
369
370 /* if mask is zero, we have a complete run */
371 if (cur == 0)
372 continue;
373
374 /*
375 * see if current run ends before mask end.
376 */
377 run_len = rte_ctz64(cur);
378
379 /* add however many zeroes we've had in the last run and quit */
380 if (run_len < need_len) {
381 result += run_len;
382 break;
383 }
384 }
385 return result;
386 }
387
388 static int
389 find_prev_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n,
390 bool used)
391 {
392 const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
393 arr->len);
394 unsigned int msk_idx, lookbehind_idx, first, first_mod;
395 uint64_t ignore_msk;
396
397 /*
398 * mask only has granularity of MASK_ALIGN, but start may not be aligned
399 * on that boundary, so construct a special mask to exclude anything we
400 * don't want to see to avoid confusing ctz.
401 */
402 first = MASK_LEN_TO_IDX(start);
403 first_mod = MASK_LEN_TO_MOD(start);
404 /* we're going backwards, so mask must start from the top */
405 ignore_msk = first_mod == MASK_ALIGN - 1 ?
406 UINT64_MAX : /* prevent overflow */
407 ~(UINT64_MAX << (first_mod + 1));
408
409 /* go backwards, include zero */
410 msk_idx = first;
411 do {
412 uint64_t cur_msk, lookbehind_msk;
413 unsigned int run_start, run_end, ctz, left;
414 bool found = false;
415 /*
416 * The process of getting n consecutive bits from the top for
417 * arbitrary n is a bit involved, but here it is in a nutshell:
418 *
419 * 1. let n be the number of consecutive bits we're looking for
420 * 2. check if n can fit in one mask, and if so, do n-1
421 * lshift-ands to see if there is an appropriate run inside
422 * our current mask
423 * 2a. if we found a run, bail out early
424 * 2b. if we didn't find a run, proceed
425 * 3. invert the mask and count trailing zeroes (that is, count
426 * how many consecutive set bits we had starting from the
427 * start of current mask) as k
428 * 3a. if k is 0, continue to next mask
429 * 3b. if k is not 0, we have a potential run
430 * 4. to satisfy our requirements, next mask must have n-k
431 * consecutive set bits at the end, so we will do (n-k-1)
432 * lshift-ands and check if last bit is set.
433 *
434 * Step 4 will need to be repeated if (n-k) > MASK_ALIGN until
435 * we either run out of masks, lose the run, or find what we
436 * were looking for.
437 */
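/*
 * This mirrors the forward search: e.g. n == 10 with the bottom four
 * bits of cur_msk set (and bit 4 clear) gives ctz == 4 on the inverted
 * mask, so run_end points just past that partial run, left == 6, and
 * the lookbehind word must then provide six set bits at its top end.
 */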
438 cur_msk = msk->data[msk_idx];
439 left = n;
440
441 /* if we're looking for free spaces, invert the mask */
442 if (!used)
443 cur_msk = ~cur_msk;
444
445 /* if we have an ignore mask, ignore once */
446 if (ignore_msk) {
447 cur_msk &= ignore_msk;
448 ignore_msk = 0;
449 }
450
451 /* if n can fit within a single mask, do a search */
452 if (n <= MASK_ALIGN) {
453 uint64_t tmp_msk = cur_msk;
454 unsigned int s_idx;
455 for (s_idx = 0; s_idx < n - 1; s_idx++)
456 tmp_msk &= tmp_msk << 1ULL;
457 /* we found what we were looking for */
458 if (tmp_msk != 0) {
459 /* clz will give us offset from end of mask, and
460 * we only get the end of our run, not start,
461 * so adjust result to point to where start
462 * would have been.
463 */
464 run_start = MASK_ALIGN -
465 rte_clz64(tmp_msk) - n;
466 return MASK_GET_IDX(msk_idx, run_start);
467 }
468 }
469
470 /*
471 * we didn't find our run within the mask, or n > MASK_ALIGN,
472 * so we're going for plan B.
473 */
474
475 /* count trailing zeroes on inverted mask */
476 if (~cur_msk == 0)
477 ctz = sizeof(cur_msk) * 8;
478 else
479 ctz = rte_ctz64(~cur_msk);
480
481 /* if there aren't any runs at the start either, just
482 * continue
483 */
484 if (ctz == 0)
485 continue;
486
487 /* we have a partial run at the start, so try looking behind */
488 run_end = MASK_GET_IDX(msk_idx, ctz);
489 left -= ctz;
490
491 /* go backwards, include zero */
492 lookbehind_idx = msk_idx - 1;
493
494 /* we can't lookbehind as we've run out of masks, so stop */
495 if (msk_idx == 0)
496 break;
497
498 do {
499 const uint64_t last_bit = 1ULL << (MASK_ALIGN - 1);
500 unsigned int s_idx, need;
501
502 lookbehind_msk = msk->data[lookbehind_idx];
503
504 /* if we're looking for free space, invert the mask */
505 if (!used)
506 lookbehind_msk = ~lookbehind_msk;
507
508 /* figure out how many consecutive bits we need here */
509 need = RTE_MIN(left, MASK_ALIGN);
510
511 /* count number of shifts we performed */
512 for (s_idx = 0; s_idx < need - 1; s_idx++) {
513 lookbehind_msk &= lookbehind_msk << 1ULL;
514 /* did we lose the run yet? */
515 if ((lookbehind_msk & last_bit) == 0)
516 break;
517 }
518
519 /* if last bit is not set, we've lost the run */
520 if ((lookbehind_msk & last_bit) == 0) {
521 /*
522 * we've scanned this far, so we know there are
523 * no runs in the space we've lookbehind-scanned
524 * as well, so skip that on next iteration.
525 */
526 ignore_msk = ~(UINT64_MAX << (MASK_ALIGN - s_idx - 1));
527 /* outer loop will decrement msk_idx so add 1 */
528 msk_idx = lookbehind_idx + 1;
529 break;
530 }
531
532 left -= need;
533
534 /* check if we've found what we were looking for */
535 if (left == 0) {
536 found = true;
537 break;
538 }
539 } while ((lookbehind_idx--) != 0); /* decrement after check to
540 * include zero
541 */
542
543 /* we didn't find anything, so continue */
544 if (!found)
545 continue;
546
547 /* we've found what we were looking for, but we only know where
548 * the run ended, so calculate start position.
549 */
550 return run_end - n;
551 } while (msk_idx-- != 0); /* decrement after check to include zero */
552 /* we didn't find anything */
553 rte_errno = used ? ENOENT : ENOSPC;
554 return -1;
555 }
556
557 static int
558 find_prev(const struct rte_fbarray *arr, unsigned int start, bool used)
559 {
560 const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
561 arr->len);
562 unsigned int idx, first, first_mod;
563 uint64_t ignore_msk;
564
565 /*
566 * mask only has granularity of MASK_ALIGN, but start may not be aligned
567 * on that boundary, so construct a special mask to exclude anything we
568 * don't want to see to avoid confusing clz.
569 */
570 first = MASK_LEN_TO_IDX(start);
571 first_mod = MASK_LEN_TO_MOD(start);
572 /* we're going backwards, so mask must start from the top */
573 ignore_msk = first_mod == MASK_ALIGN - 1 ?
574 UINT64_MAX : /* prevent overflow */
575 ~(UINT64_MAX << (first_mod + 1));
576
577 /* go backwards, include zero */
578 idx = first;
579 do {
580 uint64_t cur = msk->data[idx];
581 int found;
582
583 /* if we're looking for free entries, invert mask */
584 if (!used)
585 cur = ~cur;
586
587 /* ignore everything before start on first iteration */
588 if (idx == first)
589 cur &= ignore_msk;
590
591 /* check if we have any entries */
592 if (cur == 0)
593 continue;
594
595 /*
596 * find last set bit - that will correspond to whatever it is
597 * that we're looking for. we're counting leading zeroes, so the
598 * value we get is an offset from the top of the mask; convert
599 * that to a position from the start of the mask.
600 */
601 found = MASK_ALIGN - rte_clz64(cur) - 1;
602
603 return MASK_GET_IDX(idx, found);
604 } while (idx-- != 0); /* decrement after check to include zero */
605
606 /* we didn't find anything */
607 rte_errno = used ? ENOENT : ENOSPC;
608 return -1;
609 }
610
611 static int
612 find_rev_contig(const struct rte_fbarray *arr, unsigned int start, bool used)
613 {
614 const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
615 arr->len);
616 unsigned int idx, first, first_mod;
617 unsigned int need_len, result = 0;
618
619 first = MASK_LEN_TO_IDX(start);
620 first_mod = MASK_LEN_TO_MOD(start);
621
622 /* go backwards, include zero */
623 idx = first;
624 do {
625 uint64_t cur = msk->data[idx];
626 unsigned int run_len;
627
628 need_len = MASK_ALIGN;
629
630 /* if we're looking for free entries, invert mask */
631 if (!used)
632 cur = ~cur;
633
634 /* ignore everything after start on first iteration */
635 if (idx == first) {
636 unsigned int end_len = MASK_ALIGN - first_mod - 1;
637 cur <<= end_len;
638 /* at the start, we don't need the full mask len */
639 need_len -= end_len;
640 }
641
642 /* we will be looking for zeroes, so invert the mask */
643 cur = ~cur;
644
645 /* if mask is zero, we have a complete run */
646 if (cur == 0)
647 goto endloop;
648
649 /*
650 * see where run ends, starting from the end.
651 */
652 run_len = rte_clz64(cur);
653
654 /* add however many zeroes we've had in the last run and quit */
655 if (run_len < need_len) {
656 result += run_len;
657 break;
658 }
659 endloop:
660 result += need_len;
661 } while (idx-- != 0); /* decrement after check to include zero */
662 return result;
663 }
664
665 static int
666 set_used(struct rte_fbarray *arr, unsigned int idx, bool used)
667 {
668 struct used_mask *msk;
669 uint64_t msk_bit = 1ULL << MASK_LEN_TO_MOD(idx);
670 unsigned int msk_idx = MASK_LEN_TO_IDX(idx);
671 bool already_used;
672 int ret = -1;
673
674 if (arr == NULL || idx >= arr->len) {
675 rte_errno = EINVAL;
676 return -1;
677 }
678 msk = get_used_mask(arr->data, arr->elt_sz, arr->len);
679 ret = 0;
680
681 /* prevent array from changing under us */
682 rte_rwlock_write_lock(&arr->rwlock);
683
684 already_used = (msk->data[msk_idx] & msk_bit) != 0;
685
686 /* nothing to be done */
687 if (used == already_used)
688 goto out;
689
690 if (used) {
691 msk->data[msk_idx] |= msk_bit;
692 arr->count++;
693 } else {
694 msk->data[msk_idx] &= ~msk_bit;
695 arr->count--;
696 }
697 out:
698 rte_rwlock_write_unlock(&arr->rwlock);
699
700 return ret;
701 }
702
703 static int
704 fully_validate(const char *name, unsigned int elt_sz, unsigned int len)
705 {
706 if (name == NULL || elt_sz == 0 || len == 0 || len > INT_MAX) {
707 rte_errno = EINVAL;
708 return -1;
709 }
710
711 if (strnlen(name, RTE_FBARRAY_NAME_LEN) == RTE_FBARRAY_NAME_LEN) {
712 rte_errno = ENAMETOOLONG;
713 return -1;
714 }
715 return 0;
716 }
717
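/*
 * File lock protocol used by the functions below (skipped entirely in
 * no_shconf mode): init briefly takes an exclusive lock to make sure no one
 * else holds the file, then downgrades it to a shared lock; attach takes a
 * shared lock; destroy succeeds only if it can take an exclusive lock, i.e.
 * once every other process has detached.
 */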
718 int
719 rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len,
720 unsigned int elt_sz)
721 {
722 size_t page_sz, mmap_len;
723 char path[PATH_MAX];
724 struct used_mask *msk;
725 struct mem_area *ma = NULL;
726 void *data = NULL;
727 int fd = -1;
728 const struct internal_config *internal_conf =
729 eal_get_internal_configuration();
730
731 if (arr == NULL) {
732 rte_errno = EINVAL;
733 return -1;
734 }
735
736 if (fully_validate(name, elt_sz, len))
737 return -1;
738
739 /* allocate mem area before doing anything */
740 ma = malloc(sizeof(*ma));
741 if (ma == NULL) {
742 rte_errno = ENOMEM;
743 return -1;
744 }
745
746 page_sz = rte_mem_page_size();
747 if (page_sz == (size_t)-1) {
748 free(ma);
749 return -1;
750 }
751
752 /* calculate our memory limits */
753 mmap_len = calc_data_size(page_sz, elt_sz, len);
754
755 data = eal_get_virtual_area(NULL, &mmap_len, page_sz, 0, 0);
756 if (data == NULL) {
757 free(ma);
758 return -1;
759 }
760
761 rte_spinlock_lock(&mem_area_lock);
762
763 fd = -1;
764
765 if (internal_conf->no_shconf) {
766 /* remap virtual area as writable */
767 static const int flags = RTE_MAP_FORCE_ADDRESS |
768 RTE_MAP_PRIVATE | RTE_MAP_ANONYMOUS;
769 void *new_data = rte_mem_map(data, mmap_len,
770 RTE_PROT_READ | RTE_PROT_WRITE, flags, fd, 0);
771 if (new_data == NULL) {
772 EAL_LOG(DEBUG, "%s(): couldn't remap anonymous memory: %s",
773 __func__, rte_strerror(rte_errno));
774 goto fail;
775 }
776 } else {
777 eal_get_fbarray_path(path, sizeof(path), name);
778
779 /*
780 * Each fbarray is unique to a process namespace, i.e. the
781 * filename depends on the process prefix. Try to take out a lock
782 * and see if we succeed. If we don't, someone else is using it
783 * already.
784 */
785 fd = eal_file_open(path, EAL_OPEN_CREATE | EAL_OPEN_READWRITE);
786 if (fd < 0) {
787 EAL_LOG(DEBUG, "%s(): couldn't open %s: %s",
788 __func__, path, rte_strerror(rte_errno));
789 goto fail;
790 } else if (eal_file_lock(
791 fd, EAL_FLOCK_EXCLUSIVE, EAL_FLOCK_RETURN)) {
792 EAL_LOG(DEBUG, "%s(): couldn't lock %s: %s",
793 __func__, path, rte_strerror(rte_errno));
794 rte_errno = EBUSY;
795 goto fail;
796 }
797
798 /* take out a non-exclusive lock, so that other processes could
799 * still attach to it, but no other process could reinitialize
800 * it.
801 */
802 if (eal_file_lock(fd, EAL_FLOCK_SHARED, EAL_FLOCK_RETURN))
803 goto fail;
804
805 if (resize_and_map(fd, path, data, mmap_len))
806 goto fail;
807 }
808 ma->addr = data;
809 ma->len = mmap_len;
810 ma->fd = fd;
811
812 /* do not close fd - keep it until detach/destroy */
813 TAILQ_INSERT_TAIL(&mem_area_tailq, ma, next);
814
815 /* initialize the data */
816 memset(data, 0, mmap_len);
817
818 /* populate data structure */
819 strlcpy(arr->name, name, sizeof(arr->name));
820 arr->data = data;
821 arr->len = len;
822 arr->elt_sz = elt_sz;
823 arr->count = 0;
824
825 msk = get_used_mask(data, elt_sz, len);
826 msk->n_masks = MASK_LEN_TO_IDX(RTE_ALIGN_CEIL(len, MASK_ALIGN));
827
828 rte_rwlock_init(&arr->rwlock);
829
830 rte_spinlock_unlock(&mem_area_lock);
831
832 return 0;
833 fail:
834 if (data)
835 rte_mem_unmap(data, mmap_len);
836 if (fd >= 0)
837 close(fd);
838 free(ma);
839
840 rte_spinlock_unlock(&mem_area_lock);
841 return -1;
842 }
843
844 int
845 rte_fbarray_attach(struct rte_fbarray *arr)
846 {
847 struct mem_area *ma = NULL, *tmp = NULL;
848 size_t page_sz, mmap_len;
849 char path[PATH_MAX];
850 void *data = NULL;
851 int fd = -1;
852
853 if (arr == NULL) {
854 rte_errno = EINVAL;
855 return -1;
856 }
857
858 /*
859 * we don't need to synchronize attach as two values we need (element
860 * size and array length) are constant for the duration of life of
861 * the array, so the parts we care about will not race.
862 */
863
864 if (fully_validate(arr->name, arr->elt_sz, arr->len))
865 return -1;
866
867 ma = malloc(sizeof(*ma));
868 if (ma == NULL) {
869 rte_errno = ENOMEM;
870 return -1;
871 }
872
873 page_sz = rte_mem_page_size();
874 if (page_sz == (size_t)-1) {
875 free(ma);
876 return -1;
877 }
878
879 mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->len);
880
881 /* check the tailq - maybe user has already mapped this address space */
882 rte_spinlock_lock(&mem_area_lock);
883
884 TAILQ_FOREACH(tmp, &mem_area_tailq, next) {
885 if (overlap(tmp, arr->data, mmap_len)) {
886 rte_errno = EEXIST;
887 goto fail;
888 }
889 }
890
891 /* we know this memory area is unique, so proceed */
892
893 data = eal_get_virtual_area(arr->data, &mmap_len, page_sz, 0, 0);
894 if (data == NULL)
895 goto fail;
896
897 eal_get_fbarray_path(path, sizeof(path), arr->name);
898
899 fd = eal_file_open(path, EAL_OPEN_READWRITE);
900 if (fd < 0) {
901 goto fail;
902 }
903
904 /* lock the file, to let others know we're using it */
905 if (eal_file_lock(fd, EAL_FLOCK_SHARED, EAL_FLOCK_RETURN))
906 goto fail;
907
908 if (resize_and_map(fd, path, data, mmap_len))
909 goto fail;
910
911 /* store our new memory area */
912 ma->addr = data;
913 ma->fd = fd; /* keep fd until detach/destroy */
914 ma->len = mmap_len;
915
916 TAILQ_INSERT_TAIL(&mem_area_tailq, ma, next);
917
918 /* we're done */
919
920 rte_spinlock_unlock(&mem_area_lock);
921 return 0;
922 fail:
923 if (data)
924 rte_mem_unmap(data, mmap_len);
925 if (fd >= 0)
926 close(fd);
927 free(ma);
928 rte_spinlock_unlock(&mem_area_lock);
929 return -1;
930 }
931
932 int
933 rte_fbarray_detach(struct rte_fbarray *arr)
934 {
935 struct mem_area *tmp = NULL;
936 size_t mmap_len;
937 int ret = -1;
938
939 if (arr == NULL) {
940 rte_errno = EINVAL;
941 return -1;
942 }
943
944 /*
945 * we don't need to synchronize detach as two values we need (element
946 * size and total capacity) are constant for the duration of life of
947 * the array, so the parts we care about will not race. if the user is
948 * detaching while doing something else in the same process, we can't
949 * really do anything about it, things will blow up either way.
950 */
951
952 size_t page_sz = rte_mem_page_size();
953 if (page_sz == (size_t)-1)
954 return -1;
955
956 mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->len);
957
958 /* does this area exist? */
959 rte_spinlock_lock(&mem_area_lock);
960
961 TAILQ_FOREACH(tmp, &mem_area_tailq, next) {
962 if (tmp->addr == arr->data && tmp->len == mmap_len)
963 break;
964 }
965 if (tmp == NULL) {
966 rte_errno = ENOENT;
967 ret = -1;
968 goto out;
969 }
970
971 rte_mem_unmap(arr->data, mmap_len);
972
973 /* area is unmapped, close fd and remove the tailq entry */
974 if (tmp->fd >= 0)
975 close(tmp->fd);
976 TAILQ_REMOVE(&mem_area_tailq, tmp, next);
977 free(tmp);
978
979 ret = 0;
980 out:
981 rte_spinlock_unlock(&mem_area_lock);
982 return ret;
983 }
984
985 int
986 rte_fbarray_destroy(struct rte_fbarray *arr)
987 {
988 struct mem_area *tmp = NULL;
989 size_t mmap_len;
990 int fd, ret;
991 char path[PATH_MAX];
992 const struct internal_config *internal_conf =
993 eal_get_internal_configuration();
994
995 if (arr == NULL) {
996 rte_errno = EINVAL;
997 return -1;
998 }
999
1000 /*
1001 * we don't need to synchronize destroy as the two values we need (element
1002 * size and total capacity) are constant for the duration of life of
1003 * the array, so the parts we care about will not race. if the user is
1004 * detaching while doing something else in the same process, we can't
1005 * really do anything about it, things will blow up either way.
1006 */
1007
1008 size_t page_sz = rte_mem_page_size();
1009 if (page_sz == (size_t)-1)
1010 return -1;
1011
1012 mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->len);
1013
1014 /* does this area exist? */
1015 rte_spinlock_lock(&mem_area_lock);
1016
1017 TAILQ_FOREACH(tmp, &mem_area_tailq, next) {
1018 if (tmp->addr == arr->data && tmp->len == mmap_len)
1019 break;
1020 }
1021 if (tmp == NULL) {
1022 rte_errno = ENOENT;
1023 ret = -1;
1024 goto out;
1025 }
1026 /* with no shconf, there were never any files to begin with */
1027 if (!internal_conf->no_shconf) {
1028 /*
1029 * attempt to get an exclusive lock on the file, to ensure it
1030 * has been detached by all other processes
1031 */
1032 fd = tmp->fd;
1033 if (eal_file_lock(fd, EAL_FLOCK_EXCLUSIVE, EAL_FLOCK_RETURN)) {
1034 EAL_LOG(DEBUG, "Cannot destroy fbarray - another process is using it");
1035 rte_errno = EBUSY;
1036 ret = -1;
1037 goto out;
1038 }
1039
1040 /* we're OK to destroy the file */
1041 eal_get_fbarray_path(path, sizeof(path), arr->name);
1042 if (unlink(path)) {
1043 EAL_LOG(DEBUG, "Cannot unlink fbarray: %s",
1044 strerror(errno));
1045 rte_errno = errno;
1046 /*
1047 * we're still holding an exclusive lock, so drop it to
1048 * shared.
1049 */
1050 eal_file_lock(fd, EAL_FLOCK_SHARED, EAL_FLOCK_RETURN);
1051
1052 ret = -1;
1053 goto out;
1054 }
1055 close(fd);
1056 }
1057 rte_mem_unmap(arr->data, mmap_len);
1058
1059 /* area is unmapped, remove the tailq entry */
1060 TAILQ_REMOVE(&mem_area_tailq, tmp, next);
1061 free(tmp);
1062 ret = 0;
1063
1064 /* reset the fbarray structure */
1065 memset(arr, 0, sizeof(*arr));
1066 out:
1067 rte_spinlock_unlock(&mem_area_lock);
1068 return ret;
1069 }
1070
1071 void *
1072 rte_fbarray_get(const struct rte_fbarray *arr, unsigned int idx)
1073 {
1074 void *ret = NULL;
1075 if (arr == NULL) {
1076 rte_errno = EINVAL;
1077 return NULL;
1078 }
1079
1080 if (idx >= arr->len) {
1081 rte_errno = EINVAL;
1082 return NULL;
1083 }
1084
1085 ret = RTE_PTR_ADD(arr->data, idx * arr->elt_sz);
1086
1087 return ret;
1088 }
1089
1090 int
1091 rte_fbarray_set_used(struct rte_fbarray *arr, unsigned int idx)
1092 {
1093 return set_used(arr, idx, true);
1094 }
1095
1096 int
1097 rte_fbarray_set_free(struct rte_fbarray *arr, unsigned int idx)
1098 {
1099 return set_used(arr, idx, false);
1100 }
1101
1102 int
1103 rte_fbarray_is_used(struct rte_fbarray *arr, unsigned int idx)
1104 {
1105 struct used_mask *msk;
1106 int msk_idx;
1107 uint64_t msk_bit;
1108 int ret = -1;
1109
1110 if (arr == NULL || idx >= arr->len) {
1111 rte_errno = EINVAL;
1112 return -1;
1113 }
1114
1115 /* prevent array from changing under us */
1116 rte_rwlock_read_lock(&arr->rwlock);
1117
1118 msk = get_used_mask(arr->data, arr->elt_sz, arr->len);
1119 msk_idx = MASK_LEN_TO_IDX(idx);
1120 msk_bit = 1ULL << MASK_LEN_TO_MOD(idx);
1121
1122 ret = (msk->data[msk_idx] & msk_bit) != 0;
1123
1124 rte_rwlock_read_unlock(&arr->rwlock);
1125
1126 return ret;
1127 }
1128
1129 static int
1130 fbarray_find(struct rte_fbarray *arr, unsigned int start, bool next, bool used)
1131 {
1132 int ret = -1;
1133
1134 if (arr == NULL || start >= arr->len) {
1135 rte_errno = EINVAL;
1136 return -1;
1137 }
1138
1139 /* prevent array from changing under us */
1140 rte_rwlock_read_lock(&arr->rwlock);
1141
1142 /* cheap checks to prevent doing useless work */
1143 if (!used) {
1144 if (arr->len == arr->count) {
1145 rte_errno = ENOSPC;
1146 goto out;
1147 }
1148 if (arr->count == 0) {
1149 ret = start;
1150 goto out;
1151 }
1152 } else {
1153 if (arr->count == 0) {
1154 rte_errno = ENOENT;
1155 goto out;
1156 }
1157 if (arr->len == arr->count) {
1158 ret = start;
1159 goto out;
1160 }
1161 }
1162 if (next)
1163 ret = find_next(arr, start, used);
1164 else
1165 ret = find_prev(arr, start, used);
1166 out:
1167 rte_rwlock_read_unlock(&arr->rwlock);
1168 return ret;
1169 }
1170
1171 int
1172 rte_fbarray_find_next_free(struct rte_fbarray *arr, unsigned int start)
1173 {
1174 return fbarray_find(arr, start, true, false);
1175 }
1176
1177 int
1178 rte_fbarray_find_next_used(struct rte_fbarray *arr, unsigned int start)
1179 {
1180 return fbarray_find(arr, start, true, true);
1181 }
1182
1183 int
1184 rte_fbarray_find_prev_free(struct rte_fbarray *arr, unsigned int start)
1185 {
1186 return fbarray_find(arr, start, false, false);
1187 }
1188
1189 int
1190 rte_fbarray_find_prev_used(struct rte_fbarray *arr, unsigned int start)
1191 {
1192 return fbarray_find(arr, start, false, true);
1193 }
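/*
 * Illustrative sketch of how the find/set/get calls above are typically
 * combined to allocate one slot (hypothetical array name and element type,
 * error handling trimmed):
 *
 *	struct rte_fbarray arr;
 *	uint64_t *elt;
 *	int idx;
 *
 *	if (rte_fbarray_init(&arr, "example", 1024, sizeof(*elt)) < 0)
 *		return -1;
 *	idx = rte_fbarray_find_next_free(&arr, 0);
 *	if (idx < 0)
 *		return -1;
 *	rte_fbarray_set_used(&arr, idx);
 *	elt = rte_fbarray_get(&arr, idx);
 */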
1194
1195 static int
1196 fbarray_find_n(struct rte_fbarray *arr, unsigned int start, unsigned int n,
1197 bool next, bool used)
1198 {
1199 int ret = -1;
1200
1201 if (arr == NULL || start >= arr->len || n > arr->len || n == 0) {
1202 rte_errno = EINVAL;
1203 return -1;
1204 }
1205 if (next && (arr->len - start) < n) {
1206 rte_errno = used ? ENOENT : ENOSPC;
1207 return -1;
1208 }
1209 if (!next && start < (n - 1)) {
1210 rte_errno = used ? ENOENT : ENOSPC;
1211 return -1;
1212 }
1213
1214 /* prevent array from changing under us */
1215 rte_rwlock_read_lock(&arr->rwlock);
1216
1217 /* cheap checks to prevent doing useless work */
1218 if (!used) {
1219 if (arr->len == arr->count || arr->len - arr->count < n) {
1220 rte_errno = ENOSPC;
1221 goto out;
1222 }
1223 if (arr->count == 0) {
1224 ret = next ? start : start - n + 1;
1225 goto out;
1226 }
1227 } else {
1228 if (arr->count < n) {
1229 rte_errno = ENOENT;
1230 goto out;
1231 }
1232 if (arr->count == arr->len) {
1233 ret = next ? start : start - n + 1;
1234 goto out;
1235 }
1236 }
1237
1238 if (next)
1239 ret = find_next_n(arr, start, n, used);
1240 else
1241 ret = find_prev_n(arr, start, n, used);
1242 out:
1243 rte_rwlock_read_unlock(&arr->rwlock);
1244 return ret;
1245 }
1246
1247 int
1248 rte_fbarray_find_next_n_free(struct rte_fbarray *arr, unsigned int start,
1249 unsigned int n)
1250 {
1251 return fbarray_find_n(arr, start, n, true, false);
1252 }
1253
1254 int
1255 rte_fbarray_find_next_n_used(struct rte_fbarray *arr, unsigned int start,
1256 unsigned int n)
1257 {
1258 return fbarray_find_n(arr, start, n, true, true);
1259 }
1260
1261 int
1262 rte_fbarray_find_prev_n_free(struct rte_fbarray *arr, unsigned int start,
1263 unsigned int n)
1264 {
1265 return fbarray_find_n(arr, start, n, false, false);
1266 }
1267
1268 int
1269 rte_fbarray_find_prev_n_used(struct rte_fbarray *arr, unsigned int start,
1270 unsigned int n)
1271 {
1272 return fbarray_find_n(arr, start, n, false, true);
1273 }
1274
1275 static int
1276 fbarray_find_contig(struct rte_fbarray *arr, unsigned int start, bool next,
1277 bool used)
1278 {
1279 int ret = -1;
1280
1281 if (arr == NULL || start >= arr->len) {
1282 rte_errno = EINVAL;
1283 return -1;
1284 }
1285
1286 /* prevent array from changing under us */
1287 rte_rwlock_read_lock(&arr->rwlock);
1288
1289 /* cheap checks to prevent doing useless work */
1290 if (used) {
1291 if (arr->count == 0) {
1292 ret = 0;
1293 goto out;
1294 }
1295 if (next && arr->count == arr->len) {
1296 ret = arr->len - start;
1297 goto out;
1298 }
1299 if (!next && arr->count == arr->len) {
1300 ret = start + 1;
1301 goto out;
1302 }
1303 } else {
1304 if (arr->len == arr->count) {
1305 ret = 0;
1306 goto out;
1307 }
1308 if (next && arr->count == 0) {
1309 ret = arr->len - start;
1310 goto out;
1311 }
1312 if (!next && arr->count == 0) {
1313 ret = start + 1;
1314 goto out;
1315 }
1316 }
1317
1318 if (next)
1319 ret = find_contig(arr, start, used);
1320 else
1321 ret = find_rev_contig(arr, start, used);
1322 out:
1323 rte_rwlock_read_unlock(&arr->rwlock);
1324 return ret;
1325 }
1326
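/*
 * Finding the biggest chunk is built from the find/contig primitives above:
 * repeatedly locate the next (or previous) chunk, measure its length, keep
 * the longest one seen so far, then jump past it. For example, scanning
 * forward over free runs of lengths 3, 7 and 2 visits each run once and
 * returns the start index of the 7-element run.
 */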
1327 static int
1328 fbarray_find_biggest(struct rte_fbarray *arr, unsigned int start, bool used,
1329 bool rev)
1330 {
1331 int cur_idx, next_idx, cur_len, biggest_idx, biggest_len;
1332 /* don't stack 'if' conditions, use function pointers instead */
1333 int (*find_func)(struct rte_fbarray *, unsigned int);
1334 int (*find_contig_func)(struct rte_fbarray *, unsigned int);
1335
1336 if (arr == NULL || start >= arr->len) {
1337 rte_errno = EINVAL;
1338 return -1;
1339 }
1340 /* the other API calls already do their fair share of cheap checks, so
1341 * no need to do them here.
1342 */
1343
1344 /* the APIs called are thread-safe, but something may still happen
1345 * between the API calls, so lock the fbarray. all other APIs are
1346 * read-locking the fbarray, so a read lock here is OK.
1347 */
1348 rte_rwlock_read_lock(&arr->rwlock);
1349
1350 /* pick out appropriate functions */
1351 if (used) {
1352 if (rev) {
1353 find_func = rte_fbarray_find_prev_used;
1354 find_contig_func = rte_fbarray_find_rev_contig_used;
1355 } else {
1356 find_func = rte_fbarray_find_next_used;
1357 find_contig_func = rte_fbarray_find_contig_used;
1358 }
1359 } else {
1360 if (rev) {
1361 find_func = rte_fbarray_find_prev_free;
1362 find_contig_func = rte_fbarray_find_rev_contig_free;
1363 } else {
1364 find_func = rte_fbarray_find_next_free;
1365 find_contig_func = rte_fbarray_find_contig_free;
1366 }
1367 }
1368
1369 cur_idx = start;
1370 biggest_idx = -1; /* default is error */
1371 biggest_len = 0;
1372 for (;;) {
1373 cur_idx = find_func(arr, cur_idx);
1374
1375 /* block found, check its length */
1376 if (cur_idx >= 0) {
1377 cur_len = find_contig_func(arr, cur_idx);
1378 /* decide where we go next */
1379 next_idx = rev ? cur_idx - cur_len : cur_idx + cur_len;
1380 /* move current index to start of chunk */
1381 cur_idx = rev ? next_idx + 1 : cur_idx;
1382
1383 if (cur_len > biggest_len) {
1384 biggest_idx = cur_idx;
1385 biggest_len = cur_len;
1386 }
1387 cur_idx = next_idx;
1388 /* in reverse mode, next_idx may be -1 if chunk started
1389 * at array beginning. this means there's no more work
1390 * to do.
1391 */
1392 if (cur_idx < 0)
1393 break;
1394 } else {
1395 /* nothing more to find, stop. however, a failed API
1396 * call has set rte_errno, which we want to ignore, as
1397 * reaching the end of fbarray is not an error.
1398 */
1399 rte_errno = 0;
1400 break;
1401 }
1402 }
1403 /* if we didn't find anything at all, set rte_errno */
1404 if (biggest_idx < 0)
1405 rte_errno = used ? ENOENT : ENOSPC;
1406
1407 rte_rwlock_read_unlock(&arr->rwlock);
1408 return biggest_idx;
1409 }
1410
1411 int
1412 rte_fbarray_find_biggest_free(struct rte_fbarray *arr, unsigned int start)
1413 {
1414 return fbarray_find_biggest(arr, start, false, false);
1415 }
1416
1417 int
1418 rte_fbarray_find_biggest_used(struct rte_fbarray *arr, unsigned int start)
1419 {
1420 return fbarray_find_biggest(arr, start, true, false);
1421 }
1422
1423 int
1424 rte_fbarray_find_rev_biggest_free(struct rte_fbarray *arr, unsigned int start)
1425 {
1426 return fbarray_find_biggest(arr, start, false, true);
1427 }
1428
1429 int
1430 rte_fbarray_find_rev_biggest_used(struct rte_fbarray *arr, unsigned int start)
1431 {
1432 return fbarray_find_biggest(arr, start, true, true);
1433 }
1434
1435
1436 int
1437 rte_fbarray_find_contig_free(struct rte_fbarray *arr, unsigned int start)
1438 {
1439 return fbarray_find_contig(arr, start, true, false);
1440 }
1441
1442 int
1443 rte_fbarray_find_contig_used(struct rte_fbarray *arr, unsigned int start)
1444 {
1445 return fbarray_find_contig(arr, start, true, true);
1446 }
1447
1448 int
1449 rte_fbarray_find_rev_contig_free(struct rte_fbarray *arr, unsigned int start)
1450 {
1451 return fbarray_find_contig(arr, start, false, false);
1452 }
1453
1454 int
1455 rte_fbarray_find_rev_contig_used(struct rte_fbarray *arr, unsigned int start)
1456 {
1457 return fbarray_find_contig(arr, start, false, true);
1458 }
1459
1460 int
1461 rte_fbarray_find_idx(const struct rte_fbarray *arr, const void *elt)
1462 {
1463 void *end;
1464 int ret = -1;
1465
1466 /*
1467 * no need to synchronize as it doesn't matter if underlying data
1468 * changes - we're doing pointer arithmetic here.
1469 */
1470
1471 if (arr == NULL || elt == NULL) {
1472 rte_errno = EINVAL;
1473 return -1;
1474 }
1475 end = RTE_PTR_ADD(arr->data, arr->elt_sz * arr->len);
1476 if (elt < arr->data || elt >= end) {
1477 rte_errno = EINVAL;
1478 return -1;
1479 }
1480
1481 ret = RTE_PTR_DIFF(elt, arr->data) / arr->elt_sz;
1482
1483 return ret;
1484 }
1485
1486 void
1487 rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f)
1488 {
1489 struct used_mask *msk;
1490 unsigned int i;
1491
1492 if (arr == NULL || f == NULL) {
1493 rte_errno = EINVAL;
1494 return;
1495 }
1496
1497 if (fully_validate(arr->name, arr->elt_sz, arr->len)) {
1498 fprintf(f, "Invalid file-backed array\n");
1499 return;
1500 }
1501
1502 /* prevent array from changing under us */
1503 rte_rwlock_read_lock(&arr->rwlock);
1504
1505 fprintf(f, "File-backed array: %s\n", arr->name);
1506 fprintf(f, "size: %i occupied: %i elt_sz: %i\n",
1507 arr->len, arr->count, arr->elt_sz);
1508
1509 msk = get_used_mask(arr->data, arr->elt_sz, arr->len);
1510
1511 for (i = 0; i < msk->n_masks; i++)
1512 fprintf(f, "msk idx %i: 0x%016" PRIx64 "\n", i, msk->data[i]);
1513 rte_rwlock_read_unlock(&arr->rwlock);
1514 }
1515