/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2023 Marvell.
 */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>

#include <rte_bitmap.h>
#include <rte_pdcp.h>

#include "pdcp_cnt.h"
#include "pdcp_ctrl_pdu.h"
#include "pdcp_entity.h"

#define SLAB_BYTE_SIZE (RTE_BITMAP_SLAB_BIT_SIZE / 8)
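
/*
 * rte_bitmap operates on 64-bit slabs (RTE_BITMAP_SLAB_BIT_SIZE == 64), so
 * SLAB_BYTE_SIZE is 8: every memcpy()/memset() in the report fill below moves
 * whole 8-byte slabs rather than individual bits or bytes.
 */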

uint32_t
pdcp_cnt_bitmap_get_memory_footprint(const struct rte_pdcp_entity_conf *conf)
{
	uint32_t n_bits = pdcp_window_size_get(conf->pdcp_xfrm.sn_size);

	return rte_bitmap_get_memory_footprint(n_bits);
}
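
/*
 * Sizing sketch (illustrative, not part of this file): assuming the standard
 * PDCP reception window of 2^(SN_SIZE - 1) counts, a 12-bit SN gives a
 * 2048-bit window, so the footprint reported here is
 * rte_bitmap_get_memory_footprint(2048).
 */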

int
pdcp_cnt_bitmap_create(struct entity_priv_dl_part *dl, uint32_t nb_elem,
		       void *bitmap_mem, uint32_t mem_size)
{
	dl->bitmap.bmp = rte_bitmap_init(nb_elem, bitmap_mem, mem_size);
	if (dl->bitmap.bmp == NULL)
		return -EINVAL;

	dl->bitmap.size = nb_elem;

	return 0;
}
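
/*
 * Usage sketch (hypothetical caller, names assumed): carve the bitmap memory
 * out of the entity allocation and hand it over, e.g.:
 *
 *   uint32_t sz = pdcp_cnt_bitmap_get_memory_footprint(conf);
 *   void *mem = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *   if (mem == NULL || pdcp_cnt_bitmap_create(dl, window_sz, mem, sz) < 0)
 *           ... handle error ...
 */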

void
pdcp_cnt_bitmap_set(struct pdcp_cnt_bitmap bitmap, uint32_t count)
{
	rte_bitmap_set(bitmap.bmp, count % bitmap.size);
}

bool
pdcp_cnt_bitmap_is_set(struct pdcp_cnt_bitmap bitmap, uint32_t count)
{
	return rte_bitmap_get(bitmap.bmp, count % bitmap.size);
}
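
/*
 * Indexing note: COUNT values are folded into the window by the modulo, so a
 * slot is reused once COUNT wraps past the window. E.g. with a 2048-bit
 * window, COUNT 10 and COUNT 2058 map to the same bit; correctness relies on
 * the PDCP window guaranteeing the two are never in flight at the same time.
 */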

void
pdcp_cnt_bitmap_range_clear(struct pdcp_cnt_bitmap bitmap, uint32_t start, uint32_t stop)
{
	uint32_t i;

	for (i = start; i < stop; i++)
		rte_bitmap_clear(bitmap.bmp, i % bitmap.size);
}
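
/*
 * Usage sketch (hypothetical values): start/stop are absolute COUNT values
 * with stop exclusive, so when RX_DELIV advances after in-order delivery the
 * recycled slots can be cleared with:
 *
 *   pdcp_cnt_bitmap_range_clear(dl->bitmap, old_rx_deliv, new_rx_deliv);
 */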

uint16_t
pdcp_cnt_get_bitmap_size(uint32_t pending_bytes)
{
	/*
	 * Round the size up to a multiple of the slab size, so that the
	 * report fill logic can operate on whole slabs instead of individual
	 * bytes.
	 */
	return RTE_ALIGN_MUL_CEIL(pending_bytes, SLAB_BYTE_SIZE);
}
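
/*
 * Worked example: RTE_ALIGN_MUL_CEIL(10, 8) == 16, so a report needing 10
 * bytes of bitmap is padded out to two full 8-byte slabs.
 */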

static __rte_always_inline uint64_t
leftover_get(uint64_t slab, uint32_t shift, uint64_t mask)
{
	return (slab & mask) << shift;
}
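
/*
 * Worked example (assuming shift == 4): report bit 0 corresponds to
 * start_count, which sits at bit 4 of its slab. Bits 4..63 of that slab are
 * taken as (slab >> 4), and leftover_get(next_slab, 60, ~0ULL) lifts bits
 * 0..3 of the following slab into output bits 60..63, completing one aligned
 * 64-bit output slab.
 */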

void
pdcp_cnt_report_fill(struct pdcp_cnt_bitmap bitmap, struct entity_state state,
		     uint8_t *data, uint16_t data_len)
{
	uint64_t slab = 0, next_slab = 0, leftover;
	uint32_t zeros, report_len, diff;
	uint32_t slab_id, next_slab_id;
	uint32_t pos = 0, next_pos = 0;

	const uint32_t start_count = state.rx_deliv + 1;
	const uint32_t nb_slabs = bitmap.size / RTE_BITMAP_SLAB_BIT_SIZE;
	const uint32_t nb_data_slabs = data_len / SLAB_BYTE_SIZE;
	const uint32_t start_slab_id = start_count / RTE_BITMAP_SLAB_BIT_SIZE;
	const uint32_t stop_slab_id = (start_slab_id + nb_data_slabs) % nb_slabs;
	const uint32_t shift = start_count % RTE_BITMAP_SLAB_BIT_SIZE;
	const uint32_t leftover_shift = shift ? RTE_BITMAP_SLAB_BIT_SIZE - shift : 0;
	const uint8_t *data_end = RTE_PTR_ADD(data, data_len + SLAB_BYTE_SIZE);

	/*
	 * NOTE: when shift == 0 the slabs are already aligned and there is no
	 * leftover to carry over, so the mask zeroes it out entirely
	 * (leftover_shift alone cannot express this, since shifting by the
	 * full RTE_BITMAP_SLAB_BIT_SIZE would be undefined).
	 */
	const uint64_t leftover_mask = shift ? ~0 : 0;

	/*
	 * NOTE: rte_bitmap offers no scan-init at a custom position, so scan
	 * from the beginning and skip slabs that precede the reporting window.
	 */
	__rte_bitmap_scan_init(bitmap.bmp);
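	/*
	 * Find the first set slab at or after start_slab_id; earlier set
	 * slabs belong to counts below the reporting window and are skipped.
	 */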
	while (true) {
		int found;

		found = rte_bitmap_scan(bitmap.bmp, &pos, &slab);
		/*
		 * Keep the scan outside assert() so that it is not compiled
		 * out in NDEBUG builds. The bitmap has at least one bit set
		 * whenever a report is generated, so the scan must succeed.
		 */
		assert(found == 1);
		RTE_SET_USED(found);

		slab_id = pos / RTE_BITMAP_SLAB_BIT_SIZE;
		if (slab_id >= start_slab_id)
			break;
	}

	report_len = nb_data_slabs;

	if (slab_id > start_slab_id) {
		/* Zero-fill the slabs before the first set slab */
		zeros = (slab_id - start_slab_id - 1) * SLAB_BYTE_SIZE;
		memset(data, 0, zeros);
		data = RTE_PTR_ADD(data, zeros);
		leftover = leftover_get(slab, leftover_shift, leftover_mask);
		memcpy(data, &leftover, SLAB_BYTE_SIZE);
		data = RTE_PTR_ADD(data, SLAB_BYTE_SIZE);
		report_len -= (slab_id - start_slab_id);
	}

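	/*
	 * Main loop: each iteration jumps from the current set slab to the
	 * next one, zero-filling the gap in between and emitting slabs
	 * re-aligned to start_count (a slab is split across two output slabs
	 * whenever shift is non-zero).
	 */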
	while (report_len) {
		rte_bitmap_scan(bitmap.bmp, &next_pos, &next_slab);
		next_slab_id = next_pos / RTE_BITMAP_SLAB_BIT_SIZE;
		diff = (next_slab_id + nb_slabs - slab_id) % nb_slabs;

		/* If next_slab_id == slab_id, the scan wrapped a full window */
		diff += !(next_slab_id ^ slab_id) * nb_slabs;

		/* Size check: the next set slab lies outside the report range */
		if (diff > report_len) {
			next_slab = 0;
			next_slab_id = stop_slab_id;
			diff = report_len;
		}

		report_len -= diff;

		/* Calculate the gap between slabs, taking wrap-around into account */
		zeros = (next_slab_id + nb_slabs - slab_id - 1) % nb_slabs;
		if (zeros) {
			/* Non-contiguous slabs, align them individually */
			slab >>= shift;
			memcpy(data, &slab, SLAB_BYTE_SIZE);
			data = RTE_PTR_ADD(data, SLAB_BYTE_SIZE);

			/* Fill zeros between slabs */
			zeros = (zeros - 1) * SLAB_BYTE_SIZE;
			memset(data, 0, zeros);
			data = RTE_PTR_ADD(data, zeros);

			/* Align the beginning of the next slab */
			leftover = leftover_get(next_slab, leftover_shift, leftover_mask);
			memcpy(data, &leftover, SLAB_BYTE_SIZE);
			data = RTE_PTR_ADD(data, SLAB_BYTE_SIZE);
		} else {
			/* Contiguous slabs, combine them */
			uint64_t new_slab = (slab >> shift) |
					leftover_get(next_slab, leftover_shift, leftover_mask);
			memcpy(data, &new_slab, SLAB_BYTE_SIZE);
			data = RTE_PTR_ADD(data, SLAB_BYTE_SIZE);
		}

		slab = next_slab;
		pos = next_pos;
		slab_id = next_slab_id;
	}

	assert(data < data_end);
}
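
/*
 * Consumption sketch (hedged; see pdcp_ctrl_pdu.c for the actual caller): the
 * buffer filled above is the bitmap of a PDCP status report, where FMC equals
 * RX_DELIV and the bitmap covers the COUNTs from RX_DELIV + 1 onward, per
 * TS 38.323. Applications request it through the control-PDU API, e.g.
 * rte_pdcp_control_pdu_create(entity, RTE_PDCP_CTRL_PDU_TYPE_STATUS_REPORT).
 */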