/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#ifndef _IPSEC_SQN_H_
#define _IPSEC_SQN_H_

#define WINDOW_BUCKET_BITS	6 /* bits per window bucket (uint64_t) */
#define WINDOW_BUCKET_SIZE	(1 << WINDOW_BUCKET_BITS)
#define WINDOW_BIT_LOC_MASK	(WINDOW_BUCKET_SIZE - 1)

/* minimum number of buckets, power of 2 */
#define WINDOW_BUCKET_MIN	2
#define WINDOW_BUCKET_MAX	(INT16_MAX + 1)

#define IS_ESN(sa)	((sa)->sqn_mask == UINT64_MAX)

#define SQN_ATOMIC(sa)	((sa)->type & RTE_IPSEC_SATP_SQN_ATOM)

/*
 * gets the SQN.hi32 bits; the SQN is supposed to be in network byte order.
 */
static inline rte_be32_t
sqn_hi32(rte_be64_t sqn)
{
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	return (sqn >> 32);
#else
	return sqn;
#endif
}

/*
 * gets the SQN.low32 bits; the SQN is supposed to be in network byte order.
 */
static inline rte_be32_t
sqn_low32(rte_be64_t sqn)
{
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	return sqn;
#else
	return (sqn >> 32);
#endif
}

/*
 * gets the SQN.low16 bits; the SQN is supposed to be in network byte order.
 */
static inline rte_be16_t
sqn_low16(rte_be64_t sqn)
{
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	return sqn;
#else
	return (sqn >> 48);
#endif
}

/*
 * According to RFC 4303 A2.1, reconstruct the high-order bits of the
 * sequence number from the 32 low-order bits received in the packet.
 * Uses 32-bit arithmetic internally, returns the 64-bit ESN.
 */
static inline uint64_t
reconstruct_esn(uint64_t t, uint32_t sqn, uint32_t w)
{
	uint32_t th, tl, bl;

	tl = t;
	th = t >> 32;
	bl = tl - w + 1;

	/* case A: window is within one sequence number subspace */
	if (tl >= (w - 1))
		th += (sqn < bl);
	/* case B: window spans two sequence number subspaces */
	else if (th != 0)
		th -= (sqn >= bl);

	/* return constructed sequence number with proper high-order bits */
	return (uint64_t)th << 32 | sqn;
}

/**
 * Perform the replay checking.
 *
 * struct rte_ipsec_sa contains the window related parameters,
 * such as the window size and the bucket index mask, while struct replay_sqn
 * holds the window itself and the last seen sequence number.
 *
 * Based on RFC 6479.
 * Window buckets are 64-bit unsigned integers.
 */
static inline int32_t
esn_inb_check_sqn(const struct replay_sqn *rsn, const struct rte_ipsec_sa *sa,
	uint64_t sqn)
{
	uint32_t bit, bucket;

	/* replay not enabled */
	if (sa->replay.win_sz == 0)
		return 0;

	/* seq is larger than lastseq */
	if (sqn > rsn->sqn)
		return 0;

	/* seq is outside window */
	if (sqn == 0 || sqn + sa->replay.win_sz < rsn->sqn)
		return -EINVAL;

	/* seq is inside the window */
	bit = sqn & WINDOW_BIT_LOC_MASK;
	bucket = (sqn >> WINDOW_BUCKET_BITS) & sa->replay.bucket_index_mask;

	/* already seen packet */
	if (rsn->window[bucket] & ((uint64_t)1 << bit))
		return -EINVAL;

	return 0;
}

/**
 * For outbound SA perform the sequence number update.
 * Returns the sequence number value preceding the update;
 * on SQN overflow *num is reduced to the number of packets
 * that still fit into the remaining sequence number space.
 */
static inline uint64_t
esn_outb_update_sqn(struct rte_ipsec_sa *sa, uint32_t *num)
{
	uint64_t n, s, sqn;

	n = *num;
	if (SQN_ATOMIC(sa))
		sqn = rte_atomic_fetch_add_explicit(&sa->sqn.outb, n,
			rte_memory_order_relaxed) + n;
	else {
		sqn = sa->sqn.outb + n;
		sa->sqn.outb = sqn;
	}

	/* overflow */
	if (sqn > sa->sqn_mask) {
		s = sqn - sa->sqn_mask;
		*num = (s < n) ? n - s : 0;
	}

	return sqn - n;
}

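/*
 * Illustrative sketch, not part of the original file: a standalone check of
 * how reconstruct_esn() resolves the two RFC 4303 A2.1 cases. The helper
 * name and the values below are hypothetical, chosen only for illustration.
 */
static inline void
example_reconstruct_esn_cases(void)
{
	/* last seen ESN: Seq.hi = 1, Seq.lo = 5; replay window of 64 */
	const uint64_t t = UINT64_C(0x100000005);
	const uint32_t w = 64;

	/*
	 * low 32 bits near the top of the previous subspace:
	 * case B applies and Seq.hi is decremented.
	 */
	RTE_ASSERT(reconstruct_esn(t, 0xfffffff0, w) ==
		UINT64_C(0xfffffff0));

	/*
	 * low 32 bits just above the last seen value:
	 * the packet belongs to the current subspace, Seq.hi is kept.
	 */
	RTE_ASSERT(reconstruct_esn(t, 0x10, w) == UINT64_C(0x100000010));
}
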
/**
 * For inbound SA perform the sequence number and replay window update.
 */
static inline int32_t
esn_inb_update_sqn(struct replay_sqn *rsn, const struct rte_ipsec_sa *sa,
	uint64_t sqn)
{
	uint32_t bit, bucket, last_bucket, new_bucket, diff, i;

	/* handle ESN */
	if (IS_ESN(sa))
		sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);

	/* seq is outside window */
	if (sqn == 0 || sqn + sa->replay.win_sz < rsn->sqn)
		return -EINVAL;

	/* select the bucket that holds the bit to update */
	bucket = (sqn >> WINDOW_BUCKET_BITS);

	/* check if the seq is within the range */
	if (sqn > rsn->sqn) {
		last_bucket = rsn->sqn >> WINDOW_BUCKET_BITS;
		diff = bucket - last_bucket;
		/* seq is way after the range of WINDOW_SIZE */
		if (diff > sa->replay.nb_bucket)
			diff = sa->replay.nb_bucket;

		/* clear the buckets the window slides over */
		for (i = 0; i != diff; i++) {
			new_bucket = (i + last_bucket + 1) &
				sa->replay.bucket_index_mask;
			rsn->window[new_bucket] = 0;
		}
		rsn->sqn = sqn;
	}

	bucket &= sa->replay.bucket_index_mask;
	bit = sqn & WINDOW_BIT_LOC_MASK;

	/* already seen packet */
	if (rsn->window[bucket] & ((uint64_t)1 << bit))
		return -EINVAL;

	rsn->window[bucket] |= ((uint64_t)1 << bit);
	return 0;
}

/**
 * To allow multiple readers and a single writer for the SA replay window
 * and sequence number (RSN) information, a basic RCU-like scheme is used:
 * the SA keeps two copies of the RSN (one for readers, another for the
 * writer).
 * Each RSN contains a rwlock that has to be grabbed (for read/write)
 * to avoid races between the readers and the writer.
 * The writer makes a copy of the readers' RSN, updates it,
 * and then marks the newly updated RSN as the readers' one.
 * This approach is intended to minimize contention and cache sharing
 * between the writer and the readers.
 */

/**
 * Copy replay window and SQN.
 */
static inline void
rsn_copy(const struct rte_ipsec_sa *sa, uint32_t dst, uint32_t src)
{
	uint32_t i, n;
	struct replay_sqn *d;
	const struct replay_sqn *s;

	d = sa->sqn.inb.rsn[dst];
	s = sa->sqn.inb.rsn[src];

	n = sa->replay.nb_bucket;

	d->sqn = s->sqn;
	for (i = 0; i != n; i++)
		d->window[i] = s->window[i];
}

/**
 * Get RSN for read-only access.
 */
static inline struct replay_sqn *
rsn_acquire(struct rte_ipsec_sa *sa)
{
	uint32_t n;
	struct replay_sqn *rsn;

	n = sa->sqn.inb.rdidx;
	rsn = sa->sqn.inb.rsn[n];

	if (!SQN_ATOMIC(sa))
		return rsn;

	/* check there are no writers; on failure re-read the readers' index,
	 * as the writer might have switched the copies meanwhile */
	while (rte_rwlock_read_trylock(&rsn->rwl) < 0) {
		rte_pause();
		n = sa->sqn.inb.rdidx;
		rsn = sa->sqn.inb.rsn[n];
		rte_compiler_barrier();
	}

	return rsn;
}

/**
 * Release read-only access for RSN.
 */
static inline void
rsn_release(struct rte_ipsec_sa *sa, struct replay_sqn *rsn)
{
	if (SQN_ATOMIC(sa))
		rte_rwlock_read_unlock(&rsn->rwl);
}

/**
 * Start RSN update.
 */
static inline struct replay_sqn *
rsn_update_start(struct rte_ipsec_sa *sa)
{
	uint32_t k, n;
	struct replay_sqn *rsn;

	n = sa->sqn.inb.wridx;

	/* no active writers */
	RTE_ASSERT(n == sa->sqn.inb.rdidx);

	if (!SQN_ATOMIC(sa))
		return sa->sqn.inb.rsn[n];

	k = REPLAY_SQN_NEXT(n);
	sa->sqn.inb.wridx = k;

	rsn = sa->sqn.inb.rsn[k];
	rte_rwlock_write_lock(&rsn->rwl);
	rsn_copy(sa, k, n);

	return rsn;
}

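/*
 * Illustrative sketch, not part of the original file: the reader-side
 * calling sequence for inbound replay checking. The helper name is
 * hypothetical; it only shows how rsn_acquire()/rsn_release() are meant
 * to bracket esn_inb_check_sqn().
 */
static inline int32_t
example_inb_replay_check(struct rte_ipsec_sa *sa, uint64_t sqn)
{
	int32_t rc;
	struct replay_sqn *rsn;

	/* take the readers' copy of the RSN (read lock taken if needed) */
	rsn = rsn_acquire(sa);

	/* check the (already reconstructed) sequence number */
	rc = esn_inb_check_sqn(rsn, sa, sqn);

	/* drop the read lock, if one was taken */
	rsn_release(sa, rsn);

	return rc;
}
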
/**
 * Finish RSN update.
 */
static inline void
rsn_update_finish(struct rte_ipsec_sa *sa, struct replay_sqn *rsn)
{
	uint32_t n;

	if (!SQN_ATOMIC(sa))
		return;

	n = sa->sqn.inb.wridx;
	RTE_ASSERT(n != sa->sqn.inb.rdidx);
	RTE_ASSERT(rsn == sa->sqn.inb.rsn[n]);

	rte_rwlock_write_unlock(&rsn->rwl);
	sa->sqn.inb.rdidx = n;
}

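/*
 * Illustrative sketch, not part of the original file: the writer-side
 * calling sequence for updating the replay window. The helper name is
 * hypothetical; it only shows how rsn_update_start()/rsn_update_finish()
 * are meant to bracket esn_inb_update_sqn().
 */
static inline int32_t
example_inb_replay_update(struct rte_ipsec_sa *sa, uint64_t sqn)
{
	int32_t rc;
	struct replay_sqn *rsn;

	/* switch to (and write-lock) the writer's RSN copy */
	rsn = rsn_update_start(sa);

	/* reconstruct the ESN (if enabled) and mark the SQN as seen */
	rc = esn_inb_update_sqn(rsn, sa, sqn);

	/* publish the updated copy back to the readers */
	rsn_update_finish(sa, rsn);

	return rc;
}

#endif /* _IPSEC_SQN_H_ */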