/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */

#ifndef _CXGBE_FILTER_H_
#define _CXGBE_FILTER_H_

#include "base/t4_msg.h"
/*
 * Bit widths of the user-definable filter tuple fields.
 */
#define ETHTYPE_BITWIDTH 16
#define FRAG_BITWIDTH 1
#define MACIDX_BITWIDTH 9
#define FCOE_BITWIDTH 1
#define IPORT_BITWIDTH 3
#define MATCHTYPE_BITWIDTH 3
#define PROTO_BITWIDTH 8
#define TOS_BITWIDTH 8
#define PF_BITWIDTH 3
#define VF_BITWIDTH 13
#define IVLAN_BITWIDTH 16
#define OVLAN_BITWIDTH 16

/*
 * Filter matching rules.  These consist of a set of ingress packet field
 * (value, mask) tuples.  The associated ingress packet field matches the
 * tuple when ((field & mask) == value).  (Thus a wildcard "don't care" field
 * rule can be constructed by specifying a tuple of (0, 0).)  A filter rule
 * matches an ingress packet when all of the individual field matching rules
 * are true.
 *
 * Partial field masks are always valid; however, while their meaning is easy
 * to understand for some fields (e.g. an IP address to match a subnet),
 * constructing a sensible partial mask is less intuitive for others (e.g. the
 * MPS match type).  See the illustrative sketch following struct
 * ch_filter_tuple below.
 */
struct ch_filter_tuple {
	/*
	 * Compressed header matching field rules.  The TP_VLAN_PRI_MAP
	 * register selects which of these fields will participate in the
	 * filter match rules -- up to a maximum of 36 bits.  Because
	 * TP_VLAN_PRI_MAP is a global register, all filters must use the same
	 * set of fields.
	 */
	uint32_t ethtype:ETHTYPE_BITWIDTH;	/* Ethernet type */
	uint32_t frag:FRAG_BITWIDTH;		/* IP fragmentation header */
	uint32_t ivlan_vld:1;			/* inner VLAN valid */
	uint32_t ovlan_vld:1;			/* outer VLAN valid */
	uint32_t pfvf_vld:1;			/* PF/VF valid */
	uint32_t macidx:MACIDX_BITWIDTH;	/* exact match MAC index */
	uint32_t fcoe:FCOE_BITWIDTH;		/* FCoE packet */
	uint32_t iport:IPORT_BITWIDTH;		/* ingress port */
	uint32_t matchtype:MATCHTYPE_BITWIDTH;	/* MPS match type */
	uint32_t proto:PROTO_BITWIDTH;		/* protocol type */
	uint32_t tos:TOS_BITWIDTH;		/* TOS/Traffic Type */
	uint32_t pf:PF_BITWIDTH;		/* PCI-E PF ID */
	uint32_t vf:VF_BITWIDTH;		/* PCI-E VF ID */
	uint32_t ivlan:IVLAN_BITWIDTH;		/* inner VLAN */
	uint32_t ovlan:OVLAN_BITWIDTH;		/* outer VLAN */

	/*
	 * Uncompressed header matching field rules.  These are always
	 * available for field rules.
	 */
	uint8_t lip[16];	/* local IP address (IPv4 in [3:0]) */
	uint8_t fip[16];	/* foreign IP address (IPv4 in [3:0]) */
	uint16_t lport;		/* local port */
	uint16_t fport;		/* foreign port */

	uint8_t dmac[6];        /* Destination MAC to match */

	/* reservations for future additions */
	uint8_t rsvd[6];
};
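
/*
 * Illustrative sketch, not part of the driver API: populate a
 * (value, mask) pair so that only the TCP protocol field and the DSCP
 * bits of the TOS field participate in the match; any field left at
 * (0, 0) remains a wildcard.  The helper name is hypothetical and the
 * caller is assumed to have zeroed both tuples beforehand.
 */
static inline void
cxgbe_example_fill_tcp_dscp_tuple(struct ch_filter_tuple *val,
				  struct ch_filter_tuple *mask,
				  uint8_t dscp)
{
	val->proto = 6;			/* IPPROTO_TCP */
	mask->proto = 0xff;		/* match the full 8-bit protocol */

	val->tos = (uint32_t)dscp << 2;	/* DSCP occupies TOS bits [7:2] */
	mask->tos = 0xfc;		/* partial mask: ignore ECN bits */
}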

/*
 * Filter specification
 */
struct ch_filter_specification {
	void *private;
	/* Administrative fields for filter. */
	uint32_t hitcnts:1;	/* count filter hits in TCB */
	uint32_t prio:1;	/* filter has priority over active/server */

	/*
	 * Fundamental filter typing.  This is the one element of filter
	 * matching that doesn't exist as a (value, mask) tuple.
	 */
	uint32_t type:1;	/* 0 => IPv4, 1 => IPv6 */
	uint32_t cap:1;		/* 0 => LE-TCAM, 1 => Hash */

	/*
	 * Packet dispatch information.  Ingress packets which match the
	 * filter rules will be dropped, passed to the host or switched back
	 * out as egress packets.
	 */
	uint32_t action:2;	/* drop, pass, switch */

	uint32_t dirsteer:1;	/* 0 => RSS, 1 => steer to iq */
	uint32_t iq:10;		/* ingress queue */

	uint32_t eport:2;	/* egress port to switch packet out */
	uint32_t newsmac:1;     /* rewrite source MAC address */
	uint32_t newdmac:1;     /* rewrite destination MAC address */
	uint32_t swapmac:1;     /* swap SMAC/DMAC for loopback packet */
	uint32_t newvlan:2;     /* rewrite VLAN Tag */
	uint8_t smac[RTE_ETHER_ADDR_LEN];   /* new source MAC address */
	uint8_t dmac[RTE_ETHER_ADDR_LEN];   /* new destination MAC address */
	uint16_t vlan;          /* VLAN Tag to insert */

	/*
	 * Switch proxy/rewrite fields.  An ingress packet which matches a
	 * filter with "switch" set will be looped back out as an egress
	 * packet -- potentially with some header rewriting.
	 */
	uint32_t nat_mode:3;	/* specify NAT operation mode */

	uint8_t nat_lip[16];	/* local IP to use after NAT'ing */
	uint8_t nat_fip[16];	/* foreign IP to use after NAT'ing */
	uint16_t nat_lport;	/* local port number to use after NAT'ing */
	uint16_t nat_fport;	/* foreign port number to use after NAT'ing */

	/* Filter rule value/mask pairs. */
	struct ch_filter_tuple val;
	struct ch_filter_tuple mask;
};

enum {
	FILTER_PASS = 0,	/* default */
	FILTER_DROP,
	FILTER_SWITCH
};

enum {
	VLAN_REMOVE = 1,
	VLAN_INSERT,
	VLAN_REWRITE
};

enum {
	NAT_MODE_NONE = 0,	/* No NAT performed */
	NAT_MODE_DIP,		/* NAT on Dst IP */
	NAT_MODE_DIP_DP,	/* NAT on Dst IP, Dst Port */
	NAT_MODE_DIP_DP_SIP,	/* NAT on Dst IP, Dst Port and Src IP */
	NAT_MODE_DIP_DP_SP,	/* NAT on Dst IP, Dst Port and Src Port */
	NAT_MODE_SIP_SP,	/* NAT on Src IP and Src Port */
	NAT_MODE_DIP_SIP_SP,	/* NAT on Dst IP, Src IP and Src Port */
	NAT_MODE_ALL		/* NAT on entire 4-tuple */
};

enum filter_type {
	FILTER_TYPE_IPV4 = 0,
	FILTER_TYPE_IPV6,
};
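
/*
 * Illustrative sketch, not part of the driver API: fill a filter
 * specification that drops all IPv4 TCP packets seen on a given ingress
 * port, using an LE-TCAM (non-hash) filter slot.  The helper name is
 * hypothetical and the caller is assumed to have zeroed *fs beforehand.
 */
static inline void
cxgbe_example_fill_drop_spec(struct ch_filter_specification *fs,
			     uint8_t ingress_port)
{
	fs->type = FILTER_TYPE_IPV4;	/* 0 => IPv4 */
	fs->cap = 0;			/* 0 => LE-TCAM filter region */
	fs->action = FILTER_DROP;	/* drop on match */
	fs->hitcnts = 1;		/* keep hit counters in the TCB */

	fs->val.proto = 6;		/* IPPROTO_TCP */
	fs->mask.proto = 0xff;
	fs->val.iport = ingress_port;
	fs->mask.iport = (1 << IPORT_BITWIDTH) - 1;
}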

struct t4_completion {
	unsigned int done;       /* completion done (0 - No, 1 - Yes) */
	rte_spinlock_t lock;     /* completion lock */
};

/*
 * Filter operation context to allow callers to wait for
 * an asynchronous completion.
 */
struct filter_ctx {
	struct t4_completion completion; /* completion rendezvous */
	int result;                      /* result of operation */
	u32 tid;                         /* to store tid of hash filter */
};
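
/*
 * Illustrative sketch, not part of the driver API: poll a filter
 * operation context until the firmware reply marks it complete, or a
 * timeout (in milliseconds) expires.  The helper name and the timeout
 * parameter are hypothetical; rte_spinlock_*() and rte_delay_ms() are
 * assumed to be visible via the driver's DPDK includes.
 */
static inline int
cxgbe_example_wait_for_filter_reply(struct filter_ctx *ctx,
				    unsigned int timeout_ms)
{
	unsigned int elapsed;

	for (elapsed = 0; elapsed < timeout_ms; elapsed++) {
		rte_spinlock_lock(&ctx->completion.lock);
		if (ctx->completion.done) {
			rte_spinlock_unlock(&ctx->completion.lock);
			return ctx->result;	/* set by the reply handler */
		}
		rte_spinlock_unlock(&ctx->completion.lock);

		rte_delay_ms(1);
	}

	return -1;	/* timed out waiting for the firmware reply */
}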

/*
 * Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.
 */
struct filter_entry {
	/*
	 * Administrative fields for filter.
	 */
	u32 valid:1;                /* filter allocated and valid */
	u32 locked:1;               /* filter is administratively locked */
	u32 pending:1;              /* filter action is pending FW reply */
	struct filter_ctx *ctx;     /* caller's completion hook */
	struct clip_entry *clipt;   /* CLIP Table entry for IPv6 */
	struct l2t_entry *l2t;      /* Layer Two Table entry for dmac */
	struct smt_entry *smt;      /* Source MAC Table entry for smac */
	struct rte_eth_dev *dev;    /* Port's rte eth device */
	void *private;              /* For use by apps using filter_entry */

	/* This will store the actual tid */
	u32 tid;

	/*
	 * The filter itself.
	 */
	struct ch_filter_specification fs;
};

#define FILTER_ID_MAX   (~0U)

struct tid_info;
struct adapter;

/**
 * Find the first clear bit in the bitmap.  Returns 'size' if every bit
 * in the first 'size' positions is set.
 */
static inline unsigned int cxgbe_find_first_zero_bit(struct rte_bitmap *bmap,
						     unsigned int size)
{
	unsigned int idx;

	for (idx = 0; idx < size; idx++)
		if (!rte_bitmap_get(bmap, idx))
			break;

	return idx;
}

/**
 * Find a free region of 'num' consecutive entries.  The search proceeds
 * in strides of 'num', so only 'num'-aligned regions are considered.
 * Returns the index of the first entry in the region, or 'size' if no
 * such region is found.
 */
static inline unsigned int
cxgbe_bitmap_find_free_region(struct rte_bitmap *bmap, unsigned int size,
			      unsigned int num)
{
	unsigned int idx, j, free = 0;

	if (num > size)
		return size;

	for (idx = 0; idx < size; idx += num) {
		for (j = 0; j < num; j++) {
			if (!rte_bitmap_get(bmap, idx + j)) {
				free++;
			} else {
				free = 0;
				break;
			}
		}

		/* Found the Region */
		if (free == num)
			break;

		/* Reached the end and still no region found */
		if ((idx + num) > size) {
			idx = size;
			break;
		}
	}

	return idx;
}
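
/*
 * Illustrative sketch, not part of the driver API: reserve 'num'
 * consecutive filter slots in a bitmap of 'size' entries using the
 * helpers above.  Both helpers return 'size' when nothing suitable is
 * free, which is the failure case checked here.  The function name is
 * hypothetical and rte_bitmap_set() is assumed visible via
 * <rte_bitmap.h>.
 */
static inline int
cxgbe_example_reserve_filter_slots(struct rte_bitmap *bmap,
				   unsigned int size, unsigned int num)
{
	unsigned int start, i;

	start = (num == 1) ? cxgbe_find_first_zero_bit(bmap, size) :
			     cxgbe_bitmap_find_free_region(bmap, size, num);
	if (start >= size)
		return -1;	/* no free region of the requested length */

	for (i = 0; i < num; i++)
		rte_bitmap_set(bmap, start + i);

	return (int)start;	/* index of the first reserved slot */
}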

u8 cxgbe_filter_slots(struct adapter *adap, u8 family);
bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries);
void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
		     struct ch_filter_specification *fs,
		     struct filter_ctx *ctx);
int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
		     struct ch_filter_specification *fs,
		     struct filter_ctx *ctx);
int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries);
int cxgbe_init_hash_filter(struct adapter *adap);
void cxgbe_hash_filter_rpl(struct adapter *adap,
			   const struct cpl_act_open_rpl *rpl);
void cxgbe_hash_del_filter_rpl(struct adapter *adap,
			       const struct cpl_abort_rpl_rss *rpl);
int cxgbe_validate_filter(struct adapter *adap,
			  struct ch_filter_specification *fs);
int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
			   u64 *c, int hash, bool get_byte);
int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
			     int hash, bool clear_byte);
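
/*
 * Illustrative sketch, not part of the driver API: end-to-end flow for
 * installing a filter and waiting for the asynchronous firmware reply.
 * It reuses the hypothetical polling helper sketched earlier in this
 * file; the exact return-value conventions of cxgbe_set_filter() are
 * simplified here and should be checked against cxgbe_filter.c.
 */
static inline int
cxgbe_example_install_filter(struct rte_eth_dev *dev, unsigned int filter_id,
			     struct ch_filter_specification *fs)
{
	struct filter_ctx ctx;
	int ret;

	ctx.completion.done = 0;
	rte_spinlock_init(&ctx.completion.lock);
	ctx.result = 0;
	ctx.tid = 0;

	ret = cxgbe_set_filter(dev, filter_id, fs, &ctx);
	if (ret)
		return ret;	/* request could not be issued */

	/* Wait up to one second for the firmware to confirm the filter. */
	return cxgbe_example_wait_for_filter_reply(&ctx, 1000);
}
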
#endif /* _CXGBE_FILTER_H_ */