/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include <rte_net.h>

#include "base/common.h"
#include "base/t4_tcb.h"
#include "base/t4_regs.h"
#include "cxgbe_filter.h"
#include "mps_tcam.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "cxgbe_pfvf.h"

/**
 * Initialize Hash Filters
 */
int cxgbe_init_hash_filter(struct adapter *adap)
{
	unsigned int user_filter_perc, n_user_filters;
	u32 param, val;
	int ret;

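	/* Hash filters are only usable if the Lookup Engine returns the
	 * expected active-region-hit response code (4) for both TCAM and
	 * hash lookups; otherwise fall back to LE-TCAM filters only.
	 */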
	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		val = t4_read_reg(adap, A_LE_DB_RSP_CODE_0);
		if (G_TCAM_ACTV_HIT(val) != 4) {
			adap->params.hash_filter = 0;
			return 0;
		}

		val = t4_read_reg(adap, A_LE_DB_RSP_CODE_1);
		if (G_HASH_ACTV_HIT(val) != 4) {
			adap->params.hash_filter = 0;
			return 0;
		}
	}

	param = CXGBE_FW_PARAM_DEV(NTID);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      &param, &val);
	if (ret < 0)
		return ret;
	adap->tids.ntids = val;
	adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

	user_filter_perc = 100;
	n_user_filters = mult_frac(adap->tids.nftids,
				   user_filter_perc,
				   100);

	adap->tids.nftids = n_user_filters;
	adap->params.hash_filter = 1;
	return 0;
}

/**
 * Validate if the requested filter specification can be set by checking
 * if the requested features have been enabled
 */
int cxgbe_validate_filter(struct adapter *adapter,
			  struct ch_filter_specification *fs)
{
	u32 fconf, iconf;

	/*
	 * Check for unconfigured fields being used.
	 */
	fconf = fs->cap ? adapter->params.tp.filter_mask :
			  adapter->params.tp.vlan_pri_map;

	iconf = adapter->params.tp.ingress_config;

#define S(_field) \
	(fs->val._field || fs->mask._field)
#define U(_mask, _field) \
	(!(fconf & (_mask)) && S(_field))

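	/* S(field): the field is being matched on (value or mask set).
	 * U(mask, field): the field is matched on but not enabled in the
	 * active filter configuration.
	 */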
	if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
	    U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx) ||
	    U(F_VLAN, ivlan_vld) || U(F_VNIC_ID, ovlan_vld) ||
	    U(F_TOS, tos) || U(F_VNIC_ID, pfvf_vld))
		return -EOPNOTSUPP;

	/* Either OVLAN or PFVF match is enabled in hardware, but not both */
	if ((S(pfvf_vld) && !(iconf & F_VNIC)) ||
	    (S(ovlan_vld) && (iconf & F_VNIC)))
		return -EOPNOTSUPP;

	/* To use OVLAN or PFVF, L4 encapsulation match must not be enabled */
	if ((S(ovlan_vld) && (iconf & F_USE_ENC_IDX)) ||
	    (S(pfvf_vld) && (iconf & F_USE_ENC_IDX)))
		return -EOPNOTSUPP;

#undef S
#undef U

	/*
	 * If the user is requesting that the filter action loop
	 * matching packets back out of one of our ports, make sure
	 * that the egress port is in range.
	 */
	if (fs->action == FILTER_SWITCH &&
	    fs->eport >= adapter->params.nports)
		return -ERANGE;

	/*
	 * Don't allow various trivially obvious bogus out-of-range
	 * values ...
	 */
	if (fs->val.iport >= adapter->params.nports)
		return -ERANGE;

	if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
		return -EOPNOTSUPP;

	if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
		return -EOPNOTSUPP;

	return 0;
}

/**
 * Get the queue to which the traffic must be steered.
 */
static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
				      struct ch_filter_specification *fs)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	unsigned int iq;

	/*
	 * If the user has requested steering matching Ingress Packets
	 * to a specific Queue Set, we need to make sure it's in range
	 * for the port and map that into the Absolute Queue ID of the
	 * Queue Set's Response Queue.
	 */
	if (!fs->dirsteer) {
		iq = 0;
	} else {
		/*
		 * If the iq id is greater than or equal to the number of
		 * qsets, then assume it is an absolute qid.
		 */
		if (fs->iq < pi->n_rx_qsets)
			iq = adapter->sge.ethrxq[pi->first_rxqset +
						 fs->iq].rspq.abs_id;
		else
			iq = fs->iq;
	}

	return iq;
}

/* Return an error number if the indicated filter isn't writable ... */
static int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}
/**
 * Send CPL_SET_TCB_FIELD message
 */
static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
			  u16 word, u64 mask, u64 val, int no_reply)
{
	struct rte_mbuf *mbuf;
	struct cpl_set_tcb_field *req;
	struct sge_ctrl_txq *ctrlq;

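	/* TCB updates go out on control queue 0; unless no_reply is set,
	 * the reply is steered to the firmware event queue.
	 */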
	ctrlq = &adapter->sge.ctrlq[0];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	WARN_ON(!mbuf);
	if (!mbuf)
		return;

	mbuf->data_len = sizeof(*req);
	mbuf->pkt_len = mbuf->data_len;

	req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
	memset(req, 0, sizeof(*req));
	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
	req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
				      V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
				      V_NO_REPLY(no_reply));
	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);

	t4_mgmt_tx(ctrlq, mbuf);
}

/**
 * Set one of the t_flags bits in the TCB.
 */
static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
			  unsigned int bit_pos, unsigned int val, int no_reply)
{
	set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos,
		      (unsigned long long)val << bit_pos, no_reply);
}

/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
					struct cpl_set_tcb_field *req,
					unsigned int word,
					u64 mask, u64 val, u8 cookie,
					int no_reply)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

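	/* Frame the CPL as an immediate-data ULP_TX packet and pad the
	 * request to a 16-byte boundary with a trailing NOOP sub-command.
	 */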
	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
	req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
				      V_QUEUENO(0));
	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}

/**
 * IPv6 requires 2 slots on T6 and 4 slots for cards below T6.
 * IPv4 requires only 1 slot on all cards.
 */
u8 cxgbe_filter_slots(struct adapter *adap, u8 family)
{
	if (family == FILTER_TYPE_IPV6) {
		if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
			return 4;

		return 2;
	}

	return 1;
}

/**
 * Check if entries are already filled.
 */
bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries)
{
	bool result = FALSE;
	u32 i;

	/* Ensure there are enough slots available. */
	t4_os_lock(&t->ftid_lock);
	for (i = fidx; i < fidx + nentries; i++) {
		if (rte_bitmap_get(t->ftid_bmap, i)) {
			result = TRUE;
			break;
		}
	}
	t4_os_unlock(&t->ftid_lock);
	return result;
}

/**
 * Allocate available free entries.
 */
int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries)
{
	struct tid_info *t = &adap->tids;
	int pos;
	int size = t->nftids;

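	/* Multi-slot (IPv6) requests need a contiguous run of free entries;
	 * single-slot requests simply take the first free bit.
	 */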
	t4_os_lock(&t->ftid_lock);
	if (nentries > 1)
		pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size,
						    nentries);
	else
		pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
	t4_os_unlock(&t->ftid_lock);

	return pos < size ? pos : -1;
}

/**
 * Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct filter_entry *f)
{
	struct port_info *pi = ethdev2pinfo(f->dev);

	if (f->clipt)
		cxgbe_clip_release(f->dev, f->clipt);

	if (f->l2t)
		cxgbe_l2t_release(f->l2t);

	if (f->fs.mask.macidx)
		cxgbe_mpstcam_remove(pi, f->fs.val.macidx);

	if (f->smt)
		cxgbe_smt_release(f->smt);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

/**
 * Construct hash filter ntuple.
 */
static u64 hash_filter_ntuple(const struct filter_entry *f)
{
	struct adapter *adap = ethdev2adap(f->dev);
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple = 0;
	u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */

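	/* Pack each enabled match field into its hardware-configured bit
	 * position within the compressed filter tuple.
	 */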
	if (tp->port_shift >= 0 && f->fs.mask.iport)
		ntuple |= (u64)f->fs.val.iport << tp->port_shift;

	if (tp->protocol_shift >= 0) {
		if (!f->fs.val.proto)
			ntuple |= (u64)tcp_proto << tp->protocol_shift;
		else
			ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
	}

	if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
		ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
	if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
		ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
	if (tp->vlan_shift >= 0 && f->fs.mask.ivlan)
		ntuple |= (u64)(F_FT_VLAN_VLD | f->fs.val.ivlan) <<
			  tp->vlan_shift;
	if (tp->vnic_shift >= 0) {
		if ((adap->params.tp.ingress_config & F_VNIC) &&
		    f->fs.mask.pfvf_vld)
			ntuple |= (u64)(f->fs.val.pfvf_vld << 16 |
					f->fs.val.pf << 13 | f->fs.val.vf) <<
					tp->vnic_shift;
		else if (!(adap->params.tp.ingress_config & F_VNIC) &&
			 f->fs.mask.ovlan_vld)
			ntuple |= (u64)(f->fs.val.ovlan_vld << 16 |
					f->fs.val.ovlan) << tp->vnic_shift;
	}
	if (tp->tos_shift >= 0 && f->fs.mask.tos)
		ntuple |= (u64)f->fs.val.tos << tp->tos_shift;

	return ntuple;
}

/**
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_req) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	abort_req->rsvd0 = cpu_to_be32(0);
	abort_req->rsvd1 = 0;
	abort_req->cmd = CPL_ABORT_NO_RST;
	sc = (struct ulptx_idata *)(abort_req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}

/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_rpl) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	abort_rpl->rsvd0 = cpu_to_be32(0);
	abort_rpl->rsvd1 = 0;
	abort_rpl->cmd = CPL_ABORT_NO_RST;
	sc = (struct ulptx_idata *)(abort_rpl + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}

/**
 * Delete the specified hash filter.
 */
static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
				 unsigned int filter_id,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;
	int ret;

	if (filter_id >= adapter->tids.ntids)
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
			__func__, filter_id);
		return -EINVAL;
	}

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		unsigned int wrlen;
		struct rte_mbuf *mbuf;
		struct work_request_hdr *wr;
		struct ulptx_idata *aligner;
		struct cpl_set_tcb_field *req;
		struct cpl_abort_req *abort_req;
		struct cpl_abort_rpl *abort_rpl;

		f->ctx = ctx;
		f->pending = 1;

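		/* A hash filter is torn down with one chained ULP_TX work
		 * request carrying three CPLs: a SET_TCB_FIELD that repoints
		 * the TCB's RSS queue at the firmware event queue, followed
		 * by an ABORT_REQ/ABORT_RPL pair to destroy the TCB itself.
		 */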
		wrlen = cxgbe_roundup(sizeof(*wr) +
				      (sizeof(*req) + sizeof(*aligner)) +
				      sizeof(*abort_req) + sizeof(*abort_rpl),
				      16);

		ctrlq = &adapter->sge.ctrlq[port_id];
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			dev_err(adapter, "%s: could not allocate mbuf ..\n",
				__func__);
			goto out_err;
		}

		mbuf->data_len = wrlen;
		mbuf->pkt_len = mbuf->data_len;

		req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
		INIT_ULPTX_WR(req, wrlen, 0, 0);
		wr = (struct work_request_hdr *)req;
		wr++;
		req = (struct cpl_set_tcb_field *)wr;
		mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
				V_TCB_RSS_INFO(M_TCB_RSS_INFO),
				V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
				0, 1);
		aligner = (struct ulptx_idata *)(req + 1);
		abort_req = (struct cpl_abort_req *)(aligner + 1);
		mk_abort_req_ulp(abort_req, f->tid);
		abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
		mk_abort_rpl_ulp(abort_rpl, f->tid);
		t4_mgmt_tx(ctrlq, mbuf);
	}
	return 0;

out_err:
	return -ENOMEM;
}

/**
 * Build an ACT_OPEN_REQ6 message for setting IPv6 hash filter.
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
			     unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req6 *req = NULL;
	u64 local_lo, local_hi, peer_lo, peer_hi;
	u32 *lip = (u32 *)f->fs.val.lip;
	u32 *fip = (u32 *)f->fs.val.fip;

	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T6:
		req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);

		INIT_TP_WR(req, 0);
		break;
	default:
		dev_err(adap, "%s: unsupported chip type!\n", __func__);
		return;
	}

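	/* Split the 128-bit local and peer IPv6 addresses into the 64-bit
	 * halves expected by the request.
	 */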
	local_hi = ((u64)lip[1]) << 32 | lip[0];
	local_lo = ((u64)lip[3]) << 32 | lip[2];
	peer_hi = ((u64)fip[1]) << 32 | fip[0];
	peer_lo = ((u64)fip[3]) << 32 | fip[2];

	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
						    qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip_hi = local_hi;
	req->local_ip_lo = local_lo;
	req->peer_ip_hi = peer_hi;
	req->peer_ip_lo = peer_lo;
	req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				V_DELACK(f->fs.hitcnts) |
				V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
				V_TX_CHAN(f->fs.eport) |
				V_ULP_MODE(ULP_MODE_NONE) |
				F_TCAM_BYPASS | F_NON_OFFLOAD);
	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
	req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
			    V_RSS_QUEUE(f->fs.iq) |
			    F_T5_OPT_2_VALID |
			    F_RX_CHANNEL |
			    V_SACK_EN(f->fs.swapmac) |
			    V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
					 (f->fs.dirsteer << 1)) |
			    V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}

/**
 * Build an ACT_OPEN_REQ message for setting IPv4 hash filter.
 */
static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
			    unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req *req = NULL;

	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T6:
		req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);

		INIT_TP_WR(req, 0);
		break;
	default:
		dev_err(adap, "%s: unsupported chip type!\n", __func__);
		return;
	}

	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
						    qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
			f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
	req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
			f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
	req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				V_DELACK(f->fs.hitcnts) |
				V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
				V_TX_CHAN(f->fs.eport) |
				V_ULP_MODE(ULP_MODE_NONE) |
				F_TCAM_BYPASS | F_NON_OFFLOAD);
	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
	req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
			    V_RSS_QUEUE(f->fs.iq) |
			    F_T5_OPT_2_VALID |
			    F_RX_CHANNEL |
			    V_SACK_EN(f->fs.swapmac) |
			    V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
					 (f->fs.dirsteer << 1)) |
			    V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}

/**
 * Set the specified hash filter.
 */
static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
				 struct ch_filter_specification *fs,
				 struct filter_ctx *ctx)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct rte_mbuf *mbuf;
	struct sge_ctrl_txq *ctrlq;
	unsigned int iq;
	int atid, size;
	int ret = 0;

	ret = cxgbe_validate_filter(adapter, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);

	ctrlq = &adapter->sge.ctrlq[pi->port_id];

	f = t4_os_alloc(sizeof(*f));
	if (!f)
		return -ENOMEM;

	f->fs = *fs;
	f->ctx = ctx;
	f->dev = dev;
	f->fs.iq = iq;

	/* Allocate MPS TCAM entry to match Destination MAC. */
	if (f->fs.mask.macidx) {
		int idx;

		idx = cxgbe_mpstcam_alloc(pi, f->fs.val.dmac, f->fs.mask.dmac);
		if (idx <= 0) {
			ret = -ENOMEM;
			goto out_err;
		}

		f->fs.val.macidx = idx;
	}

	/*
	 * If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan == VLAN_INSERT ||
	    f->fs.newvlan == VLAN_REWRITE) {
		/* allocate L2T entry for new filter */
		f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
						   f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			ret = -ENOMEM;
			goto out_err;
		}
	}

	/* If the new filter requires Source MAC rewriting then we need to
	 * allocate an SMT entry for the filter.
	 */
	if (f->fs.newsmac) {
		f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
		if (!f->smt) {
			ret = -EAGAIN;
			goto out_err;
		}
	}

	atid = cxgbe_alloc_atid(t, f);
	if (atid < 0) {
		ret = -ENOMEM;
		goto out_err;
	}

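	/* The ACT_OPEN reply is steered back to the firmware event queue:
	 * its absolute queue id rides in the upper bits of the request's
	 * TID field, above the 14-bit atid.
	 */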
	if (f->fs.type == FILTER_TYPE_IPV6) {
		/* IPv6 hash filter */
		f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
		if (!f->clipt) {
			ret = -ENOMEM;
			goto free_atid;
		}

		size = sizeof(struct cpl_t6_act_open_req6);
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			ret = -ENOMEM;
			goto free_atid;
		}

		mbuf->data_len = size;
		mbuf->pkt_len = mbuf->data_len;

		mk_act_open_req6(f, mbuf,
				 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
				 adapter);
	} else {
		/* IPv4 hash filter */
		size = sizeof(struct cpl_t6_act_open_req);
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			ret = -ENOMEM;
			goto free_atid;
		}

		mbuf->data_len = size;
		mbuf->pkt_len = mbuf->data_len;

		mk_act_open_req(f, mbuf,
				((adapter->sge.fw_evtq.abs_id << 14) | atid),
				adapter);
	}

	f->pending = 1;
	t4_mgmt_tx(ctrlq, mbuf);
	return 0;

free_atid:
	cxgbe_free_atid(t, atid);

out_err:
	clear_filter(f);
	t4_os_free(f);
	return ret;
}

/**
 * t4_mk_filtdelwr - create a delete filter WR
 * @adap: adapter context
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
			    struct fw_filter2_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	if (adap->params.filter2_wr_support)
		wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
	else
		wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
				cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/**
 * Create FW work request to delete the filter at a specified index
 */
static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct rte_mbuf *mbuf;
	struct fw_filter2_wr *fwr;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;

	ctrlq = &adapter->sge.ctrlq[port_id];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	if (!mbuf)
		return -ENOMEM;

	mbuf->data_len = sizeof(*fwr);
	mbuf->pkt_len = mbuf->data_len;

	fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
	t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);

	/*
	 * Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(ctrlq, mbuf);
	return 0;
}

static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct rte_mbuf *mbuf;
	struct fw_filter2_wr *fwr;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;
	int ret;

	ctrlq = &adapter->sge.ctrlq[port_id];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	if (!mbuf) {
		ret = -ENOMEM;
		goto out;
	}

	mbuf->data_len = sizeof(*fwr);
	mbuf->pkt_len = mbuf->data_len;

	fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
	memset(fwr, 0, sizeof(*fwr));

	/*
	 * Construct the work request to set the filter.
	 */
	if (adapter->params.filter2_wr_support)
		fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
	else
		fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
	fwr->tid_to_iq =
		cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
			    V_FW_FILTER_WR_RQTYPE(f->fs.type) |
			    V_FW_FILTER_WR_NOREPLY(0) |
			    V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
			    V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
			    V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
			    V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
			    V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
			    V_FW_FILTER_WR_INSVLAN
				(f->fs.newvlan == VLAN_INSERT ||
				 f->fs.newvlan == VLAN_REWRITE) |
			    V_FW_FILTER_WR_RMVLAN
				(f->fs.newvlan == VLAN_REMOVE ||
				 f->fs.newvlan == VLAN_REWRITE) |
			    V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
			    V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
			    V_FW_FILTER_WR_PRIO(f->fs.prio) |
			    V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
	fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
	fwr->smac_sel = f->smt ? f->smt->hw_idx : 0;
	fwr->rx_chan_rx_rpl_iq =
		cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
			    V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
						     ));
	fwr->maci_to_matchtypem =
		cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
			    V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
			    V_FW_FILTER_WR_PORT(f->fs.val.iport) |
			    V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = cpu_to_be16(f->fs.val.ivlan);
	fwr->ivlanm = cpu_to_be16(f->fs.mask.ivlan);
	fwr->ovlan = cpu_to_be16(f->fs.val.ovlan);
	fwr->ovlanm = cpu_to_be16(f->fs.mask.ovlan);
	rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = cpu_to_be16(f->fs.val.lport);
	fwr->lpm = cpu_to_be16(f->fs.mask.lport);
	fwr->fp = cpu_to_be16(f->fs.val.fport);
	fwr->fpm = cpu_to_be16(f->fs.mask.fport);

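	/* FILTER2 work requests additionally carry the NAT rewrite state:
	 * swapped MACs, the NAT mode, and the rewritten 4-tuple.
	 */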
	if (adapter->params.filter2_wr_support) {
		fwr->filter_type_swapmac =
			 V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
		fwr->natmode_to_ulp_type =
			V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
						 ULP_MODE_TCPDDP :
						 ULP_MODE_NONE) |
			V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
		fwr->newlport = cpu_to_be16(f->fs.nat_lport);
		fwr->newfport = cpu_to_be16(f->fs.nat_fport);
	}

	/*
	 * Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(ctrlq, mbuf);
	return 0;

out:
	return ret;
}

/**
 * Set the corresponding entries in the bitmap.
 */
static int cxgbe_set_ftid(struct tid_info *t, u32 fidx, u8 nentries)
{
	u32 i;

	t4_os_lock(&t->ftid_lock);
	if (rte_bitmap_get(t->ftid_bmap, fidx)) {
		t4_os_unlock(&t->ftid_lock);
		return -EBUSY;
	}

	for (i = fidx; i < fidx + nentries; i++)
		rte_bitmap_set(t->ftid_bmap, i);
	t4_os_unlock(&t->ftid_lock);
	return 0;
}

/**
 * Clear the corresponding entries in the bitmap.
 */
static void cxgbe_clear_ftid(struct tid_info *t, u32 fidx, u8 nentries)
{
	u32 i;

	t4_os_lock(&t->ftid_lock);
	for (i = fidx; i < fidx + nentries; i++)
		rte_bitmap_clear(t->ftid_bmap, i);
	t4_os_unlock(&t->ftid_lock);
}

/**
 * Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise.  We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
		     struct ch_filter_specification *fs,
		     struct filter_ctx *ctx)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct filter_entry *f;
	unsigned int chip_ver;
	u8 nentries;
	int ret;

	if (is_hashfilter(adapter) && fs->cap)
		return cxgbe_del_hash_filter(dev, filter_id, ctx);

	if (filter_id >= adapter->tids.nftids)
		return -ERANGE;

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

	/*
	 * Ensure the IPv6 filter id is aligned on a 2-slot boundary for T6,
	 * and a 4-slot boundary for cards below T6.
	 */
	if (fs->type == FILTER_TYPE_IPV6) {
		if (chip_ver < CHELSIO_T6)
			filter_id &= ~(0x3);
		else
			filter_id &= ~(0x1);
	}

	nentries = cxgbe_filter_slots(adapter, fs->type);
	ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
	if (!ret) {
		dev_warn(adapter, "%s: could not find filter entry: %u\n",
			 __func__, filter_id);
		return -EINVAL;
	}

	f = &adapter->tids.ftid_tab[filter_id];
	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		f->ctx = ctx;
		cxgbe_clear_ftid(&adapter->tids,
				 f->tid - adapter->tids.ftid_base,
				 nentries);
		return del_filter_wr(dev, filter_id);
	}

	/*
	 * If the caller has passed in a Completion Context then we need to
	 * mark it as a successful completion so they don't stall waiting
	 * for it.
	 */
	if (ctx) {
		ctx->result = 0;
		t4_complete(&ctx->completion);
	}

	return 0;
}

/**
 * Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
		     struct ch_filter_specification *fs,
		     struct filter_ctx *ctx)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	u8 nentries, bitoff[16] = {0};
	struct filter_entry *f;
	unsigned int chip_ver;
	unsigned int fidx, iq;
	u32 iconf;
	int ret;

	if (is_hashfilter(adapter) && fs->cap)
		return cxgbe_set_hash_filter(dev, fs, ctx);

	if (filter_id >= adapter->tids.nftids)
		return -ERANGE;

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

	ret = cxgbe_validate_filter(adapter, fs);
	if (ret)
		return ret;

	/*
	 * IPv6 filters occupy four slots and must be aligned on four-slot
	 * boundaries for T5. On T6, IPv6 filters occupy two slots and
	 * must be aligned on two-slot boundaries.
	 *
	 * IPv4 filters only occupy a single slot and have no alignment
	 * requirements.
	 */
	fidx = filter_id;
	if (fs->type == FILTER_TYPE_IPV6) {
		if (chip_ver < CHELSIO_T6)
			fidx &= ~(0x3);
		else
			fidx &= ~(0x1);
	}

	if (fidx != filter_id)
		return -EINVAL;

	nentries = cxgbe_filter_slots(adapter, fs->type);
	ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
	if (ret)
		return -EBUSY;

	iq = get_filter_steerq(dev, fs);

	/*
	 * Check to make sure that the provided filter index is not
	 * already in use by someone else.
	 */
	f = &adapter->tids.ftid_tab[filter_id];
	if (f->valid)
		return -EBUSY;

	fidx = adapter->tids.ftid_base + filter_id;
	ret = cxgbe_set_ftid(&adapter->tids, filter_id, nentries);
	if (ret)
		return ret;

	/*
	 * Check to make sure the filter requested is writable ...
	 */
	ret = writable_filter(f);
	if (ret) {
		/* Clear the bits we have set above */
		cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
		return ret;
	}

	/*
	 * Convert the filter specification into our internal format.
	 * We copy the PF/VF specification into the Outer VLAN field
	 * here so the rest of the code -- including the interface to
	 * the firmware -- doesn't have to constantly do these checks.
	 */
	f->fs = *fs;
	f->fs.iq = iq;
	f->dev = dev;

	/* Allocate MPS TCAM entry to match Destination MAC. */
	if (f->fs.mask.macidx) {
		int idx;

		idx = cxgbe_mpstcam_alloc(pi, f->fs.val.dmac, f->fs.mask.dmac);
		if (idx <= 0) {
			ret = -ENOMEM;
			goto free_tid;
		}

		f->fs.val.macidx = idx;
	}

	/* Allocate a clip table entry only if we have a non-zero IPv6
	 * address.
	 */
	if (chip_ver > CHELSIO_T5 && f->fs.type &&
	    memcmp(f->fs.val.lip, bitoff, sizeof(bitoff))) {
		f->clipt = cxgbe_clip_alloc(dev, (u32 *)&f->fs.val.lip);
		if (!f->clipt) {
			ret = -ENOMEM;
			goto free_tid;
		}
	}

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newvlan || f->fs.newdmac) {
		f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
						   f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			ret = -ENOMEM;
			goto free_tid;
		}
	}

	/* If the new filter requires Source MAC rewriting then we need to
	 * allocate an SMT entry for the filter.
	 */
	if (f->fs.newsmac) {
		f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
		if (!f->smt) {
			ret = -ENOMEM;
			goto free_tid;
		}
	}

	iconf = adapter->params.tp.ingress_config;

	/* Either PFVF or OVLAN can be active, but not both.
	 * So, if PFVF is enabled, then overwrite the OVLAN
	 * fields with PFVF fields before writing the spec
	 * to hardware.
	 */
	if (iconf & F_VNIC) {
		f->fs.val.ovlan = fs->val.pf << 13 | fs->val.vf;
		f->fs.mask.ovlan = fs->mask.pf << 13 | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	}

	/*
	 * Attempt to set the filter.  If we don't succeed, we clear
	 * it and return the failure.
	 */
	f->ctx = ctx;
	f->tid = fidx; /* Save the actual tid */
	ret = set_filter_wr(dev, filter_id);
	if (ret)
		goto free_tid;

	return ret;

free_tid:
	cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
	clear_filter(f);
	return ret;
}

/**
 * Handle a Hash filter write reply.
 */
void cxgbe_hash_filter_rpl(struct adapter *adap,
			   const struct cpl_act_open_rpl *rpl)
{
	struct tid_info *t = &adap->tids;
	struct filter_entry *f;
	struct filter_ctx *ctx = NULL;
	unsigned int tid = GET_TID(rpl);
	unsigned int ftid = G_TID_TID(G_AOPEN_ATID
				      (be32_to_cpu(rpl->atid_status)));
	unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));

	f = lookup_atid(t, ftid);
	if (!f) {
		dev_warn(adap, "%s: could not find filter entry: %d\n",
			 __func__, ftid);
		return;
	}

	ctx = f->ctx;
	f->ctx = NULL;

	switch (status) {
	case CPL_ERR_NONE: {
		f->tid = tid;
		f->pending = 0;  /* asynchronous setup completed */
		f->valid = 1;

		cxgbe_insert_tid(t, f, f->tid, 0);
		cxgbe_free_atid(t, ftid);
		if (ctx) {
			ctx->tid = f->tid;
			ctx->result = 0;
		}
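		/* The filter is live; now patch its TCB for the optional
		 * features: zero the hit counters and flag DMAC/VLAN/SMAC
		 * rewrites via their respective t_flags bits.
		 */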
		if (f->fs.hitcnts)
			set_tcb_field(adap, tid,
				      W_TCB_TIMESTAMP,
				      V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
				      V_TCB_T_RTT_TS_RECENT_AGE
					      (M_TCB_T_RTT_TS_RECENT_AGE),
				      V_TCB_TIMESTAMP(0ULL) |
				      V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
				      1);
		if (f->fs.newdmac)
			set_tcb_tflag(adap, tid, S_TF_CCTRL_ECE, 1, 1);
		if (f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE)
			set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
		if (f->fs.newsmac) {
			set_tcb_tflag(adap, tid, S_TF_CCTRL_CWR, 1, 1);
			set_tcb_field(adap, tid, W_TCB_SMAC_SEL,
				      V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
				      V_TCB_SMAC_SEL(f->smt->hw_idx), 1);
		}
		break;
	}
	default:
		dev_warn(adap, "%s: filter creation failed with status = %u\n",
			 __func__, status);

		if (ctx) {
			if (status == CPL_ERR_TCAM_FULL)
				ctx->result = -EAGAIN;
			else
				ctx->result = -EINVAL;
		}

		cxgbe_free_atid(t, ftid);
		clear_filter(f);
		t4_os_free(f);
	}

	if (ctx)
		t4_complete(&ctx->completion);
}

/**
 * Handle a LE-TCAM filter write/deletion reply.
 */
void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	struct filter_entry *f = NULL;
	unsigned int tid = GET_TID(rpl);
	int idx, max_fidx = adap->tids.nftids;

	/* Get the corresponding filter entry for this tid */
	if (adap->tids.ftid_tab) {
		/* Check this in normal filter region */
		idx = tid - adap->tids.ftid_base;
		if (idx < 0 || idx >= max_fidx)
			return;

		f = &adap->tids.ftid_tab[idx];
		if (f->tid != tid)
			return;
	}

	/* We found the filter entry for this tid */
	if (f) {
		unsigned int ret = G_COOKIE(rpl->cookie);
		struct filter_ctx *ctx;

		/*
		 * Pull off any filter operation context attached to the
		 * filter.
		 */
		ctx = f->ctx;
		f->ctx = NULL;

		if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
			if (ctx) {
				ctx->tid = f->tid;
				ctx->result = 0;
			}
		} else if (ret == FW_FILTER_WR_FLT_DELETED) {
			/*
			 * Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(f);
			if (ctx)
				ctx->result = 0;
		} else {
			/*
			 * Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_warn(adap, "filter %u setup failed with error %u\n",
				 idx, ret);
			clear_filter(f);
			if (ctx)
				ctx->result = -EINVAL;
		}

		if (ctx)
			t4_complete(&ctx->completion);
	}
}

/*
 * Retrieve the packet or byte count for the specified filter.
 */
int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
			   u64 *c, int hash, bool get_byte)
{
	struct filter_entry *f;
	unsigned int tcb_base, tcbaddr;
	int ret;

	tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
	if (is_hashfilter(adapter) && hash) {
		if (fidx < adapter->tids.ntids) {
			f = adapter->tids.tid_tab[fidx];
			if (!f)
				return -EINVAL;

			if (is_t5(adapter->params.chip)) {
				*c = 0;
				return 0;
			}
			tcbaddr = tcb_base + (fidx * TCB_SIZE);
			goto get_count;
		} else {
			return -ERANGE;
		}
	} else {
		if (fidx >= adapter->tids.nftids)
			return -ERANGE;

		f = &adapter->tids.ftid_tab[fidx];
		if (!f->valid)
			return -EINVAL;

		tcbaddr = tcb_base + f->tid * TCB_SIZE;
	}

get_count:
	if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
		/*
		 * For T5 and T6, the Filter Packet Hit Count is maintained
		 * as a 32-bit Big Endian value in the TCB field {timestamp}.
		 * Instead of the filter hit count showing up at offset 20
		 * ((W_TCB_TIMESTAMP == 5) * sizeof(u32)), it actually shows
		 * up at offset 24.  Whacky.
		 */
		if (get_byte) {
			unsigned int word_offset = 4;
			__be64 be64_byte_count;

			t4_os_lock(&adapter->win0_lock);
			ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
					   tcbaddr +
					   (word_offset * sizeof(__be32)),
					   sizeof(be64_byte_count),
					   &be64_byte_count,
					   T4_MEMORY_READ);
			t4_os_unlock(&adapter->win0_lock);
			if (ret < 0)
				return ret;
			*c = be64_to_cpu(be64_byte_count);
		} else {
			unsigned int word_offset = 6;
			__be32 be32_count;

			t4_os_lock(&adapter->win0_lock);
			ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
					   tcbaddr +
					   (word_offset * sizeof(__be32)),
					   sizeof(be32_count), &be32_count,
					   T4_MEMORY_READ);
			t4_os_unlock(&adapter->win0_lock);
			if (ret < 0)
				return ret;
			*c = (u64)be32_to_cpu(be32_count);
		}
	}
	return 0;
}

/*
 * Clear the packet count (and, optionally, the byte count) for the
 * specified filter.
 */
int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
			     int hash, bool clear_byte)
{
	u64 tcb_mask = 0, tcb_val = 0;
	struct filter_entry *f = NULL;
	u16 tcb_word = 0;

	if (is_hashfilter(adapter) && hash) {
		if (fidx >= adapter->tids.ntids)
			return -ERANGE;

		/* No hitcounts supported for T5 hashfilters */
		if (is_t5(adapter->params.chip))
			return 0;

		f = adapter->tids.tid_tab[fidx];
	} else {
		if (fidx >= adapter->tids.nftids)
			return -ERANGE;

		f = &adapter->tids.ftid_tab[fidx];
	}

	if (!f || !f->valid)
		return -EINVAL;

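	/* The hit counts live in the TCB: the packet count in {timestamp}
	 * and the byte count in {t_rtt_ts_recent_age, t_rtseq_recent}.
	 * Clearing is done by writing zeros through SET_TCB_FIELD.
	 */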
	tcb_word = W_TCB_TIMESTAMP;
	tcb_mask = V_TCB_TIMESTAMP(M_TCB_TIMESTAMP);
	tcb_val = V_TCB_TIMESTAMP(0ULL);

	set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);

	if (clear_byte) {
		tcb_word = W_TCB_T_RTT_TS_RECENT_AGE;
		tcb_mask =
			V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE) |
			V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT);
		tcb_val = V_TCB_T_RTT_TS_RECENT_AGE(0ULL) |
			  V_TCB_T_RTSEQ_RECENT(0ULL);

		set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
	}

	return 0;
}

/**
 * Handle a Hash filter delete reply.
 */
void cxgbe_hash_del_filter_rpl(struct adapter *adap,
			       const struct cpl_abort_rpl_rss *rpl)
{
	struct tid_info *t = &adap->tids;
	struct filter_entry *f;
	struct filter_ctx *ctx = NULL;
	unsigned int tid = GET_TID(rpl);

	f = lookup_tid(t, tid);
	if (!f) {
		dev_warn(adap, "%s: could not find filter entry: %u\n",
			 __func__, tid);
		return;
	}

	ctx = f->ctx;

	clear_filter(f);
	cxgbe_remove_tid(t, 0, tid, 0);
	t4_os_free(f);

	if (ctx) {
		ctx->result = 0;
		t4_complete(&ctx->completion);
	}
}