/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Chelsio Communications.
 * All rights reserved.
 */

#include "base/common.h"
#include "smt.h"

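/**
 * cxgbe_do_smt_write_rpl - Process a CPL_SMT_WRITE_RPL message
 * @adap: the adapter
 * @rpl: the CPL_SMT_WRITE_RPL message received from hardware
 *
 * Handles the reply to an earlier SMT write request. On failure,
 * the affected SMT entry is marked SMT_STATE_ERROR under its lock.
 */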
void cxgbe_do_smt_write_rpl(struct adapter *adap,
			    const struct cpl_smt_write_rpl *rpl)
{
	unsigned int smtidx = G_TID_TID(GET_TID(rpl));
	struct smt_data *s = adap->smt;

	if (unlikely(rpl->status != CPL_ERR_NONE)) {
		struct smt_entry *e = &s->smtab[smtidx];

		dev_err(adap,
			"Unexpected SMT_WRITE_RPL status %u for entry %u\n",
			rpl->status, smtidx);
		t4_os_lock(&e->lock);
		e->state = SMT_STATE_ERROR;
		t4_os_unlock(&e->lock);
	}
}

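/**
 * write_smt_entry - Send an SMT write request to hardware
 * @dev: rte_eth_dev pointer
 * @e: the SMT entry to write
 *
 * Builds a CPL_SMT_WRITE_REQ (CPL_T6_SMT_WRITE_REQ on T6 and later
 * chips) in a control mbuf and queues it on the port's control txq.
 * Completion is reported asynchronously via CPL_SMT_WRITE_RPL and
 * handled by cxgbe_do_smt_write_rpl().
 */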
static int write_smt_entry(struct rte_eth_dev *dev, struct smt_entry *e)
{
	unsigned int port_id = ethdev2pinfo(dev)->port_id;
	struct adapter *adap = ethdev2adap(dev);
	struct cpl_t6_smt_write_req *t6req;
	struct smt_data *s = adap->smt;
	struct cpl_smt_write_req *req;
	struct sge_ctrl_txq *ctrlq;
	struct rte_mbuf *mbuf;
	u8 row;

	ctrlq = &adap->sge.ctrlq[port_id];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	if (!mbuf)
		return -ENOMEM;

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
		mbuf->data_len = sizeof(*req);
		mbuf->pkt_len = mbuf->data_len;

		/* Source MAC Table (SMT) contains 256 SMAC entries
		 * organized in 128 rows of 2 entries each.
		 */
		req = rte_pktmbuf_mtod(mbuf, struct cpl_smt_write_req *);
		INIT_TP_WR(req, 0);

		/* Each row contains an SMAC pair.
		 * LSB selects the SMAC entry within a row
		 */
		if (e->idx & 1) {
			req->pfvf1 = 0x0;
			rte_memcpy(req->src_mac1, e->src_mac,
				   RTE_ETHER_ADDR_LEN);

			/* fill pfvf0/src_mac0 with entry
			 * at prev index from smt-tab.
			 */
			req->pfvf0 = 0x0;
			rte_memcpy(req->src_mac0, s->smtab[e->idx - 1].src_mac,
				   RTE_ETHER_ADDR_LEN);
		} else {
			req->pfvf0 = 0x0;
			rte_memcpy(req->src_mac0, e->src_mac,
				   RTE_ETHER_ADDR_LEN);

			/* fill pfvf1/src_mac1 with entry
			 * at next index from smt-tab
			 */
			req->pfvf1 = 0x0;
			rte_memcpy(req->src_mac1, s->smtab[e->idx + 1].src_mac,
				   RTE_ETHER_ADDR_LEN);
		}
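		/* Two SMAC entries per row: drop the LSB of the
		 * hardware index to get the row number.
		 */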
		row = (e->hw_idx >> 1);
	} else {
		mbuf->data_len = sizeof(*t6req);
		mbuf->pkt_len = mbuf->data_len;

		/* Source MAC Table (SMT) contains 256 SMAC entries */
		t6req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_smt_write_req *);
		INIT_TP_WR(t6req, 0);

		/* fill pfvf0/src_mac0 from smt-tab */
		t6req->pfvf0 = 0x0;
		rte_memcpy(t6req->src_mac0, s->smtab[e->idx].src_mac,
			   RTE_ETHER_ADDR_LEN);
		row = e->hw_idx;
		req = (struct cpl_smt_write_req *)t6req;
	}

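	/* Common header: opcode, the entry's hardware TID, and the
	 * firmware event queue that will receive the write reply.
	 */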
	OPCODE_TID(req) =
		cpu_to_be32(MK_OPCODE_TID(CPL_SMT_WRITE_REQ,
					  e->hw_idx |
					  V_TID_QID(adap->sge.fw_evtq.abs_id)));

	req->params = cpu_to_be32(V_SMTW_NORPL(0) |
				  V_SMTW_IDX(row) |
				  V_SMTW_OVLAN_IDX(0));
	t4_mgmt_tx(ctrlq, mbuf);

	return 0;
}

/**
 * find_or_alloc_smte - Find/Allocate a free SMT entry
 * @s: SMT table
 * @smac: Source MAC address to compare/add
 * Returns pointer to the SMT entry found/created
 *
 * Finds/Allocates an SMT entry to be used by the switching rule of a filter.
 */
static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
{
	struct smt_entry *e, *end, *first_free = NULL;

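	/* Single pass over the table: remember the first free entry,
	 * but prefer an in-use SWITCHING entry that already holds the
	 * requested smac.
	 */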
	for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
		if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
			if (!first_free)
				first_free = e;
		} else {
			if (e->state == SMT_STATE_SWITCHING) {
				/* This entry is actually in use; reuse it
				 * if the MAC address matches.
				 */
				if (!memcmp(e->src_mac, smac,
					    RTE_ETHER_ADDR_LEN))
					goto found;
			}
		}
	}

	if (!first_free)
		return NULL;

	e = first_free;
	e->state = SMT_STATE_UNUSED;

found:
	return e;
}

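/**
 * t4_smt_alloc_switching - Allocate an SMT entry for a switching rule
 * @dev: rte_eth_dev pointer
 * @pfvf: the PF/VF value to program into the entry
 * @smac: the source MAC address to program into the entry
 * Returns pointer to the SMT entry, or NULL on failure
 *
 * A matching in-use entry only gets its reference count bumped; a
 * free entry is written to hardware before it is handed out.
 */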
static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
						u16 pfvf, u8 *smac)
{
	struct adapter *adap = ethdev2adap(dev);
	struct smt_data *s = adap->smt;
	struct smt_entry *e;
	int ret;

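	/* The table-wide write lock serializes allocation; each entry's
	 * own lock then protects its state and reference count.
	 */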
	t4_os_write_lock(&s->lock);
	e = find_or_alloc_smte(s, smac);
	if (e) {
		t4_os_lock(&e->lock);
		if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
			e->pfvf = pfvf;
			rte_memcpy(e->src_mac, smac, RTE_ETHER_ADDR_LEN);
			ret = write_smt_entry(dev, e);
			if (ret) {
				e->pfvf = 0;
				memset(e->src_mac, 0, RTE_ETHER_ADDR_LEN);
				t4_os_unlock(&e->lock);
				e = NULL;
				goto out_write_unlock;
			}
			e->state = SMT_STATE_SWITCHING;
			rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
		} else {
			rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
		}
		t4_os_unlock(&e->lock);
	}

out_write_unlock:
	t4_os_write_unlock(&s->lock);
	return e;
}

/**
 * cxgbe_smt_alloc_switching - Allocate an SMT entry for a switching rule
 * @dev: rte_eth_dev pointer
 * @smac: MAC address to add to SMT
 * Returns pointer to the SMT entry created
 *
 * Allocates an SMT entry to be used by the switching rule of a filter.
 */
struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac)
{
	return t4_smt_alloc_switching(dev, 0x0, smac);
}

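/**
 * cxgbe_smt_release - Release a reference to an SMT entry
 * @e: the SMT entry
 *
 * Drops one reference; once the count reaches zero, the entry can
 * be reallocated by find_or_alloc_smte().
 */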
void cxgbe_smt_release(struct smt_entry *e)
{
	if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
		rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
}

/**
 * t4_init_smt - Initialize the Source MAC Table
 * @smt_start_idx: first hardware SMT index managed by this table
 * @smt_size: number of SMT entries
 * Returns pointer to the SMT table, or NULL on allocation failure
 */
struct smt_data *t4_init_smt(u32 smt_start_idx, u32 smt_size)
{
	struct smt_data *s;
	u32 i;

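	/* One allocation holds the table header plus all smt_size entries */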
	s = t4_alloc_mem(sizeof(*s) + smt_size * sizeof(struct smt_entry));
	if (!s)
		return NULL;

	s->smt_start = smt_start_idx;
	s->smt_size = smt_size;
	t4_os_rwlock_init(&s->lock);

	for (i = 0; i < s->smt_size; ++i) {
		s->smtab[i].idx = i;
		s->smtab[i].hw_idx = smt_start_idx + i;
		s->smtab[i].state = SMT_STATE_UNUSED;
		memset(&s->smtab[i].src_mac, 0, RTE_ETHER_ADDR_LEN);
		t4_os_lock_init(&s->smtab[i].lock);
		s->smtab[i].refcnt = 0;
	}
	return s;
}

/**
 * t4_cleanup_smt - Free the Source MAC Table
 * @adap: the adapter
 */
void t4_cleanup_smt(struct adapter *adap)
{
	if (adap->smt)
		t4_os_free(adap->smt);
}