xref: /dpdk/drivers/net/cxgbe/l2t.c (revision e12a0166c80f65e35408f4715b2f3a60763c3741)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Chelsio Communications.
3  * All rights reserved.
4  */
5 
6 #include "base/common.h"
7 #include "l2t.h"
8 
9 /**
10  * cxgbe_l2t_release - Release associated L2T entry
11  * @e: L2T entry to release
12  *
13  * Releases ref count and frees up an L2T entry from L2T table
14  */
cxgbe_l2t_release(struct l2t_entry * e)15 void cxgbe_l2t_release(struct l2t_entry *e)
16 {
17 	if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) != 0)
18 		rte_atomic_fetch_sub_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
19 }
20 
21 /**
22  * Process a CPL_L2T_WRITE_RPL. Note that the TID in the reply is really
23  * the L2T index it refers to.
24  */
cxgbe_do_l2t_write_rpl(struct adapter * adap,const struct cpl_l2t_write_rpl * rpl)25 void cxgbe_do_l2t_write_rpl(struct adapter *adap,
26 			    const struct cpl_l2t_write_rpl *rpl)
27 {
28 	struct l2t_data *d = adap->l2t;
29 	unsigned int tid = GET_TID(rpl);
30 	unsigned int l2t_idx = tid % L2T_SIZE;
31 
32 	if (unlikely(rpl->status != CPL_ERR_NONE)) {
33 		dev_err(adap,
34 			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
35 			rpl->status, l2t_idx);
36 		return;
37 	}
38 
39 	if (tid & F_SYNC_WR) {
40 		struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
41 
42 		t4_os_lock(&e->lock);
43 		if (e->state != L2T_STATE_SWITCHING)
44 			e->state = L2T_STATE_VALID;
45 		t4_os_unlock(&e->lock);
46 	}
47 }
48 
49 /**
50  * Write an L2T entry.  Must be called with the entry locked.
51  * The write may be synchronous or asynchronous.
52  */
write_l2e(struct rte_eth_dev * dev,struct l2t_entry * e,int sync,bool loopback,bool arpmiss)53 static int write_l2e(struct rte_eth_dev *dev, struct l2t_entry *e, int sync,
54 		     bool loopback, bool arpmiss)
55 {
56 	struct adapter *adap = ethdev2adap(dev);
57 	struct l2t_data *d = adap->l2t;
58 	struct rte_mbuf *mbuf;
59 	struct cpl_l2t_write_req *req;
60 	struct sge_ctrl_txq *ctrlq;
61 	unsigned int l2t_idx = e->idx + d->l2t_start;
62 	unsigned int port_id = ethdev2pinfo(dev)->port_id;
63 
64 	ctrlq = &adap->sge.ctrlq[port_id];
65 	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
66 	if (!mbuf)
67 		return -ENOMEM;
68 
69 	mbuf->data_len = sizeof(*req);
70 	mbuf->pkt_len = mbuf->data_len;
71 
72 	req = rte_pktmbuf_mtod(mbuf, struct cpl_l2t_write_req *);
73 	INIT_TP_WR(req, 0);
74 
75 	OPCODE_TID(req) =
76 		cpu_to_be32(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
77 					  l2t_idx | V_SYNC_WR(sync) |
78 					  V_TID_QID(adap->sge.fw_evtq.abs_id)));
79 	req->params = cpu_to_be16(V_L2T_W_PORT(e->lport) |
80 				  V_L2T_W_LPBK(loopback) |
81 				  V_L2T_W_ARPMISS(arpmiss) |
82 				  V_L2T_W_NOREPLY(!sync));
83 	req->l2t_idx = cpu_to_be16(l2t_idx);
84 	req->vlan = cpu_to_be16(e->vlan);
85 	rte_memcpy(req->dst_mac, e->dmac, RTE_ETHER_ADDR_LEN);
86 
87 	if (loopback)
88 		memset(req->dst_mac, 0, RTE_ETHER_ADDR_LEN);
89 
90 	t4_mgmt_tx(ctrlq, mbuf);
91 
92 	if (sync && e->state != L2T_STATE_SWITCHING)
93 		e->state = L2T_STATE_SYNC_WRITE;
94 
95 	return 0;
96 }
97 
98 /**
99  * find_or_alloc_l2e - Find/Allocate a free L2T entry
100  * @d: L2T table
101  * @vlan: VLAN id to compare/add
102  * @port: port id to compare/add
103  * @dmac: Destination MAC address to compare/add
104  * Returns pointer to the L2T entry found/created
105  *
106  * Finds/Allocates an L2T entry to be used by switching rule of a filter.
107  */
find_or_alloc_l2e(struct l2t_data * d,u16 vlan,u8 port,u8 * dmac)108 static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
109 					   u8 port, u8 *dmac)
110 {
111 	struct l2t_entry *end, *e;
112 	struct l2t_entry *first_free = NULL;
113 
114 	for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
115 		if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
116 			if (!first_free)
117 				first_free = e;
118 		} else {
119 			if (e->state == L2T_STATE_SWITCHING) {
120 				if ((!memcmp(e->dmac, dmac, RTE_ETHER_ADDR_LEN)) &&
121 				    e->vlan == vlan && e->lport == port)
122 					goto exists;
123 			}
124 		}
125 	}
126 
127 	if (first_free) {
128 		e = first_free;
129 		goto found;
130 	}
131 
132 	return NULL;
133 
134 found:
135 	e->state = L2T_STATE_UNUSED;
136 
137 exists:
138 	return e;
139 }
140 
/**
 * t4_l2t_alloc_switching - find or allocate an L2T switching entry
 * @dev: rte_eth_dev pointer
 * @vlan: VLAN id for the entry
 * @port: egress port for the entry
 * @eth_addr: destination MAC address for the entry
 *
 * Looks up an existing switching entry for (vlan, port, eth_addr) and
 * bumps its reference count, or claims a free entry, programs it into
 * hardware via an asynchronous L2T write, and returns it with refcnt 1.
 * Returns NULL when the table is full or the hardware write could not
 * be issued.
 */
static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,
						u16 vlan, u8 port,
						u8 *eth_addr)
{
	struct adapter *adap = ethdev2adap(dev);
	struct l2t_data *d = adap->l2t;
	struct l2t_entry *e;
	int ret = 0;

	t4_os_write_lock(&d->lock);
	e = find_or_alloc_l2e(d, vlan, port, eth_addr);
	if (e) {
		t4_os_lock(&e->lock);
		if (rte_atomic_load_explicit(&e->refcnt, rte_memory_order_relaxed) == 0) {
			/* Fresh entry: take ownership and program hardware. */
			e->state = L2T_STATE_SWITCHING;
			e->vlan = vlan;
			e->lport = port;
			rte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN);
			rte_atomic_store_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
			ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
			if (ret < 0) {
				dev_debug(adap, "Failed to write L2T entry: %d",
					  ret);
				/* Drop the reference taken above; otherwise
				 * the entry stays at refcnt 1 with no owner
				 * and leaks for the lifetime of the table.
				 */
				rte_atomic_store_explicit(&e->refcnt, 0,
							  rte_memory_order_relaxed);
			}
		} else {
			/* Existing match found: just add a reference. */
			rte_atomic_fetch_add_explicit(&e->refcnt, 1, rte_memory_order_relaxed);
		}
		t4_os_unlock(&e->lock);
	}
	t4_os_write_unlock(&d->lock);

	return ret ? NULL : e;
}
173 
174 /**
175  * cxgbe_l2t_alloc_switching - Allocate a L2T entry for switching rule
176  * @dev: rte_eth_dev pointer
177  * @vlan: VLAN Id
178  * @port: Associated port
179  * @dmac: Destination MAC address to add to L2T
180  * Returns pointer to the allocated l2t entry
181  *
182  * Allocates a L2T entry for use by switching rule of a filter
183  */
cxgbe_l2t_alloc_switching(struct rte_eth_dev * dev,u16 vlan,u8 port,u8 * dmac)184 struct l2t_entry *cxgbe_l2t_alloc_switching(struct rte_eth_dev *dev, u16 vlan,
185 					    u8 port, u8 *dmac)
186 {
187 	return t4_l2t_alloc_switching(dev, vlan, port, dmac);
188 }
189 
190 /**
191  * Initialize L2 Table
192  */
t4_init_l2t(unsigned int l2t_start,unsigned int l2t_end)193 struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
194 {
195 	unsigned int l2t_size;
196 	unsigned int i;
197 	struct l2t_data *d;
198 
199 	if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
200 		return NULL;
201 	l2t_size = l2t_end - l2t_start + 1;
202 
203 	d = t4_os_alloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
204 	if (!d)
205 		return NULL;
206 
207 	d->l2t_start = l2t_start;
208 	d->l2t_size = l2t_size;
209 
210 	t4_os_rwlock_init(&d->lock);
211 
212 	for (i = 0; i < d->l2t_size; ++i) {
213 		d->l2tab[i].idx = i;
214 		d->l2tab[i].state = L2T_STATE_UNUSED;
215 		t4_os_lock_init(&d->l2tab[i].lock);
216 		d->l2tab[i].refcnt = 0;
217 	}
218 
219 	return d;
220 }
221 
222 /**
223  * Cleanup L2 Table
224  */
t4_cleanup_l2t(struct adapter * adap)225 void t4_cleanup_l2t(struct adapter *adap)
226 {
227 	if (adap->l2t)
228 		t4_os_free(adap->l2t);
229 }
230