Lines Matching defs:e

(Search hits for the symbol e in what is evidently FreeBSD's cxgbe(4) L2 table (l2t) code. The number at the start of each hit is its line number in the source file, so the listing is non-contiguous.)

64 l2t_hold(struct l2t_data *d, struct l2t_entry *e)
67 if (atomic_fetchadd_int(&e->refcnt, 1) == 0) /* 0 -> 1 transition */
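A note on the pattern: atomic_fetchadd_int() returns the counter's value *before* the increment, so comparing the return value with 0 detects exactly the 0 -> 1 transition, the one moment the entry stops being free and the table's free-entry bookkeeping must be adjusted exactly once. A minimal user-space sketch of the same idiom with C11 atomics; struct table, struct entry and the nfree field are illustrative stand-ins, not driver symbols:

#include <stdatomic.h>

struct entry { atomic_int refcnt; };
struct table { atomic_int nfree; };	/* count of unreferenced entries */

/* Take a reference; on the 0 -> 1 transition the entry leaves the free pool. */
static void
entry_hold(struct table *t, struct entry *e)
{
	/* atomic_fetch_add returns the value held *before* the increment. */
	if (atomic_fetch_add(&e->refcnt, 1) == 0)
		atomic_fetch_sub(&t->nfree, 1);
}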
103 l2_cmp(const struct sockaddr *sa, struct l2t_entry *e)
113 return (e->addr[0] != sin->sin_addr.s_addr);
117 return (memcmp(&e->addr[0], &sin6->sin6_addr, sizeof(e->addr)));
122 l2_store(const struct sockaddr *sa, struct l2t_entry *e)
132 e->addr[0] = sin->sin_addr.s_addr;
133 e->ipv6 = 0;
137 memcpy(&e->addr[0], &sin6->sin6_addr, sizeof(e->addr));
138 e->ipv6 = 1;
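l2_cmp() and l2_store() reuse one fixed-size e->addr array for both address families: IPv4 occupies addr[0] only, IPv6 fills all sizeof(e->addr) bytes, and e->ipv6 records which layout is in effect. Note the memcmp-style contract: 0 means match, nonzero means mismatch. A self-contained sketch under those assumptions; struct entry here is illustrative, not the driver's:

#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

struct entry {
	uint32_t addr[4];	/* addr[0] alone for IPv4, all 16 bytes for IPv6 */
	int ipv6;
};

/* Returns 0 on match, nonzero otherwise (memcmp-style). */
static int
addr_cmp(const struct sockaddr *sa, const struct entry *e)
{
	if (sa->sa_family == AF_INET) {
		const struct sockaddr_in *sin = (const void *)sa;

		return (e->addr[0] != sin->sin_addr.s_addr);
	} else {
		const struct sockaddr_in6 *sin6 = (const void *)sa;

		return (memcmp(&e->addr[0], &sin6->sin6_addr, sizeof(e->addr)));
	}
}

static void
addr_store(const struct sockaddr *sa, struct entry *e)
{
	if (sa->sa_family == AF_INET) {
		const struct sockaddr_in *sin = (const void *)sa;

		e->addr[0] = sin->sin_addr.s_addr;
		e->ipv6 = 0;
	} else {
		const struct sockaddr_in6 *sin6 = (const void *)sa;

		memcpy(&e->addr[0], &sin6->sin6_addr, sizeof(e->addr));
		e->ipv6 = 1;
	}
}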
147 arpq_enqueue(struct l2t_entry *e, struct wrqe *wr)
149 mtx_assert(&e->lock, MA_OWNED);
151 STAILQ_INSERT_TAIL(&e->wr_list, wr, link);
155 send_pending(struct adapter *sc, struct l2t_entry *e)
159 mtx_assert(&e->lock, MA_OWNED);
161 while ((wr = STAILQ_FIRST(&e->wr_list)) != NULL) {
162 STAILQ_REMOVE_HEAD(&e->wr_list, link);
168 resolution_failed(struct adapter *sc, struct l2t_entry *e)
172 mtx_assert(&e->lock, MA_OWNED);
175 STAILQ_CONCAT(&td->unsent_wr_list, &e->wr_list);
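arpq_enqueue(), send_pending() and resolution_failed() form the work-request parking lot: WRs that arrive while the address is unresolved are appended to e->wr_list under the entry lock, drained to the hardware queue once resolution succeeds, and spliced wholesale onto a reclaim list (td->unsent_wr_list) with STAILQ_CONCAT if it fails. A user-space sketch of the three operations, assuming a BSD-style <sys/queue.h> that provides STAILQ_CONCAT; tx_wr() stands in for the driver's t4_wrq_tx():

#include <stddef.h>
#include <sys/queue.h>

struct wr {
	STAILQ_ENTRY(wr) link;
	/* ... payload ... */
};
STAILQ_HEAD(wr_head, wr);

struct entry {
	struct wr_head wr_list;		/* must be STAILQ_INIT()ed */
};

/* Stand-in for handing a WR to the hardware queue. */
static void tx_wr(struct wr *w) { (void)w; }

/* Park a WR on the entry while its L2 address is still unresolved. */
static void
wrq_park(struct entry *e, struct wr *w)
{
	STAILQ_INSERT_TAIL(&e->wr_list, w, link);
}

/* Resolution succeeded: drain everything that was parked. */
static void
wrq_send_pending(struct entry *e)
{
	struct wr *w;

	while ((w = STAILQ_FIRST(&e->wr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&e->wr_list, link);
		tx_wr(w);
	}
}

/* Resolution failed: hand the whole list to a reclaim list in one splice. */
static void
wrq_fail(struct wr_head *unsent, struct entry *e)
{
	STAILQ_CONCAT(unsent, &e->wr_list);	/* leaves e->wr_list empty */
}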
182 update_entry(struct adapter *sc, struct l2t_entry *e, uint8_t *lladdr,
186 mtx_assert(&e->lock, MA_OWNED);
189 * The entry may be in active use (e->refcnt > 0) or not. We update
195 (e->state == L2T_STATE_RESOLVING || e->state == L2T_STATE_FAILED)) {
201 e->state = L2T_STATE_FAILED;
202 resolution_failed(sc, e);
209 KASSERT(e->state == L2T_STATE_VALID ||
210 e->state == L2T_STATE_STALE,
211 ("%s: lladdr NULL, state %d", __func__, e->state));
213 e->state = L2T_STATE_STALE;
215 } else if (e->state == L2T_STATE_RESOLVING ||
216 e->state == L2T_STATE_FAILED ||
217 memcmp(e->dmac, lladdr, ETHER_ADDR_LEN)) {
221 memcpy(e->dmac, lladdr, ETHER_ADDR_LEN);
222 e->vlan = vtag;
223 if (t4_write_l2e(e, 1) == 0)
224 e->state = L2T_STATE_VALID;
226 e->state = L2T_STATE_VALID;
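update_entry() is the convergence point for resolution results, and the visible branches map to three outcomes: no lladdr while the entry is still RESOLVING (or already FAILED) fails the entry and its parked WRs; no lladdr for a previously good entry demotes it to STALE; a real lladdr that is new or different is copied into e->dmac along with the VLAN tag, written to the hardware L2 table, and the entry becomes VALID. A compact sketch of that state machine; the names are illustrative, and hw_write_l2e() stands in for t4_write_l2e():

#include <string.h>

enum state { RESOLVING, FAILED, VALID, STALE };

struct entry {
	enum state state;
	unsigned char dmac[6];
	unsigned short vlan;
};

/* Stand-ins for the driver's hardware write and failure paths. */
static int  hw_write_l2e(struct entry *e) { (void)e; return (0); }
static void fail_pending(struct entry *e) { (void)e; }

/* Caller holds the entry lock.  lladdr == NULL means "no L2 address". */
static void
entry_update(struct entry *e, const unsigned char *lladdr, unsigned short vtag)
{
	if (lladdr == NULL &&
	    (e->state == RESOLVING || e->state == FAILED)) {
		/* Never resolved: mark failed, dispose of the parked WRs. */
		e->state = FAILED;
		fail_pending(e);
	} else if (lladdr == NULL) {
		/* Was VALID or STALE and the neighbor went away. */
		e->state = STALE;
	} else if (e->state == RESOLVING || e->state == FAILED ||
	    memcmp(e->dmac, lladdr, sizeof(e->dmac)) != 0) {
		/* Newly resolved, or the destination MAC changed. */
		memcpy(e->dmac, lladdr, sizeof(e->dmac));
		e->vlan = vtag;
		if (hw_write_l2e(e) == 0)
			e->state = VALID;
	} else
		e->state = VALID;	/* same MAC as before */
}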
230 resolve_entry(struct adapter *sc, struct l2t_entry *e)
241 if (e->ipv6 == 0) {
244 sin.sin_addr.s_addr = e->addr[0];
249 memcpy(&sin6.sin6_addr, &e->addr[0], sizeof(e->addr));
254 rc = toe_l2_resolve(tod, e->ifp, sa, dmac, &vtag);
258 mtx_lock(&e->lock);
259 update_entry(sc, e, rc == 0 ? dmac : NULL, vtag);
260 mtx_unlock(&e->lock);
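resolve_entry() has to hand the stored address back to the resolver as a sockaddr, so it reverses l2_store(): e->ipv6 selects whether addr[0] becomes a sockaddr_in or all sizeof(e->addr) bytes become a sockaddr_in6. EWOULDBLOCK from toe_l2_resolve() means the answer will arrive asynchronously (via the update callback further down); any other result is applied immediately under e->lock. A portable sketch of just the reconstruction step; the kernel version also sets sin_len/sin6_len, omitted here:

#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

struct entry {
	uint32_t addr[4];
	int ipv6;
};

/*
 * Rebuild a sockaddr from the entry's compact address storage so it can
 * be handed to a resolver; mirrors the IPv4/IPv6 split in resolve_entry().
 */
static struct sockaddr *
entry_sockaddr(const struct entry *e, struct sockaddr_storage *ss)
{
	memset(ss, 0, sizeof(*ss));
	if (e->ipv6 == 0) {
		struct sockaddr_in *sin = (void *)ss;

		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = e->addr[0];
	} else {
		struct sockaddr_in6 *sin6 = (void *)ss;

		sin6->sin6_family = AF_INET6;
		memcpy(&sin6->sin6_addr, &e->addr[0], sizeof(e->addr));
	}
	return ((struct sockaddr *)ss);
}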
266 t4_l2t_send_slow(struct adapter *sc, struct wrqe *wr, struct l2t_entry *e)
270 switch (e->state) {
273 resolve_entry(sc, e);
285 mtx_lock(&e->lock);
286 if (e->state != L2T_STATE_SYNC_WRITE &&
287 e->state != L2T_STATE_RESOLVING) {
289 mtx_unlock(&e->lock);
295 arpq_enqueue(e, wr);
296 mtx_unlock(&e->lock);
298 if (resolve_entry(sc, e) == EWOULDBLOCK)
301 mtx_lock(&e->lock);
302 if (e->state == L2T_STATE_VALID && !STAILQ_EMPTY(&e->wr_list))
303 send_pending(sc, e);
304 if (e->state == L2T_STATE_FAILED)
305 resolution_failed(sc, e);
306 mtx_unlock(&e->lock);
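t4_l2t_send_slow() inspects e->state without the lock and therefore re-checks it after acquiring e->lock: if the entry left RESOLVING/SYNC_WRITE in that window, it retries the dispatch instead of parking the WR. After parking, it kicks resolve_entry(); a synchronous answer is handled inline, draining on VALID or failing on FAILED, while EWOULDBLOCK leaves the flush to the update callback. A user-space sketch of that double-checked dispatch; the fall-through from STALE to VALID is reconstructed from the usual shape of this function (only fragments are visible above), and the stubs stand in for the driver calls:

#include <errno.h>
#include <pthread.h>

enum state { RESOLVING, SYNC_WRITE, VALID, STALE, FAILED };

struct entry {
	pthread_mutex_t lock;
	enum state state;
};

/* Stand-ins for arpq_enqueue/send_pending/resolution_failed/resolve_entry. */
static void park(struct entry *e)    { (void)e; }
static void drain(struct entry *e)   { (void)e; }
static void fail(struct entry *e)    { (void)e; }
static int  resolve(struct entry *e) { (void)e; return (EWOULDBLOCK); }

static int
send_slow(struct entry *e)
{
again:
	switch (e->state) {
	case STALE:	/* still usable, but kick off revalidation */
		if (resolve(e) != EWOULDBLOCK)
			goto again;	/* resolved synchronously: recheck */
		/* FALLTHROUGH */
	case VALID:
		/* fast path: hand the WR straight to the hardware queue */
		return (0);
	case RESOLVING:
	case SYNC_WRITE:
		pthread_mutex_lock(&e->lock);
		if (e->state != SYNC_WRITE && e->state != RESOLVING) {
			/* state moved on before we took the lock: retry */
			pthread_mutex_unlock(&e->lock);
			goto again;
		}
		park(e);	/* queue the WR behind the resolution */
		pthread_mutex_unlock(&e->lock);

		if (resolve(e) == EWOULDBLOCK)
			break;	/* async: the update callback will flush */

		pthread_mutex_lock(&e->lock);
		if (e->state == VALID)
			drain(e);
		if (e->state == FAILED)
			fail(e);
		pthread_mutex_unlock(&e->lock);
		break;
	case FAILED:
		return (EHOSTUNREACH);
	}
	return (0);
}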
338 struct l2t_entry *e = &sc->l2t->l2tab[idx];
340 mtx_lock(&e->lock);
341 if (e->state != L2T_STATE_SWITCHING) {
342 send_pending(sc, e);
343 e->state = L2T_STATE_VALID;
345 mtx_unlock(&e->lock);
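Lines 338-345 are evidently from the L2T write-reply (CPL) completion path: when the hardware acknowledges a synchronous L2T write, the entry for that slot is locked, anything parked during the SYNC_WRITE window is flushed, and the entry becomes VALID; switching entries are left alone. A minimal sketch of such a completion handler, with illustrative names throughout:

#include <pthread.h>

enum state { SYNC_WRITE, SWITCHING, VALID };

struct entry {
	pthread_mutex_t lock;
	enum state state;
};

struct table {
	struct entry *l2tab;	/* indexed by hardware L2T slot */
};

static void drain(struct entry *e) { (void)e; }	/* send_pending analogue */

/*
 * Completion path: hardware acknowledged the L2T write for slot idx,
 * so anything parked while the write was in flight can now be sent.
 */
static void
l2t_write_done(struct table *t, unsigned int idx)
{
	struct entry *e = &t->l2tab[idx];

	pthread_mutex_lock(&e->lock);
	if (e->state != SWITCHING) {
		drain(e);		/* flush WRs queued during the write */
		e->state = VALID;
	}
	pthread_mutex_unlock(&e->lock);
}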
361 struct l2t_entry *e;
385 e = NULL;
388 for (e = d->l2tab[hash].first; e; e = e->next) {
389 if (l2_cmp(sa, e) == 0 && e->ifp == ifp && e->vlan == vtag &&
390 e->smt_idx == smt_idx) {
391 l2t_hold(d, e);
397 e = t4_alloc_l2e(d);
398 if (e) {
399 mtx_lock(&e->lock); /* avoid race with t4_l2t_free */
400 e->next = d->l2tab[hash].first;
401 d->l2tab[hash].first = e;
403 e->state = L2T_STATE_RESOLVING;
404 l2_store(sa, e);
405 e->ifp = ifp;
406 e->smt_idx = smt_idx;
407 e->hash = hash;
408 e->lport = pi->lport;
409 e->wrq = &sc->sge.ctrlq[pi->port_id];
410 e->iqid = sc->sge.ofld_rxq[pi->vi[0].first_ofld_rxq].iq.abs_id;
411 atomic_store_rel_int(&e->refcnt, 1);
412 e->vlan = vtag;
413 mtx_unlock(&e->lock);
417 return e;
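t4_l2t_get() (lines 361-417) is find-or-create under the table's write lock: a chain hit must match the address, ifp, VLAN tag and SMT index and simply takes a hold; a miss allocates, links the entry at the head of its hash chain, and initializes it in L2T_STATE_RESOLVING with refcnt 1, holding e->lock during initialization to keep a concurrent t4_l2t_free from seeing a half-built entry. A self-contained sketch of the lookup-or-insert skeleton; key stands in for the full (addr, ifp, vlan, smt_idx) match and all names are illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

enum state { RESOLVING };

struct entry {
	struct entry *next;		/* hash-chain link */
	atomic_int refcnt;
	enum state state;
	int key;			/* stands in for (addr, ifp, vlan, smt_idx) */
	pthread_mutex_t lock;
};

struct table {
	pthread_rwlock_t lock;		/* assumed initialized by the caller */
	struct entry *bucket[64];
};

/* t4_alloc_l2e() analogue: hand out a fresh entry (or NULL). */
static struct entry *
alloc_entry(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (e != NULL)
		pthread_mutex_init(&e->lock, NULL);
	return (e);
}

/* Find-or-create; hash is assumed already reduced to a bucket index. */
static struct entry *
table_get(struct table *t, int key, unsigned int hash)
{
	struct entry *e;

	pthread_rwlock_wrlock(&t->lock);
	for (e = t->bucket[hash]; e != NULL; e = e->next) {
		if (e->key == key) {
			atomic_fetch_add(&e->refcnt, 1);   /* hit: take a hold */
			goto done;
		}
	}
	e = alloc_entry();
	if (e != NULL) {
		pthread_mutex_lock(&e->lock);	/* shield the half-built entry */
		e->next = t->bucket[hash];	/* link at head of the chain */
		t->bucket[hash] = e;
		e->state = RESOLVING;
		e->key = key;
		atomic_store(&e->refcnt, 1);	/* the caller's reference */
		pthread_mutex_unlock(&e->lock);
	}
done:
	pthread_rwlock_unlock(&t->lock);
	return (e);
}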
429 struct l2t_entry *e;
439 for (e = d->l2tab[hash].first; e; e = e->next) {
440 if (l2_cmp(sa, e) == 0 && e->ifp == ifp) {
441 mtx_lock(&e->lock);
442 if (atomic_load_acq_int(&e->refcnt))
444 if (e->state == L2T_STATE_VALID)
445 e->state = L2T_STATE_STALE;
446 mtx_unlock(&e->lock);
463 KASSERT(e->state != L2T_STATE_UNUSED,
466 update_entry(sc, e, lladdr, vtag);
467 mtx_unlock(&e->lock);
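The final fragment (lines 429-467) is the neighbor-event callback, t4_l2_update() by the look of it. It searches by address and ifp under a read lock; an unreferenced entry is merely demoted from VALID to STALE, but a referenced one keeps its entry lock while the table lock is dropped and is then pushed through update_entry() with the new link-layer address. A sketch of that lock hand-off; entry_update() stands in for update_entry() and the names are illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

enum state { VALID, STALE };

struct entry {
	struct entry *next;
	atomic_int refcnt;
	enum state state;
	int key;			/* stands in for (addr, ifp) */
	pthread_mutex_t lock;
};

struct table {
	pthread_rwlock_t lock;
	struct entry *bucket[64];
};

/* update_entry() analogue; a stub here. */
static void
entry_update(struct entry *e, const unsigned char *lladdr)
{
	(void)e; (void)lladdr;
}

/*
 * Neighbor-event callback: a full update is applied only to entries that
 * are actually referenced; an idle entry is merely demoted to STALE.
 */
static void
table_update(struct table *t, int key, unsigned int hash,
    const unsigned char *lladdr)
{
	struct entry *e;

	pthread_rwlock_rdlock(&t->lock);
	for (e = t->bucket[hash]; e != NULL; e = e->next) {
		if (e->key == key) {
			pthread_mutex_lock(&e->lock);
			if (atomic_load(&e->refcnt) != 0)
				goto found;	/* in use: full update below */
			if (e->state == VALID)
				e->state = STALE;	/* idle: just age it */
			pthread_mutex_unlock(&e->lock);
			break;
		}
	}
	pthread_rwlock_unlock(&t->lock);
	return;			/* no referenced entry for this neighbor */

found:
	pthread_rwlock_unlock(&t->lock);   /* drop table lock, keep e->lock */
	entry_update(e, lladdr);
	pthread_mutex_unlock(&e->lock);
}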