/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bnx2x.h"

/* Calculate the CRC of the bulletin board (the CRC field itself is skipped) */
static inline uint32_t
bnx2x_vf_crc(struct bnx2x_vf_bulletin *bull)
{
	uint32_t crc_sz = sizeof(bull->crc), length = bull->length - crc_sz;

	return ECORE_CRC32_LE(0, (uint8_t *)bull + crc_sz, length);
}

/* Check whether there are MAC/channel updates for the VF.
 * Returns TRUE if something was updated.
 */
int
bnx2x_check_bull(struct bnx2x_softc *sc)
{
	struct bnx2x_vf_bulletin *bull;
	uint8_t tries = 0;
	uint16_t old_version = sc->old_bulletin.version;
	uint64_t valid_bitmap;

	bull = sc->pf2vf_bulletin;
	if (old_version == bull->version) {
		return FALSE;
	} else {
		/* Check the CRC until we get correct data */
		while (tries < BNX2X_VF_BULLETIN_TRIES) {
			bull = sc->pf2vf_bulletin;
			if (bull->crc == bnx2x_vf_crc(bull))
				break;

			PMD_DRV_LOG(ERR, sc,
				    "bad crc on bulletin board. contained %x computed %x",
				    bull->crc, bnx2x_vf_crc(bull));
			++tries;
		}
		if (tries == BNX2X_VF_BULLETIN_TRIES) {
			PMD_DRV_LOG(ERR, sc,
				    "pf to vf bulletin board crc was wrong %d consecutive times. Aborting",
				    tries);
			return FALSE;
		}
	}

	valid_bitmap = bull->valid_bitmap;

	/* check the MAC address and VLAN and copy them if valid */
	if (valid_bitmap & (1 << MAC_ADDR_VALID) &&
	    memcmp(bull->mac, sc->old_bulletin.mac, ETH_ALEN))
		memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN);
	if (valid_bitmap & (1 << VLAN_VALID))
		memcpy(&bull->vlan, &sc->old_bulletin.vlan, sizeof(bull->vlan));

	sc->old_bulletin = *bull;

	return TRUE;
}

/* place a given tlv on the tlv buffer at a given offset */
static void
bnx2x_add_tlv(__rte_unused struct bnx2x_softc *sc, void *tlvs_list,
	      uint16_t offset, uint16_t type, uint16_t length)
{
	struct channel_tlv *tl = (struct channel_tlv *)
				 ((unsigned long)tlvs_list + offset);

	tl->type = type;
	tl->length = length;
}

/* Initialize the header of the first TLV and clear the mailbox.
 * Takes sc->vf2pf_lock; it is released by bnx2x_vf_finalize().
 */
static void
bnx2x_vf_prep(struct bnx2x_softc *sc, struct vf_first_tlv *first_tlv,
	      uint16_t type, uint16_t length)
{
	struct bnx2x_vf_mbx_msg *mbox = sc->vf2pf_mbox;

	rte_spinlock_lock(&sc->vf2pf_lock);

	PMD_DRV_LOG(DEBUG, sc, "Preparing %d tlv for sending", type);

	memset(mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

	bnx2x_add_tlv(sc, &first_tlv->tl, 0, type, length);

	/* The PF reply is placed right after the query area */
	first_tlv->reply_offset = sizeof(mbox->query);
}

/* Release the mailbox (drops sc->vf2pf_lock taken in bnx2x_vf_prep()) */
static void
bnx2x_vf_finalize(struct bnx2x_softc *sc,
		  __rte_unused struct vf_first_tlv *first_tlv)
{
	PMD_DRV_LOG(DEBUG, sc, "done sending [%d] tlv over vf pf channel",
		    first_tlv->tl.type);

	rte_spinlock_unlock(&sc->vf2pf_lock);
}

#define BNX2X_VF_CMD_ADDR_LO PXP_VF_ADDR_CSDM_GLOBAL_START
#define BNX2X_VF_CMD_ADDR_HI (BNX2X_VF_CMD_ADDR_LO + 4)
#define BNX2X_VF_CMD_TRIGGER (BNX2X_VF_CMD_ADDR_HI + 4)
#define BNX2X_VF_CHANNEL_DELAY 100
#define BNX2X_VF_CHANNEL_TRIES 100

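/* Send a prepared request to the PF over the VF-PF channel: write the
 * request's physical address to the CSDM mailbox registers, trigger the
 * PF, then poll the reply status until the PF answers or the retries
 * are exhausted.
 */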
static int
bnx2x_do_req4pf(struct bnx2x_softc *sc, rte_iova_t phys_addr)
{
	uint8_t *status = &sc->vf2pf_mbox->resp.common_reply.status;
	uint8_t i;

	if (*status) {
		PMD_DRV_LOG(ERR, sc, "status should be zero before the message"
				     " to the PF is sent");
		return -EINVAL;
	}

	bnx2x_check_bull(sc);
	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
		PMD_DRV_LOG(ERR, sc, "channel is down. Aborting message sending");
		return -EINVAL;
	}

	REG_WR(sc, BNX2X_VF_CMD_ADDR_LO, U64_LO(phys_addr));
	REG_WR(sc, BNX2X_VF_CMD_ADDR_HI, U64_HI(phys_addr));

	/* memory barrier to ensure that FW can read phys_addr */
	wmb();

	REG_WR8(sc, BNX2X_VF_CMD_TRIGGER, 1);

	/* Do several attempts until PF completes */
	for (i = 0; i < BNX2X_VF_CHANNEL_TRIES; i++) {
		DELAY_MS(BNX2X_VF_CHANNEL_DELAY);
		if (*status)
			break;
	}

	if (!*status) {
		PMD_DRV_LOG(ERR, sc, "Response from PF timed out");
		return -EAGAIN;
	}

	PMD_DRV_LOG(DEBUG, sc, "Response from PF was received");
	return 0;
}

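/* Check the 'me' register value: return ME_REG_VF_VALID when the VF is
 * marked valid and no error bit is set, 0 otherwise.
 */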
static inline uint16_t bnx2x_check_me_flags(uint32_t val)
{
	if (((val) & ME_REG_VF_VALID) && (!((val) & ME_REG_VF_ERR)))
		return ME_REG_VF_VALID;
	else
		return 0;
}

#define BNX2X_ME_ANSWER_DELAY 100
#define BNX2X_ME_ANSWER_TRIES 10

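/* Poll the doorbell 'me' register until the PF publishes a valid VF id,
 * retrying up to BNX2X_ME_ANSWER_TRIES times.
 */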
static inline int bnx2x_read_vf_id(struct bnx2x_softc *sc, uint32_t *vf_id)
{
	uint32_t val;
	uint8_t i = 0;

	while (i <= BNX2X_ME_ANSWER_TRIES) {
		val = BNX2X_DB_READ(DOORBELL_ADDR(sc, 0));
		if (bnx2x_check_me_flags(val)) {
			PMD_DRV_LOG(DEBUG, sc,
				    "valid register value: 0x%08x", val);
			*vf_id = VF_ID(val);
			return 0;
		}

		DELAY_MS(BNX2X_ME_ANSWER_DELAY);
		i++;
	}

	PMD_DRV_LOG(ERR, sc, "Invalid register value: 0x%08x", val);

	return -EINVAL;
}

#define BNX2X_VF_OBTAIN_MAX_TRIES 3
#define BNX2X_VF_OBTAIN_MAC_FILTERS 1
#define BNX2X_VF_OBTAIN_MC_FILTERS 10

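/* Send the ACQUIRE request in a loop. If the PF reports that it cannot
 * satisfy the request, shrink the requested resources to what the PF
 * offered and retry, up to BNX2X_VF_OBTAIN_MAX_TRIES attempts.
 */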
static int
bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
{
	struct vf_acquire_resp_tlv *resp = &sc->vf2pf_mbox->resp.acquire_resp,
				   *sc_resp = &sc->acquire_resp;
	struct vf_resource_query *res_query;
	struct vf_resc *resc;
	int res_obtained = false;
	int tries = 0;
	int rc;

	do {
		PMD_DRV_LOG(DEBUG, sc, "trying to get resources");

		rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
		if (rc)
			return rc;

		memcpy(sc_resp, resp, sizeof(sc->acquire_resp));

		tries++;

		/* check whether the PF accepted our request */
		if (sc_resp->status == BNX2X_VF_STATUS_SUCCESS) {
			PMD_DRV_LOG(DEBUG, sc, "resources obtained successfully");
			res_obtained = true;
		} else if (sc_resp->status == BNX2X_VF_STATUS_NO_RESOURCES &&
			   tries < BNX2X_VF_OBTAIN_MAX_TRIES) {
			PMD_DRV_LOG(DEBUG, sc,
				    "PF cannot allocate requested amount of resources");

			res_query = &sc->vf2pf_mbox->query[0].acquire.res_query;
			resc = &sc_resp->resc;

			/* PF refused our request. Try to decrease request params */
			res_query->num_txqs = min(res_query->num_txqs,
						  resc->num_txqs);
			res_query->num_rxqs = min(res_query->num_rxqs,
						  resc->num_rxqs);
			res_query->num_sbs = min(res_query->num_sbs,
						 resc->num_sbs);
			res_query->num_mac_filters = min(res_query->num_mac_filters,
							 resc->num_mac_filters);
			res_query->num_vlan_filters = min(res_query->num_vlan_filters,
							  resc->num_vlan_filters);
			res_query->num_mc_filters = min(res_query->num_mc_filters,
							resc->num_mc_filters);

			memset(&sc->vf2pf_mbox->resp, 0, sizeof(union resp_tlvs));
		} else {
			PMD_DRV_LOG(ERR, sc,
				    "Failed to get the requested amount of resources: %d.",
				    sc_resp->status);
			return -EINVAL;
		}
	} while (!res_obtained);

	return 0;
}

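/* ACQUIRE the VF resources (queues, status blocks, filters) from the PF
 * and record the negotiated configuration in the softc.
 */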
int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count,
			   uint8_t rx_count)
{
	struct vf_acquire_tlv *acq = &sc->vf2pf_mbox->query[0].acquire;
	uint32_t vf_id;
	int rc;

	bnx2x_vf_close(sc);
	bnx2x_vf_prep(sc, &acq->first_tlv, BNX2X_VF_TLV_ACQUIRE, sizeof(*acq));

	if (bnx2x_read_vf_id(sc, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	acq->vf_id = vf_id;

	acq->res_query.num_rxqs = rx_count;
	acq->res_query.num_txqs = tx_count;
	acq->res_query.num_sbs = sc->igu_sb_cnt;
	acq->res_query.num_mac_filters = BNX2X_VF_OBTAIN_MAC_FILTERS;
	acq->res_query.num_mc_filters = BNX2X_VF_OBTAIN_MC_FILTERS;

	acq->bulletin_addr = sc->pf2vf_bulletin_mapping.paddr;

	/* Request physical port identifier */
	bnx2x_add_tlv(sc, acq, acq->first_tlv.tl.length,
		      BNX2X_VF_TLV_PHYS_PORT_ID,
		      sizeof(struct channel_tlv));

	bnx2x_add_tlv(sc, acq,
		      (acq->first_tlv.tl.length + sizeof(struct channel_tlv)),
		      BNX2X_VF_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* request the resources in a loop */
	rc = bnx2x_loop_obtain_resources(sc);
	if (rc)
		goto out;

	struct vf_acquire_resp_tlv sc_resp = sc->acquire_resp;

	sc->devinfo.chip_id |= (sc_resp.chip_num & 0xFFFF);
	sc->devinfo.int_block = INT_BLOCK_IGU;
	sc->devinfo.chip_port_mode = CHIP_2_PORT_MODE;
	sc->devinfo.mf_info.mf_ov = 0;
	sc->devinfo.mf_info.mf_mode = 0;
	sc->devinfo.flash_size = 0;

	sc->igu_sb_cnt = sc_resp.resc.num_sbs;
	sc->igu_base_sb = sc_resp.resc.hw_sbs[0] & 0xFF;
	sc->igu_dsb_id = -1;
	sc->max_tx_queues = sc_resp.resc.num_txqs;
	sc->max_rx_queues = sc_resp.resc.num_rxqs;

	sc->link_params.chip_id = sc->devinfo.chip_id;
	sc->doorbell_size = sc_resp.db_size;
	sc->flags |= BNX2X_NO_WOL_FLAG | BNX2X_NO_ISCSI_OOO_FLAG |
		     BNX2X_NO_ISCSI_FLAG | BNX2X_NO_FCOE_FLAG;

	PMD_DRV_LOG(DEBUG, sc, "status block count = %d, base status block = %x",
		    sc->igu_sb_cnt, sc->igu_base_sb);
	strncpy(sc->fw_ver, sc_resp.fw_ver, sizeof(sc->fw_ver));

	if (rte_is_valid_assigned_ether_addr(&sc_resp.resc.current_mac_addr))
		rte_ether_addr_copy(&sc_resp.resc.current_mac_addr,
				    (struct rte_ether_addr *)sc->link_params.mac_addr);
	else
		rte_eth_random_addr(sc->link_params.mac_addr);

out:
	bnx2x_vf_finalize(sc, &acq->first_tlv);

	return rc;
}

/* Ask PF to release VF's resources */
void
bnx2x_vf_close(struct bnx2x_softc *sc)
{
	struct vf_release_tlv *query;
	struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
	uint32_t vf_id;
	int rc;

	query = &sc->vf2pf_mbox->query[0].release;
	bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_RELEASE,
		      sizeof(*query));

	if (bnx2x_read_vf_id(sc, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	query->vf_id = vf_id;

	bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
		      BNX2X_VF_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
	if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
		PMD_DRV_LOG(ERR, sc, "Failed to release VF");

out:
	bnx2x_vf_finalize(sc, &query->first_tlv);
}

/* Let the PF know the physical addresses of the VF status blocks */
int
bnx2x_vf_init(struct bnx2x_softc *sc)
{
	struct vf_init_tlv *query;
	struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
	int i, rc;

	PMD_INIT_FUNC_TRACE(sc);

	query = &sc->vf2pf_mbox->query[0].init;
	bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_INIT,
		      sizeof(*query));

	FOR_EACH_QUEUE(sc, i) {
		query->sb_addr[i] = (unsigned long)(sc->fp[i].sb_dma.paddr);
	}

	query->stats_step = sizeof(struct per_queue_stats);
	query->stats_addr = sc->fw_stats_data_mapping +
			    offsetof(struct bnx2x_fw_stats_data, queue_stats);

	bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
		      BNX2X_VF_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
	if (rc)
		goto out;
	if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
		PMD_DRV_LOG(ERR, sc, "Failed to init VF");
		rc = -EINVAL;
		goto out;
	}

	PMD_DRV_LOG(DEBUG, sc, "VF was initialized");
out:
	bnx2x_vf_finalize(sc, &query->first_tlv);
	return rc;
}

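/* Tear down all queues, clear the MAC filter and send the CLOSE request
 * to the PF.
 */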
void
bnx2x_vf_unload(struct bnx2x_softc *sc)
{
	struct vf_close_tlv *query;
	struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
	uint32_t vf_id;
	int i, rc;

	PMD_INIT_FUNC_TRACE(sc);

	FOR_EACH_QUEUE(sc, i)
		bnx2x_vf_teardown_queue(sc, i);

	bnx2x_vf_set_mac(sc, false);

	query = &sc->vf2pf_mbox->query[0].close;
	bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_CLOSE,
		      sizeof(*query));

	if (bnx2x_read_vf_id(sc, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	query->vf_id = vf_id;

	bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
		      BNX2X_VF_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
	if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
		PMD_DRV_LOG(ERR, sc,
			    "Bad reply from PF for close message");

out:
	bnx2x_vf_finalize(sc, &query->first_tlv);
}

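/* Build the queue flags for a SETUP_Q request; the leading queue also
 * carries the leading-RSS flag.
 */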
static inline uint16_t
bnx2x_vf_q_flags(uint8_t leading)
{
	uint16_t flags = leading ? BNX2X_VF_Q_FLAG_LEADING_RSS : 0;

	flags |= BNX2X_VF_Q_FLAG_CACHE_ALIGN;
	flags |= BNX2X_VF_Q_FLAG_STATS;
	flags |= BNX2X_VF_Q_FLAG_VLAN;

	return flags;
}

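/* Fill the RX queue parameters (ring and completion queue addresses, MTU,
 * buffer size) of a SETUP_Q request from the given fastpath.
 */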
static void
bnx2x_vf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
		   struct vf_rxq_params *rxq_init, uint16_t flags)
{
	struct bnx2x_rx_queue *rxq;

	rxq = sc->rx_queues[fp->index];
	if (!rxq) {
		PMD_DRV_LOG(ERR, sc, "RX queue %d is NULL", fp->index);
		return;
	}

	rxq_init->rcq_addr = rxq->cq_ring_phys_addr;
	rxq_init->rcq_np_addr = rxq->cq_ring_phys_addr + BNX2X_PAGE_SIZE;
	rxq_init->rxq_addr = rxq->rx_ring_phys_addr;
	rxq_init->vf_sb_id = fp->index;
	rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
	rxq_init->mtu = sc->mtu;
	rxq_init->buf_sz = fp->rx_buf_size;
	rxq_init->flags = flags;
	rxq_init->stat_id = -1;
	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
}

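/* Fill the TX queue parameters of a SETUP_Q request from the given
 * fastpath.
 */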
static void
bnx2x_vf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
		   struct vf_txq_params *txq_init, uint16_t flags)
{
	struct bnx2x_tx_queue *txq;

	txq = sc->tx_queues[fp->index];
	if (!txq) {
		PMD_DRV_LOG(ERR, sc, "TX queue %d is NULL", fp->index);
		return;
	}

	txq_init->txq_addr = txq->tx_ring_phys_addr;
	txq_init->sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
	txq_init->flags = flags;
	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
	txq_init->vf_sb_id = fp->index;
}

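/* Ask the PF to set up the RX/TX queue pair for the given fastpath */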
int
bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
		     int leading)
{
	struct vf_setup_q_tlv *query;
	struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
	uint16_t flags = bnx2x_vf_q_flags(leading);
	int rc;

	query = &sc->vf2pf_mbox->query[0].setup_q;
	bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SETUP_Q,
		      sizeof(*query));

	query->vf_qid = fp->index;
	query->param_valid = VF_RXQ_VALID | VF_TXQ_VALID;

	bnx2x_vf_rx_q_prep(sc, fp, &query->rxq, flags);
	bnx2x_vf_tx_q_prep(sc, fp, &query->txq, flags);

	bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
		      BNX2X_VF_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
	if (rc)
		goto out;
	if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
		PMD_DRV_LOG(ERR, sc, "Failed to setup VF queue[%d]",
			    fp->index);
		rc = -EINVAL;
	}
out:
	bnx2x_vf_finalize(sc, &query->first_tlv);

	return rc;
}

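/* Ask the PF to tear down the given VF queue */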
int
bnx2x_vf_teardown_queue(struct bnx2x_softc *sc, int qid)
{
	struct vf_q_op_tlv *query_op;
	struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
	int rc;

	query_op = &sc->vf2pf_mbox->query[0].q_op;
	bnx2x_vf_prep(sc, &query_op->first_tlv,
		      BNX2X_VF_TLV_TEARDOWN_Q,
		      sizeof(*query_op));

	query_op->vf_qid = qid;

	bnx2x_add_tlv(sc, query_op,
		      query_op->first_tlv.tl.length,
		      BNX2X_VF_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
	if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
		PMD_DRV_LOG(ERR, sc,
			    "Bad reply for vf_q %d teardown", qid);

	bnx2x_vf_finalize(sc, &query_op->first_tlv);

	return rc;
}

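/* Set (set != 0) or clear the VF MAC filter on the leading queue. If the
 * PF rejects the request and has published a new MAC in the bulletin
 * board, retry with that MAC.
 */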
int
bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set)
{
	struct vf_set_q_filters_tlv *query;
	struct vf_common_reply_tlv *reply;
	int rc;

	query = &sc->vf2pf_mbox->query[0].set_q_filters;
	bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS,
		      sizeof(*query));

	query->vf_qid = sc->fp->index;
	query->mac_filters_cnt = 1;
	query->flags = BNX2X_VF_MAC_VLAN_CHANGED;

	query->filters[0].flags = (set ? BNX2X_VF_Q_FILTER_SET_MAC : 0) |
				  BNX2X_VF_Q_FILTER_DEST_MAC_VALID;

	bnx2x_check_bull(sc);

	memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN);

	bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
		      BNX2X_VF_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
	if (rc)
		goto out;
	reply = &sc->vf2pf_mbox->resp.common_reply;

	while (BNX2X_VF_STATUS_FAILURE == reply->status &&
	       bnx2x_check_bull(sc)) {
		/* A new mac was configured by PF for us */
		memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac,
		       ETH_ALEN);
		memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac,
		       ETH_ALEN);

		rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
		if (rc)
			goto out;
	}

	if (BNX2X_VF_STATUS_SUCCESS != reply->status) {
		PMD_DRV_LOG(ERR, sc, "Bad reply from PF for SET MAC message: %d",
			    reply->status);
		rc = -EINVAL;
	}
out:
	bnx2x_vf_finalize(sc, &query->first_tlv);

	return rc;
}

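/* Send the RSS key, indirection table and hash flags to the PF */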
int
bnx2x_vf_config_rss(struct bnx2x_softc *sc,
		    struct ecore_config_rss_params *params)
{
	struct vf_rss_tlv *query;
	struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
	int rc;

	query = &sc->vf2pf_mbox->query[0].update_rss;

	bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_UPDATE_RSS,
		      sizeof(*query));

	/* add list termination tlv */
	bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
		      BNX2X_VF_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key));
	query->rss_key_size = T_ETH_RSS_KEY;

	memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
	query->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;

	query->rss_result_mask = params->rss_result_mask;
	query->rss_flags = params->rss_flags;

	rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
	if (rc)
		goto out;

	if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
		PMD_DRV_LOG(ERR, sc, "Failed to configure RSS");
		rc = -EINVAL;
	}
out:
	bnx2x_vf_finalize(sc, &query->first_tlv);

	return rc;
}

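/* Translate the driver rx_mode into a VF-PF accept mask and send it to
 * the PF.
 */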
int
bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc)
{
	struct vf_set_q_filters_tlv *query;
	struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
	int rc;

	query = &sc->vf2pf_mbox->query[0].set_q_filters;
	bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS,
		      sizeof(*query));

	query->vf_qid = 0;
	query->flags = BNX2X_VF_RX_MASK_CHANGED;

	switch (sc->rx_mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		query->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
		break;
	case BNX2X_RX_MODE_NORMAL:
		query->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
		query->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
		query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		query->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
		query->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
		query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	case BNX2X_RX_MODE_ALLMULTI_PROMISC:
	case BNX2X_RX_MODE_PROMISC:
		query->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
		query->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
		query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	default:
		PMD_DRV_LOG(ERR, sc, "BAD rx mode (%d)", sc->rx_mode);
		rc = -EINVAL;
		goto out;
	}

	bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
		      BNX2X_VF_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
	if (rc)
		goto out;

	if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
		PMD_DRV_LOG(ERR, sc, "Failed to set RX mode");
		rc = -EINVAL;
	}

out:
	bnx2x_vf_finalize(sc, &query->first_tlv);

	return rc;
}

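/* Program up to VF_MAX_MULTICAST_PER_VF multicast MAC filters through
 * the PF.
 */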
int
bnx2x_vfpf_set_mcast(struct bnx2x_softc *sc,
		     struct rte_ether_addr *mc_addrs,
		     uint32_t mc_addrs_num)
{
	struct vf_set_q_filters_tlv *query;
	struct vf_common_reply_tlv *reply =
			&sc->vf2pf_mbox->resp.common_reply;
	int rc = 0;
	uint32_t i = 0;

	query = &sc->vf2pf_mbox->query[0].set_q_filters;
	bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS,
		      sizeof(*query));

	/* We support at most VF_MAX_MULTICAST_PER_VF multicast addresses */
	if (mc_addrs_num > VF_MAX_MULTICAST_PER_VF) {
		PMD_DRV_LOG(ERR, sc,
			    "VF supports no more than %d multicast MAC addresses",
			    VF_MAX_MULTICAST_PER_VF);

		rc = -EINVAL;
		goto out;
	}

	for (i = 0; i < mc_addrs_num; i++) {
		PMD_DRV_LOG(DEBUG, sc, "Adding mcast MAC:"
			    RTE_ETHER_ADDR_PRT_FMT,
			    RTE_ETHER_ADDR_BYTES(&mc_addrs[i]));
		memcpy(query->multicast[i], mc_addrs[i].addr_bytes, ETH_ALEN);
	}

	query->vf_qid = 0;
	query->flags = BNX2X_VF_MULTICAST_CHANGED;
	query->multicast_cnt = i;

	/* add list termination tlv */
	bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
		      BNX2X_VF_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
	if (rc)
		goto out;

	if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
		PMD_DRV_LOG(ERR, sc, "Set Rx mode/multicast failed: %d",
			    reply->status);
		rc = -EINVAL;
	}

out:
	bnx2x_vf_finalize(sc, &query->first_tlv);

	return rc;
}