/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <ethdev_driver.h>
#include <bus_pci_driver.h>
#include <rte_hash.h>
#include <rte_jhash.h>

#include "hinic_compat.h"
#include "hinic_csr.h"
#include "hinic_pmd_hwdev.h"
#include "hinic_pmd_hwif.h"
#include "hinic_pmd_wq.h"
#include "hinic_pmd_cmdq.h"
#include "hinic_pmd_mgmt.h"
#include "hinic_pmd_niccfg.h"
#include "hinic_pmd_mbox.h"

#define HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT 0
#define HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF
#define HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7

#define HINIC_FLR_TIMEOUT 1000

#define FFM_RECORD_NUM_MAX 32

#define HINIC_DMA_ATTR_ENTRY_ST_SHIFT 0
#define HINIC_DMA_ATTR_ENTRY_AT_SHIFT 8
#define HINIC_DMA_ATTR_ENTRY_PH_SHIFT 10
#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12
#define HINIC_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13

#define HINIC_DMA_ATTR_ENTRY_ST_MASK 0xFF
#define HINIC_DMA_ATTR_ENTRY_AT_MASK 0x3
#define HINIC_DMA_ATTR_ENTRY_PH_MASK 0x3
#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1
#define HINIC_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1

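/*
 * HINIC_DMA_ATTR_ENTRY_SET() below masks a field value and shifts it into
 * position, e.g. HINIC_DMA_ATTR_ENTRY_SET(1, TPH_EN) yields bit 13 set;
 * HINIC_DMA_ATTR_ENTRY_CLEAR() clears that field in a previously read
 * register value, so the two can be combined for a read-modify-write.
 */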
#define HINIC_DMA_ATTR_ENTRY_SET(val, member) \
	(((u32)(val) & HINIC_DMA_ATTR_ENTRY_##member##_MASK) << \
		HINIC_DMA_ATTR_ENTRY_##member##_SHIFT)

#define HINIC_DMA_ATTR_ENTRY_CLEAR(val, member) \
	((val) & (~(HINIC_DMA_ATTR_ENTRY_##member##_MASK \
		<< HINIC_DMA_ATTR_ENTRY_##member##_SHIFT)))

#define HINIC_PCIE_ST_DISABLE 0
#define HINIC_PCIE_AT_DISABLE 0
#define HINIC_PCIE_PH_DISABLE 0
#define PCIE_MSIX_ATTR_ENTRY 0

#define HINIC_HASH_FUNC rte_jhash
#define HINIC_HASH_KEY_LEN (sizeof(dma_addr_t))
#define HINIC_HASH_FUNC_INIT_VAL 0

static const char *__hw_to_char_fec[HILINK_FEC_MAX_TYPE] = {
	"RS-FEC", "BASE-FEC", "NO-FEC"};

static const char *__hw_to_char_port_type[LINK_PORT_MAX_TYPE] = {
	"Unknown", "Fibre", "Electric", "Direct Attach Copper", "AOC",
	"Back plane", "BaseT"
};

static const char *hinic_module_link_err[LINK_ERR_NUM] = {
	"Unrecognized module",
};

struct hinic_vf_dma_attr_table {
	struct hinic_mgmt_msg_head mgmt_msg_head;

	u16 func_idx;
	u8 func_dma_entry_num;
	u8 entry_idx;
	u8 st;
	u8 at;
	u8 ph;
	u8 no_snooping;
	u8 tph_en;
	u8 resv1[3];
};

/**
 * hinic_cpu_to_be32 - convert data to big endian 32 bit format
 * @data: the data to convert
 * @len: length of data to convert, must be a multiple of 4 bytes
 */
void hinic_cpu_to_be32(void *data, u32 len)
{
	u32 i;
	u32 *mem = (u32 *)data;

	for (i = 0; i < (len >> 2); i++) {
		*mem = cpu_to_be32(*mem);
		mem++;
	}
}

/**
 * hinic_be32_to_cpu - convert data from big endian 32 bit format
 * @data: the data to convert
 * @len: length of data to convert, must be a multiple of 4 bytes
 */
void hinic_be32_to_cpu(void *data, u32 len)
{
	u32 i;
	u32 *mem = (u32 *)data;

	for (i = 0; i < (len >> 2); i++) {
		*mem = be32_to_cpu(*mem);
		mem++;
	}
}

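/*
 * DMA memory is backed by IOVA-contiguous rte_memzones. Each allocation is
 * recorded in the per-device dma_addr_hash table keyed by its IOVA, so that
 * hinic_dma_mem_free() can recover the memzone from the bus address alone
 * and hinic_osdep_deinit() can free anything that leaks.
 */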
static void *hinic_dma_mem_zalloc(struct hinic_hwdev *hwdev, size_t size,
				  dma_addr_t *dma_handle, unsigned int align,
				  unsigned int socket_id)
{
	int rc, alloc_cnt;
	const struct rte_memzone *mz;
	char z_name[RTE_MEMZONE_NAMESIZE];
	hash_sig_t sig;
	rte_iova_t iova;

	if (dma_handle == NULL || 0 == size)
		return NULL;

	alloc_cnt = rte_atomic32_add_return(&hwdev->os_dep.dma_alloc_cnt, 1);
	snprintf(z_name, sizeof(z_name), "%s_%d",
		 hwdev->pcidev_hdl->name, alloc_cnt);

	mz = rte_memzone_reserve_aligned(z_name, size, socket_id,
					 RTE_MEMZONE_IOVA_CONTIG, align);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Alloc dma able memory failed, errno: %d, mz_name: %s, size: 0x%zx",
			    rte_errno, z_name, size);
		return NULL;
	}

	iova = mz->iova;

	/* check if phys_addr already exist */
	sig = HINIC_HASH_FUNC(&iova, HINIC_HASH_KEY_LEN,
			      HINIC_HASH_FUNC_INIT_VAL);
	rc = rte_hash_lookup_with_hash(hwdev->os_dep.dma_addr_hash,
				       &iova, sig);
	if (rc >= 0) {
		PMD_DRV_LOG(ERR, "Dma addr: %p already in hash table, error: %d, mz_name: %s",
			    (void *)iova, rc, z_name);
		goto phys_addr_hash_err;
	}

	/* record paddr in hash table */
	rte_spinlock_lock(&hwdev->os_dep.dma_hash_lock);
	rc = rte_hash_add_key_with_hash_data(hwdev->os_dep.dma_addr_hash,
					     &iova, sig,
					     (void *)(u64)mz);
	rte_spinlock_unlock(&hwdev->os_dep.dma_hash_lock);
	if (rc) {
		PMD_DRV_LOG(ERR, "Insert dma addr: %p hash failed, error: %d, mz_name: %s",
			    (void *)iova, rc, z_name);
		goto phys_addr_hash_err;
	}
	*dma_handle = iova;
	memset(mz->addr, 0, size);

	return mz->addr;

phys_addr_hash_err:
	(void)rte_memzone_free(mz);

	return NULL;
}

static void
hinic_dma_mem_free(struct hinic_hwdev *hwdev, size_t size,
		   void *virt, dma_addr_t phys)
{
	int rc;
	struct rte_memzone *mz = NULL;
	struct rte_hash *hash;
	hash_sig_t sig;

	if (virt == NULL || phys == 0)
		return;

	hash = hwdev->os_dep.dma_addr_hash;
	sig = HINIC_HASH_FUNC(&phys, HINIC_HASH_KEY_LEN,
			      HINIC_HASH_FUNC_INIT_VAL);
	rc = rte_hash_lookup_with_hash_data(hash, &phys, sig, (void **)&mz);
	if (rc < 0) {
		PMD_DRV_LOG(ERR, "Can not find phys_addr: %p, error: %d",
			    (void *)phys, rc);
		return;
	}

	if (virt != mz->addr || size > mz->len) {
		PMD_DRV_LOG(ERR, "Match mz_info failed: "
			    "mz.name: %s, mz.phys: %p, mz.virt: %p, mz.len: %zu, "
			    "phys: %p, virt: %p, size: %zu",
			    mz->name, (void *)mz->iova, mz->addr, mz->len,
			    (void *)phys, virt, size);
	}

	rte_spinlock_lock(&hwdev->os_dep.dma_hash_lock);
	(void)rte_hash_del_key_with_hash(hash, &phys, sig);
	rte_spinlock_unlock(&hwdev->os_dep.dma_hash_lock);

	(void)rte_memzone_free(mz);
}

void *dma_zalloc_coherent(void *hwdev, size_t size, dma_addr_t *dma_handle,
			  unsigned int socket_id)
{
	return hinic_dma_mem_zalloc(hwdev, size, dma_handle,
				    RTE_CACHE_LINE_SIZE, socket_id);
}

void *dma_zalloc_coherent_aligned(void *hwdev, size_t size,
				  dma_addr_t *dma_handle,
				  unsigned int socket_id)
{
	return hinic_dma_mem_zalloc(hwdev, size, dma_handle, HINIC_PAGE_SIZE,
				    socket_id);
}

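/*
 * 256KB-aligned variant: assuming the driver's usual 4KB HINIC_PAGE_SIZE,
 * HINIC_PAGE_SIZE * 64 gives the 256KB alignment the function name
 * advertises.
 */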
void *dma_zalloc_coherent_aligned256k(void *hwdev, size_t size,
				      dma_addr_t *dma_handle,
				      unsigned int socket_id)
{
	return hinic_dma_mem_zalloc(hwdev, size, dma_handle,
				    HINIC_PAGE_SIZE * 64, socket_id);
}

void dma_free_coherent(void *hwdev, size_t size, void *virt, dma_addr_t phys)
{
	hinic_dma_mem_free(hwdev, size, virt, phys);
}

void dma_free_coherent_volatile(void *hwdev, size_t size,
				volatile void *virt, dma_addr_t phys)
{
	int rc;
	struct rte_memzone *mz = NULL;
	struct hinic_hwdev *dev = hwdev;
	struct rte_hash *hash;
	hash_sig_t sig;

	if (virt == NULL || phys == 0)
		return;

	hash = dev->os_dep.dma_addr_hash;
	sig = HINIC_HASH_FUNC(&phys, HINIC_HASH_KEY_LEN,
			      HINIC_HASH_FUNC_INIT_VAL);
	rc = rte_hash_lookup_with_hash_data(hash, &phys, sig, (void **)&mz);
	if (rc < 0) {
		PMD_DRV_LOG(ERR, "Can not find phys_addr: %p, error: %d",
			    (void *)phys, rc);
		return;
	}

	if (virt != mz->addr || size > mz->len) {
		PMD_DRV_LOG(ERR, "Match mz_info failed: "
			    "mz.name:%s, mz.phys:%p, mz.virt:%p, mz.len:%zu, "
			    "phys:%p, virt:%p, size:%zu",
			    mz->name, (void *)mz->iova, mz->addr, mz->len,
			    (void *)phys, virt, size);
	}

	rte_spinlock_lock(&dev->os_dep.dma_hash_lock);
	(void)rte_hash_del_key_with_hash(hash, &phys, sig);
	rte_spinlock_unlock(&dev->os_dep.dma_hash_lock);

	(void)rte_memzone_free(mz);
}

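/*
 * The dma_pool_* helpers below provide a Linux-kernel-style DMA pool API on
 * top of hinic_dma_mem_zalloc()/hinic_dma_mem_free(); each dma_pool_alloc()
 * is a standalone aligned allocation of pool->elem_size bytes. A minimal
 * usage sketch with a hypothetical caller and sizes, assuming the
 * dma_pool/pci_pool aliasing set up in hinic_compat.h:
 *
 *	struct dma_pool *pool = dma_pool_create("cmd_buf", hwdev, 64, 64, 0);
 *	dma_addr_t pa;
 *	void *va = dma_pool_alloc(pool, &pa);
 *	...
 *	dma_pool_free(pool, va, pa);
 *	dma_pool_destroy(pool);
 */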
struct dma_pool *dma_pool_create(const char *name, void *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct pci_pool *pool;

	pool = rte_zmalloc(NULL, sizeof(*pool), HINIC_MEM_ALLOC_ALIGN_MIN);
	if (!pool)
		return NULL;

	rte_atomic32_set(&pool->inuse, 0);
	pool->elem_size = size;
	pool->align = align;
	pool->boundary = boundary;
	pool->hwdev = dev;
	strncpy(pool->name, name, (sizeof(pool->name) - 1));

	return pool;
}

void dma_pool_destroy(struct dma_pool *pool)
{
	if (!pool)
		return;

	if (rte_atomic32_read(&pool->inuse) != 0) {
		PMD_DRV_LOG(ERR, "Leak memory, dma_pool: %s, inuse_count: %d",
			    pool->name, rte_atomic32_read(&pool->inuse));
	}

	rte_free(pool);
}

void *dma_pool_alloc(struct pci_pool *pool, dma_addr_t *dma_addr)
{
	void *buf;

	buf = hinic_dma_mem_zalloc(pool->hwdev, pool->elem_size, dma_addr,
				   (u32)pool->align, SOCKET_ID_ANY);
	if (buf)
		rte_atomic32_inc(&pool->inuse);

	return buf;
}

void dma_pool_free(struct pci_pool *pool, void *vaddr, dma_addr_t dma)
{
	rte_atomic32_dec(&pool->inuse);
	hinic_dma_mem_free(pool->hwdev, pool->elem_size, vaddr, dma);
}

#define HINIC_MAX_DMA_ENTRIES 8192
int hinic_osdep_init(struct hinic_hwdev *hwdev)
{
	struct rte_hash_parameters dh_params = { 0 };
	struct rte_hash *paddr_hash = NULL;

	rte_atomic32_set(&hwdev->os_dep.dma_alloc_cnt, 0);
	rte_spinlock_init(&hwdev->os_dep.dma_hash_lock);

	dh_params.name = hwdev->pcidev_hdl->name;
	dh_params.entries = HINIC_MAX_DMA_ENTRIES;
	dh_params.key_len = HINIC_HASH_KEY_LEN;
	dh_params.hash_func = HINIC_HASH_FUNC;
	dh_params.hash_func_init_val = HINIC_HASH_FUNC_INIT_VAL;
	dh_params.socket_id = SOCKET_ID_ANY;

	paddr_hash = rte_hash_find_existing(dh_params.name);
	if (paddr_hash == NULL) {
		paddr_hash = rte_hash_create(&dh_params);
		if (paddr_hash == NULL) {
			PMD_DRV_LOG(ERR, "Create nic_dev phys_addr hash table failed");
			return -ENOMEM;
		}
	} else {
		PMD_DRV_LOG(INFO, "Using existing dma hash table %s",
			    dh_params.name);
	}
	hwdev->os_dep.dma_addr_hash = paddr_hash;

	return 0;
}

void hinic_osdep_deinit(struct hinic_hwdev *hwdev)
{
	uint32_t iter = 0;
	dma_addr_t key_pa;
	struct rte_memzone *data_mz = NULL;
	struct rte_hash *paddr_hash = hwdev->os_dep.dma_addr_hash;

	if (paddr_hash) {
		/* iterate through the hash table */
		while (rte_hash_iterate(paddr_hash, (const void **)&key_pa,
					(void **)&data_mz, &iter) >= 0) {
			if (data_mz) {
				PMD_DRV_LOG(WARNING, "Free leaked dma_addr: %p, mz: %s",
					    (void *)key_pa, data_mz->name);
				(void)rte_memzone_free(data_mz);
			}
		}

		/* free phys_addr hash table */
		rte_hash_free(paddr_hash);
	}
}

/**
 * hinic_set_ci_table - set ci attribute table
 * @hwdev: the hardware interface of a nic device
 * @q_id: Queue id of SQ
 * @attr: Pointer to SQ CI attribute table
 * @return
 *   0 on success and ci attribute table is filled,
 *   negative error value otherwise.
 */
int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr)
{
	struct hinic_cons_idx_attr cons_idx_attr;
	u16 out_size = sizeof(cons_idx_attr);
	int err;

	memset(&cons_idx_attr, 0, sizeof(cons_idx_attr));
	cons_idx_attr.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	cons_idx_attr.func_idx = hinic_global_func_id(hwdev);
	cons_idx_attr.dma_attr_off = attr->dma_attr_off;
	cons_idx_attr.pending_limit = attr->pending_limit;
	cons_idx_attr.coalescing_time = attr->coalescing_time;
	if (attr->intr_en) {
		cons_idx_attr.intr_en = attr->intr_en;
		cons_idx_attr.intr_idx = attr->intr_idx;
	}

	cons_idx_attr.l2nic_sqn = attr->l2nic_sqn;
	cons_idx_attr.sq_id = q_id;
	cons_idx_attr.ci_addr = attr->ci_dma_base;

	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				     HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET,
				     &cons_idx_attr, sizeof(cons_idx_attr),
				     &cons_idx_attr, &out_size, 0);
	if (err || !out_size || cons_idx_attr.mgmt_msg_head.status) {
		PMD_DRV_LOG(ERR, "Set ci attribute table failed, err: %d, status: 0x%x, out_size: 0x%x",
			    err, cons_idx_attr.mgmt_msg_head.status, out_size);
		return -EIO;
	}

	return 0;
}

/**
 * hinic_set_pagesize - set page size to vat table
 * @hwdev: the hardware interface of a nic device
 * @page_size: vat page size
 * @return
 *   0 on success,
 *   negative error value otherwise.
 */
int hinic_set_pagesize(void *hwdev, u8 page_size)
{
	struct hinic_page_size page_size_info;
	u16 out_size = sizeof(page_size_info);
	int err;

	if (page_size > HINIC_PAGE_SIZE_MAX) {
		PMD_DRV_LOG(ERR, "Invalid page_size %u, bigger than %u",
			    page_size, HINIC_PAGE_SIZE_MAX);
		return -EINVAL;
	}

	memset(&page_size_info, 0, sizeof(page_size_info));
	page_size_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	page_size_info.func_idx = hinic_global_func_id(hwdev);
	page_size_info.ppf_idx = hinic_ppf_idx(hwdev);
	page_size_info.page_size = page_size;

	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				     HINIC_MGMT_CMD_PAGESIZE_SET,
				     &page_size_info, sizeof(page_size_info),
				     &page_size_info, &out_size, 0);
	if (err || !out_size || page_size_info.mgmt_msg_head.status) {
		PMD_DRV_LOG(ERR, "Set wq page size failed, err: %d, status: 0x%x, out_size: 0x%x",
			    err, page_size_info.mgmt_msg_head.status, out_size);
		return -EIO;
	}

	return 0;
}

static int wait_for_flr_finish(struct hinic_hwif *hwif)
{
	unsigned long end;
	enum hinic_pf_status status;

	end = jiffies + msecs_to_jiffies(HINIC_FLR_TIMEOUT);
	do {
		status = hinic_get_pf_status(hwif);
		if (status == HINIC_PF_STATUS_FLR_FINISH_FLAG)
			return 0;

		rte_delay_ms(10);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

#define HINIC_WAIT_CMDQ_IDLE_TIMEOUT 1000

static int wait_cmdq_stop(struct hinic_hwdev *hwdev)
{
	enum hinic_cmdq_type cmdq_type;
	struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
	unsigned long end;
	int err = 0;

	if (!(cmdqs->status & HINIC_CMDQ_ENABLE))
		return 0;

	cmdqs->status &= ~HINIC_CMDQ_ENABLE;

	end = jiffies + msecs_to_jiffies(HINIC_WAIT_CMDQ_IDLE_TIMEOUT);
	do {
		err = 0;
		cmdq_type = HINIC_CMDQ_SYNC;
		for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
			if (!hinic_cmdq_idle(&cmdqs->cmdq[cmdq_type])) {
				err = -EBUSY;
				break;
			}
		}

		if (!err)
			return 0;

		rte_delay_ms(1);
	} while (time_before(jiffies, end));

	cmdqs->status |= HINIC_CMDQ_ENABLE;

	return err;
}

static int hinic_vf_rx_tx_flush(struct hinic_hwdev *hwdev)
{
	struct hinic_clear_resource clr_res;
	int err;

	err = wait_cmdq_stop(hwdev);
	if (err) {
		PMD_DRV_LOG(WARNING, "Cmdq is still working");
		return err;
	}

	memset(&clr_res, 0, sizeof(clr_res));
	clr_res.func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif);
	clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif);
	err = hinic_mbox_to_pf_no_ack(hwdev, HINIC_MOD_COMM,
		HINIC_MGMT_CMD_START_FLR, &clr_res, sizeof(clr_res));
	if (err)
		PMD_DRV_LOG(WARNING, "Notice flush message failed");

	/*
	 * The PF first sets the VF doorbell flush CSR to disabled. After the
	 * PF has finished flushing the VF resources, it sets the CSR back to
	 * enabled, so wait for both transitions here.
	 */
	err = wait_until_doorbell_flush_states(hwdev->hwif, DISABLE_DOORBELL);
	if (err)
		PMD_DRV_LOG(WARNING, "Wait doorbell flush disable timeout");

	err = wait_until_doorbell_flush_states(hwdev->hwif, ENABLE_DOORBELL);
	if (err)
		PMD_DRV_LOG(WARNING, "Wait doorbell flush enable timeout");

	err = hinic_reinit_cmdq_ctxts(hwdev);
	if (err)
		PMD_DRV_LOG(WARNING, "Reinit cmdq failed when vf flush");

	return err;
}

/**
 * hinic_pf_rx_tx_flush - clean up hardware resource
 * @hwdev: the hardware interface of a nic device
 * @return
 *   0 on success,
 *   negative error value otherwise.
 */
static int hinic_pf_rx_tx_flush(struct hinic_hwdev *hwdev)
{
	struct hinic_hwif *hwif = hwdev->hwif;
	struct hinic_clear_doorbell clear_db;
	struct hinic_clear_resource clr_res;
	u16 out_size;
	int err;
	int ret = 0;

	rte_delay_ms(100);

	err = wait_cmdq_stop(hwdev);
	if (err) {
		PMD_DRV_LOG(ERR, "Cmdq is still working");
		return err;
	}

	hinic_disable_doorbell(hwif);
	out_size = sizeof(clear_db);
	memset(&clear_db, 0, sizeof(clear_db));
	clear_db.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	clear_db.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif);
	clear_db.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				     HINIC_MGMT_CMD_FLUSH_DOORBELL, &clear_db,
				     sizeof(clear_db), &clear_db, &out_size, 0);
	if (err || !out_size || clear_db.mgmt_msg_head.status) {
		PMD_DRV_LOG(WARNING, "Flush doorbell failed, err: %d, status: 0x%x, out_size: 0x%x",
			    err, clear_db.mgmt_msg_head.status, out_size);
		ret = err ? err : (-EIO);
	}

	hinic_set_pf_status(hwif, HINIC_PF_STATUS_FLR_START_FLAG);
	memset(&clr_res, 0, sizeof(clr_res));
	clr_res.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	clr_res.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif);
	clr_res.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);

	err = hinic_msg_to_mgmt_no_ack(hwdev, HINIC_MOD_COMM,
				       HINIC_MGMT_CMD_START_FLR, &clr_res,
				       sizeof(clr_res));
	if (err) {
		PMD_DRV_LOG(WARNING, "Notice flush msg failed, err: %d", err);
		ret = err;
	}

	err = wait_for_flr_finish(hwif);
	if (err) {
		PMD_DRV_LOG(WARNING, "Wait firmware FLR timeout, err: %d", err);
		ret = err;
	}

	hinic_enable_doorbell(hwif);

	err = hinic_reinit_cmdq_ctxts(hwdev);
	if (err) {
		PMD_DRV_LOG(WARNING,
			    "Reinit cmdq failed when pf flush, err: %d", err);
		ret = err;
	}

	return ret;
}

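/*
 * Flush dispatch: a VF asks the PF to perform the flush over the mailbox
 * channel (hinic_vf_rx_tx_flush), while a PF flushes its doorbells and
 * triggers FLR directly through the management channel
 * (hinic_pf_rx_tx_flush).
 */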
int hinic_func_rx_tx_flush(struct hinic_hwdev *hwdev)
{
	if (HINIC_FUNC_TYPE(hwdev) == TYPE_VF)
		return hinic_vf_rx_tx_flush(hwdev);
	else
		return hinic_pf_rx_tx_flush(hwdev);
}

/**
 * hinic_get_interrupt_cfg - get interrupt configuration from NIC
 * @hwdev: the hardware interface of a nic device
 * @interrupt_info: Information of Interrupt aggregation
 * Return: 0 on success, negative error value otherwise.
 */
static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
				   struct nic_interrupt_info *interrupt_info)
{
	struct hinic_msix_config msix_cfg;
	u16 out_size = sizeof(msix_cfg);
	int err;

	memset(&msix_cfg, 0, sizeof(msix_cfg));
	msix_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	msix_cfg.func_id = hinic_global_func_id(hwdev);
	msix_cfg.msix_index = interrupt_info->msix_index;

	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				     HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP,
				     &msix_cfg, sizeof(msix_cfg),
				     &msix_cfg, &out_size, 0);
	if (err || !out_size || msix_cfg.mgmt_msg_head.status) {
		PMD_DRV_LOG(ERR, "Get interrupt config failed, err: %d, status: 0x%x, out size: 0x%x",
			    err, msix_cfg.mgmt_msg_head.status, out_size);
		return -EIO;
	}

	interrupt_info->lli_credit_limit = msix_cfg.lli_credit_cnt;
	interrupt_info->lli_timer_cfg = msix_cfg.lli_tmier_cnt;
	interrupt_info->pending_limt = msix_cfg.pending_cnt;
	interrupt_info->coalesc_timer_cfg = msix_cfg.coalesct_timer_cnt;
	interrupt_info->resend_timer_cfg = msix_cfg.resend_timer_cnt;
	return 0;
}

/**
 * hinic_set_interrupt_cfg - set interrupt configuration to NIC
 * @hwdev: the hardware interface of a nic device
 * @interrupt_info: Information of Interrupt aggregation
 * Return: 0 on success, negative error value otherwise.
 */
int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
			    struct nic_interrupt_info interrupt_info)
{
	struct hinic_msix_config msix_cfg;
	struct nic_interrupt_info temp_info;
	u16 out_size = sizeof(msix_cfg);
	int err;

	temp_info.msix_index = interrupt_info.msix_index;
	err = hinic_get_interrupt_cfg(hwdev, &temp_info);
	if (err)
		return -EIO;

	memset(&msix_cfg, 0, sizeof(msix_cfg));
	msix_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	msix_cfg.func_id = hinic_global_func_id(hwdev);
	msix_cfg.msix_index = (u16)interrupt_info.msix_index;
	msix_cfg.lli_credit_cnt = temp_info.lli_credit_limit;
	msix_cfg.lli_tmier_cnt = temp_info.lli_timer_cfg;
	msix_cfg.pending_cnt = temp_info.pending_limt;
	msix_cfg.coalesct_timer_cnt = temp_info.coalesc_timer_cfg;
	msix_cfg.resend_timer_cnt = temp_info.resend_timer_cfg;

	if (interrupt_info.lli_set) {
		msix_cfg.lli_credit_cnt = interrupt_info.lli_credit_limit;
		msix_cfg.lli_tmier_cnt = interrupt_info.lli_timer_cfg;
	}

	if (interrupt_info.interrupt_coalesc_set) {
		msix_cfg.pending_cnt = interrupt_info.pending_limt;
		msix_cfg.coalesct_timer_cnt = interrupt_info.coalesc_timer_cfg;
		msix_cfg.resend_timer_cnt = interrupt_info.resend_timer_cfg;
	}

	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				     HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP,
				     &msix_cfg, sizeof(msix_cfg),
				     &msix_cfg, &out_size, 0);
	if (err || !out_size || msix_cfg.mgmt_msg_head.status) {
		PMD_DRV_LOG(ERR, "Set interrupt config failed, err: %d, status: 0x%x, out size: 0x%x",
			    err, msix_cfg.mgmt_msg_head.status, out_size);
		return -EIO;
	}

	return 0;
}

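/*
 * Note that hinic_set_interrupt_cfg() is a read-modify-write: the current
 * MSI-X entry configuration is fetched with hinic_get_interrupt_cfg() and
 * only the fields flagged by lli_set/interrupt_coalesc_set are overridden.
 * init_aeqs_msix_attr() below uses it to set just the coalescing fields for
 * every AEQ interrupt.
 */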
/**
 * init_aeqs_msix_attr - Init interrupt attributes of aeq
 * @hwdev: the hardware interface of a nic device
 * @return
 *   0 on success,
 *   negative error value otherwise.
 */
int init_aeqs_msix_attr(void *hwdev)
{
	struct hinic_hwdev *nic_hwdev = hwdev;
	struct hinic_aeqs *aeqs = nic_hwdev->aeqs;
	struct nic_interrupt_info info = {0};
	struct hinic_eq *eq;
	u16 q_id;
	int err;

	info.lli_set = 0;
	info.interrupt_coalesc_set = 1;
	info.pending_limt = HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT;
	info.coalesc_timer_cfg = HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG;
	info.resend_timer_cfg = HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG;

	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
		eq = &aeqs->aeq[q_id];
		info.msix_index = eq->eq_irq.msix_entry_idx;
		err = hinic_set_interrupt_cfg(hwdev, info);
		if (err) {
			PMD_DRV_LOG(ERR, "Set msix attr for aeq %d failed",
				    q_id);
			return -EFAULT;
		}
	}

	return 0;
}

/**
 * set_pf_dma_attr_entry - set the dma attributes for entry
 * @hwdev: the pointer to the private hardware device object
 * @entry_idx: the entry index in the dma table
 * @st: PCIE TLP steering tag
 * @at: PCIE TLP AT field
 * @ph: PCIE TLP Processing Hint field
 * @no_snooping: PCIE TLP No snooping
 * @tph_en: PCIE TLP Processing Hint Enable
 */
static void set_pf_dma_attr_entry(struct hinic_hwdev *hwdev, u32 entry_idx,
				  u8 st, u8 at, u8 ph,
				  enum hinic_pcie_nosnoop no_snooping,
				  enum hinic_pcie_tph tph_en)
{
	u32 addr, val, dma_attr_entry;

	/* Read Modify Write */
	addr = HINIC_CSR_DMA_ATTR_TBL_ADDR(entry_idx);

	val = hinic_hwif_read_reg(hwdev->hwif, addr);
	val = HINIC_DMA_ATTR_ENTRY_CLEAR(val, ST) &
	      HINIC_DMA_ATTR_ENTRY_CLEAR(val, AT) &
	      HINIC_DMA_ATTR_ENTRY_CLEAR(val, PH) &
	      HINIC_DMA_ATTR_ENTRY_CLEAR(val, NO_SNOOPING) &
	      HINIC_DMA_ATTR_ENTRY_CLEAR(val, TPH_EN);

	dma_attr_entry = HINIC_DMA_ATTR_ENTRY_SET(st, ST) |
			 HINIC_DMA_ATTR_ENTRY_SET(at, AT) |
			 HINIC_DMA_ATTR_ENTRY_SET(ph, PH) |
			 HINIC_DMA_ATTR_ENTRY_SET(no_snooping, NO_SNOOPING) |
			 HINIC_DMA_ATTR_ENTRY_SET(tph_en, TPH_EN);

	val |= dma_attr_entry;
	hinic_hwif_write_reg(hwdev->hwif, addr, val);
}

static int set_vf_dma_attr_entry(struct hinic_hwdev *hwdev, u8 entry_idx,
				 u8 st, u8 at, u8 ph,
				 enum hinic_pcie_nosnoop no_snooping,
				 enum hinic_pcie_tph tph_en)
{
	struct hinic_vf_dma_attr_table attr;
	u16 out_size = sizeof(attr);
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.func_idx = hinic_global_func_id(hwdev);
	attr.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	attr.func_dma_entry_num = hinic_dma_attr_entry_num(hwdev);
	attr.entry_idx = entry_idx;
	attr.st = st;
	attr.at = at;
	attr.ph = ph;
	attr.no_snooping = no_snooping;
	attr.tph_en = tph_en;

	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				     HINIC_MGMT_CMD_DMA_ATTR_SET,
				     &attr, sizeof(attr), &attr, &out_size, 0);
	if (err || !out_size || attr.mgmt_msg_head.status) {
		PMD_DRV_LOG(ERR, "Set dma attribute failed, err: %d, status: 0x%x, out_size: 0x%x",
			    err, attr.mgmt_msg_head.status, out_size);
		return -EIO;
	}

	return 0;
}

/**
 * dma_attr_table_init - initialize the default dma attributes
 * @hwdev: the pointer to the private hardware device object
 */
static int dma_attr_table_init(struct hinic_hwdev *hwdev)
{
	int err = 0;

	if (HINIC_IS_VF(hwdev))
		err = set_vf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY,
				HINIC_PCIE_ST_DISABLE, HINIC_PCIE_AT_DISABLE,
				HINIC_PCIE_PH_DISABLE, HINIC_PCIE_SNOOP,
				HINIC_PCIE_TPH_DISABLE);
	else
		set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY,
				HINIC_PCIE_ST_DISABLE, HINIC_PCIE_AT_DISABLE,
				HINIC_PCIE_PH_DISABLE, HINIC_PCIE_SNOOP,
				HINIC_PCIE_TPH_DISABLE);

	return err;
}

/**
 * hinic_init_attr_table - init dma and aeq msix attribute table
 * @hwdev: the pointer to the private hardware device object
 */
int hinic_init_attr_table(struct hinic_hwdev *hwdev)
{
	int err;

	err = dma_attr_table_init(hwdev);
	if (err) {
		PMD_DRV_LOG(ERR, "Initialize dma attribute table failed, err: %d",
			    err);
		return err;
	}

	err = init_aeqs_msix_attr(hwdev);
	if (err) {
		PMD_DRV_LOG(ERR, "Initialize aeqs msix attribute failed, err: %d",
			    err);
		return err;
	}

	return 0;
}

#define FAULT_SHOW_STR_LEN 16
static void fault_report_show(struct hinic_hwdev *hwdev,
			      struct hinic_fault_event *event)
{
	char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
		"chip", "ucode", "mem rd timeout", "mem wr timeout",
		"reg rd timeout", "reg wr timeout"};
	char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = {
		"fatal", "reset", "flr", "general", "suggestion"};
	char type_str[FAULT_SHOW_STR_LEN + 1] = { 0 };
	char level_str[FAULT_SHOW_STR_LEN + 1] = { 0 };
	u8 err_level;

	PMD_DRV_LOG(WARNING, "Fault event report received, func_id: %d",
		    hinic_global_func_id(hwdev));

	if (event->type < FAULT_TYPE_MAX)
		strncpy(type_str, fault_type[event->type], FAULT_SHOW_STR_LEN);
	else
		strncpy(type_str, "unknown", FAULT_SHOW_STR_LEN);
	PMD_DRV_LOG(WARNING, "fault type: %d [%s]",
		    event->type, type_str);
	PMD_DRV_LOG(WARNING, "fault val[0]: 0x%08x",
		    event->event.val[0]);
	PMD_DRV_LOG(WARNING, "fault val[1]: 0x%08x",
		    event->event.val[1]);
	PMD_DRV_LOG(WARNING, "fault val[2]: 0x%08x",
		    event->event.val[2]);
	PMD_DRV_LOG(WARNING, "fault val[3]: 0x%08x",
		    event->event.val[3]);

	switch (event->type) {
	case FAULT_TYPE_CHIP:
		err_level = event->event.chip.err_level;
		if (err_level < FAULT_LEVEL_MAX)
			strncpy(level_str, fault_level[err_level],
				FAULT_SHOW_STR_LEN);
		else
			strncpy(level_str, "unknown",
				FAULT_SHOW_STR_LEN);

		PMD_DRV_LOG(WARNING, "err_level: %d [%s]",
			    err_level, level_str);

		if (err_level == FAULT_LEVEL_SERIOUS_FLR) {
			PMD_DRV_LOG(WARNING, "flr func_id: %d",
				    event->event.chip.func_id);
		} else {
			PMD_DRV_LOG(WARNING, "node_id: %d",
				    event->event.chip.node_id);
			PMD_DRV_LOG(WARNING, "err_type: %d",
				    event->event.chip.err_type);
			PMD_DRV_LOG(WARNING, "err_csr_addr: %d",
				    event->event.chip.err_csr_addr);
			PMD_DRV_LOG(WARNING, "err_csr_value: %d",
				    event->event.chip.err_csr_value);
		}
		break;
	case FAULT_TYPE_UCODE:
		PMD_DRV_LOG(WARNING, "cause_id: %d",
			    event->event.ucode.cause_id);
		PMD_DRV_LOG(WARNING, "core_id: %d",
			    event->event.ucode.core_id);
		PMD_DRV_LOG(WARNING, "c_id: %d",
			    event->event.ucode.c_id);
		PMD_DRV_LOG(WARNING, "epc: %d",
			    event->event.ucode.epc);
		break;
	case FAULT_TYPE_MEM_RD_TIMEOUT:
	case FAULT_TYPE_MEM_WR_TIMEOUT:
		PMD_DRV_LOG(WARNING, "err_csr_ctrl: %d",
			    event->event.mem_timeout.err_csr_ctrl);
		PMD_DRV_LOG(WARNING, "err_csr_data: %d",
			    event->event.mem_timeout.err_csr_data);
		PMD_DRV_LOG(WARNING, "ctrl_tab: %d",
			    event->event.mem_timeout.ctrl_tab);
		PMD_DRV_LOG(WARNING, "mem_index: %d",
			    event->event.mem_timeout.mem_index);
		break;
	case FAULT_TYPE_REG_RD_TIMEOUT:
	case FAULT_TYPE_REG_WR_TIMEOUT:
		PMD_DRV_LOG(WARNING, "err_csr: %d",
			    event->event.reg_timeout.err_csr);
		break;
	default:
		break;
	}
}

static int resources_state_set(struct hinic_hwdev *hwdev,
			       enum hinic_res_state state)
{
	struct hinic_cmd_set_res_state res_state;
	u16 out_size = sizeof(res_state);
	int err;

	memset(&res_state, 0, sizeof(res_state));
	res_state.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	res_state.func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif);
	res_state.state = state;

	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				     HINIC_MGMT_CMD_RES_STATE_SET,
				     &res_state, sizeof(res_state),
				     &res_state, &out_size, 0);
	if (err || !out_size || res_state.mgmt_msg_head.status) {
		PMD_DRV_LOG(ERR, "Set resources state failed, err: %d, status: 0x%x, out_size: 0x%x",
			    err, res_state.mgmt_msg_head.status, out_size);
		return -EIO;
	}

	return 0;
}

/**
 * hinic_activate_hwdev_state - Activate host nic state and notify mgmt channel
 * that host nic is ready.
 * @hwdev: the hardware interface of a nic device
 * @return
 *   0 on success,
 *   negative error value otherwise.
 */
int hinic_activate_hwdev_state(struct hinic_hwdev *hwdev)
{
	int rc = HINIC_OK;

	if (!hwdev)
		return -EINVAL;

	hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG);

	rc = resources_state_set(hwdev, HINIC_RES_ACTIVE);
	if (rc) {
		PMD_DRV_LOG(ERR, "Initialize resources state failed");
		return rc;
	}

	return 0;
}

/**
 * hinic_deactivate_hwdev_state - Deactivate host nic state and notify mgmt
 * channel that host nic is not ready.
 * @hwdev: the pointer to the private hardware device object
 */
void hinic_deactivate_hwdev_state(struct hinic_hwdev *hwdev)
{
	int rc = HINIC_OK;

	if (!hwdev)
		return;

	rc = resources_state_set(hwdev, HINIC_RES_CLEAN);
	if (rc)
		PMD_DRV_LOG(ERR, "Deinit resources state failed");

	hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT);
}

int hinic_get_board_info(void *hwdev, struct hinic_board_info *info)
{
	struct hinic_comm_board_info board_info;
	u16 out_size = sizeof(board_info);
	int err;

	if (!hwdev || !info)
		return -EINVAL;

	memset(&board_info, 0, sizeof(board_info));
	board_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				     HINIC_MGMT_CMD_GET_BOARD_INFO,
				     &board_info, sizeof(board_info),
				     &board_info, &out_size, 0);
	if (err || board_info.mgmt_msg_head.status || !out_size) {
		PMD_DRV_LOG(ERR, "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x",
			    err, board_info.mgmt_msg_head.status, out_size);
		return -EIO;
	}

	memcpy(info, &board_info.info, sizeof(*info));
	return 0;
}

/**
 * hinic_l2nic_reset - Restore the initial state of NIC
 * @hwdev: the hardware interface of a nic device
 * @return
 *   0 on success,
 *   negative error value otherwise.
 */
int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
{
	struct hinic_hwif *hwif = hwdev->hwif;
	struct hinic_l2nic_reset l2nic_reset;
	u16 out_size = sizeof(l2nic_reset);
	int err = 0;

	err = hinic_set_vport_enable(hwdev, false);
	if (err) {
		PMD_DRV_LOG(ERR, "Set vport disable failed");
		return err;
	}

	rte_delay_ms(100);

	memset(&l2nic_reset, 0, sizeof(l2nic_reset));
	l2nic_reset.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	l2nic_reset.func_id = HINIC_HWIF_GLOBAL_IDX(hwif);
	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				     HINIC_MGMT_CMD_L2NIC_RESET,
				     &l2nic_reset, sizeof(l2nic_reset),
				     &l2nic_reset, &out_size, 0);
	if (err || !out_size || l2nic_reset.mgmt_msg_head.status) {
		PMD_DRV_LOG(ERR, "Reset L2NIC resources failed, err: %d, status: 0x%x, out_size: 0x%x",
			    err, l2nic_reset.mgmt_msg_head.status, out_size);
		return -EIO;
	}

	return 0;
}

static void
hinic_show_sw_watchdog_timeout_info(void *buf_in, u16 in_size,
				    void *buf_out, u16 *out_size)
{
	struct hinic_mgmt_watchdog_info *watchdog_info;
	u32 *dump_addr, *reg, stack_len, i, j;

	if (in_size != sizeof(*watchdog_info)) {
		PMD_DRV_LOG(ERR, "Invalid mgmt watchdog report, length: %d, should be %zu",
			    in_size, sizeof(*watchdog_info));
		return;
	}

	watchdog_info = (struct hinic_mgmt_watchdog_info *)buf_in;

	PMD_DRV_LOG(ERR, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%x",
		    watchdog_info->curr_time_h, watchdog_info->curr_time_l,
		    watchdog_info->task_id, watchdog_info->sp);
	PMD_DRV_LOG(ERR, "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%x, bottom: 0x%x",
		    watchdog_info->curr_used, watchdog_info->peak_used,
		    watchdog_info->is_overflow, watchdog_info->stack_top,
		    watchdog_info->stack_bottom);

	PMD_DRV_LOG(ERR, "Mgmt pc: 0x%08x, lr: 0x%08x, cpsr: 0x%08x",
		    watchdog_info->pc, watchdog_info->lr, watchdog_info->cpsr);

	PMD_DRV_LOG(ERR, "Mgmt register info");

	for (i = 0; i < 3; i++) {
		reg = watchdog_info->reg + (u64)(u32)(4 * i);
		PMD_DRV_LOG(ERR, "0x%08x 0x%08x 0x%08x 0x%08x",
			    *(reg), *(reg + 1), *(reg + 2), *(reg + 3));
	}

	PMD_DRV_LOG(ERR, "0x%08x", watchdog_info->reg[12]);

	if (watchdog_info->stack_actlen <= 1024) {
		stack_len = watchdog_info->stack_actlen;
	} else {
		PMD_DRV_LOG(ERR, "Oops stack length: 0x%x is wrong",
			    watchdog_info->stack_actlen);
		stack_len = 1024;
	}

	PMD_DRV_LOG(ERR, "Mgmt dump stack, 16 bytes per line (starting from sp)");
	for (i = 0; i < (stack_len / 16); i++) {
		dump_addr = (u32 *)(watchdog_info->data + ((u64)(u32)(i * 16)));
		PMD_DRV_LOG(ERR, "0x%08x 0x%08x 0x%08x 0x%08x",
			    *dump_addr, *(dump_addr + 1), *(dump_addr + 2),
			    *(dump_addr + 3));
	}

	for (j = 0; j < ((stack_len % 16) / 4); j++) {
		dump_addr = (u32 *)(watchdog_info->data +
			    ((u64)(u32)(i * 16 + j * 4)));
		PMD_DRV_LOG(ERR, "0x%08x", *dump_addr);
	}

	*out_size = sizeof(*watchdog_info);
	watchdog_info = (struct hinic_mgmt_watchdog_info *)buf_out;
	watchdog_info->mgmt_msg_head.status = 0;
}

static void hinic_show_pcie_dfx_info(struct hinic_hwdev *hwdev,
				     void *buf_in, u16 in_size,
				     void *buf_out, u16 *out_size)
{
	struct hinic_pcie_dfx_ntc *notice_info =
		(struct hinic_pcie_dfx_ntc *)buf_in;
	struct hinic_pcie_dfx_info dfx_info;
	u16 size = 0;
	u16 cnt = 0;
	u32 num = 0;
	u32 i, j;
	int err;
	u32 *reg;

	if (in_size != sizeof(*notice_info)) {
		PMD_DRV_LOG(ERR, "Invalid pcie dfx notice info, length: %d, should be %zu.",
			    in_size, sizeof(*notice_info));
		return;
	}

	((struct hinic_pcie_dfx_ntc *)buf_out)->mgmt_msg_head.status = 0;
	*out_size = sizeof(*notice_info);
	memset(&dfx_info, 0, sizeof(dfx_info));
	num = (u32)(notice_info->len / 1024);
	PMD_DRV_LOG(INFO, "INFO LEN: %d", notice_info->len);
	PMD_DRV_LOG(INFO, "PCIE DFX:");
	dfx_info.host_id = 0;
	dfx_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	for (i = 0; i < num; i++) {
		dfx_info.offset = i * MAX_PCIE_DFX_BUF_SIZE;
		if (i == (num - 1))
			dfx_info.last = 1;
		size = sizeof(dfx_info);
		err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
					     HINIC_MGMT_CMD_PCIE_DFX_GET,
					     &dfx_info, sizeof(dfx_info),
					     &dfx_info, &size, 0);
		if (err || dfx_info.mgmt_msg_head.status || !size) {
			PMD_DRV_LOG(ERR, "Failed to get pcie dfx info, err: %d, status: 0x%x, out size: 0x%x",
				    err, dfx_info.mgmt_msg_head.status, size);
			return;
		}

		reg = (u32 *)dfx_info.data;
		for (j = 0; j < 256; j = j + 8) {
			PMD_DRV_LOG(ERR, "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x",
				    cnt, reg[j], reg[(u32)(j + 1)],
				    reg[(u32)(j + 2)], reg[(u32)(j + 3)],
				    reg[(u32)(j + 4)], reg[(u32)(j + 5)],
				    reg[(u32)(j + 6)], reg[(u32)(j + 7)]);
			cnt = cnt + 32;
		}
		memset(dfx_info.data, 0, MAX_PCIE_DFX_BUF_SIZE);
	}
}

static void
hinic_show_ffm_info(struct hinic_hwdev *hwdev, void *buf_in, u16 in_size)
{
	struct ffm_intr_info *intr;

	if (in_size != sizeof(struct ffm_intr_info)) {
		PMD_DRV_LOG(ERR, "Invalid input buffer len, length: %d, should be %zu.",
			    in_size, sizeof(struct ffm_intr_info));
		return;
	}

	if (hwdev->ffm_num < FFM_RECORD_NUM_MAX) {
		hwdev->ffm_num++;
		intr = (struct ffm_intr_info *)buf_in;
		PMD_DRV_LOG(WARNING, "node_id(%d),err_csr_addr(0x%x),err_csr_val(0x%x),err_level(0x%x),err_type(0x%x)",
			    intr->node_id,
			    intr->err_csr_addr,
			    intr->err_csr_value,
			    intr->err_level,
			    intr->err_type);
	}
}

void hinic_comm_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd,
				   void *buf_in, u16 in_size,
				   void *buf_out, u16 *out_size)
{
	struct hinic_cmd_fault_event *fault_event, *ret_fault_event;

	if (!hwdev)
		return;

	*out_size = 0;

	switch (cmd) {
	case HINIC_MGMT_CMD_FAULT_REPORT:
		if (in_size != sizeof(*fault_event)) {
			PMD_DRV_LOG(ERR, "Invalid fault event report, length: %d, should be %zu",
				    in_size, sizeof(*fault_event));
			return;
		}

		fault_event = (struct hinic_cmd_fault_event *)buf_in;
		fault_report_show(hwdev, &fault_event->event);

		if (hinic_func_type(hwdev) != TYPE_VF) {
			ret_fault_event =
				(struct hinic_cmd_fault_event *)buf_out;
			ret_fault_event->mgmt_msg_head.status = 0;
			*out_size = sizeof(*ret_fault_event);
		}
		break;

	case HINIC_MGMT_CMD_WATCHDOG_INFO:
		hinic_show_sw_watchdog_timeout_info(buf_in, in_size,
						    buf_out, out_size);
		break;

	case HINIC_MGMT_CMD_PCIE_DFX_NTC:
		hinic_show_pcie_dfx_info(hwdev, buf_in, in_size,
					 buf_out, out_size);
		break;

	case HINIC_MGMT_CMD_FFM_SET:
		hinic_show_ffm_info(hwdev, buf_in, in_size);
		break;

	default:
		break;
	}
}

static void
hinic_cable_status_event(u8 cmd, void *buf_in, __rte_unused u16 in_size,
			 void *buf_out, u16 *out_size)
{
	struct hinic_cable_plug_event *plug_event;
	struct hinic_link_err_event *link_err;

	if (cmd == HINIC_PORT_CMD_CABLE_PLUG_EVENT) {
		plug_event = (struct hinic_cable_plug_event *)buf_in;
		PMD_DRV_LOG(INFO, "Port module event: Cable %s",
			    plug_event->plugged ? "plugged" : "unplugged");

		*out_size = sizeof(*plug_event);
		plug_event = (struct hinic_cable_plug_event *)buf_out;
		plug_event->mgmt_msg_head.status = 0;
	} else if (cmd == HINIC_PORT_CMD_LINK_ERR_EVENT) {
		link_err = (struct hinic_link_err_event *)buf_in;
		if (link_err->err_type >= LINK_ERR_NUM) {
			PMD_DRV_LOG(ERR, "Link failed, Unknown type: 0x%x",
				    link_err->err_type);
		} else {
			PMD_DRV_LOG(INFO, "Link failed, type: 0x%x: %s",
				    link_err->err_type,
				    hinic_module_link_err[link_err->err_type]);
		}

		*out_size = sizeof(*link_err);
		link_err = (struct hinic_link_err_event *)buf_out;
		link_err->mgmt_msg_head.status = 0;
	}
}

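/*
 * Translate a firmware link status report into an rte_eth_link. When the
 * link is up, the negotiated speed/duplex/autoneg are read back from the
 * port; the speed table below is indexed with "% LINK_SPEED_MAX" so an
 * out-of-range speed code from firmware cannot overrun the array.
 */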
static int hinic_link_event_process(struct hinic_hwdev *hwdev,
				    struct rte_eth_dev *eth_dev, u8 status)
{
	uint32_t port_speed[LINK_SPEED_MAX] = {RTE_ETH_SPEED_NUM_10M,
				RTE_ETH_SPEED_NUM_100M, RTE_ETH_SPEED_NUM_1G,
				RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_NUM_25G,
				RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_NUM_100G};
	struct nic_port_info port_info;
	struct rte_eth_link link;
	int rc = HINIC_OK;

	if (!status) {
		link.link_status = RTE_ETH_LINK_DOWN;
		link.link_speed = 0;
		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
		link.link_autoneg = RTE_ETH_LINK_FIXED;
	} else {
		link.link_status = RTE_ETH_LINK_UP;

		memset(&port_info, 0, sizeof(port_info));
		rc = hinic_get_port_info(hwdev, &port_info);
		if (rc) {
			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
			link.link_autoneg = RTE_ETH_LINK_FIXED;
		} else {
			link.link_speed = port_speed[port_info.speed %
						     LINK_SPEED_MAX];
			link.link_duplex = port_info.duplex;
			link.link_autoneg = port_info.autoneg_state;
		}
	}
	(void)rte_eth_linkstatus_set(eth_dev, &link);

	return rc;
}

static void hinic_lsc_process(struct hinic_hwdev *hwdev,
			      struct rte_eth_dev *rte_dev, u8 status)
{
	int ret;

	ret = hinic_link_event_process(hwdev, rte_dev, status);
	/* check if link has changed, notify callback */
	if (ret == 0)
		rte_eth_dev_callback_process(rte_dev,
					     RTE_ETH_EVENT_INTR_LSC,
					     NULL);
}

void hinic_l2nic_async_event_handle(struct hinic_hwdev *hwdev,
				    void *param, u8 cmd,
				    void *buf_in, u16 in_size,
				    void *buf_out, u16 *out_size)
{
	struct hinic_port_link_status *in_link;
	struct rte_eth_dev *eth_dev;

	if (!hwdev)
		return;

	*out_size = 0;

	switch (cmd) {
	case HINIC_PORT_CMD_LINK_STATUS_REPORT:
		eth_dev = param;
		in_link = (struct hinic_port_link_status *)buf_in;
		PMD_DRV_LOG(INFO, "Link status event report, dev_name: %s, port_id: %d, link_status: %s",
			    eth_dev->data->name, eth_dev->data->port_id,
			    in_link->link ? "UP" : "DOWN");

		hinic_lsc_process(hwdev, eth_dev, in_link->link);
		break;

	case HINIC_PORT_CMD_CABLE_PLUG_EVENT:
	case HINIC_PORT_CMD_LINK_ERR_EVENT:
		hinic_cable_status_event(cmd, buf_in, in_size,
					 buf_out, out_size);
		break;

	case HINIC_PORT_CMD_MGMT_RESET:
		PMD_DRV_LOG(WARNING, "Mgmt is reset");
		break;

	default:
		PMD_DRV_LOG(ERR, "Unsupported event %d to process",
			    cmd);
		break;
	}
}

static void print_cable_info(struct hinic_link_info *info)
{
	char tmp_str[512] = {0};
	char tmp_vendor[17] = {0};
	const char *port_type = "Unknown port type";
	int i;

	if (info->cable_absent) {
		PMD_DRV_LOG(INFO, "Cable not present");
		return;
	}

	if (info->port_type < LINK_PORT_MAX_TYPE)
		port_type = __hw_to_char_port_type[info->port_type];
	else
		PMD_DRV_LOG(INFO, "Unknown port type: %u",
			    info->port_type);
	if (info->port_type == LINK_PORT_FIBRE) {
		if (info->port_sub_type == FIBRE_SUBTYPE_SR)
			port_type = "Fibre-SR";
		else if (info->port_sub_type == FIBRE_SUBTYPE_LR)
			port_type = "Fibre-LR";
	}

	for (i = sizeof(info->vendor_name) - 1; i >= 0; i--) {
		if (info->vendor_name[i] == ' ')
			info->vendor_name[i] = '\0';
		else
			break;
	}

	memcpy(tmp_vendor, info->vendor_name, sizeof(info->vendor_name));
	snprintf(tmp_str, sizeof(tmp_str),
		 "Vendor: %s, %s, %s, length: %um, max_speed: %uGbps",
		 tmp_vendor, info->sfp_type ? "SFP" : "QSFP", port_type,
		 info->cable_length, info->cable_max_speed);
	if (info->port_type != LINK_PORT_COPPER)
		snprintf(tmp_str + strlen(tmp_str),
			 sizeof(tmp_str) - strlen(tmp_str),
			 ", Temperature: %u", info->cable_temp);

	PMD_DRV_LOG(INFO, "Cable information: %s", tmp_str);
}

static void print_hi30_status(struct hinic_link_info *info)
{
	struct hi30_ffe_data *ffe_data;
	struct hi30_ctle_data *ctle_data;

	ffe_data = (struct hi30_ffe_data *)info->hi30_ffe;
	ctle_data = (struct hi30_ctle_data *)info->hi30_ctle;

	PMD_DRV_LOG(INFO, "TX_FFE: PRE1=%s%d; PRE2=%s%d; MAIN=%d; POST1=%s%d; POST2=%s%d",
		    (ffe_data->PRE1 & 0x10) ? "-" : "",
		    (int)(ffe_data->PRE1 & 0xf),
		    (ffe_data->PRE2 & 0x10) ? "-" : "",
		    (int)(ffe_data->PRE2 & 0xf),
		    (int)ffe_data->MAIN,
		    (ffe_data->POST1 & 0x10) ? "-" : "",
		    (int)(ffe_data->POST1 & 0xf),
		    (ffe_data->POST2 & 0x10) ? "-" : "",
		    (int)(ffe_data->POST2 & 0xf));
	PMD_DRV_LOG(INFO, "RX_CTLE: Gain1~3=%u %u %u; Boost1~3=%u %u %u; Zero1~3=%u %u %u; Squelch1~3=%u %u %u",
		    ctle_data->ctlebst[0], ctle_data->ctlebst[1],
		    ctle_data->ctlebst[2], ctle_data->ctlecmband[0],
		    ctle_data->ctlecmband[1], ctle_data->ctlecmband[2],
		    ctle_data->ctlermband[0], ctle_data->ctlermband[1],
		    ctle_data->ctlermband[2], ctle_data->ctleza[0],
		    ctle_data->ctleza[1], ctle_data->ctleza[2]);
}

static void print_link_info(struct hinic_link_info *info,
			    enum hilink_info_print_event type)
{
	const char *fec = "None";

	if (info->fec < HILINK_FEC_MAX_TYPE)
		fec = __hw_to_char_fec[info->fec];
	else
		PMD_DRV_LOG(INFO, "Unknown fec type: %u",
			    info->fec);

	if (type == HILINK_EVENT_LINK_UP || !info->an_state) {
		PMD_DRV_LOG(INFO, "Link information: speed %dGbps, %s, autoneg %s",
			    info->speed, fec, info->an_state ? "on" : "off");
	} else {
		PMD_DRV_LOG(INFO, "Link information: autoneg: %s",
			    info->an_state ? "on" : "off");
	}
}

static const char *hilink_info_report_type[HILINK_EVENT_MAX_TYPE] = {
	"", "link up", "link down", "cable plugged"
};

static void hinic_print_hilink_info(void *buf_in, u16 in_size,
				    void *buf_out, u16 *out_size)
{
	struct hinic_hilink_link_info *hilink_info =
		(struct hinic_hilink_link_info *)buf_in;
	struct hinic_link_info *info;
	enum hilink_info_print_event type;

	if (in_size != sizeof(*hilink_info)) {
		PMD_DRV_LOG(ERR, "Invalid hilink info message size %d, should be %zu",
			    in_size, sizeof(*hilink_info));
		return;
	}

	((struct hinic_hilink_link_info *)buf_out)->mgmt_msg_head.status = 0;
	*out_size = sizeof(*hilink_info);

	info = &hilink_info->info;
	type = hilink_info->info_type;

	if (type < HILINK_EVENT_LINK_UP || type >= HILINK_EVENT_MAX_TYPE) {
		PMD_DRV_LOG(INFO, "Invalid hilink info report, type: %d",
			    type);
		return;
	}

	PMD_DRV_LOG(INFO, "Hilink info report after %s",
		    hilink_info_report_type[type]);

	print_cable_info(info);

	print_link_info(info, type);

	print_hi30_status(info);

	if (type == HILINK_EVENT_LINK_UP)
		return;

	if (type == HILINK_EVENT_CABLE_PLUGGED) {
		PMD_DRV_LOG(INFO, "alos: %u, rx_los: %u",
			    info->alos, info->rx_los);
		return;
	}

	PMD_DRV_LOG(INFO, "PMA ctrl: %s, MAC tx %s, MAC rx %s, PMA debug info reg: 0x%x, PMA signal ok reg: 0x%x, RF/LF status reg: 0x%x",
		    info->pma_status ? "on" : "off",
		    info->mac_tx_en ? "enable" : "disable",
		    info->mac_rx_en ? "enable" : "disable", info->pma_dbg_info_reg,
		    info->pma_signal_ok_reg, info->rf_lf_status_reg);
	PMD_DRV_LOG(INFO, "alos: %u, rx_los: %u, PCS block counter reg: 0x%x, PCS link: 0x%x, MAC link: 0x%x, PCS_err_cnt: 0x%x",
		    info->alos, info->rx_los, info->pcs_err_blk_cnt_reg,
		    info->pcs_link_reg, info->mac_link_reg, info->pcs_err_cnt);
}

void hinic_hilink_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd,
				     void *buf_in, u16 in_size,
				     void *buf_out, u16 *out_size)
{
	if (!hwdev)
		return;

	*out_size = 0;

	switch (cmd) {
	case HINIC_HILINK_CMD_GET_LINK_INFO:
		hinic_print_hilink_info(buf_in, in_size, buf_out,
					out_size);
		break;

	default:
		PMD_DRV_LOG(ERR, "Unsupported event %d to process",
			    cmd);
		break;
	}
}