/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_bitmap.h>

#include "bcmfs_qp.h"
#include "bcmfs_logs.h"
#include "bcmfs_dev_msg.h"
#include "bcmfs_device.h"
#include "bcmfs_hw_defs.h"
#include "bcmfs_rm_common.h"

/* Ring version */
#define RING_VER_MAGIC			0x76303032
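/* Note: the magic value above is the ASCII byte sequence 'v', '0', '0', '2'. */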

/* Per-Ring register offsets */
#define RING_VER			0x000
#define RING_BD_START_ADDRESS_LSB	0x004
#define RING_BD_READ_PTR		0x008
#define RING_BD_WRITE_PTR		0x00c
#define RING_BD_READ_PTR_DDR_LS		0x010
#define RING_BD_READ_PTR_DDR_MS		0x014
#define RING_CMPL_START_ADDR_LSB	0x018
#define RING_CMPL_WRITE_PTR		0x01c
#define RING_NUM_REQ_RECV_LS		0x020
#define RING_NUM_REQ_RECV_MS		0x024
#define RING_NUM_REQ_TRANS_LS		0x028
#define RING_NUM_REQ_TRANS_MS		0x02c
#define RING_NUM_REQ_OUTSTAND		0x030
#define RING_CONTROL			0x034
#define RING_FLUSH_DONE			0x038
#define RING_MSI_ADDR_LS		0x03c
#define RING_MSI_ADDR_MS		0x040
#define RING_MSI_CONTROL		0x048
#define RING_BD_READ_PTR_DDR_CONTROL	0x04c
#define RING_MSI_DATA_VALUE		0x064
#define RING_BD_START_ADDRESS_MSB	0x078
#define RING_CMPL_START_ADDR_MSB	0x07c
#define RING_DOORBELL_BD_WRITE_COUNT	0x074

/* Register RING_BD_START_ADDR fields */
#define BD_LAST_UPDATE_HW_SHIFT		28
#define BD_LAST_UPDATE_HW_MASK		0x1
#define BD_START_ADDR_VALUE(pa)		\
	((uint32_t)((((uint64_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
#define BD_START_ADDR_DECODE(val)	\
	((uint64_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)

/* Register RING_CMPL_START_ADDR fields */
#define CMPL_START_ADDR_VALUE(pa)	\
	((uint32_t)((((uint64_t)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))

/* Register RING_CONTROL fields */
#define CONTROL_MASK_DISABLE_CONTROL	12
#define CONTROL_FLUSH_SHIFT		5
#define CONTROL_ACTIVE_SHIFT		4
#define CONTROL_RATE_ADAPT_MASK		0xf
#define CONTROL_RATE_DYNAMIC		0x0
#define CONTROL_RATE_FAST		0x8
#define CONTROL_RATE_MEDIUM		0x9
#define CONTROL_RATE_SLOW		0xa
#define CONTROL_RATE_IDLE		0xb

/* Register RING_FLUSH_DONE fields */
#define FLUSH_DONE_MASK			0x1

/* Register RING_MSI_CONTROL fields */
#define MSI_TIMER_VAL_SHIFT		16
#define MSI_TIMER_VAL_MASK		0xffff
#define MSI_ENABLE_SHIFT		15
#define MSI_ENABLE_MASK			0x1
#define MSI_COUNT_SHIFT			0
#define MSI_COUNT_MASK			0x3ff

/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT	16
#define BD_READ_PTR_DDR_TIMER_VAL_MASK	0xffff
#define BD_READ_PTR_DDR_ENABLE_SHIFT	15
#define BD_READ_PTR_DDR_ENABLE_MASK	0x1

/* General descriptor format */
#define DESC_TYPE_SHIFT			60
#define DESC_TYPE_MASK			0xf
#define DESC_PAYLOAD_SHIFT		0
#define DESC_PAYLOAD_MASK		0x0fffffffffffffff
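
/*
 * Every ring descriptor is a single 64-bit word: the descriptor type sits
 * in bits [63:60] and the type-specific payload in bits [59:0]. The
 * rm_build_desc() and FS_DESC_DEC() helpers used throughout this file pack
 * and unpack those fields, e.g. (illustration only):
 *
 *	d = rm_build_desc(SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
 *	type = FS_DESC_DEC(d, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
 */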

/* Null descriptor format */
#define NULL_TYPE			0
#define NULL_TOGGLE_SHIFT		59
#define NULL_TOGGLE_MASK		0x1

/* Header descriptor format */
#define HEADER_TYPE			1
#define HEADER_TOGGLE_SHIFT		59
#define HEADER_TOGGLE_MASK		0x1
#define HEADER_ENDPKT_SHIFT		57
#define HEADER_ENDPKT_MASK		0x1
#define HEADER_STARTPKT_SHIFT		56
#define HEADER_STARTPKT_MASK		0x1
#define HEADER_BDCOUNT_SHIFT		36
#define HEADER_BDCOUNT_MASK		0x1f
#define HEADER_BDCOUNT_MAX		HEADER_BDCOUNT_MASK
#define HEADER_FLAGS_SHIFT		16
#define HEADER_FLAGS_MASK		0xffff
#define HEADER_OPAQUE_SHIFT		0
#define HEADER_OPAQUE_MASK		0xffff

/* Source (SRC) descriptor format */
#define SRC_TYPE			2
#define SRC_LENGTH_SHIFT		44
#define SRC_LENGTH_MASK			0xffff
#define SRC_ADDR_SHIFT			0
#define SRC_ADDR_MASK			0x00000fffffffffff

/* Destination (DST) descriptor format */
#define DST_TYPE			3
#define DST_LENGTH_SHIFT		44
#define DST_LENGTH_MASK			0xffff
#define DST_ADDR_SHIFT			0
#define DST_ADDR_MASK			0x00000fffffffffff

/* Next pointer (NPTR) descriptor format */
#define NPTR_TYPE			5
#define NPTR_TOGGLE_SHIFT		59
#define NPTR_TOGGLE_MASK		0x1
#define NPTR_ADDR_SHIFT			0
#define NPTR_ADDR_MASK			0x00000fffffffffff

/* Mega source (MSRC) descriptor format */
#define MSRC_TYPE			6
#define MSRC_LENGTH_SHIFT		44
#define MSRC_LENGTH_MASK		0xffff
#define MSRC_ADDR_SHIFT			0
#define MSRC_ADDR_MASK			0x00000fffffffffff

/* Mega destination (MDST) descriptor format */
#define MDST_TYPE			7
#define MDST_LENGTH_SHIFT		44
#define MDST_LENGTH_MASK		0xffff
#define MDST_ADDR_SHIFT			0
#define MDST_ADDR_MASK			0x00000fffffffffff
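
/*
 * MSRC/MDST descriptors carry their length in 16-byte units, so they can
 * describe buffers up to MSRC_LENGTH_MASK * 16 bytes, whereas SRC/DST
 * lengths are in bytes and limited to SRC_LENGTH_MASK. bcmfs5_sanity_check()
 * and bcmfs5_enqueue_msg() below pick the variant based on whether a buffer
 * length is a multiple of 16.
 */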

static uint8_t
bcmfs5_is_next_table_desc(void *desc_ptr)
{
	uint64_t desc = rm_read_desc(desc_ptr);
	uint32_t type = FS_DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);

	return (type == NPTR_TYPE) ? true : false;
}

static uint64_t
bcmfs5_next_table_desc(uint64_t next_addr)
{
	return (rm_build_desc(NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
		rm_build_desc(next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK));
}

static uint64_t
bcmfs5_null_desc(void)
{
	return rm_build_desc(NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
}

static uint64_t
bcmfs5_header_desc(uint32_t startpkt, uint32_t endpkt,
		   uint32_t bdcount, uint32_t flags,
		   uint32_t opaque)
{
	return (rm_build_desc(HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
		rm_build_desc(startpkt, HEADER_STARTPKT_SHIFT,
			      HEADER_STARTPKT_MASK) |
		rm_build_desc(endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK) |
		rm_build_desc(bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK) |
		rm_build_desc(flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK) |
		rm_build_desc(opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK));
}

static int
bcmfs5_enqueue_desc(uint32_t nhpos, uint32_t nhcnt,
		    uint32_t reqid, uint64_t desc,
		    void **desc_ptr, void *start_desc,
		    void *end_desc)
{
	uint64_t d;
	uint32_t nhavail, _startpkt, _endpkt, _bdcount;
	int is_nxt_page = 0;

	/*
	 * Each request (packet) starts with a HEADER descriptor followed
	 * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
	 * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
	 * following a HEADER descriptor is given by the BDCOUNT field of
	 * that HEADER descriptor. The maximum value of BDCOUNT is 31, so
	 * at most 31 non-HEADER descriptors can follow one HEADER
	 * descriptor.
	 *
	 * In general use, the number of non-HEADER descriptors can easily
	 * exceed 31. To handle this, the HEADER descriptor carries packet
	 * (request) extension bits: STARTPKT and ENDPKT.
	 *
	 * With packet extension, the first HEADER descriptor of a request
	 * (packet) has STARTPKT=1 and ENDPKT=0, intermediate HEADER
	 * descriptors have STARTPKT=0 and ENDPKT=0, and the last HEADER
	 * descriptor has STARTPKT=0 and ENDPKT=1.
	 */
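
	/*
	 * Worked example (derived from the logic below): a request with
	 * nhcnt = 70 non-HEADER descriptors is emitted as three chunks:
	 *
	 *	HEADER(STARTPKT=1, ENDPKT=0, BDCOUNT=31) + 31 descriptors
	 *	HEADER(STARTPKT=0, ENDPKT=0, BDCOUNT=31) + 31 descriptors
	 *	HEADER(STARTPKT=0, ENDPKT=1, BDCOUNT=8)  +  8 descriptors
	 *
	 * A new HEADER is inserted whenever nhpos is a multiple of
	 * HEADER_BDCOUNT_MAX and non-HEADER descriptors remain.
	 */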

	if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
		/* Prepare the header descriptor */
		nhavail = (nhcnt - nhpos);
		_startpkt = (nhpos == 0) ? 0x1 : 0x0;
		_endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
		_bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
				nhavail : HEADER_BDCOUNT_MAX;
		d = bcmfs5_header_desc(_startpkt, _endpkt,
				       _bdcount, 0x0, reqid);

		/* Write header descriptor */
		rm_write_desc(*desc_ptr, d);

		/* Point to next descriptor */
		*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
		if (*desc_ptr == end_desc)
			*desc_ptr = start_desc;

		/* Skip next pointer descriptors */
		while (bcmfs5_is_next_table_desc(*desc_ptr)) {
			is_nxt_page = 1;
			*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
			if (*desc_ptr == end_desc)
				*desc_ptr = start_desc;
		}
	}

	/* Write desired descriptor */
	rm_write_desc(*desc_ptr, desc);

	/* Point to next descriptor */
	*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
	if (*desc_ptr == end_desc)
		*desc_ptr = start_desc;

	/* Skip next pointer descriptors */
	while (bcmfs5_is_next_table_desc(*desc_ptr)) {
		is_nxt_page = 1;
		*desc_ptr = (uint8_t *)*desc_ptr + sizeof(desc);
		if (*desc_ptr == end_desc)
			*desc_ptr = start_desc;
	}

	return is_nxt_page;
}

static uint64_t
bcmfs5_src_desc(uint64_t addr, unsigned int len)
{
	return (rm_build_desc(SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
		rm_build_desc(len, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK) |
		rm_build_desc(addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK));
}

static uint64_t
bcmfs5_msrc_desc(uint64_t addr, unsigned int len_div_16)
{
	return (rm_build_desc(MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
		rm_build_desc(len_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK) |
		rm_build_desc(addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK));
}

static uint64_t
bcmfs5_dst_desc(uint64_t addr, unsigned int len)
{
	return (rm_build_desc(DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
		rm_build_desc(len, DST_LENGTH_SHIFT, DST_LENGTH_MASK) |
		rm_build_desc(addr, DST_ADDR_SHIFT, DST_ADDR_MASK));
}

static uint64_t
bcmfs5_mdst_desc(uint64_t addr, unsigned int len_div_16)
{
	return (rm_build_desc(MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK) |
		rm_build_desc(len_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK) |
		rm_build_desc(addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK));
}

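/*
 * Validate message buffer lengths against what a single descriptor can
 * encode: lengths that are not a multiple of 16 must fit in a SRC/DST
 * descriptor (at most SRC_LENGTH_MASK / DST_LENGTH_MASK bytes), while
 * 16-byte-multiple lengths go through MSRC/MDST descriptors and may be
 * up to MSRC_LENGTH_MASK * 16 / MDST_LENGTH_MASK * 16 bytes.
 */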
static bool
bcmfs5_sanity_check(struct bcmfs_qp_message *msg)
{
	unsigned int i = 0;

	if (msg == NULL)
		return false;

	for (i = 0; i < msg->srcs_count; i++) {
		if (msg->srcs_len[i] & 0xf) {
			if (msg->srcs_len[i] > SRC_LENGTH_MASK)
				return false;
		} else {
			if (msg->srcs_len[i] > (MSRC_LENGTH_MASK * 16))
				return false;
		}
	}
	for (i = 0; i < msg->dsts_count; i++) {
		if (msg->dsts_len[i] & 0xf) {
			if (msg->dsts_len[i] > DST_LENGTH_MASK)
				return false;
		} else {
			if (msg->dsts_len[i] > (MDST_LENGTH_MASK * 16))
				return false;
		}
	}

	return true;
}

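/*
 * Translate one bcmfs_qp_message into ring descriptors: every source and
 * destination buffer becomes a SRC/MSRC or DST/MDST descriptor, with
 * bcmfs5_enqueue_desc() inserting HEADER descriptors and skipping
 * next-table slots as needed. Returns the next free descriptor slot on
 * success, or NULL when the supplied ring pointers are invalid.
 */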
static void *
bcmfs5_enqueue_msg(struct bcmfs_queue *txq,
		   struct bcmfs_qp_message *msg,
		   uint32_t reqid, void *desc_ptr,
		   void *start_desc, void *end_desc)
{
	uint64_t d;
	unsigned int src, dst;
	uint32_t nhpos = 0;
	int nxt_page = 0;
	uint32_t nhcnt = msg->srcs_count + msg->dsts_count;

	if (desc_ptr == NULL || start_desc == NULL || end_desc == NULL)
		return NULL;

	if (desc_ptr < start_desc || end_desc <= desc_ptr)
		return NULL;

	for (src = 0; src < msg->srcs_count; src++) {
		if (msg->srcs_len[src] & 0xf)
			d = bcmfs5_src_desc(msg->srcs_addr[src],
					    msg->srcs_len[src]);
		else
			d = bcmfs5_msrc_desc(msg->srcs_addr[src],
					     msg->srcs_len[src] / 16);

		nxt_page = bcmfs5_enqueue_desc(nhpos, nhcnt, reqid,
					       d, &desc_ptr, start_desc,
					       end_desc);
		if (nxt_page)
			txq->descs_inflight++;
		nhpos++;
	}

	for (dst = 0; dst < msg->dsts_count; dst++) {
		if (msg->dsts_len[dst] & 0xf)
			d = bcmfs5_dst_desc(msg->dsts_addr[dst],
					    msg->dsts_len[dst]);
		else
			d = bcmfs5_mdst_desc(msg->dsts_addr[dst],
					     msg->dsts_len[dst] / 16);

		nxt_page = bcmfs5_enqueue_desc(nhpos, nhcnt, reqid,
					       d, &desc_ptr, start_desc,
					       end_desc);
		if (nxt_page)
			txq->descs_inflight++;
		nhpos++;
	}

	txq->descs_inflight += nhcnt + 1;

	return desc_ptr;
}

static int
bcmfs5_enqueue_single_request_qp(struct bcmfs_qp *qp, void *op)
{
	void *next;
	int reqid;
	int ret = 0;
	uint64_t slab = 0;
	uint32_t pos = 0;
	uint8_t exit_cleanup = false;
	struct bcmfs_queue *txq = &qp->tx_q;
	struct bcmfs_qp_message *msg = (struct bcmfs_qp_message *)op;

	/* Do sanity check on message */
	if (!bcmfs5_sanity_check(msg)) {
		BCMFS_DP_LOG(ERR, "Invalid msg on queue %d", qp->qpair_id);
		return -EIO;
	}

	/* Scan from the beginning */
	__rte_bitmap_scan_init(qp->ctx_bmp);
	/* Scan the bitmap for a free request id */
	ret = rte_bitmap_scan(qp->ctx_bmp, &pos, &slab);
	if (ret == 0) {
		BCMFS_DP_LOG(ERR, "BD memory exhausted");
		return -ERANGE;
	}

	reqid = pos + rte_ctz64(slab);
	rte_bitmap_clear(qp->ctx_bmp, reqid);
	qp->ctx_pool[reqid] = (unsigned long)msg;

	/* Write descriptors to ring */
	next = bcmfs5_enqueue_msg(txq, msg, reqid,
				  (uint8_t *)txq->base_addr + txq->tx_write_ptr,
				  txq->base_addr,
				  (uint8_t *)txq->base_addr + txq->queue_size);
	if (next == NULL) {
		BCMFS_DP_LOG(ERR, "Enqueue for desc failed on queue %d",
			     qp->qpair_id);
		ret = -EINVAL;
		exit_cleanup = true;
		goto exit;
	}

	/* Save ring BD write offset */
	txq->tx_write_ptr = (uint32_t)((uint8_t *)next -
				       (uint8_t *)txq->base_addr);

	qp->nb_pending_requests++;

	return 0;

exit:
	/* Cleanup if we failed */
	if (exit_cleanup)
		rte_bitmap_set(qp->ctx_bmp, reqid);

	return ret;
}

static void bcmfs5_write_doorbell(struct bcmfs_qp *qp)
{
	struct bcmfs_queue *txq = &qp->tx_q;

	/* Ensure all descriptor writes are visible before ringing the doorbell */
	rte_wmb();

	FS_MMIO_WRITE32(txq->descs_inflight,
			(uint8_t *)qp->ioreg + RING_DOORBELL_BD_WRITE_COUNT);

	/* Reset the count */
	txq->descs_inflight = 0;
}

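/*
 * Harvest completions: walk the completion ring from the software read
 * offset up to the hardware write pointer, map each completion descriptor
 * back to its request id and saved context, and return the contexts in
 * ops[]. At most 'budget' (clamped to the number of pending requests)
 * completions are processed per call.
 */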
static uint16_t
bcmfs5_dequeue_qp(struct bcmfs_qp *qp, void **ops, uint16_t budget)
{
	int err;
	uint16_t reqid;
	uint64_t desc;
	uint16_t count = 0;
	unsigned long context = 0;
	struct bcmfs_queue *hwq = &qp->cmpl_q;
	uint32_t cmpl_read_offset, cmpl_write_offset;

	/*
	 * Clamp the budget to the number of pending requests so that we
	 * never process more completions than we have outstanding.
	 */
	if (budget > qp->nb_pending_requests)
		budget = qp->nb_pending_requests;

	/*
	 * Get the current completion read and write offsets.
	 *
	 * Note: we must read the completion write pointer at least once
	 * after an MSI interrupt because the HW maintains an internal MSI
	 * status that allows the next MSI interrupt only after the
	 * completion write pointer has been read.
	 */
	cmpl_write_offset = FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_CMPL_WRITE_PTR);
	cmpl_write_offset *= FS_RING_DESC_SIZE;
	cmpl_read_offset = hwq->cmpl_read_ptr;

	/* Read the ring cmpl write ptr before the cmpl read offset */
	rte_io_rmb();

	/* For each completed request notify mailbox clients */
	reqid = 0;
	while ((cmpl_read_offset != cmpl_write_offset) && (budget > 0)) {
		/* Dequeue next completion descriptor */
		desc = *((uint64_t *)((uint8_t *)hwq->base_addr +
				      cmpl_read_offset));

		/* Next read offset */
		cmpl_read_offset += FS_RING_DESC_SIZE;
		if (cmpl_read_offset == FS_RING_CMPL_SIZE)
			cmpl_read_offset = 0;

		/* Decode error from completion descriptor */
		err = rm_cmpl_desc_to_error(desc);
		if (err < 0)
			BCMFS_DP_LOG(ERR, "error desc rcvd");

		/* Determine request id from completion descriptor */
		reqid = rm_cmpl_desc_to_reqid(desc);

		/* Retrieve context */
		context = qp->ctx_pool[reqid];
		if (context == 0)
			BCMFS_DP_LOG(ERR, "HW error detected");

		/* Release reqid for recycling */
		qp->ctx_pool[reqid] = 0;
		rte_bitmap_set(qp->ctx_bmp, reqid);

		*ops = (void *)context;

		/* Increment number of completions processed */
		count++;
		budget--;
		ops++;
	}

	hwq->cmpl_read_ptr = cmpl_read_offset;

	qp->nb_pending_requests -= count;

	return count;
}

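/*
 * Ring bring-up sequence: deactivate the ring, chain the BD memory pages
 * with next-table (NPTR) descriptors, flush and un-flush the ring so the
 * HW completion write pointer is reset, program the BD and completion
 * base addresses, clear the statistics counters by reading them, set up a
 * dummy MSI target, and finally activate the ring.
 */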
static int
bcmfs5_start_qp(struct bcmfs_qp *qp)
{
	uint32_t val, off;
	uint64_t d, next_addr, msi;
	int timeout;
	uint32_t bd_high, bd_low, cmpl_high, cmpl_low;
	struct bcmfs_queue *tx_queue = &qp->tx_q;
	struct bcmfs_queue *cmpl_queue = &qp->cmpl_q;

	/* Disable/deactivate ring */
	FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);

	/* Configure next table pointer entries in BD memory */
	for (off = 0; off < tx_queue->queue_size; off += FS_RING_DESC_SIZE) {
		next_addr = off + FS_RING_DESC_SIZE;
		if (next_addr == tx_queue->queue_size)
			next_addr = 0;
		next_addr += (uint64_t)tx_queue->base_phys_addr;
		if (FS_RING_BD_ALIGN_CHECK(next_addr))
			d = bcmfs5_next_table_desc(next_addr);
		else
			d = bcmfs5_null_desc();
		rm_write_desc((uint8_t *)tx_queue->base_addr + off, d);
	}

	/*
	 * If the user interrupts the test mid-run (Ctrl+C), all subsequent
	 * test runs will fail because the SW cmpl_read_offset and the HW
	 * cmpl_write_offset will point at different completion BDs. To
	 * handle this, flush all the rings at startup instead of in the
	 * shutdown function. A ring flush resets the HW cmpl_write_offset.
	 */

	/* Set ring flush state */
	timeout = 1000;
	FS_MMIO_WRITE32(BIT(CONTROL_FLUSH_SHIFT),
			(uint8_t *)qp->ioreg + RING_CONTROL);
	do {
		/*
		 * If a previous test was stopped mid-run, SW has to read
		 * cmpl_write_offset, otherwise the DME/AE will not come
		 * out of the flush state.
		 */
		FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_CMPL_WRITE_PTR);

		if (FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &
		    FLUSH_DONE_MASK)
			break;
		usleep(1000);
	} while (--timeout);
	if (!timeout) {
		BCMFS_DP_LOG(ERR, "Ring flush timeout hw-queue %d",
			     qp->qpair_id);
	}

	/* Clear ring flush state */
	timeout = 1000;
	FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
	do {
		if (!(FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_FLUSH_DONE) &
		      FLUSH_DONE_MASK))
			break;
		usleep(1000);
	} while (--timeout);
	if (!timeout) {
		BCMFS_DP_LOG(ERR, "Ring clear flush timeout hw-queue %d",
			     qp->qpair_id);
	}

	/* Program BD start address */
	bd_low = lower_32_bits(tx_queue->base_phys_addr);
	bd_high = upper_32_bits(tx_queue->base_phys_addr);
	FS_MMIO_WRITE32(bd_low, (uint8_t *)qp->ioreg +
			RING_BD_START_ADDRESS_LSB);
	FS_MMIO_WRITE32(bd_high, (uint8_t *)qp->ioreg +
			RING_BD_START_ADDRESS_MSB);

	tx_queue->tx_write_ptr = 0;

	for (off = 0; off < FS_RING_CMPL_SIZE; off += FS_RING_DESC_SIZE)
		rm_write_desc((uint8_t *)cmpl_queue->base_addr + off, 0x0);

	/* Completion read pointer will be same as HW write pointer */
	cmpl_queue->cmpl_read_ptr = FS_MMIO_READ32((uint8_t *)qp->ioreg +
						   RING_CMPL_WRITE_PTR);
	/* Program completion start address */
	cmpl_low = lower_32_bits(cmpl_queue->base_phys_addr);
	cmpl_high = upper_32_bits(cmpl_queue->base_phys_addr);
	FS_MMIO_WRITE32(cmpl_low, (uint8_t *)qp->ioreg +
			RING_CMPL_START_ADDR_LSB);
	FS_MMIO_WRITE32(cmpl_high, (uint8_t *)qp->ioreg +
			RING_CMPL_START_ADDR_MSB);

	cmpl_queue->cmpl_read_ptr *= FS_RING_DESC_SIZE;

	/* Read ring Tx, Rx, and Outstanding counts to clear */
	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_LS);
	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_RECV_MS);
	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_LS);
	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_TRANS_MS);
	FS_MMIO_READ32((uint8_t *)qp->ioreg + RING_NUM_REQ_OUTSTAND);

	/* Configure per-Ring MSI registers with a dummy location */
	msi = cmpl_queue->base_phys_addr + (1024 * FS_RING_DESC_SIZE);
	FS_MMIO_WRITE32((msi & 0xFFFFFFFF),
			(uint8_t *)qp->ioreg + RING_MSI_ADDR_LS);
	FS_MMIO_WRITE32(((msi >> 32) & 0xFFFFFFFF),
			(uint8_t *)qp->ioreg + RING_MSI_ADDR_MS);
	FS_MMIO_WRITE32(qp->qpair_id, (uint8_t *)qp->ioreg +
			RING_MSI_DATA_VALUE);

	/* Configure RING_MSI_CONTROL */
	val = 0;
	val |= (MSI_TIMER_VAL_MASK << MSI_TIMER_VAL_SHIFT);
	val |= BIT(MSI_ENABLE_SHIFT);
	val |= (0x1 & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
	FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_MSI_CONTROL);

	/* Enable/activate ring */
	val = BIT(CONTROL_ACTIVE_SHIFT);
	FS_MMIO_WRITE32(val, (uint8_t *)qp->ioreg + RING_CONTROL);

	return 0;
}

static void
bcmfs5_shutdown_qp(struct bcmfs_qp *qp)
{
	/* Disable/deactivate ring */
	FS_MMIO_WRITE32(0x0, (uint8_t *)qp->ioreg + RING_CONTROL);
}

struct bcmfs_hw_queue_pair_ops bcmfs5_qp_ops = {
	.name = "fs5",
	.enq_one_req = bcmfs5_enqueue_single_request_qp,
	.ring_db = bcmfs5_write_doorbell,
	.dequeue = bcmfs5_dequeue_qp,
	.startq = bcmfs5_start_qp,
	.stopq = bcmfs5_shutdown_qp,
};

RTE_INIT(bcmfs5_register_qp_ops)
{
	bcmfs_hw_queue_pair_register_ops(&bcmfs5_qp_ops);
}