/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

#define TIME_SEC_IN_MS 1000

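/* Poll a register until all bits in mask read as clear. Returns 0 once the
 * bits clear, or -ETIME if they are still set after ROC_ML_TIMEOUT_MS, as
 * measured with the TSC cycle counter.
 */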
static int
roc_ml_reg_wait_to_clear(struct roc_ml *roc_ml, uint64_t offset, uint64_t mask)
{
	uint64_t start_cycle;
	uint64_t wait_cycles;
	uint64_t reg_val;

	wait_cycles = (ROC_ML_TIMEOUT_MS * plt_tsc_hz()) / TIME_SEC_IN_MS;
	start_cycle = plt_tsc_cycles();
	do {
		reg_val = roc_ml_reg_read64(roc_ml, offset);

		if (!(reg_val & mask))
			return 0;
	} while (plt_tsc_cycles() - start_cycle < wait_cycles);

	return -ETIME;
}

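/* 64-bit and 32-bit MMIO accessors. Offsets are relative to the ML block
 * base (ml_reg_addr) set up by roc_ml_dev_init() or roc_ml_blk_init().
 */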
uint64_t
roc_ml_reg_read64(struct roc_ml *roc_ml, uint64_t offset)
{
	struct ml *ml = roc_ml_to_ml_priv(roc_ml);

	return plt_read64(PLT_PTR_ADD(ml->ml_reg_addr, offset));
}

void
roc_ml_reg_write64(struct roc_ml *roc_ml, uint64_t val, uint64_t offset)
{
	struct ml *ml = roc_ml_to_ml_priv(roc_ml);

	plt_write64(val, PLT_PTR_ADD(ml->ml_reg_addr, offset));
}

uint32_t
roc_ml_reg_read32(struct roc_ml *roc_ml, uint64_t offset)
{
	struct ml *ml = roc_ml_to_ml_priv(roc_ml);

	return plt_read32(PLT_PTR_ADD(ml->ml_reg_addr, offset));
}

void
roc_ml_reg_write32(struct roc_ml *roc_ml, uint32_t val, uint64_t offset)
{
	struct ml *ml = roc_ml_to_ml_priv(roc_ml);

	plt_write32(val, PLT_PTR_ADD(ml->ml_reg_addr, offset));
}

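/* Cache the base field of ML_MLR_BASE in the private structure so that the
 * address translation helpers below can skip the register read. Offsets
 * other than ML_MLR_BASE are ignored.
 */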
void
roc_ml_reg_save(struct roc_ml *roc_ml, uint64_t offset)
{
	struct ml *ml = roc_ml_to_ml_priv(roc_ml);

	if (offset == ML_MLR_BASE) {
		ml->ml_mlr_base =
			FIELD_GET(ROC_ML_MLR_BASE_BASE, roc_ml_reg_read64(roc_ml, offset));
		ml->ml_mlr_base_saved = true;
	}
}

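/* Translate a buffer address between the AP and MLIP views. MLIP reaches AP
 * memory through an AXI window starting at ML_AXI_START_ADDR, backed by the
 * physical base programmed into ML_MLR_BASE, so the conversion adds or
 * subtracts the difference of the two bases.
 */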
void *
roc_ml_addr_ap2mlip(struct roc_ml *roc_ml, void *addr)
{
	struct ml *ml = roc_ml_to_ml_priv(roc_ml);
	uint64_t ml_mlr_base;

	ml_mlr_base = (ml->ml_mlr_base_saved) ? ml->ml_mlr_base :
						FIELD_GET(ROC_ML_MLR_BASE_BASE,
							  roc_ml_reg_read64(roc_ml, ML_MLR_BASE));
	return PLT_PTR_ADD(addr, ML_AXI_START_ADDR - ml_mlr_base);
}

void *
roc_ml_addr_mlip2ap(struct roc_ml *roc_ml, void *addr)
{
	struct ml *ml = roc_ml_to_ml_priv(roc_ml);
	uint64_t ml_mlr_base;

	ml_mlr_base = (ml->ml_mlr_base_saved) ? ml->ml_mlr_base :
						FIELD_GET(ROC_ML_MLR_BASE_BASE,
							  roc_ml_reg_read64(roc_ml, ML_MLR_BASE));
	return PLT_PTR_ADD(addr, ml_mlr_base - ML_AXI_START_ADDR);
}

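/* Convert between a BAR0 physical address and an offset within the ML
 * block. On cn10ka the block starts at the BAR base itself; on other
 * models the MLAB block sits ML_MLAB_BLK_OFFSET bytes into the BAR.
 */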
uint64_t
roc_ml_addr_pa_to_offset(struct roc_ml *roc_ml, uint64_t phys_addr)
{
	struct ml *ml = roc_ml_to_ml_priv(roc_ml);

	if (roc_model_is_cn10ka())
		return phys_addr - ml->pci_dev->mem_resource[0].phys_addr;
	else
		return phys_addr - ml->pci_dev->mem_resource[0].phys_addr - ML_MLAB_BLK_OFFSET;
}

uint64_t
roc_ml_addr_offset_to_pa(struct roc_ml *roc_ml, uint64_t offset)
{
	struct ml *ml = roc_ml_to_ml_priv(roc_ml);

	if (roc_model_is_cn10ka())
		return ml->pci_dev->mem_resource[0].phys_addr + offset;
	else
		return ml->pci_dev->mem_resource[0].phys_addr + ML_MLAB_BLK_OFFSET + offset;
}

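/* Post a job through the firmware scratch interface: publish the MLIP view
 * of the job pointer in ML_SCRATCH_WORK_PTR, then set the valid bit in
 * ML_SCRATCH_FW_CTRL. This variant takes no lock and does not check queue
 * state; see roc_ml_scratch_enqueue() for the guarded version.
 */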
void
roc_ml_scratch_write_job(struct roc_ml *roc_ml, void *work_ptr)
{
	union ml_scratch_work_ptr_s reg_work_ptr;
	union ml_scratch_fw_ctrl_s reg_fw_ctrl;

	reg_work_ptr.u64 = 0;
	reg_work_ptr.s.work_ptr = PLT_U64_CAST(roc_ml_addr_ap2mlip(roc_ml, work_ptr));

	reg_fw_ctrl.u64 = 0;
	reg_fw_ctrl.s.valid = 1;

	roc_ml_reg_write64(roc_ml, reg_work_ptr.u64, ML_SCRATCH_WORK_PTR);
	roc_ml_reg_write64(roc_ml, reg_fw_ctrl.u64, ML_SCRATCH_FW_CTRL);
}

bool
roc_ml_scratch_is_valid_bit_set(struct roc_ml *roc_ml)
{
	union ml_scratch_fw_ctrl_s reg_fw_ctrl;

	reg_fw_ctrl.u64 = roc_ml_reg_read64(roc_ml, ML_SCRATCH_FW_CTRL);

	if (reg_fw_ctrl.s.valid == 1)
		return true;

	return false;
}

bool
roc_ml_scratch_is_done_bit_set(struct roc_ml *roc_ml)
{
	union ml_scratch_fw_ctrl_s reg_fw_ctrl;

	reg_fw_ctrl.u64 = roc_ml_reg_read64(roc_ml, ML_SCRATCH_FW_CTRL);

	if (reg_fw_ctrl.s.done == 1)
		return true;

	return false;
}

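/* Guarded variant of roc_ml_scratch_write_job(). The scratch interface
 * holds one job at a time: a new job is posted only when the valid and
 * done flags agree, i.e. no job is in flight. The MLIP clock is forced on
 * and DMA stall-on-idle is disabled before the job is written. Returns
 * true if the job was enqueued.
 *
 * A minimal caller-side polling sketch (job preparation elided):
 *
 *	while (!roc_ml_scratch_enqueue(roc_ml, &job))
 *		;
 *	while (!roc_ml_scratch_dequeue(roc_ml, &job))
 *		;
 */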
bool
roc_ml_scratch_enqueue(struct roc_ml *roc_ml, void *work_ptr)
{
	union ml_scratch_work_ptr_s reg_work_ptr;
	union ml_scratch_fw_ctrl_s reg_fw_ctrl;
	bool ret = false;

	reg_work_ptr.u64 = 0;
	reg_work_ptr.s.work_ptr = PLT_U64_CAST(roc_ml_addr_ap2mlip(roc_ml, work_ptr));

	reg_fw_ctrl.u64 = 0;
	reg_fw_ctrl.s.valid = 1;

	if (plt_spinlock_trylock(&roc_ml->sp_spinlock) != 0) {
		bool valid = roc_ml_scratch_is_valid_bit_set(roc_ml);
		bool done = roc_ml_scratch_is_done_bit_set(roc_ml);

		if (valid == done) {
			roc_ml_clk_force_on(roc_ml);
			roc_ml_dma_stall_off(roc_ml);

			roc_ml_reg_write64(roc_ml, reg_work_ptr.u64, ML_SCRATCH_WORK_PTR);
			roc_ml_reg_write64(roc_ml, reg_fw_ctrl.u64, ML_SCRATCH_FW_CTRL);

			ret = true;
		}
		plt_spinlock_unlock(&roc_ml->sp_spinlock);
	}

	return ret;
}

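/* Dequeue a completed scratch job. Succeeds only when both the valid and
 * done bits are set and ML_SCRATCH_WORK_PTR still refers to work_ptr, in
 * which case DMA stall-on-idle is re-enabled, the forced clock is released
 * and both scratch registers are cleared. Returns true on success.
 */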
bool
roc_ml_scratch_dequeue(struct roc_ml *roc_ml, void *work_ptr)
{
	union ml_scratch_work_ptr_s reg_work_ptr;
	bool ret = false;

	if (plt_spinlock_trylock(&roc_ml->sp_spinlock) != 0) {
		bool valid = roc_ml_scratch_is_valid_bit_set(roc_ml);
		bool done = roc_ml_scratch_is_done_bit_set(roc_ml);

		if (valid && done) {
			reg_work_ptr.u64 = roc_ml_reg_read64(roc_ml, ML_SCRATCH_WORK_PTR);
			if (work_ptr ==
			    roc_ml_addr_mlip2ap(roc_ml, PLT_PTR_CAST(reg_work_ptr.u64))) {
				roc_ml_dma_stall_on(roc_ml);
				roc_ml_clk_force_off(roc_ml);

				roc_ml_reg_write64(roc_ml, 0, ML_SCRATCH_WORK_PTR);
				roc_ml_reg_write64(roc_ml, 0, ML_SCRATCH_FW_CTRL);
				ret = true;
			}
		}
		plt_spinlock_unlock(&roc_ml->sp_spinlock);
	}

	return ret;
}

void
roc_ml_scratch_queue_reset(struct roc_ml *roc_ml)
{
	if (plt_spinlock_trylock(&roc_ml->sp_spinlock) != 0) {
		roc_ml_dma_stall_on(roc_ml);
		roc_ml_clk_force_off(roc_ml);
		roc_ml_reg_write64(roc_ml, 0, ML_SCRATCH_WORK_PTR);
		roc_ml_reg_write64(roc_ml, 0, ML_SCRATCH_FW_CTRL);
		plt_spinlock_unlock(&roc_ml->sp_spinlock);
	}
}

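/* Enqueue a two-word job command to the hardware job command queue when
 * ML_JCMDQ_STATUS reports a free slot. The _lf (lock-free) variant leaves
 * serialization to the caller; the _sl variant below wraps the identical
 * sequence in fp_spinlock.
 */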
bool
roc_ml_jcmdq_enqueue_lf(struct roc_ml *roc_ml, struct ml_job_cmd_s *job_cmd)
{
	bool ret = false;

	if (FIELD_GET(ROC_ML_JCMDQ_STATUS_AVAIL_COUNT,
		      roc_ml_reg_read64(roc_ml, ML_JCMDQ_STATUS)) != 0) {
		roc_ml_reg_write64(roc_ml, job_cmd->w0.u64, ML_JCMDQ_IN(0));
		roc_ml_reg_write64(roc_ml, job_cmd->w1.u64, ML_JCMDQ_IN(1));
		ret = true;
	}

	return ret;
}

bool
roc_ml_jcmdq_enqueue_sl(struct roc_ml *roc_ml, struct ml_job_cmd_s *job_cmd)
{
	bool ret = false;

	if (plt_spinlock_trylock(&roc_ml->fp_spinlock) != 0) {
		if (FIELD_GET(ROC_ML_JCMDQ_STATUS_AVAIL_COUNT,
			      roc_ml_reg_read64(roc_ml, ML_JCMDQ_STATUS)) != 0) {
			roc_ml_reg_write64(roc_ml, job_cmd->w0.u64, ML_JCMDQ_IN(0));
			roc_ml_reg_write64(roc_ml, job_cmd->w1.u64, ML_JCMDQ_IN(1));
			ret = true;
		}
		plt_spinlock_unlock(&roc_ml->fp_spinlock);
	}

	return ret;
}

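/* Clock-force and DMA stall-on-idle toggles. The scratch enqueue path
 * forces the MLIP clock on and releases the stall while a job is in
 * flight; the dequeue and queue-reset paths restore the idle settings.
 */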
void
roc_ml_clk_force_on(struct roc_ml *roc_ml)
{
	uint64_t reg_val = 0;

	reg_val = roc_ml_reg_read64(roc_ml, ML_CFG);
	reg_val |= ROC_ML_CFG_MLIP_CLK_FORCE;
	roc_ml_reg_write64(roc_ml, reg_val, ML_CFG);
}

void
roc_ml_clk_force_off(struct roc_ml *roc_ml)
{
	uint64_t reg_val = 0;

	roc_ml_reg_write64(roc_ml, 0, ML_SCRATCH_WORK_PTR);

	reg_val = roc_ml_reg_read64(roc_ml, ML_CFG);
	reg_val &= ~ROC_ML_CFG_MLIP_CLK_FORCE;
	roc_ml_reg_write64(roc_ml, reg_val, ML_CFG);
}

void
roc_ml_dma_stall_on(struct roc_ml *roc_ml)
{
	uint64_t reg_val = 0;

	reg_val = roc_ml_reg_read64(roc_ml, ML_JOB_MGR_CTRL);
	reg_val |= ROC_ML_JOB_MGR_CTRL_STALL_ON_IDLE;
	roc_ml_reg_write64(roc_ml, reg_val, ML_JOB_MGR_CTRL);
}

void
roc_ml_dma_stall_off(struct roc_ml *roc_ml)
{
	uint64_t reg_val = 0;

	reg_val = roc_ml_reg_read64(roc_ml, ML_JOB_MGR_CTRL);
	reg_val &= ~ROC_ML_JOB_MGR_CTRL_STALL_ON_IDLE;
	roc_ml_reg_write64(roc_ml, reg_val, ML_JOB_MGR_CTRL);
}

bool
roc_ml_mlip_is_enabled(struct roc_ml *roc_ml)
{
	uint64_t reg_val;

	reg_val = roc_ml_reg_read64(roc_ml, ML_CFG);

	if ((reg_val & ROC_ML_CFG_MLIP_ENA) != 0)
		return true;

	return false;
}

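/* Reset the MLIP block. When force is set, ML_CFG[ENA] and
 * ML_CFG[MLIP_ENA] are cleared and ML_MLR_BASE is zeroed up front, without
 * draining outstanding work. The model-specific quiesce sequence then
 * runs: on cn10ka, fence and drain the AXI bridges and the job manager
 * before disabling the block; on cnf10kb, walk any outstanding jobs
 * through the fetch/run/completion stages, then flush and release the
 * bridges.
 */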
int
roc_ml_mlip_reset(struct roc_ml *roc_ml, bool force)
{
	uint64_t reg_val;

	/* Force reset */
	if (force) {
		/* Set ML(0)_CFG[ENA] = 0. */
		reg_val = roc_ml_reg_read64(roc_ml, ML_CFG);
		reg_val &= ~ROC_ML_CFG_ENA;
		roc_ml_reg_write64(roc_ml, reg_val, ML_CFG);

		/* Set ML(0)_CFG[MLIP_ENA] = 0. */
		reg_val = roc_ml_reg_read64(roc_ml, ML_CFG);
		reg_val &= ~ROC_ML_CFG_MLIP_ENA;
		roc_ml_reg_write64(roc_ml, reg_val, ML_CFG);

		/* Clear ML_MLR_BASE */
		roc_ml_reg_write64(roc_ml, 0, ML_MLR_BASE);
	}

	if (roc_model_is_cn10ka()) {
		/* Wait for all active jobs to finish.
		 * ML_CFG[ENA] : When set, MLW will accept job commands. This
		 * bit can be cleared at any time. If [BUSY] is set, software
		 * must wait until [BUSY] == 0 before setting this bit.
		 */
		roc_ml_reg_wait_to_clear(roc_ml, ML_CFG, ROC_ML_CFG_BUSY);

		/* (1) Set ML(0)_AXI_BRIDGE_CTRL(0..1)[FENCE] = 1 to instruct
		 * the AXI bridge not to accept any new transactions from MLIP.
		 */
		reg_val = roc_ml_reg_read64(roc_ml, ML_AXI_BRIDGE_CTRL(0));
		reg_val |= ROC_ML_AXI_BRIDGE_CTRL_FENCE;
		roc_ml_reg_write64(roc_ml, reg_val, ML_AXI_BRIDGE_CTRL(0));

		reg_val = roc_ml_reg_read64(roc_ml, ML_AXI_BRIDGE_CTRL(1));
		reg_val |= ROC_ML_AXI_BRIDGE_CTRL_FENCE;
		roc_ml_reg_write64(roc_ml, reg_val, ML_AXI_BRIDGE_CTRL(1));

		/* (2) Wait until ML(0)_AXI_BRIDGE_CTRL(0..1)[BUSY] = 0, which
		 * indicates that there are no outstanding transactions on the
		 * AXI-NCB paths.
		 */
		roc_ml_reg_wait_to_clear(roc_ml, ML_AXI_BRIDGE_CTRL(0),
					 ROC_ML_AXI_BRIDGE_CTRL_BUSY);
		roc_ml_reg_wait_to_clear(roc_ml, ML_AXI_BRIDGE_CTRL(1),
					 ROC_ML_AXI_BRIDGE_CTRL_BUSY);

		/* (3) Wait until ML(0)_JOB_MGR_CTRL[BUSY] = 0, which indicates
		 * that there are no pending jobs in the MLW's job manager.
		 */
		roc_ml_reg_wait_to_clear(roc_ml, ML_JOB_MGR_CTRL, ROC_ML_JOB_MGR_CTRL_BUSY);

		/* (4) Set ML(0)_CFG[ENA] = 0. */
		reg_val = roc_ml_reg_read64(roc_ml, ML_CFG);
		reg_val &= ~ROC_ML_CFG_ENA;
		roc_ml_reg_write64(roc_ml, reg_val, ML_CFG);

		/* (5) Set ML(0)_CFG[MLIP_ENA] = 0. */
		reg_val = roc_ml_reg_read64(roc_ml, ML_CFG);
		reg_val &= ~ROC_ML_CFG_MLIP_ENA;
		roc_ml_reg_write64(roc_ml, reg_val, ML_CFG);

		/* (6) Set ML(0)_AXI_BRIDGE_CTRL(0..1)[FENCE] = 0. */
		reg_val = roc_ml_reg_read64(roc_ml, ML_AXI_BRIDGE_CTRL(0));
		reg_val &= ~ROC_ML_AXI_BRIDGE_CTRL_FENCE;
		roc_ml_reg_write64(roc_ml, reg_val, ML_AXI_BRIDGE_CTRL(0));
		roc_ml_reg_write64(roc_ml, reg_val, ML_AXI_BRIDGE_CTRL(1));
	}

	if (roc_model_is_cnf10kb()) {
		/* (1) Clear MLAB(0)_CFG[ENA]. Any new jobs will bypass the job
		 * execution stages and their completions will be returned to
		 * PSM.
		 */
		reg_val = roc_ml_reg_read64(roc_ml, ML_CFG);
		reg_val &= ~ROC_ML_CFG_ENA;
		roc_ml_reg_write64(roc_ml, reg_val, ML_CFG);

		/* (2) Quiesce the ACC and DMA AXI interfaces: For each of the
		 * two MLAB(0)_AXI_BRIDGE_CTRL(0..1) registers:
		 *
		 * (a) Set MLAB(0)_AXI_BRIDGE_CTRL(0..1)[FENCE] to block new AXI
		 * commands from MLIP.
		 *
		 * (b) Poll MLAB(0)_AXI_BRIDGE_CTRL(0..1)[BUSY] == 0.
		 */
		reg_val = roc_ml_reg_read64(roc_ml, ML_AXI_BRIDGE_CTRL(0));
		reg_val |= ROC_ML_AXI_BRIDGE_CTRL_FENCE;
		roc_ml_reg_write64(roc_ml, reg_val, ML_AXI_BRIDGE_CTRL(0));

		roc_ml_reg_wait_to_clear(roc_ml, ML_AXI_BRIDGE_CTRL(0),
					 ROC_ML_AXI_BRIDGE_CTRL_BUSY);

		reg_val = roc_ml_reg_read64(roc_ml, ML_AXI_BRIDGE_CTRL(1));
		reg_val |= ROC_ML_AXI_BRIDGE_CTRL_FENCE;
		roc_ml_reg_write64(roc_ml, reg_val, ML_AXI_BRIDGE_CTRL(1));

		roc_ml_reg_wait_to_clear(roc_ml, ML_AXI_BRIDGE_CTRL(1),
					 ROC_ML_AXI_BRIDGE_CTRL_BUSY);

		/* (3) Clear MLAB(0)_CFG[MLIP_ENA] to reset MLIP. */
		reg_val = roc_ml_reg_read64(roc_ml, ML_CFG);
		reg_val &= ~ROC_ML_CFG_MLIP_ENA;
		roc_ml_reg_write64(roc_ml, reg_val, ML_CFG);

cnf10kb_mlip_reset_stage_4a:
		/* (4) Flush any outstanding jobs in MLAB's job execution
		 * stages:
		 *
		 * (a) Wait for completion stage to clear:
		 *   - Poll MLAB(0)_STG(0..2)_STATUS[VALID] == 0.
		 */
		roc_ml_reg_wait_to_clear(roc_ml, ML_STGX_STATUS(0), ROC_ML_STG_STATUS_VALID);
		roc_ml_reg_wait_to_clear(roc_ml, ML_STGX_STATUS(1), ROC_ML_STG_STATUS_VALID);
		roc_ml_reg_wait_to_clear(roc_ml, ML_STGX_STATUS(2), ROC_ML_STG_STATUS_VALID);

cnf10kb_mlip_reset_stage_4b:
		/* (4b) Clear job run stage: Poll
		 * MLAB(0)_STG_CONTROL[RUN_TO_COMP] == 0.
		 */
		roc_ml_reg_wait_to_clear(roc_ml, ML_STG_CONTROL, ROC_ML_STG_CONTROL_RUN_TO_COMP);

		/* (4b) Clear job run stage: If MLAB(0)_STG(1)_STATUS[VALID] ==
		 * 1:
		 *     - Set MLAB(0)_STG_CONTROL[RUN_TO_COMP].
		 *     - Poll MLAB(0)_STG_CONTROL[RUN_TO_COMP] == 0.
		 *     - Repeat step (a) to clear job completion stage.
		 */
		reg_val = roc_ml_reg_read64(roc_ml, ML_STGX_STATUS(1));
		if (reg_val & ROC_ML_STG_STATUS_VALID) {
			reg_val = roc_ml_reg_read64(roc_ml, ML_STG_CONTROL);
			reg_val |= ROC_ML_STG_CONTROL_RUN_TO_COMP;
			roc_ml_reg_write64(roc_ml, reg_val, ML_STG_CONTROL);

			roc_ml_reg_wait_to_clear(roc_ml, ML_STG_CONTROL,
						 ROC_ML_STG_CONTROL_RUN_TO_COMP);

			goto cnf10kb_mlip_reset_stage_4a;
		}

		/* (4c) Clear job fetch stage: Poll
		 * MLAB(0)_STG_CONTROL[FETCH_TO_RUN] == 0.
		 */
		roc_ml_reg_wait_to_clear(roc_ml, ML_STG_CONTROL, ROC_ML_STG_CONTROL_FETCH_TO_RUN);

		/* (4c) Clear job fetch stage: If
		 * MLAB(0)_STG(0..2)_STATUS[VALID] == 1:
		 *     - Set MLAB(0)_STG_CONTROL[FETCH_TO_RUN].
		 *     - Poll MLAB(0)_STG_CONTROL[FETCH_TO_RUN] == 0.
		 *     - Repeat step (b) to clear job run and completion stages.
		 */
		reg_val = (roc_ml_reg_read64(roc_ml, ML_STGX_STATUS(0)) |
			   roc_ml_reg_read64(roc_ml, ML_STGX_STATUS(1)) |
			   roc_ml_reg_read64(roc_ml, ML_STGX_STATUS(2)));

		if (reg_val & ROC_ML_STG_STATUS_VALID) {
			reg_val = roc_ml_reg_read64(roc_ml, ML_STG_CONTROL);
			reg_val |= ROC_ML_STG_CONTROL_FETCH_TO_RUN;
			roc_ml_reg_write64(roc_ml, reg_val, ML_STG_CONTROL);

			roc_ml_reg_wait_to_clear(roc_ml, ML_STG_CONTROL,
						 ROC_ML_STG_CONTROL_FETCH_TO_RUN);

			goto cnf10kb_mlip_reset_stage_4b;
		}

		/* (5) Reset the ACC and DMA AXI interfaces: For each of the two
		 * MLAB(0)_AXI_BRIDGE_CTRL(0..1) registers:
		 *
		 * (5a) Set and then clear
		 * MLAB(0)_AXI_BRIDGE_CTRL(0..1)[FLUSH_WRITE_DATA].
		 *
		 * (5b) Clear MLAB(0)_AXI_BRIDGE_CTRL(0..1)[FENCE].
		 */
		reg_val = roc_ml_reg_read64(roc_ml, ML_AXI_BRIDGE_CTRL(0));
		reg_val |= ROC_ML_AXI_BRIDGE_CTRL_FLUSH_WRITE_DATA;
		roc_ml_reg_write64(roc_ml, reg_val, ML_AXI_BRIDGE_CTRL(0));

		reg_val = roc_ml_reg_read64(roc_ml, ML_AXI_BRIDGE_CTRL(0));
		reg_val &= ~ROC_ML_AXI_BRIDGE_CTRL_FLUSH_WRITE_DATA;
		roc_ml_reg_write64(roc_ml, reg_val, ML_AXI_BRIDGE_CTRL(0));

		reg_val = roc_ml_reg_read64(roc_ml, ML_AXI_BRIDGE_CTRL(0));
		reg_val &= ~ROC_ML_AXI_BRIDGE_CTRL_FENCE;
		roc_ml_reg_write64(roc_ml, reg_val, ML_AXI_BRIDGE_CTRL(0));

		reg_val = roc_ml_reg_read64(roc_ml, ML_AXI_BRIDGE_CTRL(1));
		reg_val |= ROC_ML_AXI_BRIDGE_CTRL_FLUSH_WRITE_DATA;
		roc_ml_reg_write64(roc_ml, reg_val, ML_AXI_BRIDGE_CTRL(1));

		reg_val = roc_ml_reg_read64(roc_ml, ML_AXI_BRIDGE_CTRL(1));
		reg_val &= ~ROC_ML_AXI_BRIDGE_CTRL_FLUSH_WRITE_DATA;
		roc_ml_reg_write64(roc_ml, reg_val, ML_AXI_BRIDGE_CTRL(1));

		reg_val = roc_ml_reg_read64(roc_ml, ML_AXI_BRIDGE_CTRL(1));
		reg_val &= ~ROC_ML_AXI_BRIDGE_CTRL_FENCE;
		roc_ml_reg_write64(roc_ml, reg_val, ML_AXI_BRIDGE_CTRL(1));
	}

	return 0;
}

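/* Initialize the ML device private data for a PCI-attached ML device: take
 * the register base from BAR0, reset the cached MLR base and initialize
 * the scratch (sp) and fast-path (fp) spinlocks.
 */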
int
roc_ml_dev_init(struct roc_ml *roc_ml)
{
	struct plt_pci_device *pci_dev;
	struct dev *dev;
	struct ml *ml;

	if (roc_ml == NULL || roc_ml->pci_dev == NULL)
		return -EINVAL;

	PLT_STATIC_ASSERT(sizeof(struct ml) <= ROC_ML_MEM_SZ);

	ml = roc_ml_to_ml_priv(roc_ml);
	memset(ml, 0, sizeof(*ml));
	pci_dev = roc_ml->pci_dev;
	dev = &ml->dev;

	ml->pci_dev = pci_dev;
	dev->roc_ml = roc_ml;

	ml->ml_reg_addr = ml->pci_dev->mem_resource[0].addr;
	ml->ml_mlr_base = 0;
	ml->ml_mlr_base_saved = false;

	plt_ml_dbg("ML: PCI Physical Address : 0x%016lx", ml->pci_dev->mem_resource[0].phys_addr);
	plt_ml_dbg("ML: PCI Virtual Address : 0x%016lx",
		   PLT_U64_CAST(ml->pci_dev->mem_resource[0].addr));

	plt_spinlock_init(&roc_ml->sp_spinlock);
	plt_spinlock_init(&roc_ml->fp_spinlock);

	return 0;
}

int
roc_ml_dev_fini(struct roc_ml *roc_ml)
{
	struct ml *ml = roc_ml_to_ml_priv(roc_ml);

	if (ml == NULL)
		return -EINVAL;

	return 0;
}

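/* Initialize the ML block when it is exposed through a BPHY device (MLAB)
 * instead of a standalone PCI function. Mirrors roc_ml_dev_init(), except
 * that the registers live ML_MLAB_BLK_OFFSET bytes into the BPHY BAR0.
 */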
int
roc_ml_blk_init(struct roc_bphy *roc_bphy, struct roc_ml *roc_ml)
{
	struct dev *dev;
	struct ml *ml;

	if ((roc_ml == NULL) || (roc_bphy == NULL))
		return -EINVAL;

	PLT_STATIC_ASSERT(sizeof(struct ml) <= ROC_ML_MEM_SZ);

	ml = roc_ml_to_ml_priv(roc_ml);
	memset(ml, 0, sizeof(*ml));

	dev = &ml->dev;

	ml->pci_dev = roc_bphy->pci_dev;
	dev->roc_ml = roc_ml;

	plt_ml_dbg(
		"MLAB: Physical Address : 0x%016lx",
		PLT_PTR_ADD_U64_CAST(ml->pci_dev->mem_resource[0].phys_addr, ML_MLAB_BLK_OFFSET));
	plt_ml_dbg("MLAB: Virtual Address : 0x%016lx",
		   PLT_PTR_ADD_U64_CAST(ml->pci_dev->mem_resource[0].addr, ML_MLAB_BLK_OFFSET));

	ml->ml_reg_addr = PLT_PTR_ADD(ml->pci_dev->mem_resource[0].addr, ML_MLAB_BLK_OFFSET);
	ml->ml_mlr_base = 0;
	ml->ml_mlr_base_saved = false;

	plt_spinlock_init(&roc_ml->sp_spinlock);
	plt_spinlock_init(&roc_ml->fp_spinlock);

	return 0;
}

int
roc_ml_blk_fini(struct roc_bphy *roc_bphy, struct roc_ml *roc_ml)
{
	struct ml *ml;

	if ((roc_ml == NULL) || (roc_bphy == NULL))
		return -EINVAL;

	ml = roc_ml_to_ml_priv(roc_ml);

	if (ml == NULL)
		return -EINVAL;

	return 0;
}

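/* Return the SSO PF_FUNC from the idev; a thin wrapper so that ML callers
 * do not need to reach into the idev API directly.
 */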
uint16_t
roc_ml_sso_pf_func_get(void)
{
	return idev_sso_pffunc_get();
}