/* $NetBSD: amdgpu_dc_helper.c,v 1.4 2021/12/19 10:59:01 riastradh Exp $ */

/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
/*
 * dc_helper.c
 *
 *  Created on: Aug 30, 2016
 *      Author: agrodzov
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_dc_helper.c,v 1.4 2021/12/19 10:59:01 riastradh Exp $");

#include <linux/delay.h>

#include "dm_services.h"
#include <sys/stdarg.h>

#include "dc.h"
#include "dc_dmub_srv.h"
#include "inc/reg_helper.h"

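/*
 * Flush the gathered read/modify/write sequence to the DMUB command queue.
 * Gathering is paused around the queue call so the submission itself is not
 * re-captured, then the staging buffer and counters are reset.
 */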
static inline void submit_dmub_read_modify_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	bool gather = false;

	offload->should_burst_write =
			(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
	cmd_buf->header.payload_bytes =
			sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
	offload->same_addr_count = 0;
}

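/*
 * Flush the gathered burst-write command (one address, many values) to the
 * DMUB command queue, again pausing gathering around the queue call.
 */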
static inline void submit_dmub_burst_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
	bool gather = false;

	cmd_buf->header.payload_bytes =
			sizeof(uint32_t) * offload->reg_seq_count;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
}

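/*
 * Flush the gathered register-wait command to the DMUB command queue; the
 * wait itself is then carried out by the DMUB firmware, not the driver.
 */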
static inline void submit_dmub_reg_wait(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
	bool gather = false;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);

	memset(cmd_buf, 0, sizeof(*cmd_buf));
	offload->reg_seq_count = 0;

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
}

struct dc_reg_value_masks {
	uint32_t value;
	uint32_t mask;
};

struct dc_reg_sequence {
	uint32_t addr;
	struct dc_reg_value_masks value_masks;
};

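/*
 * Merge one field update into an accumulated value/mask pair: clear the
 * field's bits in the accumulated value, OR in the shifted field value,
 * and widen the accumulated mask to cover the field.
 */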
static inline void set_reg_field_value_masks(
	struct dc_reg_value_masks *field_value_mask,
	uint32_t value,
	uint32_t mask,
	uint8_t shift)
{
	ASSERT(mask != 0);

	field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift));
	field_value_mask->mask = field_value_mask->mask | mask;
}

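/*
 * Accumulate n field updates into *field_value_mask.  The first
 * (shift, mask, value) triple is passed explicitly; the remaining n - 1
 * triples are consumed from the va_list.  Note that addr is accepted in
 * the signature but is currently unused here.
 */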
static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		va_list ap)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	/* gather the value/mask of every field being updated in this register */
	set_reg_field_value_masks(field_value_mask,
			field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		set_reg_field_value_masks(field_value_mask,
				field_value, mask, shift);
		i++;
	}
}

static void dmub_flush_buffer_execute(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	submit_dmub_read_modify_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}

static void dmub_flush_burst_write_buffer_execute(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	submit_dmub_burst_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}

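/*
 * Append one register write to the pending burst-write command.  If the
 * staging buffer is full it is flushed and executed first.  If a burst to a
 * different address is already pending, that burst is flushed and false is
 * returned so the caller can fall back to read/modify/write packing.
 */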
static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t reg_val)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;

	/* flush command if buffer is full */
	if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
		dmub_flush_burst_write_buffer_execute(offload, ctx);

	if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
			addr != cmd_buf->addr) {
		dmub_flush_burst_write_buffer_execute(offload, ctx);
		return false;
	}

	cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
	cmd_buf->header.sub_type = 0;
	cmd_buf->addr = addr;
	cmd_buf->write_values[offload->reg_seq_count] = reg_val;
	offload->reg_seq_count++;

	return true;
}

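/*
 * Pack one read/modify/write into the pending DMUB sequence, flushing first
 * if the sequence buffer is full.  Consecutive writes to the same address
 * are counted in same_addr_count; when a flushed sequence ends with the
 * maximum run of same-address writes, should_burst_write is set and
 * subsequent writes are packed as a burst instead.  Returns the value the
 * register would hold after the update.
 */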
static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
		struct dc_reg_value_masks *field_value_mask)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	struct dmub_cmd_read_modify_write_sequence *seq;

	/* flush command if buffer is full */
	if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
			offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
		dmub_flush_buffer_execute(offload, ctx);

	if (offload->should_burst_write) {
		if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
			return field_value_mask->value;
		else
			offload->should_burst_write = false;
	}

	/* pack commands */
	cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
	cmd_buf->header.sub_type = 0;
	seq = &cmd_buf->seq[offload->reg_seq_count];

	if (offload->reg_seq_count) {
		if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
			offload->same_addr_count++;
		else
			offload->same_addr_count = 0;
	}

	seq->addr = addr;
	seq->modify_mask = field_value_mask->mask;
	seq->modify_value = field_value_mask->value;
	offload->reg_seq_count++;

	return field_value_mask->value;
}

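/*
 * Pack a register-wait command: DMUB will poll addr until the field
 * selected by mask equals (condition_value << shift) & mask, or until
 * time_out_us expires.
 */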
static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;

	cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
	cmd_buf->header.sub_type = 0;
	cmd_buf->reg_wait.addr = addr;
	cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
	cmd_buf->reg_wait.mask = mask;
	cmd_buf->reg_wait.time_out_us = time_out_us;
}

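/*
 * Read/modify/write n fields of the register at addr.  The variadic tail
 * after field_value1 is (shift, mask, value) triples for fields 2..n.  When
 * a DMUB gather is in progress the update is packed into the command buffer
 * instead of going out over MMIO.
 *
 * Callers normally go through the REG_UPDATE* macros in reg_helper.h rather
 * than calling this directly.  A sketch of an equivalent direct call, with
 * hypothetical register/field names:
 *
 *	generic_reg_update_ex(ctx, mmFOO, 2,
 *		FOO__FIELD_A__SHIFT, FOO__FIELD_A_MASK, 1,
 *		FOO__FIELD_B__SHIFT, FOO__FIELD_B_MASK, 0);
 */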
uint32_t generic_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	uint32_t reg_val;
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress)
		return dmub_reg_value_pack(ctx, addr, &field_value_mask);
		/* todo: return void so we can decouple code running in driver from register states */

	/* mmio write directly */
	reg_val = dm_read_reg(ctx, addr);
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
	dm_write_reg(ctx, addr, reg_val);
	return reg_val;
}

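/*
 * Like generic_reg_update_ex(), but instead of reading the current register
 * contents, the fields are merged into the caller-supplied reg_val.  Under
 * a DMUB gather the full value is packed as a burst write.
 */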
uint32_t generic_reg_set_ex(const struct dc_context *ctx,
		uint32_t addr, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	/* mmio write directly */
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
		/* todo: return void so we can decouple code running in driver from register states */
	}

	dm_write_reg(ctx, addr, reg_val);
	return reg_val;
}

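/*
 * Traced register read.  Reading while a DMUB gather is in progress (other
 * than during burst-write packing) is invalid, since the gathered value is
 * not yet available; that case asserts and returns 0.
 */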
uint32_t dm_read_reg_func(
	const struct dc_context *ctx,
	uint32_t address,
	const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);
	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

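/*
 * The generic_reg_get*() helpers read a register once and extract up to
 * eight fields.  One fixed-arity function per field count is used instead
 * of a va_list version on purpose; see the note after generic_reg_get8().
 */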
uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift, uint32_t mask, uint32_t *field_value)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value = get_reg_field_value_ex(reg_val, mask, shift);
	return reg_val;
}

uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	return reg_val;
}

uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	return reg_val;
}

uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	return reg_val;
}

uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	return reg_val;
}

uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	return reg_val;
}

uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	return reg_val;
}

uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
		uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	*field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
	return reg_val;
}
/* note: a va_args version of this is a pretty bad idea: the output
 * parameters are passed by pointer, so the compiler cannot check that the
 * argument sizes match, which makes it prone to stack-corruption bugs.

uint32_t generic_reg_get(const struct dc_context *ctx,
		uint32_t addr, int n, ...)
{
	uint32_t shift, mask;
	uint32_t *field_value;
	uint32_t reg_val;
	int i = 0;

	reg_val = dm_read_reg(ctx, addr);

	va_list ap;
	va_start(ap, n);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(reg_val, mask, shift);
		i++;
	}

	va_end(ap);

	return reg_val;
}
*/

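/*
 * Poll the field selected by mask/shift at addr until it equals
 * condition_value, sleeping delay_between_poll_us between attempts (msleep
 * for delays of 1ms or more, udelay otherwise), for up to
 * time_out_num_tries tries.  Under a DMUB gather the wait is instead packed
 * as a REG_WAIT command with a timeout of delay * tries.  On timeout a
 * warning is logged and, outside FPGA environments, we break to the
 * debugger.
 */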
void generic_reg_wait(const struct dc_context *ctx,
	uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
	unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
	const char *func_name, int line)
{
	uint32_t field_value;
	uint32_t reg_val;
	int i;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
				delay_between_poll_us * time_out_num_tries);
		return;
	}

	/*
	 * Something is terribly wrong if the timeout is > 3000ms.
	 * 3000ms is the maximum time needed for SMU to pass values back.
	 * This value comes from experiments.
	 */
	ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);

	for (i = 0; i <= time_out_num_tries; i++) {
		if (i) {
			if (delay_between_poll_us >= 1000)
				msleep(delay_between_poll_us/1000);
			else if (delay_between_poll_us > 0)
				udelay(delay_between_poll_us);
		}

		reg_val = dm_read_reg(ctx, addr);

		field_value = get_reg_field_value_ex(reg_val, mask, shift);

		if (field_value == condition_value) {
			if (i * delay_between_poll_us > 1000 &&
					!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
				DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
						delay_between_poll_us * i / 1000,
						func_name, line);
			return;
		}
	}

	DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
			delay_between_poll_us, time_out_num_tries,
			func_name, line);

	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
		BREAK_TO_DEBUGGER();
}

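/*
 * Indirect register write: select the target register by writing its index
 * to addr_index, then write the data through addr_data.
 */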
void generic_write_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t data)
{
	dm_write_reg(ctx, addr_index, index);
	dm_write_reg(ctx, addr_data, data);
}

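/*
 * Indirect register read: select the target register via addr_index, then
 * read the value back through addr_data.
 */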
uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index)
{
	uint32_t value = 0;

	// When reading a register, no offload gather should be in progress.
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		ASSERT(false);
	}

	dm_write_reg(ctx, addr_index, index);
	value = dm_read_reg(ctx, addr_data);

	return value;
}

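/*
 * Read an indirect register and extract n fields.  The variadic tail after
 * field_value1 is (shift, mask, uint32_t *field_value) triples for fields
 * 2..n.  Returns the raw register value.
 */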
uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t shift, mask, *field_value;
	uint32_t value = 0;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
		i++;
	}

	va_end(ap);

	return value;
}

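/*
 * Merge n field updates into reg_val (the variadic tail is
 * (shift, mask, value) triples for fields 2..n) and write the result to the
 * indirect register selected by index.  Returns the value written.
 */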
uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
		i++;
	}

	generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
	va_end(ap);

	return reg_val;
}

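/*
 * Typical DMUB offload sequence (an illustrative sketch; the REG_* macros
 * come from reg_helper.h):
 *
 *	reg_sequence_start_gather(ctx);
 *	REG_UPDATE(...);                  // packed, not written over MMIO
 *	REG_SET(...);
 *	reg_sequence_start_execute(ctx);  // submit pending command, kick DMUB
 *	reg_sequence_wait_done(ctx);      // poll until DMUB is idle
 */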
void reg_sequence_start_gather(const struct dc_context *ctx)
{
	/* If reg sequence offload is supported and enabled, set a flag to
	 * make the REG_SET/REG_UPDATE macros build a reg sequence command
	 * buffer rather than write MMIO directly.
	 */

	if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) {
		struct dc_reg_helper_state *offload =
			&ctx->dmub_srv->reg_helper_offload;

		/* A gather is already in progress: caller sequence mismatch.
		 * The caller needs debugging; offload will not work.
		 */
		ASSERT(!offload->gather_in_progress);

		offload->gather_in_progress = true;
	}
}

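/*
 * Stop gathering, submit whichever command type is pending (read/modify/
 * write, reg wait, or burst write), and tell DMUB to execute it.
 */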
void reg_sequence_start_execute(const struct dc_context *ctx)
{
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload && offload->gather_in_progress) {
		offload->gather_in_progress = false;
		offload->should_burst_write = false;
		switch (offload->cmd_data.cmd_common.header.type) {
		case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE:
			submit_dmub_read_modify_write(offload, ctx);
			break;
		case DMUB_CMD__REG_REG_WAIT:
			submit_dmub_reg_wait(offload, ctx);
			break;
		case DMUB_CMD__REG_SEQ_BURST_WRITE:
			submit_dmub_burst_write(offload, ctx);
			break;
		default:
			return;
		}

		dc_dmub_srv_cmd_execute(ctx->dmub_srv);
	}
}

void reg_sequence_wait_done(const struct dc_context *ctx)
{
	/* callback to DM to poll for last submission done */
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload &&
	    ctx->dc->debug.dmub_offload_enabled &&
	    !ctx->dc->debug.dmcub_emulation) {
		dc_dmub_srv_wait_idle(ctx->dmub_srv);
	}
}