/*	$NetBSD: intel_guc_log.c,v 1.3 2021/12/19 12:32:15 riastradh Exp $	*/

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_guc_log.c,v 1.3 2021/12/19 12:32:15 riastradh Exp $");

#include <linux/debugfs.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_memcpy.h"
#include "intel_guc_log.h"

static void guc_log_capture_logs(struct intel_guc_log *log);

/**
 * DOC: GuC firmware log
 *
 * The firmware log is enabled by setting i915.guc_log_level to a positive
 * level. Log data can be printed out by reading the debugfs file
 * i915_guc_log_dump. Reading from i915_guc_load_status will print out the
 * firmware loading status and scratch register values.
 */
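
/*
 * Example usage (a sketch; the exact debugfs path depends on where debugfs
 * is mounted and on the DRM card index):
 *
 *	# modprobe i915 guc_log_level=2
 *	# cat /sys/kernel/debug/dri/0/i915_guc_log_dump
 */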

static int guc_action_flush_log_complete(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_action_flush_log(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
		0
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

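/*
 * Compose the single argument word of the UK_LOG_ENABLE_LOGGING host-to-GuC
 * action below: an enable bit, the verbosity field and the default-logging
 * bit.
 */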
static int guc_action_control_log(struct intel_guc *guc, bool enable,
				  bool default_logging, u32 verbosity)
{
	u32 action[] = {
		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
		(enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
		(verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
		(default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
	};

	GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}

static void guc_log_enable_flush_events(struct intel_guc_log *log)
{
	intel_guc_enable_msg(log_to_guc(log),
			     INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
			     INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

static void guc_log_disable_flush_events(struct intel_guc_log *log)
{
	intel_guc_disable_msg(log_to_guc(log),
			      INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
			      INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}

/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer; relay stays on the same sub buffer if 0 is returned.
 */
static int subbuf_start_callback(struct rchan_buf *buf,
				 void *subbuf,
				 void *prev_subbuf,
				 size_t prev_padding)
{
	/*
	 * Use no-overwrite mode by default, where relay will stop accepting
	 * new data if there are no empty sub buffers left.
	 * There is no strict synchronization enforced by relay between
	 * consumer and producer. In overwrite mode there is a possibility of
	 * getting inconsistent/garbled data, as the producer could be writing
	 * to the same sub buffer from which the consumer is reading. This
	 * can't be avoided unless the consumer is fast enough and can always
	 * run in tandem with the producer.
	 */
	if (relay_buf_full(buf))
		return 0;

	return 1;
}

/*
 * file_create() callback. Creates relay file in debugfs.
 */
static struct dentry *create_buf_file_callback(const char *filename,
					       struct dentry *parent,
					       umode_t mode,
					       struct rchan_buf *buf,
					       int *is_global)
{
	struct dentry *buf_file;

	/*
	 * This is to enable the use of a single buffer for the relay channel
	 * and correspondingly have a single file exposed to userspace, through
	 * which it can collect the logs in order without any post-processing.
	 * 'is_global' needs to be set even if parent is NULL for early logging.
	 */
	*is_global = 1;

	if (!parent)
		return NULL;

	buf_file = debugfs_create_file(filename, mode,
				       parent, buf, &relay_file_operations);
	if (IS_ERR(buf_file))
		return NULL;

	return buf_file;
}

/*
 * file_remove() default callback. Removes relay file in debugfs.
 */
static int remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

/* relay channel callbacks */
static struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_callback,
	.create_buf_file = create_buf_file_callback,
	.remove_buf_file = remove_buf_file_callback,
};

static void guc_move_to_next_buf(struct intel_guc_log *log)
{
	/*
	 * Make sure the updates made in the sub buffer are visible before the
	 * consumer sees the following update to the offset inside the sub
	 * buffer.
	 */
	smp_wmb();

	/* All data has been written, so now move the offset of sub buffer. */
	relay_reserve(log->relay.channel, log->vma->obj->base.size);

	/* Switch to the next sub buffer */
	relay_flush(log->relay.channel);
}

static void *guc_get_write_buffer(struct intel_guc_log *log)
{
	/*
	 * Just get the base address of a new sub buffer and copy data into it
	 * ourselves. NULL will be returned in no-overwrite mode, if all sub
	 * buffers are full. We could have used relay_write() to copy the data
	 * indirectly, but that would have been a bit convoluted, as we need to
	 * write to only certain locations inside a sub buffer, which cannot be
	 * done without using relay_reserve() along with relay_write(). So it
	 * is better to use relay_reserve() alone.
	 */
	return relay_reserve(log->relay.channel, 0);
}

static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
				       enum guc_log_buffer_type type,
				       unsigned int full_cnt)
{
	unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
	bool overflow = false;

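	/*
	 * stats[].overflow mirrors the most recent count reported by the
	 * firmware, while stats[].sampled_overflow accumulates the total
	 * number of overflows seen, allowing for the 4-bit counter to wrap.
	 */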
	if (full_cnt != prev_full_cnt) {
		overflow = true;

		log->stats[type].overflow = full_cnt;
		log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;

		if (full_cnt < prev_full_cnt) {
			/* buffer_full_cnt is a 4 bit counter, so it wrapped */
			log->stats[type].sampled_overflow += 16;
		}

		dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
				       "GuC log buffer overflow\n");
	}

	return overflow;
}

static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_ISR_LOG_BUFFER:
		return ISR_BUFFER_SIZE;
	case GUC_DPC_LOG_BUFFER:
		return DPC_BUFFER_SIZE;
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return CRASH_BUFFER_SIZE;
	default:
		MISSING_CASE(type);
	}

	return 0;
}

static void guc_read_update_log_buffer(struct intel_guc_log *log)
{
	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
	struct guc_log_buffer_state log_buf_state_local;
	enum guc_log_buffer_type type;
	void *src_data, *dst_data;
	bool new_overflow;

	mutex_lock(&log->relay.lock);

	if (WARN_ON(!intel_guc_log_relay_created(log)))
		goto out_unlock;

	/* Get the pointer to shared GuC log buffer */
	log_buf_state = src_data = log->relay.buf_addr;

	/* Get the pointer to local buffer to store the logs */
	log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);

	if (unlikely(!log_buf_snapshot_state)) {
		/*
		 * Use a rate-limited message here to avoid a deluge; the logs
		 * might be getting consumed by userspace at a slow rate.
		 */
		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
		log->relay.full_count++;

		goto out_unlock;
	}

	/* Actual logs are present from the 2nd page */
	src_data += PAGE_SIZE;
	dst_data += PAGE_SIZE;

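	/*
	 * Walk each log type's state header (all in the first page) together
	 * with its data buffer, advancing through both the shared buffer and
	 * the snapshot in enum guc_log_buffer_type order.
	 */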
	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		/*
		 * Make a copy of the state structure, inside the GuC log
		 * buffer (which is mapped uncached), on the stack to avoid
		 * reading from it multiple times.
		 */
		memcpy(&log_buf_state_local, log_buf_state,
		       sizeof(struct guc_log_buffer_state));
		buffer_size = guc_get_log_buffer_size(type);
		read_offset = log_buf_state_local.read_ptr;
		write_offset = log_buf_state_local.sampled_write_ptr;
		full_cnt = log_buf_state_local.buffer_full_cnt;

		/* Bookkeeping stuff */
		log->stats[type].flush += log_buf_state_local.flush_to_file;
		new_overflow = guc_check_log_buf_overflow(log, type, full_cnt);

		/* Update the state of the shared log buffer */
		log_buf_state->read_ptr = write_offset;
		log_buf_state->flush_to_file = 0;
		log_buf_state++;

		/* First copy the state structure to the snapshot buffer */
		memcpy(log_buf_snapshot_state, &log_buf_state_local,
		       sizeof(struct guc_log_buffer_state));

		/*
		 * The write pointer could have been updated by the GuC
		 * firmware after sending the flush interrupt to the host; for
		 * consistency, set the write pointer value in the snapshot
		 * buffer to the same value as sampled_write_ptr.
		 */
		log_buf_snapshot_state->write_ptr = write_offset;
		log_buf_snapshot_state++;

		/* Now copy the actual logs. */
		if (unlikely(new_overflow)) {
			/* copy the whole buffer in case of overflow */
			read_offset = 0;
			write_offset = buffer_size;
		} else if (unlikely((read_offset > buffer_size) ||
				    (write_offset > buffer_size))) {
			DRM_ERROR("invalid log buffer state\n");
			/* copy whole buffer as offsets are unreliable */
			read_offset = 0;
			write_offset = buffer_size;
		}

		/*
		 * Just copy the newly written data. If the write pointer has
		 * wrapped, copy the head of the buffer here and the tail
		 * below.
		 */
		if (read_offset > write_offset) {
			i915_memcpy_from_wc(dst_data, src_data, write_offset);
			bytes_to_copy = buffer_size - read_offset;
		} else {
			bytes_to_copy = write_offset - read_offset;
		}
		i915_memcpy_from_wc(dst_data + read_offset,
				    src_data + read_offset, bytes_to_copy);

		src_data += buffer_size;
		dst_data += buffer_size;
	}

	guc_move_to_next_buf(log);

out_unlock:
	mutex_unlock(&log->relay.lock);
}

static void capture_logs_work(struct work_struct *work)
{
	struct intel_guc_log *log =
		container_of(work, struct intel_guc_log, relay.flush_work);

	guc_log_capture_logs(log);
}
guc_log_map(struct intel_guc_log * log)334 static int guc_log_map(struct intel_guc_log *log)
335 {
336 	void *vaddr;
337 
338 	lockdep_assert_held(&log->relay.lock);
339 
340 	if (!log->vma)
341 		return -ENODEV;
342 
343 	/*
344 	 * Create a WC (Uncached for read) vmalloc mapping of log
345 	 * buffer pages, so that we can directly get the data
346 	 * (up-to-date) from memory.
347 	 */
348 	vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
349 	if (IS_ERR(vaddr))
350 		return PTR_ERR(vaddr);
351 
352 	log->relay.buf_addr = vaddr;
353 
354 	return 0;
355 }
356 
guc_log_unmap(struct intel_guc_log * log)357 static void guc_log_unmap(struct intel_guc_log *log)
358 {
359 	lockdep_assert_held(&log->relay.lock);
360 
361 	i915_gem_object_unpin_map(log->vma->obj);
362 	log->relay.buf_addr = NULL;
363 }

void intel_guc_log_init_early(struct intel_guc_log *log)
{
	mutex_init(&log->relay.lock);
	INIT_WORK(&log->relay.flush_work, capture_logs_work);
	log->relay.started = false;
}

static int guc_log_relay_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;
	int ret;

	lockdep_assert_held(&log->relay.lock);
	GEM_BUG_ON(!log->vma);

	/* Keep the size of sub buffers the same as the shared log buffer */
	subbuf_size = log->vma->size;

	/*
	 * Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot time logs and provides enough leeway to userspace, in terms of
	 * latency, for consuming the logs from relay. It also doesn't take
	 * up too much memory.
	 */
	n_subbufs = 8;

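	/*
	 * The channel is created under the DRM minor's debugfs directory, so
	 * userspace can typically stream the snapshots with something like
	 * (a sketch; the exact path depends on where debugfs is mounted and
	 * on the card index):
	 *
	 *	# cat /sys/kernel/debug/dri/0/guc_log
	 */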
	guc_log_relay_chan = relay_open("guc_log",
					dev_priv->drm.primary->debugfs_root,
					subbuf_size, n_subbufs,
					&relay_callbacks, dev_priv);
	if (!guc_log_relay_chan) {
		DRM_ERROR("Couldn't create relay chan for GuC logging\n");

		ret = -ENOMEM;
		return ret;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	log->relay.channel = guc_log_relay_chan;

	return 0;
}

static void guc_log_relay_destroy(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	relay_close(log->relay.channel);
	log->relay.channel = NULL;
}

static void guc_log_capture_logs(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;

	guc_read_update_log_buffer(log);

	/*
	 * Generally the device is expected to be active only at this
	 * time, so the get/put should be really quick.
	 */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		guc_action_flush_log_complete(guc);
}

static u32 __get_default_log_level(struct intel_guc_log *log)
{
	/*
	 * A negative value means "use platform/config default", 0 disables
	 * logging and positive values select increasing verbosity.
	 */
	if (i915_modparams.guc_log_level < 0) {
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
	}

	if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "guc_log_level", i915_modparams.guc_log_level,
			 "verbosity too high");
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
	}

	GEM_BUG_ON(i915_modparams.guc_log_level < GUC_LOG_LEVEL_DISABLED);
	GEM_BUG_ON(i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX);
	return i915_modparams.guc_log_level;
}

int intel_guc_log_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct i915_vma *vma;
	u32 guc_log_size;
	int ret;

	GEM_BUG_ON(log->vma);

	/*
	 *  GuC Log buffer Layout
	 *
	 *  +===============================+ 00B
	 *  |    Crash dump state header    |
	 *  +-------------------------------+ 32B
	 *  |       DPC state header        |
	 *  +-------------------------------+ 64B
	 *  |       ISR state header        |
	 *  +-------------------------------+ 96B
	 *  |                               |
	 *  +===============================+ PAGE_SIZE (4KB)
	 *  |        Crash Dump logs        |
	 *  +===============================+ + CRASH_SIZE
	 *  |           DPC logs            |
	 *  +===============================+ + DPC_SIZE
	 *  |           ISR logs            |
	 *  +===============================+ + ISR_SIZE
	 */
	guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE +
			ISR_BUFFER_SIZE;

	vma = intel_guc_allocate_vma(guc, guc_log_size);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	log->vma = vma;

	log->level = __get_default_log_level(log);
	DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
			 log->level, enableddisabled(log->level),
			 yesno(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
			 GUC_LOG_LEVEL_TO_VERBOSITY(log->level));

	return 0;

err:
	DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret);
	return ret;
}

void intel_guc_log_destroy(struct intel_guc_log *log)
{
	i915_vma_unpin_and_release(&log->vma, 0);
	mutex_destroy(&log->relay.lock);
}

int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;
	int ret = 0;

	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
	GEM_BUG_ON(!log->vma);

	/*
	 * GuC recognizes log levels starting from 0 up to max; we use 0 as an
	 * indication that logging should be disabled.
	 */
	if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
		return -EINVAL;

	mutex_lock(&dev_priv->drm.struct_mutex);

	if (log->level == level)
		goto out_unlock;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		ret = guc_action_control_log(guc,
					     GUC_LOG_LEVEL_IS_VERBOSE(level),
					     GUC_LOG_LEVEL_IS_ENABLED(level),
					     GUC_LOG_LEVEL_TO_VERBOSITY(level));
	if (ret) {
		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
		goto out_unlock;
	}

	log->level = level;

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
}

bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
	return log->relay.buf_addr;
}

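/*
 * Relay lifecycle: intel_guc_log_relay_open() creates the channel and maps
 * the shared log buffer, intel_guc_log_relay_start() enables the flush
 * events, intel_guc_log_relay_flush() forces an immediate capture, and
 * intel_guc_log_relay_close() tears it all down again.
 */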
int intel_guc_log_relay_open(struct intel_guc_log *log)
{
	int ret;

	if (!log->vma)
		return -ENODEV;

	mutex_lock(&log->relay.lock);

	if (intel_guc_log_relay_created(log)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	/*
	 * We require SSE 4.1 for fast reads from the GuC log buffer and
	 * it should be present on the chipsets supporting GuC based
	 * submissions.
	 */
	if (!i915_has_memcpy_from_wc()) {
		ret = -ENXIO;
		goto out_unlock;
	}

	ret = guc_log_relay_create(log);
	if (ret)
		goto out_unlock;

	ret = guc_log_map(log);
	if (ret)
		goto out_relay;

	mutex_unlock(&log->relay.lock);

	return 0;

out_relay:
	guc_log_relay_destroy(log);
out_unlock:
	mutex_unlock(&log->relay.lock);

	return ret;
}

int intel_guc_log_relay_start(struct intel_guc_log *log)
{
	if (log->relay.started)
		return -EEXIST;

	guc_log_enable_flush_events(log);

	/*
	 * When GuC is logging without us relaying to userspace, we're ignoring
	 * the flush notification. This means that we need to unconditionally
	 * flush on relay enabling, since GuC only notifies us once.
	 */
	queue_work(system_highpri_wq, &log->relay.flush_work);

	log->relay.started = true;

	return 0;
}

void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	intel_wakeref_t wakeref;

	if (!log->relay.started)
		return;

	/*
	 * Before initiating the forceful flush, wait for any pending/ongoing
	 * flush to complete; otherwise the forceful flush may not actually
	 * happen.
	 */
	flush_work(&log->relay.flush_work);

	with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
		guc_action_flush_log(guc);

	/* GuC would have updated the log buffer by now, so capture it */
	guc_log_capture_logs(log);
}

/*
 * Stops the relay log. Called from intel_guc_log_relay_close(), so there is
 * no possibility of a race with start/flush since relay_write cannot race
 * relay_close.
 */
static void guc_log_relay_stop(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	if (!log->relay.started)
		return;

	guc_log_disable_flush_events(log);
	intel_synchronize_irq(i915);

	flush_work(&log->relay.flush_work);

	log->relay.started = false;
}

void intel_guc_log_relay_close(struct intel_guc_log *log)
{
	guc_log_relay_stop(log);

	mutex_lock(&log->relay.lock);
	GEM_BUG_ON(!intel_guc_log_relay_created(log));
	guc_log_unmap(log);
	guc_log_relay_destroy(log);
	mutex_unlock(&log->relay.lock);
}

void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
	queue_work(system_highpri_wq, &log->relay.flush_work);
}
681