/*	$NetBSD: intel_guc_ct.c,v 1.4 2021/12/19 12:32:15 riastradh Exp $	*/

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_guc_ct.c,v 1.4 2021/12/19 12:32:15 riastradh Exp $");

#include "i915_drv.h"
#include "intel_guc_ct.h"

#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG_DRIVER(...)	DRM_DEBUG_DRIVER(__VA_ARGS__)
#else
#define CT_DEBUG_DRIVER(...)	do { } while (0)
#endif

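/*
 * Tracks one in-flight host-to-GuC request: queued on
 * ct->requests.pending until the response carrying the matching fence
 * arrives and the handler fills in status and response_len.
 */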
struct ct_request {
	struct list_head link;
	u32 fence;
	u32 status;
	u32 response_len;
	u32 *response_buf;
};

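/*
 * Holds a copy of one incoming GuC-to-host request message, queued on
 * ct->requests.incoming for deferred processing by the worker.
 */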
struct ct_incoming_request {
	struct list_head link;
	u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_incoming_request_worker_func(struct work_struct *w);

/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	spin_lock_init(&ct->requests.lock);
	INIT_LIST_HEAD(&ct->requests.pending);
	INIT_LIST_HEAD(&ct->requests.incoming);
	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
}

static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
	return container_of(ct, struct intel_guc, ct);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
	switch (type) {
	case INTEL_GUC_CT_BUFFER_TYPE_SEND:
		return "SEND";
	case INTEL_GUC_CT_BUFFER_TYPE_RECV:
		return "RECV";
	default:
		return "<invalid>";
	}
}

static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
				    u32 cmds_addr, u32 size)
{
	CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size);
	memset(desc, 0, sizeof(*desc));
	desc->addr = cmds_addr;
	desc->size = size;
	desc->owner = CTB_OWNER_HOST;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
	CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
			desc, desc->head, desc->tail);
	desc->head = 0;
	desc->tail = 0;
	desc->is_in_error = 0;
}

static int guc_action_register_ct_buffer(struct intel_guc *guc,
					 u32 desc_addr,
					 u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
		desc_addr,
		sizeof(struct guc_ct_buffer_desc),
		type
	};
	int err;

	/* Can't use generic send(), CT registration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: register %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}

static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
					   u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
		CTB_OWNER_HOST,
		type
	};
	int err;

	/* Can't use generic send(), CT deregistration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: deregister %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}

/**
 * intel_guc_ct_init - Init buffer-based communication
 * @ct: pointer to CT struct
 *
 * Allocate memory required for buffer-based communication.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	void *blob;
	int err;
	int i;

	GEM_BUG_ON(ct->vma);

	/* We allocate 1 page to hold both descriptors and both buffers.
	 *       ___________.....................
	 *      |desc (SEND)|                   :
	 *      |___________|                   PAGE/4
	 *      :___________....................:
	 *      |desc (RECV)|                   :
	 *      |___________|                   PAGE/4
	 *      :_______________________________:
	 *      |cmds (SEND)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *      |cmds (RECV)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *
	 * Each message can use a maximum of 32 dwords and we don't expect to
	 * have more than 1 in flight at any time, so we have enough space.
	 * Some logic further ahead will rely on the fact that there is only 1
	 * page and that it is always mapped, so if the size is changed the
	 * other code will need updating as well.
	 */

	err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
	if (err) {
		DRM_ERROR("CT: channel allocation failed; err=%d\n", err);
		return err;
	}

	CT_DEBUG_DRIVER("CT: vma base=%#x\n",
			intel_guc_ggtt_offset(guc, ct->vma));

	/* store pointers to desc and cmds */
	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
		ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
	}
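
	/*
	 * Worked example, assuming a 4 KiB page: the SEND desc sits at
	 * blob+0x000, the RECV desc at blob+0x400, SEND cmds at blob+0x800
	 * and RECV cmds at blob+0xc00, each region PAGE_SIZE/4 bytes long.
	 */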

	return 0;
}

/**
 * intel_guc_ct_fini - Fini buffer-based communication
 * @ct: pointer to CT struct
 *
 * Deallocate memory required for buffer-based communication.
 */
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
	GEM_BUG_ON(ct->enabled);

	i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
	spin_lock_destroy(&ct->requests.lock);
}

/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	u32 base;
	int err;
	int i;

	GEM_BUG_ON(ct->enabled);

	/* vma should be already allocated and mapped */
	GEM_BUG_ON(!ct->vma);
	base = intel_guc_ggtt_offset(guc, ct->vma);

	/* (re)initialize descriptors;
	 * cmds buffers are in the second half of the blob page
	 */
	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		guc_ct_buffer_desc_init(ct->ctbs[i].desc,
					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
					PAGE_SIZE/4);
	}

	/* register buffers, starting with the RECV buffer;
	 * descriptors are in the first half of the blob
	 */
	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_RECV,
					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
	if (unlikely(err))
		goto err_out;

	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_SEND,
					    INTEL_GUC_CT_BUFFER_TYPE_SEND);
	if (unlikely(err))
		goto err_deregister;

	ct->enabled = true;

	return 0;

err_deregister:
	guc_action_deregister_ct_buffer(guc,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
	DRM_ERROR("CT: can't open channel; err=%d\n", err);
	return err;
}

/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);

	GEM_BUG_ON(!ct->enabled);

	ct->enabled = false;

	if (intel_guc_is_running(guc)) {
		guc_action_deregister_ct_buffer(guc,
						INTEL_GUC_CT_BUFFER_TYPE_SEND);
		guc_action_deregister_ct_buffer(guc,
						INTEL_GUC_CT_BUFFER_TYPE_RECV);
	}
}

static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
	/* For now it's trivial */
	return ++ct->requests.next_fence;
}

/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |     request specific data   |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-------------------^
 */

static int ctb_write(struct intel_guc_ct_buffer *ctb,
		     const u32 *action,
		     u32 len /* in dwords */,
		     u32 fence,
		     bool want_response)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 used;			/* in dwords */
	u32 header;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);

	/*
	 * tail == head condition indicates empty. GuC FW does not support
	 * using up the entire buffer to get tail == head meaning full.
	 */
	if (tail < head)
		used = (size - head) + tail;
	else
		used = tail - head;
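
	/*
	 * Worked example (all values in dwords): with size=256, head=250
	 * and tail=4 the ring has wrapped, so used = (256 - 250) + 4 = 10.
	 */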

	/* make sure there is space, including an extra dword for the fence */
	if (unlikely(used + len + 1 >= size))
		return -ENOSPC;

	/*
	 * Write the message. The format is the following:
	 * DW0: header (including action code)
	 * DW1: fence
	 * DW2+: action data
	 */
	header = (len << GUC_CT_MSG_LEN_SHIFT) |
		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

	CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
			4, &header, 4, &fence,
			4 * (len - 1), &action[1]);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = fence;
	tail = (tail + 1) % size;

	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}

	/* now update desc tail (back in bytes) */
	desc->tail = tail * 4;
	GEM_BUG_ON(desc->tail > desc->size);

	return 0;
}

/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc:	buffer descriptor
 * @fence:	response fence
 * @status:	placeholder for status
 *
 * The GuC will update the CT buffer descriptor with the new fence and
 * status after processing the command identified by the fence. Wait for
 * the specified fence and then read the status of the command from the
 * descriptor.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 * *	-EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
				    u32 fence,
				    u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done (READ_ONCE(desc->fence) == fence)
#ifdef __NetBSD__
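	/*
	 * NetBSD stand-in for wait_for_us()/wait_for(): spin briefly on the
	 * fence, then make one ~10ms kpause() before declaring a timeout.
	 */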
	int timo = 10;
	err = 0;
	while (!done) {
		if (--timo == 0) {
			kpause("intelguc", false, mstohz(10), NULL);
			if (!done)
				err = -ETIMEDOUT;
			break;
		}
	}
#else
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#endif
#undef done

	if (unlikely(err)) {
		DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
			  fence, desc->fence);

		if (WARN_ON(desc->is_in_error)) {
			/* Something went wrong with the messaging, try to reset
			 * the buffer and hope for the best
			 */
			guc_ct_buffer_desc_reset(desc);
			err = -EPROTO;
		}
	}

	*status = desc->status;
	return err;
}

/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:	pointer to pending request
 * @status:	placeholder for status
 *
 * For each sent request, the GuC shall send back a CT response message.
 * Our message handler will update the status of the tracked request once
 * a response message with the given fence is received. Wait here and
 * check for a valid response status value.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
#ifdef __NetBSD__
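	/* Same NetBSD spin-then-kpause() pattern as in
	 * wait_for_ctb_desc_update() above.
	 */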
	int timo = 10;
	err = 0;
	while (!done) {
		if (--timo == 0) {
			kpause("intelguc", false, mstohz(10), NULL);
			if (!done)
				err = -ETIMEDOUT;
			break;
		}
	}
#else
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#endif
#undef done

	if (unlikely(err))
		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

	*status = req->status;
	return err;
}

static int ct_send(struct intel_guc_ct *ct,
		   const u32 *action,
		   u32 len,
		   u32 *response_buf,
		   u32 response_buf_size,
		   u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	struct ct_request request;
	unsigned long flags;
	u32 fence;
	int err;

	GEM_BUG_ON(!ct->enabled);
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
	GEM_BUG_ON(!response_buf && response_buf_size);

	fence = ct_get_next_fence(ct);
	request.fence = fence;
	request.status = 0;
	request.response_len = response_buf_size;
	request.response_buf = response_buf;

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_add_tail(&request.link, &ct->requests.pending);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	err = ctb_write(ctb, action, len, fence, !!response_buf);
	if (unlikely(err))
		goto unlink;

	intel_guc_notify(ct_to_guc(ct));

	if (response_buf)
		err = wait_for_ct_request_update(&request, status);
	else
		err = wait_for_ctb_desc_update(desc, fence, status);
	if (unlikely(err))
		goto unlink;

	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
		err = -EIO;
		goto unlink;
	}

	if (response_buf) {
		/* There shall be no data in the status */
		WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
		/* Return actual response len */
		err = request.response_len;
	} else {
		/* There shall be no response payload */
		WARN_ON(request.response_len);
		/* Return data decoded from the status dword */
		err = INTEL_GUC_MSG_TO_DATA(*status);
	}

unlink:
	spin_lock_irqsave(&ct->requests.lock, flags);
	list_del(&request.link);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	return err;
}

/*
 * Command Transport (CT) buffer based GuC send function.
 */
int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
		      u32 *response_buf, u32 response_buf_size)
{
	struct intel_guc *guc = ct_to_guc(ct);
	u32 status = ~0; /* undefined */
	int ret;

	if (unlikely(!ct->enabled)) {
		WARN(1, "Unexpected send: action=%#x\n", *action);
		return -ENODEV;
	}

	mutex_lock(&guc->send_mutex);

	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
	if (unlikely(ret < 0)) {
		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
			  action[0], ret, status);
	} else if (unlikely(ret)) {
		CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
				action[0], ret, ret);
	}

	mutex_unlock(&guc->send_mutex);
	return ret;
}
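
/*
 * Usage sketch (illustrative only): callers build an action array whose
 * first dword is the action code, then pass it in, e.g.
 *
 *	u32 action[] = { INTEL_GUC_ACTION_FOO, param };
 *	ret = intel_guc_ct_send(ct, action, ARRAY_SIZE(action), NULL, 0);
 *
 * where INTEL_GUC_ACTION_FOO stands for a hypothetical action code. A
 * negative return is an errno; otherwise the return is data decoded from
 * the status dword or, when a response buffer is supplied, the actual
 * response length.
 */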

static inline unsigned int ct_header_get_len(u32 header)
{
	return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
	return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
	return !!(header & GUC_CT_MSG_IS_RESPONSE);
}

static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 *cmds = ctb->cmds;
	s32 available;			/* in dwords */
	unsigned int len;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);
	GEM_BUG_ON(head >= size);

	/* tail == head condition indicates empty */
	available = tail - head;
	if (unlikely(available == 0))
		return -ENODATA;

	/* beware of buffer wrap case */
	if (unlikely(available < 0))
		available += size;
	CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
	GEM_BUG_ON(available < 0);

	data[0] = cmds[head];
	head = (head + 1) % size;

	/* message len with header */
	len = ct_header_get_len(data[0]) + 1;
	if (unlikely(len > (u32)available)) {
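		/* dump what we have, accounting for a wrap of the ring */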
		DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
			  4, data,
			  4 * (head + available - 1 > size ?
			       size - head : available - 1), &cmds[head],
			  4 * (head + available - 1 > size ?
			       available - 1 - size + head : 0), &cmds[0]);
		return -EPROTO;
	}

	for (i = 1; i < len; i++) {
		data[i] = cmds[head];
		head = (head + 1) % size;
	}
	CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);

	desc->head = head * 4;
	return 0;
}

/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */

static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	u32 fence;
	u32 status;
	u32 datalen;
	struct ct_request *req;
	bool found = false;

	GEM_BUG_ON(!ct_header_is_response(header));
	GEM_BUG_ON(!in_irq());

	/* Response payload shall at least include fence and status */
	if (unlikely(len < 2)) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	fence = msg[1];
	status = msg[2];
	datalen = len - 2;

	/* Format of the status follows RESPONSE message */
	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);

	spin_lock(&ct->requests.lock);
	list_for_each_entry(req, &ct->requests.pending, link) {
		if (unlikely(fence != req->fence)) {
			CT_DEBUG_DRIVER("CT: request %u awaits response\n",
					req->fence);
			continue;
		}
		if (unlikely(datalen > req->response_len)) {
			DRM_ERROR("CT: response %u too long %*ph\n",
				  req->fence, 4 * msglen, msg);
			datalen = 0;
		}
		if (datalen)
			memcpy(req->response_buf, msg + 3, 4 * datalen);
		req->response_len = datalen;
		WRITE_ONCE(req->status, status);
		found = true;
		break;
	}
	spin_unlock(&ct->requests.lock);

	if (!found)
		DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
	return 0;
}

static void ct_process_request(struct intel_guc_ct *ct,
			       u32 action, u32 len, const u32 *payload)
{
	struct intel_guc *guc = ct_to_guc(ct);
	int ret;

	CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);

	switch (action) {
	case INTEL_GUC_ACTION_DEFAULT:
		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
		if (unlikely(ret))
			goto fail_unexpected;
		break;

	default:
fail_unexpected:
		DRM_ERROR("CT: unexpected request %x %*ph\n",
			  action, 4 * len, payload);
		break;
	}
}

static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
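	/*
	 * Dequeue and handle a single incoming request per call; return
	 * true once the list has drained so the worker stops re-queuing
	 * itself.
	 */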
	unsigned long flags;
	struct ct_incoming_request *request;
	u32 header;
	u32 *payload;
	bool done;

	spin_lock_irqsave(&ct->requests.lock, flags);
	request = list_first_entry_or_null(&ct->requests.incoming,
					   struct ct_incoming_request, link);
	if (request)
		list_del(&request->link);
	done = !!list_empty(&ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	if (!request)
		return true;

	header = request->msg[0];
	payload = &request->msg[1];
	ct_process_request(ct,
			   ct_header_get_action(header),
			   ct_header_get_len(header),
			   payload);

	kfree(request);
	return done;
}

static void ct_incoming_request_worker_func(struct work_struct *w)
{
	struct intel_guc_ct *ct =
		container_of(w, struct intel_guc_ct, requests.worker);
	bool done;

	done = ct_process_incoming_requests(ct);
	if (!done)
		queue_work(system_unbound_wq, &ct->requests.worker);
}

/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */

static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	struct ct_incoming_request *request;
	unsigned long flags;

	GEM_BUG_ON(ct_header_is_response(header));

	request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
	if (unlikely(!request)) {
		DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
		return 0; /* XXX: -ENOMEM ? */
	}
	memcpy(request->msg, msg, 4 * msglen);

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_add_tail(&request->link, &ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	queue_work(system_unbound_wq, &ct->requests.worker);
	return 0;
}

/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
	int err = 0;

	if (unlikely(!ct->enabled)) {
		WARN(1, "Unexpected GuC event received while CT disabled!\n");
		return;
	}

	do {
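		/* drain the RECV buffer; -ENODATA is the normal empty exit */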
		err = ctb_read(ctb, msg);
		if (err)
			break;

		if (ct_header_is_response(msg[0]))
			err = ct_handle_response(ct, msg);
		else
			err = ct_handle_request(ct, msg);
	} while (!err);

	if (GEM_WARN_ON(err == -EPROTO)) {
		DRM_ERROR("CT: corrupted message detected!\n");
		ctb->desc->is_in_error = 1;
	}
}
855