/*	$NetBSD: xen_drm_front.h,v 1.2 2021/12/18 23:45:45 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#ifndef __XEN_DRM_FRONT_H_
#define __XEN_DRM_FRONT_H_

#include <linux/scatterlist.h>

#include <drm/drm_connector.h>
#include <drm/drm_simple_kms_helper.h>

#include "xen_drm_front_cfg.h"

struct drm_device;
struct drm_framebuffer;
struct drm_gem_object;
struct drm_pending_vblank_event;

/**
 * DOC: Driver modes of operation in terms of display buffers used
 *
 * Depending on the requirements for the para-virtualized environment, namely
 * requirements dictated by the accompanying DRM/(v)GPU drivers running in both
 * host and guest environments, display buffers can be allocated by either the
 * frontend driver or the backend.
 */

/**
 * DOC: Buffers allocated by the frontend driver
 *
 * In this mode of operation the driver allocates buffers from system memory.
 *
 * Note: if used with accompanying DRM/(v)GPU drivers, this mode of operation
 * may require IOMMU support on the platform, so that the accompanying DRM/vGPU
 * hardware can still reach display buffer memory while importing PRIME
 * buffers from the frontend driver.
 */

/**
 * DOC: Buffers allocated by the backend
 *
 * This mode of operation is run-time configured via guest domain configuration
 * through XenStore entries.
 *
 * For systems which do not provide IOMMU support but have specific
 * requirements for display buffers, it is possible to allocate such buffers
 * on the backend side and share them with the frontend.
 * For example, if the host domain is 1:1 mapped and has DRM/GPU hardware
 * expecting physically contiguous memory, this allows implementing zero-copy
 * use-cases.
 *
 * Note: while using this scenario, the following should be considered:
 *
 * #. If the guest domain dies, pages/grants received from the backend
 *    cannot be claimed back.
 *
 * #. A misbehaving guest may send too many requests to the backend,
 *    exhausting its grant references and memory
 *    (consider this from a security POV).
 */
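
/*
 * Illustrative sketch (not part of the original header): backend allocation
 * is normally requested per virtual display in the guest domain
 * configuration, which the toolstack mirrors into XenStore.  The line below
 * follows the xl vdispl syntax and is shown only as an example; the domain
 * name and connector geometry are placeholders:
 *
 *	vdispl = [ 'backend=Domain-D, be-alloc=1, connectors=id0:1920x1080' ]
 *
 * With be-alloc=1 the backend allocates the display buffers and shares the
 * grants with this frontend instead of the other way around.
 */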

/**
 * DOC: Driver limitations
 *
 * #. Only the primary plane without additional properties is supported.
 *
 * #. Only one video mode per connector is supported, which is configured
 *    via XenStore.
 *
 * #. All CRTCs operate at a fixed frequency of 60 Hz.
 */

/* timeout in ms to wait for the backend to respond */
#define XEN_DRM_FRONT_WAIT_BACK_MS	3000
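
/*
 * A minimal sketch (illustration only, not compiled; the "evtchnl" variable
 * and its completion field are assumptions based on the event channel code,
 * they are not defined in this header) of how the timeout above is typically
 * applied while waiting for the backend to answer a request:
 */
#if 0
	if (!wait_for_completion_timeout(&evtchnl->u.req.completion,
			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)))
		return -ETIMEDOUT;	/* backend did not respond in time */
#endif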

#ifndef GRANT_INVALID_REF
/*
 * Note on the use of grant reference 0 as the invalid grant reference:
 * grant reference 0 is valid, but never exposed to a PV driver,
 * because it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF	0
#endif
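
/*
 * Minimal usage sketch (illustration only, not compiled; the helper below is
 * hypothetical): GRANT_INVALID_REF serves as a "nothing granted" sentinel,
 * so teardown paths can tell whether a grant still has to be released.
 */
#if 0
static void example_put_grant(grant_ref_t *gref)	/* hypothetical helper */
{
	if (*gref == GRANT_INVALID_REF)
		return;			/* nothing granted or already released */
	/* ... end foreign access on *gref here ... */
	*gref = GRANT_INVALID_REF;	/* mark as released */
}
#endif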

struct xen_drm_front_info {
	struct xenbus_device *xb_dev;
	struct xen_drm_front_drm_info *drm_info;

	/* to protect data between backend IO code and interrupt handler */
	spinlock_t io_lock;

	int num_evt_pairs;
	struct xen_drm_front_evtchnl_pair *evt_pairs;
	struct xen_drm_front_cfg cfg;

	/* display buffers */
	struct list_head dbuf_list;
};

struct xen_drm_front_drm_pipeline {
	struct xen_drm_front_drm_info *drm_info;

	int index;

	struct drm_simple_display_pipe pipe;

	struct drm_connector conn;
	/* These are only for connector mode checking */
	int width, height;

	struct drm_pending_vblank_event *pending_event;

	/* page flip time-out worker */
	struct delayed_work pflip_to_worker;

	bool conn_connected;
};

struct xen_drm_front_drm_info {
	struct xen_drm_front_info *front_info;
	struct drm_device *drm_dev;

	struct xen_drm_front_drm_pipeline pipeline[XEN_DRM_FRONT_MAX_CRTCS];
};

static inline u64 xen_drm_front_fb_to_cookie(struct drm_framebuffer *fb)
{
	return (uintptr_t)fb;
}

static inline u64 xen_drm_front_dbuf_to_cookie(struct drm_gem_object *gem_obj)
{
	return (uintptr_t)gem_obj;
}
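
/*
 * Usage sketch (illustration only): the backend identifies framebuffers and
 * display buffers by opaque u64 cookies rather than by kernel pointers, so
 * requests such as a page flip pass the cookie derived above, e.g.
 *
 *	ret = xen_drm_front_page_flip(front_info, pipeline->index,
 *				      xen_drm_front_fb_to_cookie(fb));
 */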

int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie);

int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, struct page **pages);

int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format);

int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie);

int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie);

void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie);
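
/*
 * A hedged example (illustration only, not compiled; the cookies, geometry,
 * format and pages values are assumed to come from the caller, and error
 * handling is trimmed) of how the helpers above fit together: a display
 * buffer is created first, and a framebuffer is then attached to it, both
 * identified by their cookies.
 */
#if 0
	ret = xen_drm_front_dbuf_create(front_info, dbuf_cookie,
					width, height, bpp, size, pages);
	if (ret < 0)
		return ret;

	ret = xen_drm_front_fb_attach(front_info, dbuf_cookie, fb_cookie,
				      width, height, pixel_format);
#endif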

#endif /* __XEN_DRM_FRONT_H_ */