/*	$NetBSD: virtgpu_display.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $	*/

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
29efa246c0Sriastradh
30efa246c0Sriastradh #include <sys/cdefs.h>
31*41ec0267Sriastradh __KERNEL_RCSID(0, "$NetBSD: virtgpu_display.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");
32*41ec0267Sriastradh
33*41ec0267Sriastradh #include <drm/drm_atomic_helper.h>
34*41ec0267Sriastradh #include <drm/drm_damage_helper.h>
35*41ec0267Sriastradh #include <drm/drm_fourcc.h>
36*41ec0267Sriastradh #include <drm/drm_gem_framebuffer_helper.h>
37*41ec0267Sriastradh #include <drm/drm_probe_helper.h>
38*41ec0267Sriastradh #include <drm/drm_vblank.h>
39efa246c0Sriastradh
40efa246c0Sriastradh #include "virtgpu_drv.h"
41efa246c0Sriastradh
/*
 * Scanout geometry limits advertised to userspace: modes between
 * 32x32 and 8192x8192 are accepted; 1024x768 is used as the default
 * when the host reports no preferred size (see vgdev_output_init()).
 */
#define XRES_MIN    32
#define YRES_MIN    32

#define XRES_DEF  1024
#define YRES_DEF   768

#define XRES_MAX  8192
#define YRES_MAX  8192

/* Map a drm_connector embedded in a virtio_gpu_output back to the output. */
#define drm_connector_to_virtio_gpu_output(x) \
	container_of(x, struct virtio_gpu_output, conn)
53efa246c0Sriastradh
/* CRTC vtable: all modeset/pageflip work is delegated to the atomic helpers. */
static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = drm_crtc_cleanup,

	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
63efa246c0Sriastradh
/* Framebuffer vtable: generic GEM-backed FB helpers plus dirtyfb for damage. */
static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
	.create_handle = drm_gem_fb_create_handle,
	.destroy = drm_gem_fb_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};
69efa246c0Sriastradh
70*41ec0267Sriastradh static int
virtio_gpu_framebuffer_init(struct drm_device * dev,struct virtio_gpu_framebuffer * vgfb,const struct drm_mode_fb_cmd2 * mode_cmd,struct drm_gem_object * obj)71efa246c0Sriastradh virtio_gpu_framebuffer_init(struct drm_device *dev,
72efa246c0Sriastradh struct virtio_gpu_framebuffer *vgfb,
73*41ec0267Sriastradh const struct drm_mode_fb_cmd2 *mode_cmd,
74efa246c0Sriastradh struct drm_gem_object *obj)
75efa246c0Sriastradh {
76efa246c0Sriastradh int ret;
77efa246c0Sriastradh
78*41ec0267Sriastradh vgfb->base.obj[0] = obj;
79*41ec0267Sriastradh
80*41ec0267Sriastradh drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd);
81efa246c0Sriastradh
82efa246c0Sriastradh ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
83efa246c0Sriastradh if (ret) {
84*41ec0267Sriastradh vgfb->base.obj[0] = NULL;
85efa246c0Sriastradh return ret;
86efa246c0Sriastradh }
87efa246c0Sriastradh return 0;
88efa246c0Sriastradh }
89efa246c0Sriastradh
virtio_gpu_crtc_mode_set_nofb(struct drm_crtc * crtc)90efa246c0Sriastradh static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
91efa246c0Sriastradh {
92efa246c0Sriastradh struct drm_device *dev = crtc->dev;
93efa246c0Sriastradh struct virtio_gpu_device *vgdev = dev->dev_private;
94efa246c0Sriastradh struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
95efa246c0Sriastradh
96efa246c0Sriastradh virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
97efa246c0Sriastradh crtc->mode.hdisplay,
98efa246c0Sriastradh crtc->mode.vdisplay, 0, 0);
99efa246c0Sriastradh }
100efa246c0Sriastradh
virtio_gpu_crtc_atomic_enable(struct drm_crtc * crtc,struct drm_crtc_state * old_state)101*41ec0267Sriastradh static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
102*41ec0267Sriastradh struct drm_crtc_state *old_state)
103efa246c0Sriastradh {
104*41ec0267Sriastradh struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
105*41ec0267Sriastradh
106*41ec0267Sriastradh output->enabled = true;
107efa246c0Sriastradh }
108efa246c0Sriastradh
virtio_gpu_crtc_atomic_disable(struct drm_crtc * crtc,struct drm_crtc_state * old_state)109*41ec0267Sriastradh static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
110*41ec0267Sriastradh struct drm_crtc_state *old_state)
111efa246c0Sriastradh {
112efa246c0Sriastradh struct drm_device *dev = crtc->dev;
113efa246c0Sriastradh struct virtio_gpu_device *vgdev = dev->dev_private;
114efa246c0Sriastradh struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
115efa246c0Sriastradh
116efa246c0Sriastradh virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
117*41ec0267Sriastradh output->enabled = false;
118efa246c0Sriastradh }
119efa246c0Sriastradh
/*
 * Atomic check for the CRTC: the virtual GPU imposes no constraints
 * beyond what the DRM core already validates, so always accept.
 */
static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
					struct drm_crtc_state *state)
{
	return 0;
}
125efa246c0Sriastradh
virtio_gpu_crtc_atomic_flush(struct drm_crtc * crtc,struct drm_crtc_state * old_state)126*41ec0267Sriastradh static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
127*41ec0267Sriastradh struct drm_crtc_state *old_state)
128*41ec0267Sriastradh {
129*41ec0267Sriastradh unsigned long flags;
130*41ec0267Sriastradh
131*41ec0267Sriastradh spin_lock_irqsave(&crtc->dev->event_lock, flags);
132*41ec0267Sriastradh if (crtc->state->event)
133*41ec0267Sriastradh drm_crtc_send_vblank_event(crtc, crtc->state->event);
134*41ec0267Sriastradh crtc->state->event = NULL;
135*41ec0267Sriastradh spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
136*41ec0267Sriastradh }
137*41ec0267Sriastradh
/* CRTC helper vtable wired into the atomic commit machinery. */
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
	.mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
	.atomic_check = virtio_gpu_crtc_atomic_check,
	.atomic_flush = virtio_gpu_crtc_atomic_flush,
	.atomic_enable = virtio_gpu_crtc_atomic_enable,
	.atomic_disable = virtio_gpu_crtc_atomic_disable,
};
145efa246c0Sriastradh
/* Encoder mode_set: nothing to program on a purely virtual encoder. */
static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
}
151efa246c0Sriastradh
/* Encoder enable: no-op; the host output is driven via the CRTC hooks. */
static void virtio_gpu_enc_enable(struct drm_encoder *encoder)
{
}
155efa246c0Sriastradh
/* Encoder disable: no-op; see virtio_gpu_crtc_atomic_disable(). */
static void virtio_gpu_enc_disable(struct drm_encoder *encoder)
{
}
159efa246c0Sriastradh
virtio_gpu_conn_get_modes(struct drm_connector * connector)160efa246c0Sriastradh static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
161efa246c0Sriastradh {
162efa246c0Sriastradh struct virtio_gpu_output *output =
163efa246c0Sriastradh drm_connector_to_virtio_gpu_output(connector);
164efa246c0Sriastradh struct drm_display_mode *mode = NULL;
165efa246c0Sriastradh int count, width, height;
166efa246c0Sriastradh
167*41ec0267Sriastradh if (output->edid) {
168*41ec0267Sriastradh count = drm_add_edid_modes(connector, output->edid);
169*41ec0267Sriastradh if (count)
170*41ec0267Sriastradh return count;
171*41ec0267Sriastradh }
172*41ec0267Sriastradh
173efa246c0Sriastradh width = le32_to_cpu(output->info.r.width);
174efa246c0Sriastradh height = le32_to_cpu(output->info.r.height);
175efa246c0Sriastradh count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
176efa246c0Sriastradh
177efa246c0Sriastradh if (width == 0 || height == 0) {
178efa246c0Sriastradh width = XRES_DEF;
179efa246c0Sriastradh height = YRES_DEF;
180efa246c0Sriastradh drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
181efa246c0Sriastradh } else {
182efa246c0Sriastradh DRM_DEBUG("add mode: %dx%d\n", width, height);
183efa246c0Sriastradh mode = drm_cvt_mode(connector->dev, width, height, 60,
184efa246c0Sriastradh false, false, false);
185efa246c0Sriastradh mode->type |= DRM_MODE_TYPE_PREFERRED;
186efa246c0Sriastradh drm_mode_probed_add(connector, mode);
187efa246c0Sriastradh count++;
188efa246c0Sriastradh }
189efa246c0Sriastradh
190efa246c0Sriastradh return count;
191efa246c0Sriastradh }
192efa246c0Sriastradh
virtio_gpu_conn_mode_valid(struct drm_connector * connector,struct drm_display_mode * mode)193*41ec0267Sriastradh static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector,
194efa246c0Sriastradh struct drm_display_mode *mode)
195efa246c0Sriastradh {
196efa246c0Sriastradh struct virtio_gpu_output *output =
197efa246c0Sriastradh drm_connector_to_virtio_gpu_output(connector);
198efa246c0Sriastradh int width, height;
199efa246c0Sriastradh
200efa246c0Sriastradh width = le32_to_cpu(output->info.r.width);
201efa246c0Sriastradh height = le32_to_cpu(output->info.r.height);
202efa246c0Sriastradh
203efa246c0Sriastradh if (!(mode->type & DRM_MODE_TYPE_PREFERRED))
204efa246c0Sriastradh return MODE_OK;
205efa246c0Sriastradh if (mode->hdisplay == XRES_DEF && mode->vdisplay == YRES_DEF)
206efa246c0Sriastradh return MODE_OK;
207efa246c0Sriastradh if (mode->hdisplay <= width && mode->hdisplay >= width - 16 &&
208efa246c0Sriastradh mode->vdisplay <= height && mode->vdisplay >= height - 16)
209efa246c0Sriastradh return MODE_OK;
210efa246c0Sriastradh
211efa246c0Sriastradh DRM_DEBUG("del mode: %dx%d\n", mode->hdisplay, mode->vdisplay);
212efa246c0Sriastradh return MODE_BAD;
213efa246c0Sriastradh }
214efa246c0Sriastradh
/* Encoder helper vtable: all no-op stubs for the virtual encoder. */
static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
	.mode_set = virtio_gpu_enc_mode_set,
	.enable = virtio_gpu_enc_enable,
	.disable = virtio_gpu_enc_disable,
};
220efa246c0Sriastradh
/* Connector helper vtable: mode probing and validation. */
static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
	.get_modes = virtio_gpu_conn_get_modes,
	.mode_valid = virtio_gpu_conn_mode_valid,
};
225efa246c0Sriastradh
/*
 * .detect hook: report connector status from the host-provided display
 * info — a scanout the host marked enabled counts as connected.
 */
static enum drm_connector_status virtio_gpu_conn_detect(
			struct drm_connector *connector,
			bool force)
{
	struct virtio_gpu_output *output =
		drm_connector_to_virtio_gpu_output(connector);

	return output->info.enabled ? connector_status_connected
				    : connector_status_disconnected;
}
238efa246c0Sriastradh
/* Connector destroy: unregister from userspace first, then free core state. */
static void virtio_gpu_conn_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}
244efa246c0Sriastradh
/* Connector vtable: probing via the single-connector helper, atomic state. */
static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
	.detect = virtio_gpu_conn_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = virtio_gpu_conn_destroy,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
253efa246c0Sriastradh
/* Virtual encoder needs only the generic cleanup on destroy. */
static const struct drm_encoder_funcs virtio_gpu_enc_funcs = {
	.destroy = drm_encoder_cleanup,
};
257efa246c0Sriastradh
vgdev_output_init(struct virtio_gpu_device * vgdev,int index)258efa246c0Sriastradh static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
259efa246c0Sriastradh {
260efa246c0Sriastradh struct drm_device *dev = vgdev->ddev;
261efa246c0Sriastradh struct virtio_gpu_output *output = vgdev->outputs + index;
262efa246c0Sriastradh struct drm_connector *connector = &output->conn;
263efa246c0Sriastradh struct drm_encoder *encoder = &output->enc;
264efa246c0Sriastradh struct drm_crtc *crtc = &output->crtc;
265*41ec0267Sriastradh struct drm_plane *primary, *cursor;
266efa246c0Sriastradh
267efa246c0Sriastradh output->index = index;
268efa246c0Sriastradh if (index == 0) {
269efa246c0Sriastradh output->info.enabled = cpu_to_le32(true);
270efa246c0Sriastradh output->info.r.width = cpu_to_le32(XRES_DEF);
271efa246c0Sriastradh output->info.r.height = cpu_to_le32(YRES_DEF);
272efa246c0Sriastradh }
273efa246c0Sriastradh
274*41ec0267Sriastradh primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index);
275*41ec0267Sriastradh if (IS_ERR(primary))
276*41ec0267Sriastradh return PTR_ERR(primary);
277*41ec0267Sriastradh cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
278*41ec0267Sriastradh if (IS_ERR(cursor))
279*41ec0267Sriastradh return PTR_ERR(cursor);
280*41ec0267Sriastradh drm_crtc_init_with_planes(dev, crtc, primary, cursor,
281*41ec0267Sriastradh &virtio_gpu_crtc_funcs, NULL);
282efa246c0Sriastradh drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
283efa246c0Sriastradh
284efa246c0Sriastradh drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
285efa246c0Sriastradh DRM_MODE_CONNECTOR_VIRTUAL);
286efa246c0Sriastradh drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
287*41ec0267Sriastradh if (vgdev->has_edid)
288*41ec0267Sriastradh drm_connector_attach_edid_property(connector);
289efa246c0Sriastradh
290efa246c0Sriastradh drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
291*41ec0267Sriastradh DRM_MODE_ENCODER_VIRTUAL, NULL);
292efa246c0Sriastradh drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
293efa246c0Sriastradh encoder->possible_crtcs = 1 << index;
294efa246c0Sriastradh
295*41ec0267Sriastradh drm_connector_attach_encoder(connector, encoder);
296efa246c0Sriastradh drm_connector_register(connector);
297efa246c0Sriastradh return 0;
298efa246c0Sriastradh }
299efa246c0Sriastradh
300efa246c0Sriastradh static struct drm_framebuffer *
virtio_gpu_user_framebuffer_create(struct drm_device * dev,struct drm_file * file_priv,const struct drm_mode_fb_cmd2 * mode_cmd)301efa246c0Sriastradh virtio_gpu_user_framebuffer_create(struct drm_device *dev,
302efa246c0Sriastradh struct drm_file *file_priv,
303*41ec0267Sriastradh const struct drm_mode_fb_cmd2 *mode_cmd)
304efa246c0Sriastradh {
305efa246c0Sriastradh struct drm_gem_object *obj = NULL;
306efa246c0Sriastradh struct virtio_gpu_framebuffer *virtio_gpu_fb;
307efa246c0Sriastradh int ret;
308efa246c0Sriastradh
309*41ec0267Sriastradh if (mode_cmd->pixel_format != DRM_FORMAT_HOST_XRGB8888 &&
310*41ec0267Sriastradh mode_cmd->pixel_format != DRM_FORMAT_HOST_ARGB8888)
311*41ec0267Sriastradh return ERR_PTR(-ENOENT);
312*41ec0267Sriastradh
313efa246c0Sriastradh /* lookup object associated with res handle */
314*41ec0267Sriastradh obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
315efa246c0Sriastradh if (!obj)
316efa246c0Sriastradh return ERR_PTR(-EINVAL);
317efa246c0Sriastradh
318efa246c0Sriastradh virtio_gpu_fb = kzalloc(sizeof(*virtio_gpu_fb), GFP_KERNEL);
319efa246c0Sriastradh if (virtio_gpu_fb == NULL)
320efa246c0Sriastradh return ERR_PTR(-ENOMEM);
321efa246c0Sriastradh
322efa246c0Sriastradh ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
323efa246c0Sriastradh if (ret) {
324efa246c0Sriastradh kfree(virtio_gpu_fb);
325*41ec0267Sriastradh drm_gem_object_put_unlocked(obj);
326efa246c0Sriastradh return NULL;
327efa246c0Sriastradh }
328efa246c0Sriastradh
329efa246c0Sriastradh return &virtio_gpu_fb->base;
330efa246c0Sriastradh }
331efa246c0Sriastradh
/*
 * Driver-specific atomic commit tail.
 *
 * Planes are committed only after the modeset enables, so the host
 * scanout is configured before any framebuffer is flushed to it
 * (NOTE(review): this ordering differs from the stock helper — confirm
 * against drm_atomic_helper_commit_tail before changing).
 */
static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_modeset_enables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);

	/* Signal completion before waiting so blocked commits can proceed. */
	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
}
345*41ec0267Sriastradh
/* Hook the custom commit tail into the atomic helper framework. */
static const struct drm_mode_config_helper_funcs virtio_mode_config_helpers = {
	.atomic_commit_tail = vgdev_atomic_commit_tail,
};
349*41ec0267Sriastradh
/* Mode-config vtable: driver fb_create, stock atomic check/commit. */
static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
	.fb_create = virtio_gpu_user_framebuffer_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
355efa246c0Sriastradh
virtio_gpu_modeset_init(struct virtio_gpu_device * vgdev)356*41ec0267Sriastradh void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
357efa246c0Sriastradh {
358efa246c0Sriastradh int i;
359efa246c0Sriastradh
360efa246c0Sriastradh drm_mode_config_init(vgdev->ddev);
361*41ec0267Sriastradh vgdev->ddev->mode_config.quirk_addfb_prefer_host_byte_order = true;
362*41ec0267Sriastradh vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;
363*41ec0267Sriastradh vgdev->ddev->mode_config.helper_private = &virtio_mode_config_helpers;
364efa246c0Sriastradh
365efa246c0Sriastradh /* modes will be validated against the framebuffer size */
366efa246c0Sriastradh vgdev->ddev->mode_config.min_width = XRES_MIN;
367efa246c0Sriastradh vgdev->ddev->mode_config.min_height = YRES_MIN;
368efa246c0Sriastradh vgdev->ddev->mode_config.max_width = XRES_MAX;
369efa246c0Sriastradh vgdev->ddev->mode_config.max_height = YRES_MAX;
370efa246c0Sriastradh
371efa246c0Sriastradh for (i = 0 ; i < vgdev->num_scanouts; ++i)
372efa246c0Sriastradh vgdev_output_init(vgdev, i);
373efa246c0Sriastradh
374efa246c0Sriastradh drm_mode_config_reset(vgdev->ddev);
375efa246c0Sriastradh }
376efa246c0Sriastradh
virtio_gpu_modeset_fini(struct virtio_gpu_device * vgdev)377efa246c0Sriastradh void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
378efa246c0Sriastradh {
379*41ec0267Sriastradh int i;
380*41ec0267Sriastradh
381*41ec0267Sriastradh for (i = 0 ; i < vgdev->num_scanouts; ++i)
382*41ec0267Sriastradh kfree(vgdev->outputs[i].edid);
383*41ec0267Sriastradh drm_atomic_helper_shutdown(vgdev->ddev);
384efa246c0Sriastradh drm_mode_config_cleanup(vgdev->ddev);
385efa246c0Sriastradh }
386