/*	$NetBSD: vmwgfx_fb.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_fb.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $");

#include <linux/pci.h>

#include <drm/drm_fourcc.h>
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#define VMW_DIRTY_DELAY (HZ / 30)

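/*
 * Per-device fbdev emulation state, hung off fb_info::par.  The fbdev
 * contents live in the vmalloc shadow buffer; dirty rectangles are
 * coalesced under dirty.lock and copied into the buffer object by the
 * local_work delayed work.
 */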
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct mutex bo_mutex;
	struct vmw_buffer_object *vmw_bo;
	unsigned bo_size;
	struct drm_framebuffer *set_fb;
	struct drm_display_mode *set_mode;
	u32 fb_x;
	u32 fb_y;
	bool bo_iowrite;

	u32 pseudo_palette[17];

	unsigned max_width;
	unsigned max_height;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;

	struct drm_crtc *crtc;
	struct drm_connector *con;
	struct delayed_work local_work;
};

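/*
 * Build a pseudo-palette entry for the truecolor visual: the 16-bit fb
 * color components are packed into a 0xRRGGBB word.  Only the 16
 * console palette slots and 24/32-bit depths are accepted.
 */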
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->set_fb->format->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue  & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n",
			  par->set_fb->format->depth,
			  par->set_fb->format->cpp[0] * 8);
		return 1;
	}

	return 0;
}

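/*
 * Validate a mode request: only 32bpp layouts (XRGB or ARGB) are
 * supported, and the requested geometry must fit both the shadow
 * framebuffer and the available VRAM.
 */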
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel/8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}

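/* Blanking is not implemented; accept the request and do nothing. */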
static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/**
 * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
 *
 * @work: The struct work_struct associated with this task.
 *
 * This function flushes the dirty regions of the vmalloc framebuffer to the
 * kms framebuffer, and if the kms framebuffer is visible, also updates the
 * corresponding displays. Note that this function runs even if the kms
 * framebuffer is not bound to a crtc and thus not visible, but it's turned
 * off during hibernation using the par->dirty.active bool.
 */
static void vmw_fb_dirty_flush(struct work_struct *work)
{
	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
					      local_work.work);
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	unsigned long irq_flags;
	s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
	u32 cpp, max_x, max_y;
	struct drm_clip_rect clip;
	struct drm_framebuffer *cur_fb;
	u8 *src_ptr, *dst_ptr;
	struct vmw_buffer_object *vbo = par->vmw_bo;
	void *virtual;

	if (!READ_ONCE(par->dirty.active))
		return;

	mutex_lock(&par->bo_mutex);
	cur_fb = par->set_fb;
	if (!cur_fb)
		goto out_unlock;

	(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
	virtual = vmw_bo_map_and_cache(vbo);
	if (!virtual)
		goto out_unreserve;

	spin_lock_irqsave(&par->dirty.lock, irq_flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
		goto out_unreserve;
	}

	/*
	 * Handle panning when copying from vmalloc to framebuffer.
	 * Clip dirty area to framebuffer.
	 */
	cpp = cur_fb->format->cpp[0];
	max_x = par->fb_x + cur_fb->width;
	max_y = par->fb_y + cur_fb->height;

	dst_x1 = par->dirty.x1 - par->fb_x;
	dst_y1 = par->dirty.y1 - par->fb_y;
	dst_x1 = max_t(s32, dst_x1, 0);
	dst_y1 = max_t(s32, dst_y1, 0);

	dst_x2 = par->dirty.x2 - par->fb_x;
	dst_y2 = par->dirty.y2 - par->fb_y;
	dst_x2 = min_t(s32, dst_x2, max_x);
	dst_y2 = min_t(s32, dst_y2, max_y);
	w = dst_x2 - dst_x1;
	h = dst_y2 - dst_y1;
	w = max_t(s32, 0, w);
	h = max_t(s32, 0, h);

	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

	if (w && h) {
		dst_ptr = (u8 *)virtual +
			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
		src_ptr = (u8 *)par->vmalloc +
			((dst_y1 + par->fb_y) * info->fix.line_length +
			 (dst_x1 + par->fb_x) * cpp);

		while (h-- > 0) {
			memcpy(dst_ptr, src_ptr, w*cpp);
			dst_ptr += par->set_fb->pitches[0];
			src_ptr += info->fix.line_length;
		}

		clip.x1 = dst_x1;
		clip.x2 = dst_x2;
		clip.y1 = dst_y1;
		clip.y2 = dst_y2;
	}

out_unreserve:
	ttm_bo_unreserve(&vbo->base);
	ttm_read_unlock(&vmw_priv->reservation_sem);
	if (w && h) {
		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
						       &clip, 1));
		vmw_fifo_flush(vmw_priv, false);
	}
out_unlock:
	mutex_unlock(&par->bo_mutex);
}

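/*
 * Extend the coalesced dirty region with a new rectangle and, if this
 * is the first damage since the last flush, kick off the delayed work.
 */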
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If we are active, start the dirty work;
		 * we share the work with the defio system. */
		if (par->dirty.active)
			schedule_delayed_work(&par->local_work,
					      VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

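/*
 * Pan by offsetting the copy from the shadow buffer; the whole visible
 * framebuffer is marked dirty so the next flush repaints it.
 */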
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;

	if ((var->xoffset + var->xres) > var->xres_virtual ||
	    (var->yoffset + var->yres) > var->yres_virtual) {
		DRM_ERROR("Requested panning can not fit in framebuffer\n");
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;
	if (par->set_fb)
		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
				  par->set_fb->height);
	mutex_unlock(&par->bo_mutex);

	return 0;
}

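/*
 * fb_deferred_io callback: convert the list of touched pages into a
 * full-width span of dirty scanlines and flush it as soon as possible.
 */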
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);

		/*
		 * Since we've already waited on this work once, try to
		 * execute asap.
		 */
		cancel_delayed_work(&par->local_work);
		schedule_delayed_work(&par->local_work, 0);
	}
}

static struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};

/*
 * Draw code
 */

static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

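/*
 * Allocate the buffer object backing the fbdev framebuffer, placed in
 * system memory so it can be mapped and filled from the shadow buffer.
 */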
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_buffer_object **out)
{
	struct vmw_buffer_object *vmw_bo;
	int ret;

	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_bo_init(vmw_priv, vmw_bo, size,
			      &vmw_sys_placement,
			      false,
			      &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;
	ttm_write_unlock(&vmw_priv->reservation_sem);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->reservation_sem);
	return ret;
}

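/* Derive the color depth from a 32bpp var: 32 with alpha, else 24. */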
static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
				int *depth)
{
	switch (var->bits_per_pixel) {
	case 32:
		*depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	return 0;
}

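/*
 * Call the crtc's set_config hook with a locally acquired modeset
 * context, retrying the whole operation on -EDEADLK backoff.
 */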
static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
	struct drm_crtc *crtc = set->crtc;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

restart:
	ret = crtc->funcs->set_config(set, &ctx);

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto restart;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

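/*
 * Unset the current mode, drop the kms framebuffer, and optionally
 * release the backing buffer object.
 */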
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
			     bool detach_bo,
			     bool unref_bo)
{
	struct drm_framebuffer *cur_fb = par->set_fb;
	int ret;

	/* Detach the KMS framebuffer from crtcs */
	if (par->set_mode) {
		struct drm_mode_set set;

		set.crtc = par->crtc;
		set.x = 0;
		set.y = 0;
		set.mode = NULL;
		set.fb = NULL;
		set.num_connectors = 0;
		set.connectors = &par->con;
		ret = vmwgfx_set_config_internal(&set);
		if (ret) {
			DRM_ERROR("Could not unset a mode.\n");
			return ret;
		}
		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
		par->set_mode = NULL;
	}

	if (cur_fb) {
		drm_framebuffer_put(cur_fb);
		par->set_fb = NULL;
	}

	if (par->vmw_bo && detach_bo && unref_bo)
		vmw_bo_unreference(&par->vmw_bo);

	return 0;
}

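/*
 * (Re)create the kms framebuffer to match info->var, reusing the
 * existing buffer object when it is already between one and two times
 * the required size.
 */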
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
	struct drm_mode_fb_cmd2 mode_cmd;
	struct vmw_fb_par *par = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_framebuffer *cur_fb;
	struct vmw_framebuffer *vfb;
	int ret = 0, depth;
	size_t new_bo_size;

	ret = vmw_fb_compute_depth(var, &depth);
	if (ret)
		return ret;

	mode_cmd.width = var->xres;
	mode_cmd.height = var->yres;
	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
	mode_cmd.pixel_format =
		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

	cur_fb = par->set_fb;
	if (cur_fb && cur_fb->width == mode_cmd.width &&
	    cur_fb->height == mode_cmd.height &&
	    cur_fb->format->format == mode_cmd.pixel_format &&
	    cur_fb->pitches[0] == mode_cmd.pitches[0])
		return 0;

	/* Need a new buffer object? */
	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
	ret = vmw_fb_kms_detach(par,
				par->bo_size < new_bo_size ||
				par->bo_size > 2*new_bo_size,
				true);
	if (ret)
		return ret;

	if (!par->vmw_bo) {
		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
				       &par->vmw_bo);
		if (ret) {
			DRM_ERROR("Failed creating a buffer object for "
				  "fbdev.\n");
			return ret;
		}
		par->bo_size = new_bo_size;
	}

	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
				      true, &mode_cmd);
	if (IS_ERR(vfb))
		return PTR_ERR(vfb);

	par->set_fb = &vfb->base;

	return 0;
}

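/*
 * Apply info->var: build a mode with guessed timings, validate it
 * against VRAM, swap in a matching kms framebuffer, set the mode on
 * the crtc, and mark everything dirty so it gets repainted.
 */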
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct drm_mode_set set;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
		DRM_MODE_TYPE_DRIVER,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	struct drm_display_mode *mode;
	int ret;

	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
	if (!mode) {
		DRM_ERROR("Could not create new fb mode.\n");
		return -ENOMEM;
	}

	mode->hdisplay = var->xres;
	mode->vdisplay = var->yres;
	vmw_guess_mode_timing(mode);

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					mode->hdisplay *
					DIV_ROUND_UP(var->bits_per_pixel, 8),
					mode->vdisplay)) {
		drm_mode_destroy(vmw_priv->dev, mode);
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	ret = vmw_fb_kms_framebuffer(info);
	if (ret)
		goto out_unlock;

	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;

	set.crtc = par->crtc;
	set.x = 0;
	set.y = 0;
	set.mode = mode;
	set.fb = par->set_fb;
	set.num_connectors = 1;
	set.connectors = &par->con;

	ret = vmwgfx_set_config_internal(&set);
	if (ret)
		goto out_unlock;

	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
			  par->set_fb->width, par->set_fb->height);

	/* If there already was stuff dirty, we won't
	 * schedule new work, so let's do it now. */

	schedule_delayed_work(&par->local_work, 0);

out_unlock:
	if (par->set_mode)
		drm_mode_destroy(vmw_priv->dev, par->set_mode);
	par->set_mode = mode;

	mutex_unlock(&par->bo_mutex);

	return ret;
}


static const struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

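/*
 * Create and register the emulated fbdev device: allocate the shadow
 * buffer, fill in the fixed and variable screen info, hook up deferred
 * I/O, and set an initial mode.
 */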
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned fb_width, fb_height;
	unsigned int fb_bpp, fb_pitch, fb_size;
	struct drm_display_mode *init_mode;
	int ret;

	fb_bpp = 32;

	/* XXX Shouldn't these be clamped as well? */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	memset(par, 0, sizeof(*par));
	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
	par->vmw_priv = vmw_priv;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
				      par->max_height, &par->con,
				      &par->crtc, &init_mode);
	if (ret)
		goto err_kms;

	info->var.xres = init_mode->hdisplay;
	info->var.yres = init_mode->vdisplay;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vzalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = (char __iomem *)par->vmalloc;
	info->screen_size = fb_size;

	info->fbops = &vmw_fb_ops;

	/* 24-bit depth by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = fb_bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	mutex_init(&par->bo_mutex);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	vmw_fb_set_par(info);

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
err_free:
	vfree(par->vmalloc);
err_kms:
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

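/* Tear down the fbdev device and release all associated resources. */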
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;

	/* ??? order */
	fb_deferred_io_cleanup(info);
	cancel_delayed_work_sync(&par->local_work);
	unregister_framebuffer(info);

	mutex_lock(&par->bo_mutex);
	(void) vmw_fb_kms_detach(par, true, true);
	mutex_unlock(&par->bo_mutex);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

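/*
 * Stop dirty flushing (e.g. across hibernation) and wait for any
 * pending deferred-I/O and flush work to finish.
 */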
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work(&info->deferred_work);
	flush_delayed_work(&par->local_work);

	return 0;
}

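/*
 * Re-enable dirty flushing and schedule an immediate update to repaint
 * anything that was damaged while flushing was off.
 */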
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	/*
	 * Need to reschedule a dirty update, because otherwise that's
	 * only done in dirty_mark() if the previous coalesced
	 * dirty region was empty.
	 */
	schedule_delayed_work(&par->local_work, 0);

	return 0;
}
851