xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/drm_atomic_uapi.c (revision d95770457554cf793db417b12e8b1f00fd21e581)
1 /*	$NetBSD: drm_atomic_uapi.c,v 1.7 2021/12/19 10:45:49 riastradh Exp $	*/
2 
3 /*
4  * Copyright (C) 2014 Red Hat
5  * Copyright (C) 2014 Intel Corp.
6  * Copyright (C) 2018 Intel Corp.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included in
16  * all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
22  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24  * OTHER DEALINGS IN THE SOFTWARE.
25  *
26  * Authors:
27  * Rob Clark <robdclark@gmail.com>
28  * Daniel Vetter <daniel.vetter@ffwll.ch>
29  */
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: drm_atomic_uapi.c,v 1.7 2021/12/19 10:45:49 riastradh Exp $");
33 
34 #include <drm/drm_atomic_uapi.h>
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_print.h>
37 #include <drm/drm_drv.h>
38 #include <drm/drm_writeback.h>
39 #include <drm/drm_vblank.h>
40 
41 #include <linux/dma-fence.h>
42 #include <linux/uaccess.h>
43 #include <linux/sync_file.h>
44 #include <linux/file.h>
45 
46 #include "drm_crtc_internal.h"
47 
48 /**
49  * DOC: overview
50  *
51  * This file contains the marshalling and demarshalling glue for the atomic UAPI
52  * in all its forms: the monster ATOMIC IOCTL itself, the code for the
53  * GET_PROPERTY and SET_PROPERTY IOCTLs, plus interface functions for
54  * compatibility helpers and for drivers which have special needs to construct
55  * their own atomic updates, e.g. for load detect or similar.
56  */
57 
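/*
 * Example (illustrative sketch): the usual shape of a driver-built atomic
 * update, e.g. for load detect.  The helpers named here are the real kernel
 * interfaces; the surrounding function, the "dev" pointer and the details of
 * populating the state are hypothetical and driver-specific.
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	struct drm_atomic_state *state;
 *	int ret;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *	state = drm_atomic_state_alloc(dev);
 *	if (!state)
 *		goto fini;
 *	state->acquire_ctx = &ctx;
 * retry:
 *	// ... populate the update with drm_atomic_get_*_state() and the
 *	// drm_atomic_set_*() helpers defined in this file ...
 *	ret = drm_atomic_commit(state);
 *	if (ret == -EDEADLK) {
 *		drm_atomic_state_clear(state);
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *	drm_atomic_state_put(state);
 * fini:
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */
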
58 /**
59  * drm_atomic_set_mode_for_crtc - set mode for CRTC
60  * @state: the CRTC whose incoming state to update
61  * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
62  *
63  * Set a mode (originating from the kernel) on the desired CRTC state and update
64  * the enable property.
65  *
66  * RETURNS:
67  * Zero on success, error code on failure. Cannot return -EDEADLK.
68  */
69 int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
70 				 const struct drm_display_mode *mode)
71 {
72 	struct drm_crtc *crtc = state->crtc;
73 	struct drm_mode_modeinfo umode;
74 
75 	/* Early return for no change. */
76 	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
77 		return 0;
78 
79 	drm_property_blob_put(state->mode_blob);
80 	state->mode_blob = NULL;
81 
82 	if (mode) {
83 		drm_mode_convert_to_umode(&umode, mode);
84 		state->mode_blob =
85 			drm_property_create_blob(state->crtc->dev,
86 		                                 sizeof(umode),
87 		                                 &umode);
88 		if (IS_ERR(state->mode_blob))
89 			return PTR_ERR(state->mode_blob);
90 
91 		drm_mode_copy(&state->mode, mode);
92 		state->enable = true;
93 		DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
94 				 mode->name, crtc->base.id, crtc->name, state);
95 	} else {
96 		memset(&state->mode, 0, sizeof(state->mode));
97 		state->enable = false;
98 		DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
99 				 crtc->base.id, crtc->name, state);
100 	}
101 
102 	return 0;
103 }
104 EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
105 
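/*
 * Example (illustrative sketch): enabling a CRTC with a kernel-internal mode
 * from a driver-built update.  "state", "crtc" and "my_mode" are assumed to be
 * supplied by the caller; -EDEADLK handling belongs to the outer retry loop.
 *
 *	struct drm_crtc_state *crtc_state;
 *	int ret;
 *
 *	crtc_state = drm_atomic_get_crtc_state(state, crtc);
 *	if (IS_ERR(crtc_state))
 *		return PTR_ERR(crtc_state);
 *
 *	ret = drm_atomic_set_mode_for_crtc(crtc_state, &my_mode);
 *	if (ret)
 *		return ret;
 *	crtc_state->active = true;
 */
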
106 /**
107  * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
108  * @state: the CRTC whose incoming state to update
109  * @blob: pointer to blob property to use for mode
110  *
111  * Set a mode (originating from a blob property) on the desired CRTC state.
112  * This function will take a reference on the blob property for the CRTC state,
113  * and release the reference held on the state's existing mode property, if any
114  * was set.
115  *
116  * RETURNS:
117  * Zero on success, error code on failure. Cannot return -EDEADLK.
118  */
119 int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
120                                       struct drm_property_blob *blob)
121 {
122 	struct drm_crtc *crtc = state->crtc;
123 
124 	if (blob == state->mode_blob)
125 		return 0;
126 
127 	drm_property_blob_put(state->mode_blob);
128 	state->mode_blob = NULL;
129 
130 	memset(&state->mode, 0, sizeof(state->mode));
131 
132 	if (blob) {
133 		int ret;
134 
135 		if (blob->length != sizeof(struct drm_mode_modeinfo)) {
136 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
137 					 crtc->base.id, crtc->name,
138 					 blob->length);
139 			return -EINVAL;
140 		}
141 
142 		ret = drm_mode_convert_umode(crtc->dev,
143 					     &state->mode, blob->data);
144 		if (ret) {
145 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
146 					 crtc->base.id, crtc->name,
147 					 ret, drm_get_mode_status_name(state->mode.status));
148 			drm_mode_debug_printmodeline(&state->mode);
149 			return -EINVAL;
150 		}
151 
152 		state->mode_blob = drm_property_blob_get(blob);
153 		state->enable = true;
154 		DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
155 				 state->mode.name, crtc->base.id, crtc->name,
156 				 state);
157 	} else {
158 		state->enable = false;
159 		DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
160 				 crtc->base.id, crtc->name, state);
161 	}
162 
163 	return 0;
164 }
165 EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
166 
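/*
 * Example (illustrative sketch): the same operation driven by a mode blob,
 * which is how the MODE_ID property path below ends up here.  "crtc_state"
 * and "umode" (a struct drm_mode_modeinfo already filled in) are assumed to
 * come from the caller.
 *
 *	struct drm_property_blob *blob;
 *	int ret;
 *
 *	blob = drm_property_create_blob(crtc->dev, sizeof(umode), &umode);
 *	if (IS_ERR(blob))
 *		return PTR_ERR(blob);
 *	ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, blob);
 *	drm_property_blob_put(blob);	// the CRTC state keeps its own reference
 */
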
167 /**
168  * drm_atomic_set_crtc_for_plane - set CRTC for plane
169  * @plane_state: the plane whose incoming state to update
170  * @crtc: CRTC to use for the plane
171  *
172  * Changing the assigned CRTC for a plane requires us to grab the lock and state
173  * for the new CRTC, as needed. This function takes care of all these details
174  * besides updating the pointer in the state object itself.
175  *
176  * Returns:
177  * 0 on success; can fail with -EDEADLK or -ENOMEM. When the error is -EDEADLK,
178  * the w/w mutex code has detected a deadlock and the entire atomic
179  * sequence must be restarted. All other errors are fatal.
180  */
181 int
182 drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
183 			      struct drm_crtc *crtc)
184 {
185 	struct drm_plane *plane = plane_state->plane;
186 	struct drm_crtc_state *crtc_state;
187 	/* Nothing to do for same crtc */
188 	if (plane_state->crtc == crtc)
189 		return 0;
190 	if (plane_state->crtc) {
191 		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
192 						       plane_state->crtc);
193 		if (WARN_ON(IS_ERR(crtc_state)))
194 			return PTR_ERR(crtc_state);
195 
196 		crtc_state->plane_mask &= ~drm_plane_mask(plane);
197 	}
198 
199 	plane_state->crtc = crtc;
200 
201 	if (crtc) {
202 		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
203 						       crtc);
204 		if (IS_ERR(crtc_state))
205 			return PTR_ERR(crtc_state);
206 		crtc_state->plane_mask |= drm_plane_mask(plane);
207 	}
208 
209 	if (crtc)
210 		DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
211 				 plane->base.id, plane->name, plane_state,
212 				 crtc->base.id, crtc->name);
213 	else
214 		DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
215 				 plane->base.id, plane->name, plane_state);
216 
217 	return 0;
218 }
219 EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
220 
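/*
 * Example (illustrative sketch): attaching a plane to a CRTC inside a
 * driver-built update.  "state", "plane", "crtc" and "fb" are assumed to be
 * supplied by the caller; a -EDEADLK return must restart the whole sequence.
 *
 *	struct drm_plane_state *plane_state;
 *	int ret;
 *
 *	plane_state = drm_atomic_get_plane_state(state, plane);
 *	if (IS_ERR(plane_state))
 *		return PTR_ERR(plane_state);
 *
 *	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
 *	if (ret)
 *		return ret;
 *	drm_atomic_set_fb_for_plane(plane_state, fb);
 */
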
221 /**
222  * drm_atomic_set_fb_for_plane - set framebuffer for plane
223  * @plane_state: atomic state object for the plane
224  * @fb: fb to use for the plane
225  *
226  * Changing the assigned framebuffer for a plane requires us to grab a reference
227  * to the new fb and drop the reference to the old fb, if there is one. This
228  * function takes care of all these details besides updating the pointer in the
229  * state object itself.
230  */
231 void
232 drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
233 			    struct drm_framebuffer *fb)
234 {
235 	struct drm_plane *plane = plane_state->plane;
236 
237 	if (fb)
238 		DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
239 				 fb->base.id, plane->base.id, plane->name,
240 				 plane_state);
241 	else
242 		DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
243 				 plane->base.id, plane->name, plane_state);
244 
245 	drm_framebuffer_assign(&plane_state->fb, fb);
246 }
247 EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
248 
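/*
 * Example (illustrative sketch): disabling a plane by dropping both its
 * framebuffer and its CRTC link.  "plane_state" is assumed to have been
 * obtained with drm_atomic_get_plane_state() under the acquire context.
 *
 *	drm_atomic_set_fb_for_plane(plane_state, NULL);
 *	ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
 *	if (ret)
 *		return ret;
 */
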
249 /**
250  * drm_atomic_set_fence_for_plane - set fence for plane
251  * @plane_state: atomic state object for the plane
252  * @fence: dma_fence to use for the plane
253  *
254  * Helper to setup the plane_state fence in case it is not set yet.
255  * By using this helper, drivers don't need to worry about whether the user
256  * chose implicit or explicit fencing.
257  *
258  * This function will not set the fence to the state if it was set
259  * via explicit fencing interfaces on the atomic ioctl. In that case it will
260  * drop the reference to the fence as we are not storing it anywhere.
261  * Otherwise, if &drm_plane_state.fence is not set, this function just sets it
262  * to the received implicit fence. In both cases this function consumes a
263  * reference for @fence.
264  *
265  * This way explicit fencing can be used to overrule implicit fencing, which is
266  * important to make explicit fencing use-cases work: One example is using one
267  * buffer for 2 screens with different refresh rates. Implicit fencing will
268  * clamp rendering to the refresh rate of the slower screen, whereas explicit
269  * fence allows 2 independent render and display loops on a single buffer. If a
270  * driver obeys both implicit and explicit fences for plane updates, then
271  * it will break all the benefits of explicit fencing.
272  */
273 void
274 drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
275 			       struct dma_fence *fence)
276 {
277 	if (plane_state->fence) {
278 		dma_fence_put(fence);
279 		return;
280 	}
281 
282 	plane_state->fence = fence;
283 }
284 EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
285 
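/*
 * Example (illustrative sketch): a driver prepare_fb hook feeding the implicit
 * fence through this helper, so an explicit IN_FENCE_FD set by userspace takes
 * precedence.  The GEM/reservation helpers named here (drm_gem_fb_get_obj(),
 * dma_resv_get_excl_rcu()) follow the pattern of drm_gem_fb_prepare_fb() in
 * this kernel generation and may be named differently in other versions.
 *
 *	static int my_plane_prepare_fb(struct drm_plane *plane,
 *				       struct drm_plane_state *new_state)
 *	{
 *		struct drm_gem_object *obj;
 *		struct dma_fence *fence;
 *
 *		if (!new_state->fb)
 *			return 0;
 *		obj = drm_gem_fb_get_obj(new_state->fb, 0);
 *		fence = dma_resv_get_excl_rcu(obj->resv);
 *		drm_atomic_set_fence_for_plane(new_state, fence);
 *		return 0;
 *	}
 */
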
286 /**
287  * drm_atomic_set_crtc_for_connector - set CRTC for connector
288  * @conn_state: atomic state object for the connector
289  * @crtc: CRTC to use for the connector
290  *
291  * Changing the assigned CRTC for a connector requires us to grab the lock and
292  * state for the new CRTC, as needed. This function takes care of all these
293  * details besides updating the pointer in the state object itself.
294  *
295  * Returns:
296  * 0 on success; can fail with -EDEADLK or -ENOMEM. When the error is -EDEADLK,
297  * the w/w mutex code has detected a deadlock and the entire atomic
298  * sequence must be restarted. All other errors are fatal.
299  */
300 int
301 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
302 				  struct drm_crtc *crtc)
303 {
304 	struct drm_connector *connector = conn_state->connector;
305 	struct drm_crtc_state *crtc_state;
306 
307 	if (conn_state->crtc == crtc)
308 		return 0;
309 
310 	if (conn_state->crtc) {
311 		crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
312 							   conn_state->crtc);
313 
314 		crtc_state->connector_mask &=
315 			~drm_connector_mask(conn_state->connector);
316 
317 		drm_connector_put(conn_state->connector);
318 		conn_state->crtc = NULL;
319 	}
320 
321 	if (crtc) {
322 		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
323 		if (IS_ERR(crtc_state))
324 			return PTR_ERR(crtc_state);
325 
326 		crtc_state->connector_mask |=
327 			drm_connector_mask(conn_state->connector);
328 
329 		drm_connector_get(conn_state->connector);
330 		conn_state->crtc = crtc;
331 
332 		DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
333 				 connector->base.id, connector->name,
334 				 conn_state, crtc->base.id, crtc->name);
335 	} else {
336 		DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
337 				 connector->base.id, connector->name,
338 				 conn_state);
339 	}
340 
341 	return 0;
342 }
343 EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
344 
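/*
 * Example (illustrative sketch): routing a connector to a CRTC in a
 * driver-built update.  "state", "connector" and "crtc" are assumed to be
 * supplied by the caller.
 *
 *	struct drm_connector_state *conn_state;
 *	int ret;
 *
 *	conn_state = drm_atomic_get_connector_state(state, connector);
 *	if (IS_ERR(conn_state))
 *		return PTR_ERR(conn_state);
 *
 *	ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
 *	if (ret)
 *		return ret;
 */
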
345 static void set_out_fence_for_crtc(struct drm_atomic_state *state,
346 				   struct drm_crtc *crtc, s32 __user *fence_ptr)
347 {
348 	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
349 }
350 
351 static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
352 					  struct drm_crtc *crtc)
353 {
354 	s32 __user *fence_ptr;
355 
356 	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
357 	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
358 
359 	return fence_ptr;
360 }
361 
362 static int set_out_fence_for_connector(struct drm_atomic_state *state,
363 					struct drm_connector *connector,
364 					s32 __user *fence_ptr)
365 {
366 	unsigned int index = drm_connector_index(connector);
367 
368 	if (!fence_ptr)
369 		return 0;
370 
371 	if (put_user(-1, fence_ptr))
372 		return -EFAULT;
373 
374 	state->connectors[index].out_fence_ptr = fence_ptr;
375 
376 	return 0;
377 }
378 
379 static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
380 					       struct drm_connector *connector)
381 {
382 	unsigned int index = drm_connector_index(connector);
383 	s32 __user *fence_ptr;
384 
385 	fence_ptr = state->connectors[index].out_fence_ptr;
386 	state->connectors[index].out_fence_ptr = NULL;
387 
388 	return fence_ptr;
389 }
390 
391 static int
392 drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
393 					 struct drm_property_blob **blob,
394 					 uint64_t blob_id,
395 					 ssize_t expected_size,
396 					 ssize_t expected_elem_size,
397 					 bool *replaced)
398 {
399 	struct drm_property_blob *new_blob = NULL;
400 
401 	if (blob_id != 0) {
402 		new_blob = drm_property_lookup_blob(dev, blob_id);
403 		if (new_blob == NULL)
404 			return -EINVAL;
405 
406 		if (expected_size > 0 &&
407 		    new_blob->length != expected_size) {
408 			drm_property_blob_put(new_blob);
409 			return -EINVAL;
410 		}
411 		if (expected_elem_size > 0 &&
412 		    new_blob->length % expected_elem_size != 0) {
413 			drm_property_blob_put(new_blob);
414 			return -EINVAL;
415 		}
416 	}
417 
418 	*replaced |= drm_property_replace_blob(blob, new_blob);
419 	drm_property_blob_put(new_blob);
420 
421 	return 0;
422 }
423 
424 static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
425 		struct drm_crtc_state *state, struct drm_property *property,
426 		uint64_t val)
427 {
428 	struct drm_device *dev = crtc->dev;
429 	struct drm_mode_config *config = &dev->mode_config;
430 	bool replaced = false;
431 	int ret;
432 
433 	if (property == config->prop_active)
434 		state->active = val;
435 	else if (property == config->prop_mode_id) {
436 		struct drm_property_blob *mode =
437 			drm_property_lookup_blob(dev, val);
438 		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
439 		drm_property_blob_put(mode);
440 		return ret;
441 	} else if (property == config->prop_vrr_enabled) {
442 		state->vrr_enabled = val;
443 	} else if (property == config->degamma_lut_property) {
444 		ret = drm_atomic_replace_property_blob_from_id(dev,
445 					&state->degamma_lut,
446 					val,
447 					-1, sizeof(struct drm_color_lut),
448 					&replaced);
449 		state->color_mgmt_changed |= replaced;
450 		return ret;
451 	} else if (property == config->ctm_property) {
452 		ret = drm_atomic_replace_property_blob_from_id(dev,
453 					&state->ctm,
454 					val,
455 					sizeof(struct drm_color_ctm), -1,
456 					&replaced);
457 		state->color_mgmt_changed |= replaced;
458 		return ret;
459 	} else if (property == config->gamma_lut_property) {
460 		ret = drm_atomic_replace_property_blob_from_id(dev,
461 					&state->gamma_lut,
462 					val,
463 					-1, sizeof(struct drm_color_lut),
464 					&replaced);
465 		state->color_mgmt_changed |= replaced;
466 		return ret;
467 	} else if (property == config->prop_out_fence_ptr) {
468 		s32 __user *fence_ptr = u64_to_user_ptr(val);
469 
470 		if (!fence_ptr)
471 			return 0;
472 
473 		if (put_user(-1, fence_ptr))
474 			return -EFAULT;
475 
476 		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
477 	} else if (crtc->funcs->atomic_set_property) {
478 		return crtc->funcs->atomic_set_property(crtc, state, property, val);
479 	} else {
480 		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
481 				 crtc->base.id, crtc->name,
482 				 property->base.id, property->name);
483 		return -EINVAL;
484 	}
485 
486 	return 0;
487 }
488 
489 static int
490 drm_atomic_crtc_get_property(struct drm_crtc *crtc,
491 		const struct drm_crtc_state *state,
492 		struct drm_property *property, uint64_t *val)
493 {
494 	struct drm_device *dev = crtc->dev;
495 	struct drm_mode_config *config = &dev->mode_config;
496 
497 	if (property == config->prop_active)
498 		*val = drm_atomic_crtc_effectively_active(state);
499 	else if (property == config->prop_mode_id)
500 		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
501 	else if (property == config->prop_vrr_enabled)
502 		*val = state->vrr_enabled;
503 	else if (property == config->degamma_lut_property)
504 		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
505 	else if (property == config->ctm_property)
506 		*val = (state->ctm) ? state->ctm->base.id : 0;
507 	else if (property == config->gamma_lut_property)
508 		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
509 	else if (property == config->prop_out_fence_ptr)
510 		*val = 0;
511 	else if (crtc->funcs->atomic_get_property)
512 		return crtc->funcs->atomic_get_property(crtc, state, property, val);
513 	else
514 		return -EINVAL;
515 
516 	return 0;
517 }
518 
519 static int drm_atomic_plane_set_property(struct drm_plane *plane,
520 		struct drm_plane_state *state, struct drm_file *file_priv,
521 		struct drm_property *property, uint64_t val)
522 {
523 	struct drm_device *dev = plane->dev;
524 	struct drm_mode_config *config = &dev->mode_config;
525 	bool replaced = false;
526 	int ret;
527 
528 	if (property == config->prop_fb_id) {
529 		struct drm_framebuffer *fb;
530 		fb = drm_framebuffer_lookup(dev, file_priv, val);
531 		drm_atomic_set_fb_for_plane(state, fb);
532 		if (fb)
533 			drm_framebuffer_put(fb);
534 	} else if (property == config->prop_in_fence_fd) {
535 		if (state->fence)
536 			return -EINVAL;
537 
538 		if (U642I64(val) == -1)
539 			return 0;
540 
541 		state->fence = sync_file_get_fence(val);
542 		if (!state->fence)
543 			return -EINVAL;
544 
545 	} else if (property == config->prop_crtc_id) {
546 		struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
547 		if (val && !crtc)
548 			return -EACCES;
549 		return drm_atomic_set_crtc_for_plane(state, crtc);
550 	} else if (property == config->prop_crtc_x) {
551 		state->crtc_x = U642I64(val);
552 	} else if (property == config->prop_crtc_y) {
553 		state->crtc_y = U642I64(val);
554 	} else if (property == config->prop_crtc_w) {
555 		state->crtc_w = val;
556 	} else if (property == config->prop_crtc_h) {
557 		state->crtc_h = val;
558 	} else if (property == config->prop_src_x) {
559 		state->src_x = val;
560 	} else if (property == config->prop_src_y) {
561 		state->src_y = val;
562 	} else if (property == config->prop_src_w) {
563 		state->src_w = val;
564 	} else if (property == config->prop_src_h) {
565 		state->src_h = val;
566 	} else if (property == plane->alpha_property) {
567 		state->alpha = val;
568 	} else if (property == plane->blend_mode_property) {
569 		state->pixel_blend_mode = val;
570 	} else if (property == plane->rotation_property) {
571 		if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
572 			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%"PRIx64"\n",
573 					 plane->base.id, plane->name, val);
574 			return -EINVAL;
575 		}
576 		state->rotation = val;
577 	} else if (property == plane->zpos_property) {
578 		state->zpos = val;
579 	} else if (property == plane->color_encoding_property) {
580 		state->color_encoding = val;
581 	} else if (property == plane->color_range_property) {
582 		state->color_range = val;
583 	} else if (property == config->prop_fb_damage_clips) {
584 		ret = drm_atomic_replace_property_blob_from_id(dev,
585 					&state->fb_damage_clips,
586 					val,
587 					-1,
588 					sizeof(struct drm_rect),
589 					&replaced);
590 		return ret;
591 	} else if (plane->funcs->atomic_set_property) {
592 		return plane->funcs->atomic_set_property(plane, state,
593 				property, val);
594 	} else {
595 		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
596 				 plane->base.id, plane->name,
597 				 property->base.id, property->name);
598 		return -EINVAL;
599 	}
600 
601 	return 0;
602 }
603 
604 static int
605 drm_atomic_plane_get_property(struct drm_plane *plane,
606 		const struct drm_plane_state *state,
607 		struct drm_property *property, uint64_t *val)
608 {
609 	struct drm_device *dev = plane->dev;
610 	struct drm_mode_config *config = &dev->mode_config;
611 
612 	if (property == config->prop_fb_id) {
613 		*val = (state->fb) ? state->fb->base.id : 0;
614 	} else if (property == config->prop_in_fence_fd) {
615 		*val = -1;
616 	} else if (property == config->prop_crtc_id) {
617 		*val = (state->crtc) ? state->crtc->base.id : 0;
618 	} else if (property == config->prop_crtc_x) {
619 		*val = I642U64(state->crtc_x);
620 	} else if (property == config->prop_crtc_y) {
621 		*val = I642U64(state->crtc_y);
622 	} else if (property == config->prop_crtc_w) {
623 		*val = state->crtc_w;
624 	} else if (property == config->prop_crtc_h) {
625 		*val = state->crtc_h;
626 	} else if (property == config->prop_src_x) {
627 		*val = state->src_x;
628 	} else if (property == config->prop_src_y) {
629 		*val = state->src_y;
630 	} else if (property == config->prop_src_w) {
631 		*val = state->src_w;
632 	} else if (property == config->prop_src_h) {
633 		*val = state->src_h;
634 	} else if (property == plane->alpha_property) {
635 		*val = state->alpha;
636 	} else if (property == plane->blend_mode_property) {
637 		*val = state->pixel_blend_mode;
638 	} else if (property == plane->rotation_property) {
639 		*val = state->rotation;
640 	} else if (property == plane->zpos_property) {
641 		*val = state->zpos;
642 	} else if (property == plane->color_encoding_property) {
643 		*val = state->color_encoding;
644 	} else if (property == plane->color_range_property) {
645 		*val = state->color_range;
646 	} else if (property == config->prop_fb_damage_clips) {
647 		*val = (state->fb_damage_clips) ?
648 			state->fb_damage_clips->base.id : 0;
649 	} else if (plane->funcs->atomic_get_property) {
650 		return plane->funcs->atomic_get_property(plane, state, property, val);
651 	} else {
652 		return -EINVAL;
653 	}
654 
655 	return 0;
656 }
657 
658 static int drm_atomic_set_writeback_fb_for_connector(
659 		struct drm_connector_state *conn_state,
660 		struct drm_framebuffer *fb)
661 {
662 	int ret;
663 
664 	ret = drm_writeback_set_fb(conn_state, fb);
665 	if (ret < 0)
666 		return ret;
667 
668 	if (fb)
669 		DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
670 				 fb->base.id, conn_state);
671 	else
672 		DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
673 				 conn_state);
674 
675 	return 0;
676 }
677 
678 static int drm_atomic_connector_set_property(struct drm_connector *connector,
679 		struct drm_connector_state *state, struct drm_file *file_priv,
680 		struct drm_property *property, uint64_t val)
681 {
682 	struct drm_device *dev = connector->dev;
683 	struct drm_mode_config *config = &dev->mode_config;
684 	bool replaced = false;
685 
686 	if (property == config->prop_crtc_id) {
687 		struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
688 		if (val && !crtc)
689 			return -EACCES;
690 		return drm_atomic_set_crtc_for_connector(state, crtc);
691 	} else if (property == config->dpms_property) {
692 		/* Setting the DPMS property requires special handling, which
693 		 * is done in the legacy setprop path for us.  Disallow (for
694 		 * now?) atomic writes to the DPMS property:
695 		 */
696 		return -EINVAL;
697 	} else if (property == config->tv_select_subconnector_property) {
698 		state->tv.subconnector = val;
699 	} else if (property == config->tv_left_margin_property) {
700 		state->tv.margins.left = val;
701 	} else if (property == config->tv_right_margin_property) {
702 		state->tv.margins.right = val;
703 	} else if (property == config->tv_top_margin_property) {
704 		state->tv.margins.top = val;
705 	} else if (property == config->tv_bottom_margin_property) {
706 		state->tv.margins.bottom = val;
707 	} else if (property == config->tv_mode_property) {
708 		state->tv.mode = val;
709 	} else if (property == config->tv_brightness_property) {
710 		state->tv.brightness = val;
711 	} else if (property == config->tv_contrast_property) {
712 		state->tv.contrast = val;
713 	} else if (property == config->tv_flicker_reduction_property) {
714 		state->tv.flicker_reduction = val;
715 	} else if (property == config->tv_overscan_property) {
716 		state->tv.overscan = val;
717 	} else if (property == config->tv_saturation_property) {
718 		state->tv.saturation = val;
719 	} else if (property == config->tv_hue_property) {
720 		state->tv.hue = val;
721 	} else if (property == config->link_status_property) {
722 		/* Never downgrade from GOOD to BAD on userspace's request here,
723 		 * only hw issues can do that.
724 		 *
725 		 * For an atomic property, userspace doesn't need to be able
726 		 * to understand all the properties, but needs to be able to
727 		 * restore the state it wants on VT switch. So if userspace
728 		 * tries to change the link_status from GOOD to BAD, the driver
729 		 * silently rejects it and returns 0. This prevents userspace
730 		 * from accidentally breaking the display when it restores the
731 		 * state.
732 		 */
733 		if (state->link_status != DRM_LINK_STATUS_GOOD)
734 			state->link_status = val;
735 	} else if (property == config->hdr_output_metadata_property) {
736 		int ret;
737 		ret = drm_atomic_replace_property_blob_from_id(dev,
738 				&state->hdr_output_metadata,
739 				val,
740 				sizeof(struct hdr_output_metadata), -1,
741 				&replaced);
742 		return ret;
743 	} else if (property == config->aspect_ratio_property) {
744 		state->picture_aspect_ratio = val;
745 	} else if (property == config->content_type_property) {
746 		state->content_type = val;
747 	} else if (property == connector->scaling_mode_property) {
748 		state->scaling_mode = val;
749 	} else if (property == config->content_protection_property) {
750 		if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
751 			DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
752 			return -EINVAL;
753 		}
754 		state->content_protection = val;
755 	} else if (property == config->hdcp_content_type_property) {
756 		state->hdcp_content_type = val;
757 	} else if (property == connector->colorspace_property) {
758 		state->colorspace = val;
759 	} else if (property == config->writeback_fb_id_property) {
760 		struct drm_framebuffer *fb;
761 		int ret;
762 		fb = drm_framebuffer_lookup(dev, file_priv, val);
763 		ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
764 		if (fb)
765 			drm_framebuffer_put(fb);
766 		return ret;
767 	} else if (property == config->writeback_out_fence_ptr_property) {
768 		s32 __user *fence_ptr = u64_to_user_ptr(val);
769 
770 		return set_out_fence_for_connector(state->state, connector,
771 						   fence_ptr);
772 	} else if (property == connector->max_bpc_property) {
773 		state->max_requested_bpc = val;
774 	} else if (connector->funcs->atomic_set_property) {
775 		return connector->funcs->atomic_set_property(connector,
776 				state, property, val);
777 	} else {
778 		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n",
779 				 connector->base.id, connector->name,
780 				 property->base.id, property->name);
781 		return -EINVAL;
782 	}
783 
784 	return 0;
785 }
786 
787 static int
788 drm_atomic_connector_get_property(struct drm_connector *connector,
789 		const struct drm_connector_state *state,
790 		struct drm_property *property, uint64_t *val)
791 {
792 	struct drm_device *dev = connector->dev;
793 	struct drm_mode_config *config = &dev->mode_config;
794 
795 	if (property == config->prop_crtc_id) {
796 		*val = (state->crtc) ? state->crtc->base.id : 0;
797 	} else if (property == config->dpms_property) {
798 		if (state->crtc && state->crtc->state->self_refresh_active)
799 			*val = DRM_MODE_DPMS_ON;
800 		else
801 			*val = connector->dpms;
802 	} else if (property == config->tv_select_subconnector_property) {
803 		*val = state->tv.subconnector;
804 	} else if (property == config->tv_left_margin_property) {
805 		*val = state->tv.margins.left;
806 	} else if (property == config->tv_right_margin_property) {
807 		*val = state->tv.margins.right;
808 	} else if (property == config->tv_top_margin_property) {
809 		*val = state->tv.margins.top;
810 	} else if (property == config->tv_bottom_margin_property) {
811 		*val = state->tv.margins.bottom;
812 	} else if (property == config->tv_mode_property) {
813 		*val = state->tv.mode;
814 	} else if (property == config->tv_brightness_property) {
815 		*val = state->tv.brightness;
816 	} else if (property == config->tv_contrast_property) {
817 		*val = state->tv.contrast;
818 	} else if (property == config->tv_flicker_reduction_property) {
819 		*val = state->tv.flicker_reduction;
820 	} else if (property == config->tv_overscan_property) {
821 		*val = state->tv.overscan;
822 	} else if (property == config->tv_saturation_property) {
823 		*val = state->tv.saturation;
824 	} else if (property == config->tv_hue_property) {
825 		*val = state->tv.hue;
826 	} else if (property == config->link_status_property) {
827 		*val = state->link_status;
828 	} else if (property == config->aspect_ratio_property) {
829 		*val = state->picture_aspect_ratio;
830 	} else if (property == config->content_type_property) {
831 		*val = state->content_type;
832 	} else if (property == connector->colorspace_property) {
833 		*val = state->colorspace;
834 	} else if (property == connector->scaling_mode_property) {
835 		*val = state->scaling_mode;
836 	} else if (property == config->hdr_output_metadata_property) {
837 		*val = state->hdr_output_metadata ?
838 			state->hdr_output_metadata->base.id : 0;
839 	} else if (property == config->content_protection_property) {
840 		*val = state->content_protection;
841 	} else if (property == config->hdcp_content_type_property) {
842 		*val = state->hdcp_content_type;
843 	} else if (property == config->writeback_fb_id_property) {
844 		/* Writeback framebuffer is one-shot, write and forget */
845 		*val = 0;
846 	} else if (property == config->writeback_out_fence_ptr_property) {
847 		*val = 0;
848 	} else if (property == connector->max_bpc_property) {
849 		*val = state->max_requested_bpc;
850 	} else if (connector->funcs->atomic_get_property) {
851 		return connector->funcs->atomic_get_property(connector,
852 				state, property, val);
853 	} else {
854 		return -EINVAL;
855 	}
856 
857 	return 0;
858 }
859 
860 int drm_atomic_get_property(struct drm_mode_object *obj,
861 		struct drm_property *property, uint64_t *val)
862 {
863 	struct drm_device *dev = property->dev;
864 	int ret;
865 
866 	switch (obj->type) {
867 	case DRM_MODE_OBJECT_CONNECTOR: {
868 		struct drm_connector *connector = obj_to_connector(obj);
869 		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
870 		ret = drm_atomic_connector_get_property(connector,
871 				connector->state, property, val);
872 		break;
873 	}
874 	case DRM_MODE_OBJECT_CRTC: {
875 		struct drm_crtc *crtc = obj_to_crtc(obj);
876 		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
877 		ret = drm_atomic_crtc_get_property(crtc,
878 				crtc->state, property, val);
879 		break;
880 	}
881 	case DRM_MODE_OBJECT_PLANE: {
882 		struct drm_plane *plane = obj_to_plane(obj);
883 		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
884 		ret = drm_atomic_plane_get_property(plane,
885 				plane->state, property, val);
886 		break;
887 	}
888 	default:
889 		ret = -EINVAL;
890 		break;
891 	}
892 
893 	return ret;
894 }
895 
896 /*
897  * The big monster ioctl
898  */
899 
900 static struct drm_pending_vblank_event *create_vblank_event(
901 		struct drm_crtc *crtc, uint64_t user_data)
902 {
903 	struct drm_pending_vblank_event *e = NULL;
904 
905 	e = kzalloc(sizeof *e, GFP_KERNEL);
906 	if (!e)
907 		return NULL;
908 
909 	e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
910 	e->event.base.length = sizeof(e->event);
911 	e->event.vbl.crtc_id = crtc->base.id;
912 	e->event.vbl.user_data = user_data;
913 
914 	return e;
915 }
916 
917 int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
918 				     struct drm_connector *connector,
919 				     int mode)
920 {
921 	struct drm_connector *tmp_connector;
922 	struct drm_connector_state *new_conn_state;
923 	struct drm_crtc *crtc;
924 	struct drm_crtc_state *crtc_state;
925 	int i, ret, old_mode = connector->dpms;
926 	bool active = false;
927 
928 	ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
929 			       state->acquire_ctx);
930 	if (ret)
931 		return ret;
932 
933 	if (mode != DRM_MODE_DPMS_ON)
934 		mode = DRM_MODE_DPMS_OFF;
935 	connector->dpms = mode;
936 
937 	crtc = connector->state->crtc;
938 	if (!crtc)
939 		goto out;
940 	ret = drm_atomic_add_affected_connectors(state, crtc);
941 	if (ret)
942 		goto out;
943 
944 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
945 	if (IS_ERR(crtc_state)) {
946 		ret = PTR_ERR(crtc_state);
947 		goto out;
948 	}
949 
950 	for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
951 		if (new_conn_state->crtc != crtc)
952 			continue;
953 		if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
954 			active = true;
955 			break;
956 		}
957 	}
958 
959 	crtc_state->active = active;
960 	ret = drm_atomic_commit(state);
961 out:
962 	if (ret != 0)
963 		connector->dpms = old_mode;
964 	return ret;
965 }
966 
967 int drm_atomic_set_property(struct drm_atomic_state *state,
968 			    struct drm_file *file_priv,
969 			    struct drm_mode_object *obj,
970 			    struct drm_property *prop,
971 			    uint64_t prop_value)
972 {
973 	struct drm_mode_object *ref;
974 	int ret;
975 
976 	if (!drm_property_change_valid_get(prop, prop_value, &ref))
977 		return -EINVAL;
978 
979 	switch (obj->type) {
980 	case DRM_MODE_OBJECT_CONNECTOR: {
981 		struct drm_connector *connector = obj_to_connector(obj);
982 		struct drm_connector_state *connector_state;
983 
984 		connector_state = drm_atomic_get_connector_state(state, connector);
985 		if (IS_ERR(connector_state)) {
986 			ret = PTR_ERR(connector_state);
987 			break;
988 		}
989 
990 		ret = drm_atomic_connector_set_property(connector,
991 				connector_state, file_priv,
992 				prop, prop_value);
993 		break;
994 	}
995 	case DRM_MODE_OBJECT_CRTC: {
996 		struct drm_crtc *crtc = obj_to_crtc(obj);
997 		struct drm_crtc_state *crtc_state;
998 
999 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
1000 		if (IS_ERR(crtc_state)) {
1001 			ret = PTR_ERR(crtc_state);
1002 			break;
1003 		}
1004 
1005 		ret = drm_atomic_crtc_set_property(crtc,
1006 				crtc_state, prop, prop_value);
1007 		break;
1008 	}
1009 	case DRM_MODE_OBJECT_PLANE: {
1010 		struct drm_plane *plane = obj_to_plane(obj);
1011 		struct drm_plane_state *plane_state;
1012 
1013 		plane_state = drm_atomic_get_plane_state(state, plane);
1014 		if (IS_ERR(plane_state)) {
1015 			ret = PTR_ERR(plane_state);
1016 			break;
1017 		}
1018 
1019 		ret = drm_atomic_plane_set_property(plane,
1020 				plane_state, file_priv,
1021 				prop, prop_value);
1022 		break;
1023 	}
1024 	default:
1025 		ret = -EINVAL;
1026 		break;
1027 	}
1028 
1029 	drm_property_change_valid_put(prop, ref);
1030 	return ret;
1031 }
1032 
1033 /**
1034  * DOC: explicit fencing properties
1035  *
1036  * Explicit fencing allows userspace to control the buffer synchronization
1037  * between devices. A fence or a group of fences is transferred to/from
1038  * userspace using Sync File fds, and there are two DRM properties for that:
1039  * IN_FENCE_FD on each DRM Plane to send fences to the kernel, and
1040  * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
1041  *
1042  * As a contrast, with implicit fencing the kernel keeps track of any
1043  * ongoing rendering, and automatically ensures that the atomic update waits
1044  * for any pending rendering to complete. For shared buffers represented with
1045  * a &struct dma_buf this is tracked in &struct dma_resv.
1046  * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
1047  * whereas explicit fencing is what Android wants.
1048  *
1049  * "IN_FENCE_FD":
1050  *	Use this property to pass a fence that DRM should wait on before
1051  *	proceeding with the Atomic Commit request and show the framebuffer for
1052  *	the plane on the screen. The fence can be either a normal fence or a
1053  *	merged one, the sync_file framework will handle both cases and use a
1054  *	fence_array if a merged fence is received. Passing -1 here means no
1055  *	fences to wait on.
1056  *
1057  *	If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
1058  *	it will only check if the Sync File is a valid one.
1059  *
1060  *	On the driver side the fence is stored on the @fence parameter of
1061  *	&struct drm_plane_state. Drivers which also support implicit fencing
1062  *	should set the implicit fence using drm_atomic_set_fence_for_plane(),
1063  *	to make sure there's consistent behaviour between drivers in precedence
1064  *	of implicit vs. explicit fencing.
1065  *
1066  * "OUT_FENCE_PTR":
1067  *	Use this property to pass a file descriptor pointer to DRM. Once the
1068  *	Atomic Commit request call returns OUT_FENCE_PTR will be filled with
1069  *	the file descriptor number of a Sync File. This Sync File contains the
1070  *	CRTC fence that will be signaled when all framebuffers present on the
1071  *	Atomic Commit request for that given CRTC are scanned out on the
1072  *	screen.
1073  *
1074  *	The Atomic Commit request fails if an invalid pointer is passed. If the
1075  *	Atomic Commit request fails for any other reason, the out fence fd
1076  *	returned will be -1. On an Atomic Commit with the
1077  *	DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
1078  *
1079  *	Note that out-fences don't have a special interface to drivers and are
1080  *	internally represented by a &struct drm_pending_vblank_event in struct
1081  *	&drm_crtc_state, which is also used by the nonblocking atomic commit
1082  *	helpers and for the DRM event handling for existing userspace.
1083  */
1084 
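/*
 * Example (illustrative sketch): how userspace would wire up both fence
 * properties with libdrm.  The object and property IDs (plane_id, crtc_id,
 * in_fence_fd_prop, out_fence_ptr_prop) and render_fence_fd are assumed to
 * have been discovered or created by the caller beforehand.
 *
 *	int out_fence_fd = -1;
 *	drmModeAtomicReq *req = drmModeAtomicAlloc();
 *
 *	drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop,
 *				 render_fence_fd);
 *	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
 *				 (uint64_t)(uintptr_t)&out_fence_fd);
 *	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
 *	drmModeAtomicFree(req);
 *	// on success, out_fence_fd now holds a sync_file fd for this CRTC
 */
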
1085 struct drm_out_fence_state {
1086 	s32 __user *out_fence_ptr;
1087 	struct sync_file *sync_file;
1088 	int fd;
1089 };
1090 
1091 static int setup_out_fence(struct drm_out_fence_state *fence_state,
1092 			   struct dma_fence *fence)
1093 {
1094 #ifdef __NetBSD__
1095 	int fd = -1;
1096 	struct file *fp = NULL;
1097 	int ret;
1098 
1099 	/* Allocate a file descriptor.	*/
1100 	/* XXX errno NetBSD->Linux */
1101 	ret = -fd_allocfile(&fp, &fd);
1102 	if (ret)
1103 		goto out;
1104 
1105 	/* Prepare to transmit it to user.  */
1106 	/* XXX errno NetBSD->Linux */
1107 	ret = -copyout(&fd, fence_state->out_fence_ptr, sizeof fd);
1108 	if (ret)
1109 		goto out;
1110 
1111 	/* Create sync file.  */
1112 	fence_state->sync_file = sync_file_create(fence, fp);
1113 	if (fence_state->sync_file == NULL) {
1114 		ret = -ENOMEM;
1115 		goto out;
1116 	}
1117 	fd_affix(curproc, fp, fd);
1118 	fp = NULL;		/* sync_file consumes */
1119 
1120 out:	if (fp != NULL) {
1121 		fd_abort(curproc, fp, fd);
1122 		fd = -1;
1123 	}
1124 	fence_state->fd = fd;
1125 	return ret;
1126 #else
1127 	fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
1128 	if (fence_state->fd < 0)
1129 		return fence_state->fd;
1130 
1131 	if (put_user(fence_state->fd, fence_state->out_fence_ptr))
1132 		return -EFAULT;
1133 
1134 	fence_state->sync_file = sync_file_create(fence);
1135 	if (!fence_state->sync_file)
1136 		return -ENOMEM;
1137 
1138 	return 0;
1139 #endif
1140 }
1141 
1142 static int prepare_signaling(struct drm_device *dev,
1143 				  struct drm_atomic_state *state,
1144 				  struct drm_mode_atomic *arg,
1145 				  struct drm_file *file_priv,
1146 				  struct drm_out_fence_state **fence_state,
1147 				  unsigned int *num_fences)
1148 {
1149 	struct drm_crtc *crtc;
1150 	struct drm_crtc_state *crtc_state;
1151 	struct drm_connector *conn;
1152 	struct drm_connector_state *conn_state;
1153 	int i, c = 0, ret;
1154 
1155 	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
1156 		return 0;
1157 
1158 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1159 		s32 __user *fence_ptr;
1160 
1161 		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
1162 
1163 		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
1164 			struct drm_pending_vblank_event *e;
1165 
1166 			e = create_vblank_event(crtc, arg->user_data);
1167 			if (!e)
1168 				return -ENOMEM;
1169 
1170 			crtc_state->event = e;
1171 		}
1172 
1173 		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
1174 			struct drm_pending_vblank_event *e = crtc_state->event;
1175 
1176 			if (!file_priv)
1177 				continue;
1178 
1179 			ret = drm_event_reserve_init(dev, file_priv, &e->base,
1180 						     &e->event.base);
1181 			if (ret) {
1182 				kfree(e);
1183 				crtc_state->event = NULL;
1184 				return ret;
1185 			}
1186 		}
1187 
1188 		if (fence_ptr) {
1189 			struct dma_fence *fence;
1190 			struct drm_out_fence_state *f;
1191 
1192 			f = krealloc(*fence_state, sizeof(**fence_state) *
1193 				     (*num_fences + 1), GFP_KERNEL);
1194 			if (!f)
1195 				return -ENOMEM;
1196 
1197 			memset(&f[*num_fences], 0, sizeof(*f));
1198 
1199 			f[*num_fences].out_fence_ptr = fence_ptr;
1200 			*fence_state = f;
1201 
1202 			fence = drm_crtc_create_fence(crtc);
1203 			if (!fence)
1204 				return -ENOMEM;
1205 
1206 			ret = setup_out_fence(&f[(*num_fences)++], fence);
1207 			if (ret) {
1208 				dma_fence_put(fence);
1209 				return ret;
1210 			}
1211 
1212 			crtc_state->event->base.fence = fence;
1213 		}
1214 
1215 		c++;
1216 	}
1217 
1218 	for_each_new_connector_in_state(state, conn, conn_state, i) {
1219 		struct drm_writeback_connector *wb_conn;
1220 		struct drm_out_fence_state *f;
1221 		struct dma_fence *fence;
1222 		s32 __user *fence_ptr;
1223 
1224 		if (!conn_state->writeback_job)
1225 			continue;
1226 
1227 		fence_ptr = get_out_fence_for_connector(state, conn);
1228 		if (!fence_ptr)
1229 			continue;
1230 
1231 		f = krealloc(*fence_state, sizeof(**fence_state) *
1232 			     (*num_fences + 1), GFP_KERNEL);
1233 		if (!f)
1234 			return -ENOMEM;
1235 
1236 		memset(&f[*num_fences], 0, sizeof(*f));
1237 
1238 		f[*num_fences].out_fence_ptr = fence_ptr;
1239 		*fence_state = f;
1240 
1241 		wb_conn = drm_connector_to_writeback(conn);
1242 		fence = drm_writeback_get_out_fence(wb_conn);
1243 		if (!fence)
1244 			return -ENOMEM;
1245 
1246 		ret = setup_out_fence(&f[(*num_fences)++], fence);
1247 		if (ret) {
1248 			dma_fence_put(fence);
1249 			return ret;
1250 		}
1251 
1252 		conn_state->writeback_job->out_fence = fence;
1253 	}
1254 
1255 	/*
1256 	 * Having this flag means userspace is waiting for an event that will
1257 	 * never arrive, because there is no CRTC available to signal it.
1258 	 */
1259 	if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
1260 		return -EINVAL;
1261 
1262 	return 0;
1263 }
1264 
1265 static void complete_signaling(struct drm_device *dev,
1266 			       struct drm_atomic_state *state,
1267 			       struct drm_out_fence_state *fence_state,
1268 			       unsigned int num_fences,
1269 			       bool install_fds)
1270 {
1271 	struct drm_crtc *crtc;
1272 	struct drm_crtc_state *crtc_state;
1273 	int i;
1274 
1275 	if (install_fds) {
1276 		for (i = 0; i < num_fences; i++)
1277 			fd_install(fence_state[i].fd,
1278 				   fence_state[i].sync_file->file);
1279 
1280 		kfree(fence_state);
1281 		return;
1282 	}
1283 
1284 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1285 		struct drm_pending_vblank_event *event = crtc_state->event;
1286 		/*
1287 		 * Free the allocated event. drm_atomic_helper_setup_commit
1288 		 * can allocate an event too, so only free it if it's ours
1289 		 * to prevent a double free in drm_atomic_state_clear.
1290 		 */
1291 		if (event && (event->base.fence || event->base.file_priv)) {
1292 			drm_event_cancel_free(dev, &event->base);
1293 			crtc_state->event = NULL;
1294 		}
1295 	}
1296 
1297 	if (!fence_state)
1298 		return;
1299 
1300 	for (i = 0; i < num_fences; i++) {
1301 #ifdef __NetBSD__
1302 		if (fd_getfile(fence_state[i].fd))
1303 			(void)fd_close(fence_state[i].fd);
1304 #else
1305 		if (fence_state[i].sync_file)
1306 			fput(fence_state[i].sync_file->file);
1307 		if (fence_state[i].fd >= 0)
1308 			put_unused_fd(fence_state[i].fd);
1309 #endif
1310 
1311 		/* If this fails, log an error to the user */
1312 		if (fence_state[i].out_fence_ptr &&
1313 		    put_user(-1, fence_state[i].out_fence_ptr))
1314 			DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
1315 	}
1316 
1317 	kfree(fence_state);
1318 }
1319 
1320 int drm_mode_atomic_ioctl(struct drm_device *dev,
1321 			  void *data, struct drm_file *file_priv)
1322 {
1323 	struct drm_mode_atomic *arg = data;
1324 	uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
1325 	uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
1326 	uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
1327 	uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
1328 	unsigned int copied_objs, copied_props;
1329 	struct drm_atomic_state *state;
1330 	struct drm_modeset_acquire_ctx ctx;
1331 	struct drm_out_fence_state *fence_state;
1332 	int ret = 0;
1333 	unsigned int i, j, num_fences;
1334 
1335 	/* disallow for drivers not supporting atomic: */
1336 	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
1337 		return -EOPNOTSUPP;
1338 
1339 	/* disallow for userspace that has not enabled atomic cap (even
1340 	 * though this may be a bit overkill, since legacy userspace
1341 	 * wouldn't know how to call this ioctl)
1342 	 */
1343 	if (!file_priv->atomic)
1344 		return -EINVAL;
1345 
1346 	if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
1347 		return -EINVAL;
1348 
1349 	if (arg->reserved)
1350 		return -EINVAL;
1351 
1352 	if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC)
1353 		return -EINVAL;
1354 
1355 	/* can't test and expect an event at the same time. */
1356 	if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
1357 			(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
1358 		return -EINVAL;
1359 
1360 	state = drm_atomic_state_alloc(dev);
1361 	if (!state)
1362 		return -ENOMEM;
1363 
1364 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1365 	state->acquire_ctx = &ctx;
1366 	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
1367 
1368 retry:
1369 	copied_objs = 0;
1370 	copied_props = 0;
1371 	fence_state = NULL;
1372 	num_fences = 0;
1373 
1374 	for (i = 0; i < arg->count_objs; i++) {
1375 		uint32_t obj_id, count_props;
1376 		struct drm_mode_object *obj;
1377 
1378 		if (get_user(obj_id, objs_ptr + copied_objs)) {
1379 			ret = -EFAULT;
1380 			goto out;
1381 		}
1382 
1383 		obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
1384 		if (!obj) {
1385 			ret = -ENOENT;
1386 			goto out;
1387 		}
1388 
1389 		if (!obj->properties) {
1390 			drm_mode_object_put(obj);
1391 			ret = -ENOENT;
1392 			goto out;
1393 		}
1394 
1395 		if (get_user(count_props, count_props_ptr + copied_objs)) {
1396 			drm_mode_object_put(obj);
1397 			ret = -EFAULT;
1398 			goto out;
1399 		}
1400 
1401 		copied_objs++;
1402 
1403 		for (j = 0; j < count_props; j++) {
1404 			uint32_t prop_id;
1405 			uint64_t prop_value;
1406 			struct drm_property *prop;
1407 
1408 			if (get_user(prop_id, props_ptr + copied_props)) {
1409 				drm_mode_object_put(obj);
1410 				ret = -EFAULT;
1411 				goto out;
1412 			}
1413 
1414 			prop = drm_mode_obj_find_prop_id(obj, prop_id);
1415 			if (!prop) {
1416 				drm_mode_object_put(obj);
1417 				ret = -ENOENT;
1418 				goto out;
1419 			}
1420 
1421 			if (copy_from_user(&prop_value,
1422 					   prop_values_ptr + copied_props,
1423 					   sizeof(prop_value))) {
1424 				drm_mode_object_put(obj);
1425 				ret = -EFAULT;
1426 				goto out;
1427 			}
1428 
1429 			ret = drm_atomic_set_property(state, file_priv,
1430 						      obj, prop, prop_value);
1431 			if (ret) {
1432 				drm_mode_object_put(obj);
1433 				goto out;
1434 			}
1435 
1436 			copied_props++;
1437 		}
1438 
1439 		drm_mode_object_put(obj);
1440 	}
1441 
1442 	ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
1443 				&num_fences);
1444 	if (ret)
1445 		goto out;
1446 
1447 	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
1448 		ret = drm_atomic_check_only(state);
1449 	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
1450 		ret = drm_atomic_nonblocking_commit(state);
1451 	} else {
1452 		if (drm_debug_enabled(DRM_UT_STATE))
1453 			drm_atomic_print_state(state);
1454 
1455 		ret = drm_atomic_commit(state);
1456 	}
1457 
1458 out:
1459 	complete_signaling(dev, state, fence_state, num_fences, !ret);
1460 
1461 	if (ret == -EDEADLK) {
1462 		drm_atomic_state_clear(state);
1463 		ret = drm_modeset_backoff(&ctx);
1464 		if (!ret)
1465 			goto retry;
1466 	}
1467 
1468 	drm_atomic_state_put(state);
1469 
1470 	drm_modeset_drop_locks(&ctx);
1471 	drm_modeset_acquire_fini(&ctx);
1472 
1473 	return ret;
1474 }
1475