/*	$NetBSD: amdgpu_dm_hdcp.c,v 1.4 2021/12/19 12:31:45 riastradh Exp $	*/

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_dm_hdcp.c,v 1.4 2021/12/19 12:31:45 riastradh Exp $");

#include "amdgpu_dm_hdcp.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "dm_helpers.h"
#include <drm/drm_hdcp.h>

#include <linux/nbsd-namespace.h>
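/*
 * Low-level transport callbacks handed to the mod_hdcp module: they
 * forward I2C and DPCD reads/writes for a dc_link through the DM
 * helpers, using the hardware I2C engine at the link's configured
 * I2C speed.
 */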
static bool
lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
{
	struct dc_link *link = handle;
	struct i2c_payload i2c_payloads[] = {{true, address, size, __UNCONST(data)} };
	struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};

	return dm_helpers_submit_i2c(link->ctx, link, &cmd);
}

static bool
lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size)
{
	struct dc_link *link = handle;

	struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} };
	struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};

	return dm_helpers_submit_i2c(link->ctx, link, &cmd);
}

static bool
lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
{
	struct dc_link *link = handle;

	return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size);
}

static bool
lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
{
	struct dc_link *link = handle;

	return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
}

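/*
 * Apply the scheduling requests that mod_hdcp recorded in the work
 * queue's output block: cancel or (re)arm the callback and watchdog
 * delayed work items, then kick property validation immediately.
 */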
static void process_output(struct hdcp_workqueue *hdcp_work)
{
	struct mod_hdcp_output output = hdcp_work->output;

	if (output.callback_stop)
		cancel_delayed_work(&hdcp_work->callback_dwork);

	if (output.callback_needed)
		schedule_delayed_work(&hdcp_work->callback_dwork,
				      msecs_to_jiffies(output.callback_delay));

	if (output.watchdog_timer_stop)
		cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);

	if (output.watchdog_timer_needed)
		schedule_delayed_work(&hdcp_work->watchdog_timer_dwork,
				      msecs_to_jiffies(output.watchdog_timer_delay));

	schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0));
}

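/*
 * Enable or disable HDCP encryption for a connector on the given link.
 * If mod_hdcp already tracks the display it is removed first and its
 * adjustments are rewritten for the requested content type (Type 0 or
 * Type 1); the display is then (re-)added so authentication runs with
 * the new settings, and any resulting work is scheduled.
 */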
void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
			 unsigned int link_index,
			 struct amdgpu_dm_connector *aconnector,
			 uint8_t content_type,
			 bool enable_encryption)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
	struct mod_hdcp_display *display = &hdcp_work[link_index].display;
	struct mod_hdcp_link *link = &hdcp_work[link_index].link;
	struct mod_hdcp_display_query query;

	mutex_lock(&hdcp_w->mutex);
	hdcp_w->aconnector = aconnector;

	query.display = NULL;
	mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query);

	if (query.display != NULL) {
		memcpy(display, query.display, sizeof(struct mod_hdcp_display));
		mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);

		hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;

		if (enable_encryption) {
			display->adjust.disable = 0;
			if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0)
				hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
			else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1)
				hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;

			schedule_delayed_work(&hdcp_w->property_validate_dwork,
					      msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
		} else {
			display->adjust.disable = 1;
			hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
			cancel_delayed_work(&hdcp_w->property_validate_dwork);
		}

		display->state = MOD_HDCP_DISPLAY_ACTIVE;
	}

	mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);

	process_output(hdcp_w);
	mutex_unlock(&hdcp_w->mutex);
}

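/* Drop a connector's display from mod_hdcp and schedule the resulting work. */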
static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
				unsigned int link_index,
				struct amdgpu_dm_connector *aconnector)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];

	mutex_lock(&hdcp_w->mutex);
	hdcp_w->aconnector = aconnector;

	mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);

	process_output(hdcp_w);
	mutex_unlock(&hdcp_w->mutex);
}

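/*
 * Tear down the HDCP connection state for a link: reset the mod_hdcp
 * connection, stop pending property validation and mark encryption off.
 */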
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];

	mutex_lock(&hdcp_w->mutex);

	mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);

	cancel_delayed_work(&hdcp_w->property_validate_dwork);
	hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;

	process_output(hdcp_w);

	mutex_unlock(&hdcp_w->mutex);
}

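/* Defer CP_IRQ (content protection interrupt) handling to the work queue. */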
void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];

	schedule_work(&hdcp_w->cpirq_work);
}

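/*
 * Delayed-work handler for mod_hdcp callback requests: the watchdog is
 * cancelled and the CALLBACK event is fed back into the state machine.
 */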
static void event_callback(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work;

	hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
				 callback_dwork);

	mutex_lock(&hdcp_work->mutex);

	cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);

	mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK,
			       &hdcp_work->output);

	process_output(hdcp_work);

	mutex_unlock(&hdcp_work->mutex);
}

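/*
 * Push the current encryption status out to DRM as the connector's
 * content protection property: ENABLED when encryption is active for
 * the requested content type, DESIRED otherwise.  Waits for a pending
 * commit to complete so the property is not updated mid-modeset.
 */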
static void event_property_update(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work);
	struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
	struct drm_device *dev = hdcp_work->aconnector->base.dev;
	long ret;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp_work->mutex);

	if (aconnector->base.state->commit) {
		ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ);

		if (ret == 0) {
			DRM_ERROR("HDCP state unknown! Setting it to DESIRED");
			hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
		}
	}

	if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
		if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 &&
		    hdcp_work->encryption_status <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON)
			drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
		else if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 &&
			 hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON)
			drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
	} else {
		drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED);
	}

	mutex_unlock(&hdcp_work->mutex);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

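/*
 * Query mod_hdcp for the display's current encryption status and, if it
 * differs from the cached value, schedule a property update.
 */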
static void event_property_validate(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work =
		container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);
	struct mod_hdcp_display_query query;
	struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;

	if (!aconnector)
		return;

	mutex_lock(&hdcp_work->mutex);

	query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
	mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query);

	if (query.encryption_status != hdcp_work->encryption_status) {
		hdcp_work->encryption_status = query.encryption_status;
		schedule_work(&hdcp_work->property_update_work);
	}

	mutex_unlock(&hdcp_work->mutex);
}

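/* Watchdog expiry: report a WATCHDOG_TIMEOUT event to the HDCP state machine. */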
static void event_watchdog_timer(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work;

	hdcp_work = container_of(to_delayed_work(work),
				 struct hdcp_workqueue,
				 watchdog_timer_dwork);

	mutex_lock(&hdcp_work->mutex);

	mod_hdcp_process_event(&hdcp_work->hdcp,
			       MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
			       &hdcp_work->output);

	process_output(hdcp_work);

	mutex_unlock(&hdcp_work->mutex);
}

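/* Work handler for a CP_IRQ: feed the CPIRQ event into the HDCP state machine. */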
static void event_cpirq(struct work_struct *work)
{
	struct hdcp_workqueue *hdcp_work;

	hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);

	mutex_lock(&hdcp_work->mutex);

	mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);

	process_output(hdcp_work);

	mutex_unlock(&hdcp_work->mutex);
}

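/*
 * Flush and release the per-link HDCP work queue state created by
 * hdcp_create_workqueue().
 */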
void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
{
	int i = 0;

	for (i = 0; i < hdcp_work->max_link; i++) {
		cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
		cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
		mutex_destroy(&hdcp_work[i].mutex);
	}

	kfree(hdcp_work);
}

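/*
 * Installed as cp_psp->funcs.update_stream_config.  On DPMS off the
 * display is removed from HDCP; otherwise the link/display topology
 * (controller, DIG front end and back end, DDC line, DPCD revision)
 * is recorded and the display is registered with encryption initially
 * disabled.
 */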
static void update_config(void *handle, struct cp_psp_stream_config *config)
{
	struct hdcp_workqueue *hdcp_work = handle;
	struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
	int link_index = aconnector->dc_link->link_index;
	struct mod_hdcp_display *display = &hdcp_work[link_index].display;
	struct mod_hdcp_link *link = &hdcp_work[link_index].link;

	memset(display, 0, sizeof(*display));
	memset(link, 0, sizeof(*link));

	display->index = aconnector->base.index;

	if (config->dpms_off) {
		hdcp_remove_display(hdcp_work, link_index, aconnector);
		return;
	}
	display->state = MOD_HDCP_DISPLAY_ACTIVE;

	if (aconnector->dc_sink != NULL)
		link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal);

	display->controller = CONTROLLER_ID_D0 + config->otg_inst;
	display->dig_fe = config->stream_enc_inst;
	link->dig_be = config->link_enc_inst;
	link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
	link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
	display->adjust.disable = 1;
	link->adjust.auth_delay = 2;

	hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
}

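/*
 * Allocate one hdcp_workqueue entry per link, wire up the PSP handle
 * and the I2C/DPCD callbacks for each, and register update_config()
 * as the cp_psp stream-configuration hook.
 */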
struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct hdcp_workqueue *hdcp_work = kzalloc(max_caps*sizeof(*hdcp_work), GFP_KERNEL);
	int i = 0;

	if (hdcp_work == NULL)
		goto fail_alloc_context;

	hdcp_work->max_link = max_caps;

	for (i = 0; i < max_caps; i++) {
		mutex_init(&hdcp_work[i].mutex);

		INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
		INIT_WORK(&hdcp_work[i].property_update_work, event_property_update);
		INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback);
		INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
		INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);

		hdcp_work[i].hdcp.config.psp.handle = psp_context;
		hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
		hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
		hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
		hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
		hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
	}

	cp_psp->funcs.update_stream_config = update_config;
	cp_psp->handle = hdcp_work;

	return hdcp_work;

fail_alloc_context:
	kfree(hdcp_work);

	return NULL;
}