1f43cf1b1SMichael Neumann /*
2f43cf1b1SMichael Neumann * Copyright 2011 Advanced Micro Devices, Inc.
3f43cf1b1SMichael Neumann * All Rights Reserved.
4f43cf1b1SMichael Neumann *
5f43cf1b1SMichael Neumann * Permission is hereby granted, free of charge, to any person obtaining a
6f43cf1b1SMichael Neumann * copy of this software and associated documentation files (the
7f43cf1b1SMichael Neumann * "Software"), to deal in the Software without restriction, including
8f43cf1b1SMichael Neumann * without limitation the rights to use, copy, modify, merge, publish,
9f43cf1b1SMichael Neumann * distribute, sub license, and/or sell copies of the Software, and to
10f43cf1b1SMichael Neumann * permit persons to whom the Software is furnished to do so, subject to
11f43cf1b1SMichael Neumann * the following conditions:
12f43cf1b1SMichael Neumann *
13f43cf1b1SMichael Neumann * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14f43cf1b1SMichael Neumann * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15f43cf1b1SMichael Neumann * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16f43cf1b1SMichael Neumann * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17f43cf1b1SMichael Neumann * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18f43cf1b1SMichael Neumann * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19f43cf1b1SMichael Neumann * USE OR OTHER DEALINGS IN THE SOFTWARE.
20f43cf1b1SMichael Neumann *
21f43cf1b1SMichael Neumann * The above copyright notice and this permission notice (including the
22f43cf1b1SMichael Neumann * next paragraph) shall be included in all copies or substantial portions
23f43cf1b1SMichael Neumann * of the Software.
24f43cf1b1SMichael Neumann *
25f43cf1b1SMichael Neumann */
26f43cf1b1SMichael Neumann /*
27f43cf1b1SMichael Neumann * Authors:
28f43cf1b1SMichael Neumann * Christian König <deathsimple@vodafone.de>
29f43cf1b1SMichael Neumann */
30f43cf1b1SMichael Neumann
3157e252bfSMichael Neumann #include <linux/firmware.h>
32c4ef309bSzrj #include <linux/module.h>
33f43cf1b1SMichael Neumann #include <drm/drmP.h>
34c59a5c48SFrançois Tigeot #include <drm/drm.h>
35f43cf1b1SMichael Neumann
36f43cf1b1SMichael Neumann #include "radeon.h"
37d78d3a22SFrançois Tigeot #include "radeon_ucode.h"
38f43cf1b1SMichael Neumann #include "r600d.h"
39f43cf1b1SMichael Neumann
40f43cf1b1SMichael Neumann /* 1 second timeout */
41f43cf1b1SMichael Neumann #define UVD_IDLE_TIMEOUT_MS 1000
42f43cf1b1SMichael Neumann
43f43cf1b1SMichael Neumann /* Firmware Names */
44591d5043SFrançois Tigeot #define FIRMWARE_R600 "radeonkmsfw_R600_uvd"
45591d5043SFrançois Tigeot #define FIRMWARE_RS780 "radeonkmsfw_RS780_uvd"
46591d5043SFrançois Tigeot #define FIRMWARE_RV770 "radeonkmsfw_RV770_uvd"
47f43cf1b1SMichael Neumann #define FIRMWARE_RV710 "radeonkmsfw_RV710_uvd"
48f43cf1b1SMichael Neumann #define FIRMWARE_CYPRESS "radeonkmsfw_CYPRESS_uvd"
49f43cf1b1SMichael Neumann #define FIRMWARE_SUMO "radeonkmsfw_SUMO_uvd"
50f43cf1b1SMichael Neumann #define FIRMWARE_TAHITI "radeonkmsfw_TAHITI_uvd"
51d78d3a22SFrançois Tigeot #define FIRMWARE_BONAIRE_LEGACY "radeonkmsfw_BONAIRE_uvd"
52ccd7e1eaSFrançois Tigeot #define FIRMWARE_BONAIRE "radeonkmsfw_bonaire_uvd"
53f43cf1b1SMichael Neumann
54591d5043SFrançois Tigeot MODULE_FIRMWARE(FIRMWARE_R600);
55591d5043SFrançois Tigeot MODULE_FIRMWARE(FIRMWARE_RS780);
56591d5043SFrançois Tigeot MODULE_FIRMWARE(FIRMWARE_RV770);
57f43cf1b1SMichael Neumann MODULE_FIRMWARE(FIRMWARE_RV710);
58f43cf1b1SMichael Neumann MODULE_FIRMWARE(FIRMWARE_CYPRESS);
59f43cf1b1SMichael Neumann MODULE_FIRMWARE(FIRMWARE_SUMO);
60f43cf1b1SMichael Neumann MODULE_FIRMWARE(FIRMWARE_TAHITI);
61d78d3a22SFrançois Tigeot MODULE_FIRMWARE(FIRMWARE_BONAIRE_LEGACY);
6257e252bfSMichael Neumann MODULE_FIRMWARE(FIRMWARE_BONAIRE);
63f43cf1b1SMichael Neumann
64f43cf1b1SMichael Neumann static void radeon_uvd_idle_work_handler(struct work_struct *work);
65f43cf1b1SMichael Neumann
/*
 * radeon_uvd_init - load UVD microcode and allocate the UVD VCPU buffer object
 *
 * @rdev: radeon device
 *
 * Selects the firmware image for rdev->family.  CIK parts additionally have a
 * newer, header-carrying ucode (fw_name) that is tried first; on any family,
 * if no new image exists or loading/validating it fails, the legacy image
 * (legacy_fw_name) is loaded instead.  Afterwards a pinned VRAM buffer object
 * is created that holds the firmware plus the UVD stack/heap and one session
 * area per handle.
 *
 * Returns 0 on success, negative error code on failure.
 */
int radeon_uvd_init(struct radeon_device *rdev)
{
	unsigned long bo_size;
	const char *fw_name = NULL, *legacy_fw_name = NULL;
	int i, r;

	/* delayed work that powers UVD down again after a period of inactivity */
	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV670:
	case CHIP_RV620:
	case CHIP_RV635:
		legacy_fw_name = FIRMWARE_R600;
		break;

	case CHIP_RS780:
	case CHIP_RS880:
		legacy_fw_name = FIRMWARE_RS780;
		break;

	case CHIP_RV770:
		legacy_fw_name = FIRMWARE_RV770;
		break;

	case CHIP_RV710:
	case CHIP_RV730:
	case CHIP_RV740:
		legacy_fw_name = FIRMWARE_RV710;
		break;

	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_REDWOOD:
	case CHIP_CEDAR:
		legacy_fw_name = FIRMWARE_CYPRESS;
		break;

	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_PALM:
	case CHIP_CAYMAN:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		legacy_fw_name = FIRMWARE_SUMO;
		break;

	case CHIP_TAHITI:
	case CHIP_VERDE:
	case CHIP_PITCAIRN:
	case CHIP_ARUBA:
	case CHIP_OLAND:
		legacy_fw_name = FIRMWARE_TAHITI;
		break;

	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		/* CIK parts have both a legacy and a validated ucode image */
		legacy_fw_name = FIRMWARE_BONAIRE_LEGACY;
		fw_name = FIRMWARE_BONAIRE;
		break;

	default:
		return -EINVAL;
	}

	rdev->uvd.fw_header_present = false;
	rdev->uvd.max_handles = RADEON_DEFAULT_UVD_HANDLES;
	if (fw_name) {
		/* Let's try to load the newer firmware first */
		r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
		if (r) {
			/* not fatal: the legacy image is tried below */
			dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
				fw_name);
		} else {
			struct common_firmware_header *hdr = (void *)rdev->uvd_fw->data;
			unsigned version_major, version_minor, family_id;

			r = radeon_ucode_validate(rdev->uvd_fw);
			if (r)
				return r;

			rdev->uvd.fw_header_present = true;

			/* ucode_version packs family (bits 0-7), minor (8-15)
			 * and major (24-31) into one little-endian word */
			family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
			version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
			version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
			DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
				 version_major, version_minor, family_id);

			/*
			 * Limit the number of UVD handles depending on
			 * microcode major and minor versions.
			 */
			if ((version_major >= 0x01) && (version_minor >= 0x37))
				rdev->uvd.max_handles = RADEON_MAX_UVD_HANDLES;
		}
	}

	/*
	 * In case there is only legacy firmware, or we encounter an error
	 * while loading the new firmware, we fall back to loading the legacy
	 * firmware now.
	 */
	if (!fw_name || r) {
		r = request_firmware(&rdev->uvd_fw, legacy_fw_name, rdev->dev);
		if (r) {
			dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
				legacy_fw_name);
			return r;
		}
	}

	/* firmware (+8 byte tail) + stack + heap + one session area per handle */
	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->datasize + 8) +
		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
		  RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles;
	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->uvd.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	/* pin the VCPU bo in VRAM and record its GPU address */
	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->uvd.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	/* keep a permanent CPU mapping; used by radeon_uvd_resume() */
	r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	radeon_bo_unreserve(rdev->uvd.vcpu_bo);

	/* no UVD sessions are open yet */
	for (i = 0; i < rdev->uvd.max_handles; ++i) {
		atomic_set(&rdev->uvd.handles[i], 0);
		rdev->uvd.filp[i] = NULL;
		rdev->uvd.img_size[i] = 0;
	}

	return 0;
}
22757e252bfSMichael Neumann
radeon_uvd_fini(struct radeon_device * rdev)22857e252bfSMichael Neumann void radeon_uvd_fini(struct radeon_device *rdev)
22957e252bfSMichael Neumann {
23057e252bfSMichael Neumann int r;
23157e252bfSMichael Neumann
23257e252bfSMichael Neumann if (rdev->uvd.vcpu_bo == NULL)
23357e252bfSMichael Neumann return;
23457e252bfSMichael Neumann
23557e252bfSMichael Neumann r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
23657e252bfSMichael Neumann if (!r) {
23757e252bfSMichael Neumann radeon_bo_kunmap(rdev->uvd.vcpu_bo);
23857e252bfSMichael Neumann radeon_bo_unpin(rdev->uvd.vcpu_bo);
23957e252bfSMichael Neumann radeon_bo_unreserve(rdev->uvd.vcpu_bo);
24057e252bfSMichael Neumann }
24157e252bfSMichael Neumann
24257e252bfSMichael Neumann radeon_bo_unref(&rdev->uvd.vcpu_bo);
24357e252bfSMichael Neumann
244c6f73aabSFrançois Tigeot radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
245c6f73aabSFrançois Tigeot
24657e252bfSMichael Neumann release_firmware(rdev->uvd_fw);
24757e252bfSMichael Neumann }
24857e252bfSMichael Neumann
radeon_uvd_suspend(struct radeon_device * rdev)24957e252bfSMichael Neumann int radeon_uvd_suspend(struct radeon_device *rdev)
25057e252bfSMichael Neumann {
251c59a5c48SFrançois Tigeot int i, r;
25257e252bfSMichael Neumann
25357e252bfSMichael Neumann if (rdev->uvd.vcpu_bo == NULL)
25457e252bfSMichael Neumann return 0;
25557e252bfSMichael Neumann
256d78d3a22SFrançois Tigeot for (i = 0; i < rdev->uvd.max_handles; ++i) {
257c59a5c48SFrançois Tigeot uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
258c59a5c48SFrançois Tigeot if (handle != 0) {
259c59a5c48SFrançois Tigeot struct radeon_fence *fence;
26057e252bfSMichael Neumann
261c59a5c48SFrançois Tigeot radeon_uvd_note_usage(rdev);
26257e252bfSMichael Neumann
263c59a5c48SFrançois Tigeot r = radeon_uvd_get_destroy_msg(rdev,
264c59a5c48SFrançois Tigeot R600_RING_TYPE_UVD_INDEX, handle, &fence);
265c59a5c48SFrançois Tigeot if (r) {
266c59a5c48SFrançois Tigeot DRM_ERROR("Error destroying UVD (%d)!\n", r);
267c59a5c48SFrançois Tigeot continue;
268c59a5c48SFrançois Tigeot }
26957e252bfSMichael Neumann
270c59a5c48SFrançois Tigeot radeon_fence_wait(fence, false);
271c59a5c48SFrançois Tigeot radeon_fence_unref(&fence);
27257e252bfSMichael Neumann
273c59a5c48SFrançois Tigeot rdev->uvd.filp[i] = NULL;
274c59a5c48SFrançois Tigeot atomic_set(&rdev->uvd.handles[i], 0);
275c59a5c48SFrançois Tigeot }
276c59a5c48SFrançois Tigeot }
27757e252bfSMichael Neumann
27857e252bfSMichael Neumann return 0;
27957e252bfSMichael Neumann }
28057e252bfSMichael Neumann
radeon_uvd_resume(struct radeon_device * rdev)28157e252bfSMichael Neumann int radeon_uvd_resume(struct radeon_device *rdev)
28257e252bfSMichael Neumann {
28357e252bfSMichael Neumann unsigned size;
284*3f2dd94aSFrançois Tigeot void *ptr;
28557e252bfSMichael Neumann
28657e252bfSMichael Neumann if (rdev->uvd.vcpu_bo == NULL)
28757e252bfSMichael Neumann return -EINVAL;
28857e252bfSMichael Neumann
28957e252bfSMichael Neumann memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->datasize);
29057e252bfSMichael Neumann
29157e252bfSMichael Neumann size = radeon_bo_size(rdev->uvd.vcpu_bo);
29257e252bfSMichael Neumann size -= rdev->uvd_fw->datasize;
29357e252bfSMichael Neumann
29457e252bfSMichael Neumann ptr = rdev->uvd.cpu_addr;
29557e252bfSMichael Neumann ptr += rdev->uvd_fw->datasize;
29657e252bfSMichael Neumann
29757e252bfSMichael Neumann memset(ptr, 0, size);
29857e252bfSMichael Neumann
299f43cf1b1SMichael Neumann return 0;
300f43cf1b1SMichael Neumann }
301f43cf1b1SMichael Neumann
radeon_uvd_force_into_uvd_segment(struct radeon_bo * rbo,uint32_t allowed_domains)302591d5043SFrançois Tigeot void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
303591d5043SFrançois Tigeot uint32_t allowed_domains)
304f43cf1b1SMichael Neumann {
305591d5043SFrançois Tigeot int i;
306591d5043SFrançois Tigeot
307591d5043SFrançois Tigeot for (i = 0; i < rbo->placement.num_placement; ++i) {
308591d5043SFrançois Tigeot rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
309591d5043SFrançois Tigeot rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
310591d5043SFrançois Tigeot }
311591d5043SFrançois Tigeot
312591d5043SFrançois Tigeot /* If it must be in VRAM it must be in the first segment as well */
313591d5043SFrançois Tigeot if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
314591d5043SFrançois Tigeot return;
315591d5043SFrançois Tigeot
316591d5043SFrançois Tigeot /* abort if we already have more than one placement */
317591d5043SFrançois Tigeot if (rbo->placement.num_placement > 1)
318591d5043SFrançois Tigeot return;
319591d5043SFrançois Tigeot
320591d5043SFrançois Tigeot /* add another 256MB segment */
321591d5043SFrançois Tigeot rbo->placements[1] = rbo->placements[0];
322591d5043SFrançois Tigeot rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
323591d5043SFrançois Tigeot rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
324591d5043SFrançois Tigeot rbo->placement.num_placement++;
325591d5043SFrançois Tigeot rbo->placement.num_busy_placement++;
326f43cf1b1SMichael Neumann }
327f43cf1b1SMichael Neumann
radeon_uvd_free_handles(struct radeon_device * rdev,struct drm_file * filp)328f43cf1b1SMichael Neumann void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
329f43cf1b1SMichael Neumann {
330f43cf1b1SMichael Neumann int i, r;
331d78d3a22SFrançois Tigeot for (i = 0; i < rdev->uvd.max_handles; ++i) {
332f43cf1b1SMichael Neumann uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
33357e252bfSMichael Neumann if (handle != 0 && rdev->uvd.filp[i] == filp) {
334f43cf1b1SMichael Neumann struct radeon_fence *fence;
335f43cf1b1SMichael Neumann
336c6f73aabSFrançois Tigeot radeon_uvd_note_usage(rdev);
337c6f73aabSFrançois Tigeot
338f43cf1b1SMichael Neumann r = radeon_uvd_get_destroy_msg(rdev,
339f43cf1b1SMichael Neumann R600_RING_TYPE_UVD_INDEX, handle, &fence);
340f43cf1b1SMichael Neumann if (r) {
341f43cf1b1SMichael Neumann DRM_ERROR("Error destroying UVD (%d)!\n", r);
342f43cf1b1SMichael Neumann continue;
343f43cf1b1SMichael Neumann }
344f43cf1b1SMichael Neumann
345f43cf1b1SMichael Neumann radeon_fence_wait(fence, false);
346f43cf1b1SMichael Neumann radeon_fence_unref(&fence);
347f43cf1b1SMichael Neumann
348f43cf1b1SMichael Neumann rdev->uvd.filp[i] = NULL;
349f43cf1b1SMichael Neumann atomic_set(&rdev->uvd.handles[i], 0);
350f43cf1b1SMichael Neumann }
351f43cf1b1SMichael Neumann }
352f43cf1b1SMichael Neumann }
353f43cf1b1SMichael Neumann
radeon_uvd_cs_msg_decode(uint32_t * msg,unsigned buf_sizes[])354f43cf1b1SMichael Neumann static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
355f43cf1b1SMichael Neumann {
356f43cf1b1SMichael Neumann unsigned stream_type = msg[4];
357f43cf1b1SMichael Neumann unsigned width = msg[6];
358f43cf1b1SMichael Neumann unsigned height = msg[7];
359f43cf1b1SMichael Neumann unsigned dpb_size = msg[9];
360f43cf1b1SMichael Neumann unsigned pitch = msg[28];
361f43cf1b1SMichael Neumann
362f43cf1b1SMichael Neumann unsigned width_in_mb = width / 16;
363f43cf1b1SMichael Neumann unsigned height_in_mb = ALIGN(height / 16, 2);
364f43cf1b1SMichael Neumann
365f43cf1b1SMichael Neumann unsigned image_size, tmp, min_dpb_size;
366f43cf1b1SMichael Neumann
367f43cf1b1SMichael Neumann image_size = width * height;
368f43cf1b1SMichael Neumann image_size += image_size / 2;
369f43cf1b1SMichael Neumann image_size = ALIGN(image_size, 1024);
370f43cf1b1SMichael Neumann
371f43cf1b1SMichael Neumann switch (stream_type) {
372f43cf1b1SMichael Neumann case 0: /* H264 */
373f43cf1b1SMichael Neumann
374f43cf1b1SMichael Neumann /* reference picture buffer */
375f43cf1b1SMichael Neumann min_dpb_size = image_size * 17;
376f43cf1b1SMichael Neumann
377f43cf1b1SMichael Neumann /* macroblock context buffer */
378f43cf1b1SMichael Neumann min_dpb_size += width_in_mb * height_in_mb * 17 * 192;
379f43cf1b1SMichael Neumann
380f43cf1b1SMichael Neumann /* IT surface buffer */
381f43cf1b1SMichael Neumann min_dpb_size += width_in_mb * height_in_mb * 32;
382f43cf1b1SMichael Neumann break;
383f43cf1b1SMichael Neumann
384f43cf1b1SMichael Neumann case 1: /* VC1 */
385f43cf1b1SMichael Neumann
386f43cf1b1SMichael Neumann /* reference picture buffer */
387f43cf1b1SMichael Neumann min_dpb_size = image_size * 3;
388f43cf1b1SMichael Neumann
389f43cf1b1SMichael Neumann /* CONTEXT_BUFFER */
390f43cf1b1SMichael Neumann min_dpb_size += width_in_mb * height_in_mb * 128;
391f43cf1b1SMichael Neumann
392f43cf1b1SMichael Neumann /* IT surface buffer */
393f43cf1b1SMichael Neumann min_dpb_size += width_in_mb * 64;
394f43cf1b1SMichael Neumann
395f43cf1b1SMichael Neumann /* DB surface buffer */
396f43cf1b1SMichael Neumann min_dpb_size += width_in_mb * 128;
397f43cf1b1SMichael Neumann
398f43cf1b1SMichael Neumann /* BP */
399f43cf1b1SMichael Neumann tmp = max(width_in_mb, height_in_mb);
400f43cf1b1SMichael Neumann min_dpb_size += ALIGN(tmp * 7 * 16, 64);
401f43cf1b1SMichael Neumann break;
402f43cf1b1SMichael Neumann
403f43cf1b1SMichael Neumann case 3: /* MPEG2 */
404f43cf1b1SMichael Neumann
405f43cf1b1SMichael Neumann /* reference picture buffer */
406f43cf1b1SMichael Neumann min_dpb_size = image_size * 3;
407f43cf1b1SMichael Neumann break;
408f43cf1b1SMichael Neumann
409f43cf1b1SMichael Neumann case 4: /* MPEG4 */
410f43cf1b1SMichael Neumann
411f43cf1b1SMichael Neumann /* reference picture buffer */
412f43cf1b1SMichael Neumann min_dpb_size = image_size * 3;
413f43cf1b1SMichael Neumann
414f43cf1b1SMichael Neumann /* CM */
415f43cf1b1SMichael Neumann min_dpb_size += width_in_mb * height_in_mb * 64;
416f43cf1b1SMichael Neumann
417f43cf1b1SMichael Neumann /* IT surface buffer */
418f43cf1b1SMichael Neumann min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
419f43cf1b1SMichael Neumann break;
420f43cf1b1SMichael Neumann
421f43cf1b1SMichael Neumann default:
422f43cf1b1SMichael Neumann DRM_ERROR("UVD codec not handled %d!\n", stream_type);
423f43cf1b1SMichael Neumann return -EINVAL;
424f43cf1b1SMichael Neumann }
425f43cf1b1SMichael Neumann
426f43cf1b1SMichael Neumann if (width > pitch) {
427f43cf1b1SMichael Neumann DRM_ERROR("Invalid UVD decoding target pitch!\n");
428f43cf1b1SMichael Neumann return -EINVAL;
429f43cf1b1SMichael Neumann }
430f43cf1b1SMichael Neumann
431f43cf1b1SMichael Neumann if (dpb_size < min_dpb_size) {
432f43cf1b1SMichael Neumann DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
433f43cf1b1SMichael Neumann dpb_size, min_dpb_size);
434f43cf1b1SMichael Neumann return -EINVAL;
435f43cf1b1SMichael Neumann }
436f43cf1b1SMichael Neumann
437f43cf1b1SMichael Neumann buf_sizes[0x1] = dpb_size;
438f43cf1b1SMichael Neumann buf_sizes[0x2] = image_size;
439f43cf1b1SMichael Neumann return 0;
440f43cf1b1SMichael Neumann }
441f43cf1b1SMichael Neumann
radeon_uvd_validate_codec(struct radeon_cs_parser * p,unsigned stream_type)442c59a5c48SFrançois Tigeot static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
443c59a5c48SFrançois Tigeot unsigned stream_type)
444c59a5c48SFrançois Tigeot {
445c59a5c48SFrançois Tigeot switch (stream_type) {
446c59a5c48SFrançois Tigeot case 0: /* H264 */
447c59a5c48SFrançois Tigeot case 1: /* VC1 */
448c59a5c48SFrançois Tigeot /* always supported */
449c59a5c48SFrançois Tigeot return 0;
450c59a5c48SFrançois Tigeot
451c59a5c48SFrançois Tigeot case 3: /* MPEG2 */
452c59a5c48SFrançois Tigeot case 4: /* MPEG4 */
453c59a5c48SFrançois Tigeot /* only since UVD 3 */
454c59a5c48SFrançois Tigeot if (p->rdev->family >= CHIP_PALM)
455c59a5c48SFrançois Tigeot return 0;
456c59a5c48SFrançois Tigeot
457c59a5c48SFrançois Tigeot /* fall through */
458c59a5c48SFrançois Tigeot default:
459c59a5c48SFrançois Tigeot DRM_ERROR("UVD codec not supported by hardware %d!\n",
460c59a5c48SFrançois Tigeot stream_type);
461c59a5c48SFrançois Tigeot return -EINVAL;
462c59a5c48SFrançois Tigeot }
463c59a5c48SFrançois Tigeot }
464c59a5c48SFrançois Tigeot
radeon_uvd_cs_msg(struct radeon_cs_parser * p,struct radeon_bo * bo,unsigned offset,unsigned buf_sizes[])465f43cf1b1SMichael Neumann static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
466f43cf1b1SMichael Neumann unsigned offset, unsigned buf_sizes[])
467f43cf1b1SMichael Neumann {
468f43cf1b1SMichael Neumann int32_t *msg, msg_type, handle;
4694cd92098Szrj unsigned img_size = 0;
4706559babbSFrançois Tigeot struct dma_fence *f;
471f43cf1b1SMichael Neumann void *ptr;
472f43cf1b1SMichael Neumann
473f43cf1b1SMichael Neumann int i, r;
474f43cf1b1SMichael Neumann
475f43cf1b1SMichael Neumann if (offset & 0x3F) {
476f43cf1b1SMichael Neumann DRM_ERROR("UVD messages must be 64 byte aligned!\n");
477f43cf1b1SMichael Neumann return -EINVAL;
478f43cf1b1SMichael Neumann }
479f43cf1b1SMichael Neumann
4801cfef1a5SFrançois Tigeot f = reservation_object_get_excl(bo->tbo.resv);
4811cfef1a5SFrançois Tigeot if (f) {
4821cfef1a5SFrançois Tigeot r = radeon_fence_wait((struct radeon_fence *)f, false);
48357e252bfSMichael Neumann if (r) {
48457e252bfSMichael Neumann DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
485f43cf1b1SMichael Neumann return r;
48657e252bfSMichael Neumann }
48757e252bfSMichael Neumann }
48857e252bfSMichael Neumann
48957e252bfSMichael Neumann r = radeon_bo_kmap(bo, &ptr);
49057e252bfSMichael Neumann if (r) {
49157e252bfSMichael Neumann DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
49257e252bfSMichael Neumann return r;
49357e252bfSMichael Neumann }
494f43cf1b1SMichael Neumann
495*3f2dd94aSFrançois Tigeot msg = ptr + offset;
496f43cf1b1SMichael Neumann
497f43cf1b1SMichael Neumann msg_type = msg[1];
498f43cf1b1SMichael Neumann handle = msg[2];
499f43cf1b1SMichael Neumann
500f43cf1b1SMichael Neumann if (handle == 0) {
501f43cf1b1SMichael Neumann DRM_ERROR("Invalid UVD handle!\n");
502f43cf1b1SMichael Neumann return -EINVAL;
503f43cf1b1SMichael Neumann }
504f43cf1b1SMichael Neumann
505c59a5c48SFrançois Tigeot switch (msg_type) {
506c59a5c48SFrançois Tigeot case 0:
507c59a5c48SFrançois Tigeot /* it's a create msg, calc image size (width * height) */
508c59a5c48SFrançois Tigeot img_size = msg[7] * msg[8];
509c59a5c48SFrançois Tigeot
510c59a5c48SFrançois Tigeot r = radeon_uvd_validate_codec(p, msg[4]);
511f43cf1b1SMichael Neumann radeon_bo_kunmap(bo);
512f43cf1b1SMichael Neumann if (r)
513f43cf1b1SMichael Neumann return r;
514f43cf1b1SMichael Neumann
515c59a5c48SFrançois Tigeot /* try to alloc a new handle */
516d78d3a22SFrançois Tigeot for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
517c59a5c48SFrançois Tigeot if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
518c59a5c48SFrançois Tigeot DRM_ERROR("Handle 0x%x already in use!\n", handle);
51957e252bfSMichael Neumann return -EINVAL;
52057e252bfSMichael Neumann }
52157e252bfSMichael Neumann
522f43cf1b1SMichael Neumann if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
523f43cf1b1SMichael Neumann p->rdev->uvd.filp[i] = p->filp;
5244cd92098Szrj p->rdev->uvd.img_size[i] = img_size;
525f43cf1b1SMichael Neumann return 0;
526f43cf1b1SMichael Neumann }
527f43cf1b1SMichael Neumann }
528f43cf1b1SMichael Neumann
529f43cf1b1SMichael Neumann DRM_ERROR("No more free UVD handles!\n");
530f43cf1b1SMichael Neumann return -EINVAL;
531c59a5c48SFrançois Tigeot
532c59a5c48SFrançois Tigeot case 1:
533c59a5c48SFrançois Tigeot /* it's a decode msg, validate codec and calc buffer sizes */
534c59a5c48SFrançois Tigeot r = radeon_uvd_validate_codec(p, msg[4]);
535c59a5c48SFrançois Tigeot if (!r)
536c59a5c48SFrançois Tigeot r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
537c59a5c48SFrançois Tigeot radeon_bo_kunmap(bo);
538c59a5c48SFrançois Tigeot if (r)
539c59a5c48SFrançois Tigeot return r;
540c59a5c48SFrançois Tigeot
541c59a5c48SFrançois Tigeot /* validate the handle */
542d78d3a22SFrançois Tigeot for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
543c59a5c48SFrançois Tigeot if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
544c59a5c48SFrançois Tigeot if (p->rdev->uvd.filp[i] != p->filp) {
545c59a5c48SFrançois Tigeot DRM_ERROR("UVD handle collision detected!\n");
546c59a5c48SFrançois Tigeot return -EINVAL;
547c59a5c48SFrançois Tigeot }
548c59a5c48SFrançois Tigeot return 0;
549c59a5c48SFrançois Tigeot }
550c59a5c48SFrançois Tigeot }
551c59a5c48SFrançois Tigeot
552c59a5c48SFrançois Tigeot DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
553c59a5c48SFrançois Tigeot return -ENOENT;
554c59a5c48SFrançois Tigeot
555c59a5c48SFrançois Tigeot case 2:
556c59a5c48SFrançois Tigeot /* it's a destroy msg, free the handle */
557d78d3a22SFrançois Tigeot for (i = 0; i < p->rdev->uvd.max_handles; ++i)
558c59a5c48SFrançois Tigeot atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
559c59a5c48SFrançois Tigeot radeon_bo_kunmap(bo);
560c59a5c48SFrançois Tigeot return 0;
561c59a5c48SFrançois Tigeot
562c59a5c48SFrançois Tigeot default:
563c59a5c48SFrançois Tigeot
564c59a5c48SFrançois Tigeot DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
565c59a5c48SFrançois Tigeot return -EINVAL;
566c59a5c48SFrançois Tigeot }
567c59a5c48SFrançois Tigeot
568c59a5c48SFrançois Tigeot BUG();
569c59a5c48SFrançois Tigeot return -EINVAL;
570f43cf1b1SMichael Neumann }
571f43cf1b1SMichael Neumann
radeon_uvd_cs_reloc(struct radeon_cs_parser * p,int data0,int data1,unsigned buf_sizes[],bool * has_msg_cmd)572f43cf1b1SMichael Neumann static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
573f43cf1b1SMichael Neumann int data0, int data1,
57457e252bfSMichael Neumann unsigned buf_sizes[], bool *has_msg_cmd)
575f43cf1b1SMichael Neumann {
576f43cf1b1SMichael Neumann struct radeon_cs_chunk *relocs_chunk;
5777dcf36dcSFrançois Tigeot struct radeon_bo_list *reloc;
578f43cf1b1SMichael Neumann unsigned idx, cmd, offset;
579f43cf1b1SMichael Neumann uint64_t start, end;
580f43cf1b1SMichael Neumann int r;
581f43cf1b1SMichael Neumann
5827dcf36dcSFrançois Tigeot relocs_chunk = p->chunk_relocs;
583f43cf1b1SMichael Neumann offset = radeon_get_ib_value(p, data0);
584f43cf1b1SMichael Neumann idx = radeon_get_ib_value(p, data1);
585f43cf1b1SMichael Neumann if (idx >= relocs_chunk->length_dw) {
586f43cf1b1SMichael Neumann DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
587f43cf1b1SMichael Neumann idx, relocs_chunk->length_dw);
588f43cf1b1SMichael Neumann return -EINVAL;
589f43cf1b1SMichael Neumann }
590f43cf1b1SMichael Neumann
5917dcf36dcSFrançois Tigeot reloc = &p->relocs[(idx / 4)];
592c6f73aabSFrançois Tigeot start = reloc->gpu_offset;
593f43cf1b1SMichael Neumann end = start + radeon_bo_size(reloc->robj);
594f43cf1b1SMichael Neumann start += offset;
595f43cf1b1SMichael Neumann
596f43cf1b1SMichael Neumann p->ib.ptr[data0] = start & 0xFFFFFFFF;
597f43cf1b1SMichael Neumann p->ib.ptr[data1] = start >> 32;
598f43cf1b1SMichael Neumann
599f43cf1b1SMichael Neumann cmd = radeon_get_ib_value(p, p->idx) >> 1;
600f43cf1b1SMichael Neumann
601f43cf1b1SMichael Neumann if (cmd < 0x4) {
602c6f73aabSFrançois Tigeot if (end <= start) {
603c6f73aabSFrançois Tigeot DRM_ERROR("invalid reloc offset %X!\n", offset);
604c6f73aabSFrançois Tigeot return -EINVAL;
605c6f73aabSFrançois Tigeot }
606f43cf1b1SMichael Neumann if ((end - start) < buf_sizes[cmd]) {
60757e252bfSMichael Neumann DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
608f43cf1b1SMichael Neumann (unsigned)(end - start), buf_sizes[cmd]);
609f43cf1b1SMichael Neumann return -EINVAL;
610f43cf1b1SMichael Neumann }
611f43cf1b1SMichael Neumann
612f43cf1b1SMichael Neumann } else if (cmd != 0x100) {
613f43cf1b1SMichael Neumann DRM_ERROR("invalid UVD command %X!\n", cmd);
614f43cf1b1SMichael Neumann return -EINVAL;
615f43cf1b1SMichael Neumann }
616f43cf1b1SMichael Neumann
617c6f73aabSFrançois Tigeot if ((start >> 28) != ((end - 1) >> 28)) {
618f43cf1b1SMichael Neumann DRM_ERROR("reloc %lX-%lX crossing 256MB boundary!\n",
619f43cf1b1SMichael Neumann start, end);
620f43cf1b1SMichael Neumann return -EINVAL;
621f43cf1b1SMichael Neumann }
622f43cf1b1SMichael Neumann
623f43cf1b1SMichael Neumann /* TODO: is this still necessary on NI+ ? */
624f43cf1b1SMichael Neumann if ((cmd == 0 || cmd == 0x3) &&
625f43cf1b1SMichael Neumann (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
626f43cf1b1SMichael Neumann DRM_ERROR("msg/fb buffer %lX-%lX out of 256MB segment!\n",
627f43cf1b1SMichael Neumann start, end);
628f43cf1b1SMichael Neumann return -EINVAL;
629f43cf1b1SMichael Neumann }
630f43cf1b1SMichael Neumann
631f43cf1b1SMichael Neumann if (cmd == 0) {
63257e252bfSMichael Neumann if (*has_msg_cmd) {
63357e252bfSMichael Neumann DRM_ERROR("More than one message in a UVD-IB!\n");
63457e252bfSMichael Neumann return -EINVAL;
63557e252bfSMichael Neumann }
63657e252bfSMichael Neumann *has_msg_cmd = true;
637f43cf1b1SMichael Neumann r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
638f43cf1b1SMichael Neumann if (r)
639f43cf1b1SMichael Neumann return r;
64057e252bfSMichael Neumann } else if (!*has_msg_cmd) {
64157e252bfSMichael Neumann DRM_ERROR("Message needed before other commands are send!\n");
64257e252bfSMichael Neumann return -EINVAL;
643f43cf1b1SMichael Neumann }
644f43cf1b1SMichael Neumann
645f43cf1b1SMichael Neumann return 0;
646f43cf1b1SMichael Neumann }
647f43cf1b1SMichael Neumann
/*
 * Walk the registers written by a type-0 packet. Writes to the
 * DATA0/DATA1 registers are only remembered (their IB positions), a
 * write to the CMD register triggers relocation patching; any register
 * other than the known UVD set fails the parse.
 */
static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int *data0, int *data1,
			     unsigned buf_sizes[],
			     bool *has_msg_cmd)
{
	unsigned reg;
	int idx, ret;

	p->idx++;
	for (idx = 0; idx <= pkt->count; ++idx) {
		reg = pkt->reg + idx * 4;

		if (reg == UVD_GPCOM_VCPU_DATA0) {
			/* remember where the low address dword lives */
			*data0 = p->idx;
		} else if (reg == UVD_GPCOM_VCPU_DATA1) {
			/* remember where the high address dword lives */
			*data1 = p->idx;
		} else if (reg == UVD_GPCOM_VCPU_CMD) {
			ret = radeon_uvd_cs_reloc(p, *data0, *data1,
						  buf_sizes, has_msg_cmd);
			if (ret)
				return ret;
		} else if (reg != UVD_ENGINE_CNTL && reg != UVD_NO_OP) {
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		p->idx++;
	}
	return 0;
}
683f43cf1b1SMichael Neumann
radeon_uvd_cs_parse(struct radeon_cs_parser * p)684f43cf1b1SMichael Neumann int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
685f43cf1b1SMichael Neumann {
686f43cf1b1SMichael Neumann struct radeon_cs_packet pkt;
687f43cf1b1SMichael Neumann int r, data0 = 0, data1 = 0;
688f43cf1b1SMichael Neumann
68957e252bfSMichael Neumann /* does the IB has a msg command */
69057e252bfSMichael Neumann bool has_msg_cmd = false;
69157e252bfSMichael Neumann
692f43cf1b1SMichael Neumann /* minimum buffer sizes */
693f43cf1b1SMichael Neumann unsigned buf_sizes[] = {
694f43cf1b1SMichael Neumann [0x00000000] = 2048,
695f43cf1b1SMichael Neumann [0x00000001] = 32 * 1024 * 1024,
696f43cf1b1SMichael Neumann [0x00000002] = 2048 * 1152 * 3,
697f43cf1b1SMichael Neumann [0x00000003] = 2048,
698f43cf1b1SMichael Neumann };
699f43cf1b1SMichael Neumann
7007dcf36dcSFrançois Tigeot if (p->chunk_ib->length_dw % 16) {
701f43cf1b1SMichael Neumann DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
7027dcf36dcSFrançois Tigeot p->chunk_ib->length_dw);
703f43cf1b1SMichael Neumann return -EINVAL;
704f43cf1b1SMichael Neumann }
705f43cf1b1SMichael Neumann
7067dcf36dcSFrançois Tigeot if (p->chunk_relocs == NULL) {
707f43cf1b1SMichael Neumann DRM_ERROR("No relocation chunk !\n");
708f43cf1b1SMichael Neumann return -EINVAL;
709f43cf1b1SMichael Neumann }
710f43cf1b1SMichael Neumann
711f43cf1b1SMichael Neumann
712f43cf1b1SMichael Neumann do {
713f43cf1b1SMichael Neumann r = radeon_cs_packet_parse(p, &pkt, p->idx);
714f43cf1b1SMichael Neumann if (r)
715f43cf1b1SMichael Neumann return r;
716f43cf1b1SMichael Neumann switch (pkt.type) {
717f43cf1b1SMichael Neumann case RADEON_PACKET_TYPE0:
71857e252bfSMichael Neumann r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
71957e252bfSMichael Neumann buf_sizes, &has_msg_cmd);
720f43cf1b1SMichael Neumann if (r)
721f43cf1b1SMichael Neumann return r;
722f43cf1b1SMichael Neumann break;
723f43cf1b1SMichael Neumann case RADEON_PACKET_TYPE2:
724f43cf1b1SMichael Neumann p->idx += pkt.count + 2;
725f43cf1b1SMichael Neumann break;
726f43cf1b1SMichael Neumann default:
727f43cf1b1SMichael Neumann DRM_ERROR("Unknown packet type %d !\n", pkt.type);
728f43cf1b1SMichael Neumann return -EINVAL;
729f43cf1b1SMichael Neumann }
7307dcf36dcSFrançois Tigeot } while (p->idx < p->chunk_ib->length_dw);
73157e252bfSMichael Neumann
73257e252bfSMichael Neumann if (!has_msg_cmd) {
73357e252bfSMichael Neumann DRM_ERROR("UVD-IBs need a msg command!\n");
73457e252bfSMichael Neumann return -EINVAL;
73557e252bfSMichael Neumann }
73657e252bfSMichael Neumann
737f43cf1b1SMichael Neumann return 0;
738f43cf1b1SMichael Neumann }
739f43cf1b1SMichael Neumann
/*
 * radeon_uvd_send_msg - submit a UVD message buffer to the VCPU
 *
 * @rdev: radeon_device pointer
 * @ring: index of the ring to submit the IB on
 * @addr: GPU address of the message buffer
 * @fence: optional, receives a reference to the IB's fence when non-NULL
 *
 * Builds a fixed 16-dword IB that writes the message address into
 * UVD_GPCOM_VCPU_DATA0/1 and a zero into UVD_GPCOM_VCPU_CMD, padded
 * with NO_OP register writes, then schedules it.
 * Returns 0 on success or a negative error code.
 */
static int radeon_uvd_send_msg(struct radeon_device *rdev,
			       int ring, uint64_t addr,
			       struct radeon_fence **fence)
{
	struct radeon_ib ib;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
	if (r)
		return r;

	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
	ib.ptr[1] = addr;
	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
	ib.ptr[3] = addr >> 32;
	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
	ib.ptr[5] = 0;
	/* pad the IB to 16 dwords with harmless NO_OP register writes */
	for (i = 6; i < 16; i += 2) {
		ib.ptr[i] = PACKET0(UVD_NO_OP, 0);
		ib.ptr[i+1] = 0;
	}
	ib.length_dw = 16;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);

	/* the fence reference is handed out even on scheduling failure */
	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);
	return r;
}
771f43cf1b1SMichael Neumann
/*
 * multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this
 */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	/* the UVD message lives in the last page of the vcpu bo */
	uint64_t msg_offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
		RADEON_GPU_PAGE_SIZE;
	uint32_t *msg = rdev->uvd.cpu_addr + msg_offs;
	uint64_t addr = rdev->uvd.gpu_addr + msg_offs;
	int ret, idx;

	ret = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
	if (ret)
		return ret;

	/* hand-craft a UVD create message for this stream handle */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	/* zero out the remainder of the message area */
	for (idx = 11; idx < 1024; ++idx)
		msg[idx] = cpu_to_le32(0x0);

	ret = radeon_uvd_send_msg(rdev, ring, addr, fence);
	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	return ret;
}
812f43cf1b1SMichael Neumann
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	/* the UVD message lives in the last page of the vcpu bo */
	uint64_t msg_offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
		RADEON_GPU_PAGE_SIZE;
	uint32_t *msg = rdev->uvd.cpu_addr + msg_offs;
	uint64_t addr = rdev->uvd.gpu_addr + msg_offs;
	int ret, idx;

	ret = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
	if (ret)
		return ret;

	/* hand-craft a UVD destroy message for this stream handle */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	/* zero out the remainder of the message area */
	for (idx = 4; idx < 1024; ++idx)
		msg[idx] = cpu_to_le32(0x0);

	ret = radeon_uvd_send_msg(rdev, ring, addr, fence);
	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	return ret;
}
841f43cf1b1SMichael Neumann
/**
 * radeon_uvd_count_handles - count number of open streams
 *
 * @rdev: radeon_device pointer
 * @sd: number of SD streams
 * @hd: number of HD streams
 *
 * Count the number of open SD/HD streams as a hint for power management
 */
radeon_uvd_count_handles(struct radeon_device * rdev,unsigned * sd,unsigned * hd)8514cd92098Szrj static void radeon_uvd_count_handles(struct radeon_device *rdev,
8524cd92098Szrj unsigned *sd, unsigned *hd)
8534cd92098Szrj {
8544cd92098Szrj unsigned i;
8554cd92098Szrj
8564cd92098Szrj *sd = 0;
8574cd92098Szrj *hd = 0;
8584cd92098Szrj
859d78d3a22SFrançois Tigeot for (i = 0; i < rdev->uvd.max_handles; ++i) {
8604cd92098Szrj if (!atomic_read(&rdev->uvd.handles[i]))
8614cd92098Szrj continue;
8624cd92098Szrj
8634cd92098Szrj if (rdev->uvd.img_size[i] >= 720*576)
8644cd92098Szrj ++(*hd);
8654cd92098Szrj else
8664cd92098Szrj ++(*sd);
8674cd92098Szrj }
8684cd92098Szrj }
8694cd92098Szrj
radeon_uvd_idle_work_handler(struct work_struct * work)870f43cf1b1SMichael Neumann static void radeon_uvd_idle_work_handler(struct work_struct *work)
871f43cf1b1SMichael Neumann {
872f43cf1b1SMichael Neumann struct radeon_device *rdev =
873f43cf1b1SMichael Neumann container_of(work, struct radeon_device, uvd.idle_work.work);
874f43cf1b1SMichael Neumann
87557e252bfSMichael Neumann if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
87657e252bfSMichael Neumann if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
877c6f73aabSFrançois Tigeot radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
878c6f73aabSFrançois Tigeot &rdev->pm.dpm.hd);
8794cd92098Szrj radeon_dpm_enable_uvd(rdev, false);
88057e252bfSMichael Neumann } else {
881f43cf1b1SMichael Neumann radeon_set_uvd_clocks(rdev, 0, 0);
88257e252bfSMichael Neumann }
88357e252bfSMichael Neumann } else {
884f43cf1b1SMichael Neumann schedule_delayed_work(&rdev->uvd.idle_work,
885f43cf1b1SMichael Neumann msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
886f43cf1b1SMichael Neumann }
88757e252bfSMichael Neumann }
888f43cf1b1SMichael Neumann
/*
 * radeon_uvd_note_usage - note that UVD is about to be used
 *
 * @rdev: radeon_device pointer
 *
 * (Re)arms the idle work handler and, if the block was considered idle
 * before this call, raises the UVD clocks or asks DPM to power the
 * block up. Also refreshes the SD/HD stream counts used by DPM.
 */
void radeon_uvd_note_usage(struct radeon_device *rdev)
{
	bool streams_changed = false;
	/* no idle work was pending -> UVD was idle, clocks need raising */
	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		unsigned hd = 0, sd = 0;
		radeon_uvd_count_handles(rdev, &sd, &hd);
		if ((rdev->pm.dpm.sd != sd) ||
		    (rdev->pm.dpm.hd != hd)) {
			rdev->pm.dpm.sd = sd;
			rdev->pm.dpm.hd = hd;
			/* disable this for now */
			/*streams_changed = true;*/
		}
	}

	if (set_clocks || streams_changed) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_uvd(rdev, true);
		} else {
			radeon_set_uvd_clocks(rdev, 53300, 40000);
		}
	}
}
916f43cf1b1SMichael Neumann
/*
 * radeon_uvd_calc_upll_post_div - pick a UPLL post divider
 *
 * @vco_freq: VCO frequency
 * @target_freq: wanted output frequency
 * @pd_min: smallest allowed post divider
 * @pd_even: post dividers above this value must be even
 *
 * Returns the smallest divider >= pd_min that yields a frequency no
 * higher than target_freq, rounded up to even where required.
 * NOTE(review): assumes pd_min >= 1 and target_freq > 0 — callers
 * appear to guarantee this; confirm before reuse.
 */
static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
					      unsigned target_freq,
					      unsigned pd_min,
					      unsigned pd_even)
{
	unsigned div = vco_freq / target_freq;

	/* respect the post divider minimum */
	if (div < pd_min)
		div = pd_min;

	/* never exceed the target frequency */
	if (vco_freq / div > target_freq)
		++div;

	/* large post dividers must be even */
	if (div > pd_even && (div & 1))
		++div;

	return div;
}
938f43cf1b1SMichael Neumann
939f43cf1b1SMichael Neumann /**
940f43cf1b1SMichael Neumann * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
941f43cf1b1SMichael Neumann *
942f43cf1b1SMichael Neumann * @rdev: radeon_device pointer
943f43cf1b1SMichael Neumann * @vclk: wanted VCLK
944f43cf1b1SMichael Neumann * @dclk: wanted DCLK
945f43cf1b1SMichael Neumann * @vco_min: minimum VCO frequency
946f43cf1b1SMichael Neumann * @vco_max: maximum VCO frequency
947f43cf1b1SMichael Neumann * @fb_factor: factor to multiply vco freq with
948f43cf1b1SMichael Neumann * @fb_mask: limit and bitmask for feedback divider
949f43cf1b1SMichael Neumann * @pd_min: post divider minimum
950f43cf1b1SMichael Neumann * @pd_max: post divider maximum
951f43cf1b1SMichael Neumann * @pd_even: post divider must be even above this value
952f43cf1b1SMichael Neumann * @optimal_fb_div: resulting feedback divider
953f43cf1b1SMichael Neumann * @optimal_vclk_div: resulting vclk post divider
954f43cf1b1SMichael Neumann * @optimal_dclk_div: resulting dclk post divider
955f43cf1b1SMichael Neumann *
956f43cf1b1SMichael Neumann * Calculate dividers for UVDs UPLL (R6xx-SI, except APUs).
957f43cf1b1SMichael Neumann * Returns zero on success -EINVAL on error.
958f43cf1b1SMichael Neumann */
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
				  unsigned vclk, unsigned dclk,
				  unsigned vco_min, unsigned vco_max,
				  unsigned fb_factor, unsigned fb_mask,
				  unsigned pd_min, unsigned pd_max,
				  unsigned pd_even,
				  unsigned *optimal_fb_div,
				  unsigned *optimal_vclk_div,
				  unsigned *optimal_dclk_div)
{
	unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;

	/* start off with something large so any valid setup beats it */
	unsigned optimal_score = ~0;

	/* loop through vco from low to high; the vco must be at least as
	 * fast as the fastest requested clock */
	vco_min = max(max(vco_min, vclk), dclk);
	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {

		/* feedback divider = vco / ref, scaled by fb_factor;
		 * do_div divides fb_div in place */
		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
		unsigned vclk_div, dclk_div, score;

		do_div(fb_div, ref_freq);

		/* fb div out of range ? */
		if (fb_div > fb_mask)
			break; /* it can only get worse at higher vco */

		fb_div &= fb_mask;

		/* calc vclk divider with current vco freq */
		vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
							 pd_min, pd_even);
		if (vclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc dclk divider with current vco freq */
		dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
							 pd_min, pd_even);
		if (dclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc score with current vco freq: sum of how far each
		 * divided clock undershoots its target (lower is better) */
		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);

		/* determine if this vco setting is better than current optimal settings */
		if (score < optimal_score) {
			*optimal_fb_div = fb_div;
			*optimal_vclk_div = vclk_div;
			*optimal_dclk_div = dclk_div;
			optimal_score = score;
			if (optimal_score == 0)
				break; /* it can't get better than this */
		}
	}

	/* did we find a valid setup ? */
	if (optimal_score == ~0)
		return -EINVAL;

	return 0;
}
1021f43cf1b1SMichael Neumann
/*
 * radeon_uvd_send_upll_ctlreq - run the UPLL control-request handshake
 *
 * @rdev: radeon_device pointer
 * @cg_upll_func_cntl: register offset of the CG_UPLL_FUNC_CNTL register
 *
 * Pulses UPLL_CTLREQ and polls (up to 100 x 10ms) for the hardware to
 * acknowledge via CTLACK and CTLACK2 before deasserting the request.
 * Returns 0 on success, -ETIMEDOUT if no acknowledge was seen.
 */
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
				unsigned cg_upll_func_cntl)
{
	unsigned i;

	/* make sure UPLL_CTLREQ is deasserted */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	mdelay(10);

	/* assert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

	/* wait for CTLACK and CTLACK2 to get asserted */
	for (i = 0; i < 100; ++i) {
		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
			break;
		mdelay(10);
	}

	/* deassert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	/* i == 100 means the poll loop above never saw both acks */
	if (i == 100) {
		DRM_ERROR("Timeout setting UVD clocks!\n");
		return -ETIMEDOUT;
	}

	return 0;
}
1053