xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/bus/nouveau_nvkm_subdev_bus_hwsq.c (revision 41ec02673d281bbb3d38e6c78504ce6e30c228c1)
1 /*	$NetBSD: nouveau_nvkm_subdev_bus_hwsq.c,v 1.3 2021/12/18 23:45:38 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2013 Red Hat Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Ben Skeggs <bskeggs@redhat.com>
25  */
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_bus_hwsq.c,v 1.3 2021/12/18 23:45:38 riastradh Exp $");
28 
29 #include "priv.h"
30 
/*
 * In-memory accumulator for a hardware sequencer (HWSQ) ucode program:
 * opcodes are appended to c.data[] and later handed to the bus backend
 * by nvkm_hwsq_fini() for execution.
 */
struct nvkm_hwsq {
	struct nvkm_subdev *subdev;	/* owning subdev, used for logging */
	u32 addr;	/* last register address encoded (~0 = none yet) */
	u32 data;	/* last data value encoded (~0 = none yet) */
	struct {
		u8 data[512];	/* encoded opcode stream */
		u16 size;	/* bytes used in data[] (words after fini) */
	} c;
};
40 
41 static void
hwsq_cmd(struct nvkm_hwsq * hwsq,int size,u8 data[])42 hwsq_cmd(struct nvkm_hwsq *hwsq, int size, u8 data[])
43 {
44 	memcpy(&hwsq->c.data[hwsq->c.size], data, size * sizeof(data[0]));
45 	hwsq->c.size += size;
46 }
47 
48 int
nvkm_hwsq_init(struct nvkm_subdev * subdev,struct nvkm_hwsq ** phwsq)49 nvkm_hwsq_init(struct nvkm_subdev *subdev, struct nvkm_hwsq **phwsq)
50 {
51 	struct nvkm_hwsq *hwsq;
52 
53 	hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
54 	if (hwsq) {
55 		hwsq->subdev = subdev;
56 		hwsq->addr = ~0;
57 		hwsq->data = ~0;
58 		memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data));
59 		hwsq->c.size = 0;
60 	}
61 
62 	return hwsq ? 0 : -ENOMEM;
63 }
64 
65 int
nvkm_hwsq_fini(struct nvkm_hwsq ** phwsq,bool exec)66 nvkm_hwsq_fini(struct nvkm_hwsq **phwsq, bool exec)
67 {
68 	struct nvkm_hwsq *hwsq = *phwsq;
69 	int ret = 0, i;
70 	if (hwsq) {
71 		struct nvkm_subdev *subdev = hwsq->subdev;
72 		struct nvkm_bus *bus = subdev->device->bus;
73 		hwsq->c.size = (hwsq->c.size + 4) / 4;
74 		if (hwsq->c.size <= bus->func->hwsq_size) {
75 			if (exec)
76 				ret = bus->func->hwsq_exec(bus,
77 							   (u32 *)hwsq->c.data,
78 								  hwsq->c.size);
79 			if (ret)
80 				nvkm_error(subdev, "hwsq exec failed: %d\n", ret);
81 		} else {
82 			nvkm_error(subdev, "hwsq ucode too large\n");
83 			ret = -ENOSPC;
84 		}
85 
86 		for (i = 0; ret && i < hwsq->c.size; i++)
87 			nvkm_error(subdev, "\t%08x\n", ((u32 *)hwsq->c.data)[i]);
88 
89 		*phwsq = NULL;
90 		kfree(hwsq);
91 	}
92 	return ret;
93 }
94 
95 void
nvkm_hwsq_wr32(struct nvkm_hwsq * hwsq,u32 addr,u32 data)96 nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data)
97 {
98 	nvkm_debug(hwsq->subdev, "R[%06x] = %08x\n", addr, data);
99 
100 	if (hwsq->data != data) {
101 		if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
102 			hwsq_cmd(hwsq, 5, (u8[]){ 0xe2, data, data >> 8,
103 						  data >> 16, data >> 24 });
104 		} else {
105 			hwsq_cmd(hwsq, 3, (u8[]){ 0x42, data, data >> 8 });
106 		}
107 	}
108 
109 	if ((addr & 0xffff0000) != (hwsq->addr & 0xffff0000)) {
110 		hwsq_cmd(hwsq, 5, (u8[]){ 0xe0, addr, addr >> 8,
111 					  addr >> 16, addr >> 24 });
112 	} else {
113 		hwsq_cmd(hwsq, 3, (u8[]){ 0x40, addr, addr >> 8 });
114 	}
115 
116 	hwsq->addr = addr;
117 	hwsq->data = data;
118 }
119 
120 void
nvkm_hwsq_setf(struct nvkm_hwsq * hwsq,u8 flag,int data)121 nvkm_hwsq_setf(struct nvkm_hwsq *hwsq, u8 flag, int data)
122 {
123 	nvkm_debug(hwsq->subdev, " FLAG[%02x] = %d\n", flag, data);
124 	flag += 0x80;
125 	if (data >= 0)
126 		flag += 0x20;
127 	if (data >= 1)
128 		flag += 0x20;
129 	hwsq_cmd(hwsq, 1, (u8[]){ flag });
130 }
131 
132 void
nvkm_hwsq_wait(struct nvkm_hwsq * hwsq,u8 flag,u8 data)133 nvkm_hwsq_wait(struct nvkm_hwsq *hwsq, u8 flag, u8 data)
134 {
135 	nvkm_debug(hwsq->subdev, " WAIT[%02x] = %d\n", flag, data);
136 	hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
137 }
138 
/*
 * Queue a wait for the vertical blanking period of the "best" active
 * display head.  If no head is active, no wait is queued at all.
 */
void
nvkm_hwsq_wait_vblank(struct nvkm_hwsq *hwsq)
{
	struct nvkm_subdev *subdev = hwsq->subdev;
	struct nvkm_device *device = subdev->device;
	u32 heads, x, y, px = 0;
	int i, head_sync;

	/* NOTE(review): 0x610050 presumably reports per-head active status
	 * (one bit per 8-bit group, tested below) -- confirm against the
	 * display register documentation.
	 */
	heads = nvkm_rd32(device, 0x610050);
	for (i = 0; i < 2; i++) {
		/* Heuristic: sync to head with biggest resolution */
		if (heads & (2 << (i << 3))) {
			/* 0x610b40/0x612080: low 16 bits = width,
			 * high 16 bits = height -- TODO confirm.
			 */
			x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
			y = (x & 0xffff0000) >> 16;
			x &= 0x0000ffff;
			if ((x * y) > px) {
				px = (x * y);
				head_sync = i;
			}
		}
	}

	/* px == 0 means no active head was found; head_sync is only read
	 * when px > 0, so it is always assigned before use despite having
	 * no initializer.
	 */
	if (px == 0) {
		nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
		return;
	}

	nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
	/* Wait for the head's flag to read 0 then 1 -- presumably so the
	 * sequencer starts at the beginning of a fresh vblank period
	 * rather than partway through one.
	 */
	nvkm_hwsq_wait(hwsq, head_sync ? 0x3 : 0x1, 0x0);
	nvkm_hwsq_wait(hwsq, head_sync ? 0x3 : 0x1, 0x1);
}
170 
/*
 * Queue a delay opcode.  The delay is encoded as a 2-bit mantissa
 * (usec, 0..3) times 4^shift microseconds, packed into a single
 * opcode byte as (shift << 2) | usec.
 *
 * NOTE(review): 'usec' is a u8, so nsec / 1000 truncates for delays
 * above 255 us, and delays under 1 us encode as zero -- inherited
 * behavior; widening it could overflow the opcode's shift field, so
 * confirm the encoding limits before changing.
 */
void
nvkm_hwsq_nsec(struct nvkm_hwsq *hwsq, u32 nsec)
{
	u8 shift = 0, usec = nsec / 1000;
	/* Normalise the mantissa into 0..3, scaling by 4 per step. */
	while (usec & ~3) {
		usec >>= 2;
		shift++;
	}

	nvkm_debug(hwsq->subdev, "    DELAY = %d ns\n", nsec);
	hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec });
}
183