/*	$NetBSD: nouveau_nvkm_engine_pm_base.c,v 1.4 2021/12/18 23:45:37 riastradh Exp $	*/

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_engine_pm_base.c,v 1.4 2021/12/18 23:45:37 riastradh Exp $");

#include "priv.h"

#include <core/client.h>
#include <core/option.h>

#include <nvif/class.h>
#include <nvif/if0002.h>
#include <nvif/if0003.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>

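/*
 * Count the performance domains currently registered on this PM engine by
 * walking the pm->domains list.
 */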
static u8
nvkm_pm_count_perfdom(struct nvkm_pm *pm)
{
	struct nvkm_perfdom *dom;
	u8 domain_nr = 0;

	list_for_each_entry(dom, &pm->domains, head)
		domain_nr++;
	return domain_nr;
}

static u16
nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
{
	u16 signal_nr = 0;
	int i;

	if (dom) {
		for (i = 0; i < dom->signal_nr; i++) {
			if (dom->signal[i].name)
				signal_nr++;
		}
	}
	return signal_nr;
}

static struct nvkm_perfdom *
nvkm_perfdom_find(struct nvkm_pm *pm, int di)
{
	struct nvkm_perfdom *dom;
	int tmp = 0;

	list_for_each_entry(dom, &pm->domains, head) {
		if (tmp++ == di)
			return dom;
	}
	return NULL;
}

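/*
 * Resolve a (domain index, signal index) pair to a signal slot.  The domain
 * lookup is cached through *pdom so repeated calls for the same domain skip
 * the list walk; unnamed (unused) signal slots yield NULL.
 */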
static struct nvkm_perfsig *
nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
{
	struct nvkm_perfdom *dom = *pdom;

	if (dom == NULL) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return NULL;
		*pdom = dom;
	}

	if (!dom->signal[si].name)
		return NULL;
	return &dom->signal[si];
}

static u8
nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
{
	u8 source_nr = 0, i;

	for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
		if (sig->source[i])
			source_nr++;
	}
	return source_nr;
}

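/*
 * Look up source id 'si' (source ids are 1-based positions in pm->sources,
 * assigned by nvkm_perfsrc_new()) and verify that the given signal actually
 * references it before returning the source descriptor.
 */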
static struct nvkm_perfsrc *
nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
{
	struct nvkm_perfsrc *src;
	bool found = false;
	int tmp = 1; /* Sources ID start from 1 */
	u8 i;

	for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
		if (sig->source[i] == si) {
			found = true;
			break;
		}
	}

	if (found) {
		list_for_each_entry(src, &pm->sources, head) {
			if (tmp++ == si)
				return src;
		}
	}

	return NULL;
}

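/*
 * Program the source multiplexers for every signal/source pair used by a
 * counter.  The low 32 bits of ctr->source[i][j] hold the source id and the
 * high 32 bits hold the mux value to write; bit 31 of the register is set as
 * an enable bit when the source requires it.
 */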
static int
nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_subdev *subdev = &pm->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask, value;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* set enable bit if needed */
			mask = value = 0x00000000;
			if (src->enable)
				mask = value = 0x80000000;
			mask  |= (src->mask << src->shift);
			value |= ((ctr->source[i][j] >> 32) << src->shift);

			/* enable the source */
			nvkm_mask(device, src->addr, mask, value);
			nvkm_debug(subdev,
				   "enabled source %08x %08x %08x\n",
				   src->addr, mask, value);
		}
	}
	return 0;
}

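/*
 * Counterpart of nvkm_perfsrc_enable(): clear the mux field (and the enable
 * bit, if any) for every source used by the counter.
 */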
static int
nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_subdev *subdev = &pm->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* unset enable bit if needed */
			mask = 0x00000000;
			if (src->enable)
				mask = 0x80000000;
			mask |= (src->mask << src->shift);

			/* disable the source */
			nvkm_mask(device, src->addr, mask, 0);
			nvkm_debug(subdev, "disabled source %08x %08x\n",
				   src->addr, mask);
		}
	}
	return 0;
}

/*******************************************************************************
 * Perfdom object classes
 ******************************************************************************/
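/*
 * NVIF_PERFDOM_V0_INIT: program the hardware counters assigned to this
 * perfdom, enable their sources and kick off the first sampling batch.
 */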
static int
nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_init none;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret = -ENOSYS, i;

	nvif_ioctl(object, "perfdom init size %d\n", size);
	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
		nvif_ioctl(object, "perfdom init\n");
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i]) {
			dom->func->init(pm, dom, dom->ctr[i]);

			/* enable sources */
			nvkm_perfsrc_enable(pm, dom->ctr[i]);
		}
	}

	/* start next batch of counters for sampling */
	dom->func->next(pm, dom);
	return 0;
}

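/*
 * NVIF_PERFDOM_V0_SAMPLE: bump the global sample sequence and latch the
 * previous batch of counters on every domain.
 */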
static int
nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_sample none;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret = -ENOSYS;

	nvif_ioctl(object, "perfdom sample size %d\n", size);
	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
		nvif_ioctl(object, "perfdom sample\n");
	} else
		return ret;
	pm->sequence++;

	/* sample previous batch of counters */
	list_for_each_entry(dom, &pm->domains, head)
		dom->func->next(pm, dom);

	return 0;
}

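/*
 * NVIF_PERFDOM_V0_READ: read back the counter values and the domain clock.
 * Returns -EAGAIN until the domain has accumulated at least one clock cycle.
 */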
static int
nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_read_v0 v0;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret = -ENOSYS, i;

	nvif_ioctl(object, "perfdom read size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i])
			dom->func->read(pm, dom, dom->ctr[i]);
	}

	if (!dom->clk)
		return -EAGAIN;

	for (i = 0; i < 4; i++)
		if (dom->ctr[i])
			args->v0.ctr[i] = dom->ctr[i]->ctr;
	args->v0.clk = dom->clk;
	return 0;
}

static int
nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nvkm_perfdom *dom = nvkm_perfdom(object);
	switch (mthd) {
	case NVIF_PERFDOM_V0_INIT:
		return nvkm_perfdom_init(dom, data, size);
	case NVIF_PERFDOM_V0_SAMPLE:
		return nvkm_perfdom_sample(dom, data, size);
	case NVIF_PERFDOM_V0_READ:
		return nvkm_perfdom_read(dom, data, size);
	default:
		break;
	}
	return -EINVAL;
}

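/*
 * Destructor: disable the sources of every counter owned by this perfdom,
 * unlink the counters from their domain and free them.
 */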
static void *
nvkm_perfdom_dtor(struct nvkm_object *object)
{
	struct nvkm_perfdom *dom = nvkm_perfdom(object);
	struct nvkm_pm *pm = dom->perfmon->pm;
	int i;

	for (i = 0; i < 4; i++) {
		struct nvkm_perfctr *ctr = dom->ctr[i];
		if (ctr) {
			nvkm_perfsrc_disable(pm, ctr);
			if (ctr->head.next)
				list_del(&ctr->head);
		}
		kfree(ctr);
	}

	return dom;
}

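/*
 * Allocate a counter in slot 'slot' of the given domain, recording the
 * signal indices and the per-signal source chains requested by the client.
 */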
static int
nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
		 struct nvkm_perfsig *signal[4], u64 source[4][8],
		 u16 logic_op, struct nvkm_perfctr **pctr)
{
	struct nvkm_perfctr *ctr;
	int i, j;

	if (!dom)
		return -EINVAL;

	ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
	if (!ctr)
		return -ENOMEM;

	ctr->domain   = domain;
	ctr->logic_op = logic_op;
	ctr->slot     = slot;
	for (i = 0; i < 4; i++) {
		if (signal[i]) {
			ctr->signal[i] = signal[i] - dom->signal;
			for (j = 0; j < 8; j++)
				ctr->source[i][j] = source[i][j];
		}
	}
	list_add_tail(&ctr->head, &dom->list);

	return 0;
}

static const struct nvkm_object_func
nvkm_perfdom = {
	.dtor = nvkm_perfdom_dtor,
	.mthd = nvkm_perfdom_mthd,
};

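/*
 * PERFDOM object constructor: validate the requested domain, signals and
 * sources, create up to four counters, then mirror the matched domain's
 * func/addr into the new object.
 */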
static int
nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
		  const struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	union {
		struct nvif_perfdom_v0 v0;
	} *args = data;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_object *parent = oclass->parent;
	struct nvkm_perfdom *sdom = NULL;
	struct nvkm_perfctr *ctr[4] = {};
	struct nvkm_perfdom *dom;
	int c, s, m;
	int ret = -ENOSYS;

	nvif_ioctl(parent, "create perfdom size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
			   args->v0.version, args->v0.domain, args->v0.mode);
	} else
		return ret;

	for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
		struct nvkm_perfsig *sig[4] = {};
		u64 src[4][8] = {};

		for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
			sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
						   args->v0.ctr[c].signal[s],
						   &sdom);
			if (args->v0.ctr[c].signal[s] && !sig[s])
				return -EINVAL;

			for (m = 0; m < 8; m++) {
				src[s][m] = args->v0.ctr[c].source[s][m];
				if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
							            src[s][m]))
					return -EINVAL;
			}
		}

		ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
				       args->v0.ctr[c].logic_op, &ctr[c]);
		if (ret)
			return ret;
	}

	if (!sdom)
		return -EINVAL;

	if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
	dom->perfmon = perfmon;
	*pobject = &dom->object;

	dom->func = sdom->func;
	dom->addr = sdom->addr;
	dom->mode = args->v0.mode;
	for (c = 0; c < ARRAY_SIZE(ctr); c++)
		dom->ctr[c] = ctr[c];
	return 0;
}

/*******************************************************************************
 * Perfmon object classes
 ******************************************************************************/
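/*
 * NVIF_PERFMON_V0_QUERY_DOMAIN: iterator-style query.  The caller passes
 * iter == 0 to start; subsequent calls return information about one domain
 * at a time along with the iter value for the next call, or 0xff once all
 * domains have been listed.
 */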
static int
nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_domain_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom;
	u8 domain_nr;
	int di, ret = -ENOSYS;

	nvif_ioctl(object, "perfmon query domain size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
			   args->v0.version, args->v0.iter);
		di = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	domain_nr = nvkm_pm_count_perfdom(pm);
	if (di >= (int)domain_nr)
		return -EINVAL;

	if (di >= 0) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return -EINVAL;

		args->v0.id         = di;
		args->v0.signal_nr  = nvkm_perfdom_count_perfsig(dom);
		strncpy(args->v0.name, dom->name, sizeof(args->v0.name) - 1);

		/* Currently only global counters (PCOUNTER) are implemented
		 * but this will be different for local counters (MP). */
		args->v0.counter_nr = 4;
	}

	if (++di < domain_nr) {
		args->v0.iter = ++di;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}

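/*
 * NVIF_PERFMON_V0_QUERY_SIGNAL: iterate over the signals of one domain.
 * Unnamed signals are skipped unless the "NvPmShowAll" config option is set;
 * "NvPmUnnamed" reports raw "/<domain>/<index>" names instead of the
 * symbolic ones.
 */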
static int
nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_signal_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_device *device = pm->engine.subdev.device;
	struct nvkm_perfdom *dom;
	struct nvkm_perfsig *sig;
	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
	int ret = -ENOSYS, si;

	nvif_ioctl(object, "perfmon query signal size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object,
			   "perfmon query signal vers %d dom %d iter %04x\n",
			   args->v0.version, args->v0.domain, args->v0.iter);
		si = (args->v0.iter & 0xffff) - 1;
	} else
		return ret;

	dom = nvkm_perfdom_find(pm, args->v0.domain);
	if (dom == NULL || si >= (int)dom->signal_nr)
		return -EINVAL;

	if (si >= 0) {
		sig = &dom->signal[si];
		if (raw || !sig->name) {
			snprintf(args->v0.name, sizeof(args->v0.name),
				 "/%s/%02x", dom->name, si);
		} else {
			strncpy(args->v0.name, sig->name,
				sizeof(args->v0.name) - 1);
		}

		args->v0.signal = si;
		args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
	}

	while (++si < dom->signal_nr) {
		if (all || dom->signal[si].name) {
			args->v0.iter = ++si;
			return 0;
		}
	}

	args->v0.iter = 0xffff;
	return 0;
}

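/*
 * NVIF_PERFMON_V0_QUERY_SOURCE: iterate over the sources that can be
 * multiplexed onto one signal, reporting each source's id, mask and name.
 */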
static int
nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_source_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;
	int si, ret = -ENOSYS;

	nvif_ioctl(object, "perfmon query source size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object,
			   "perfmon source vers %d dom %d sig %02x iter %02x\n",
			   args->v0.version, args->v0.domain, args->v0.signal,
			   args->v0.iter);
		si = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
	if (!sig)
		return -EINVAL;

	source_nr = nvkm_perfsig_count_perfsrc(sig);
	if (si >= (int)source_nr)
		return -EINVAL;

	if (si >= 0) {
		src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
		if (!src)
			return -EINVAL;

		args->v0.source = sig->source[si];
		args->v0.mask   = src->mask;
		strncpy(args->v0.name, src->name, sizeof(args->v0.name) - 1);
	}

	if (++si < source_nr) {
		args->v0.iter = ++si;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}

static int
nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
	switch (mthd) {
	case NVIF_PERFMON_V0_QUERY_DOMAIN:
		return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
	case NVIF_PERFMON_V0_QUERY_SIGNAL:
		return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
	case NVIF_PERFMON_V0_QUERY_SOURCE:
		return nvkm_perfmon_mthd_query_source(perfmon, data, size);
	default:
		break;
	}
	return -EINVAL;
}

static int
nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
	return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
}

static int
nvkm_perfmon_child_get(struct nvkm_object *object, int index,
		       struct nvkm_oclass *oclass)
{
	if (index == 0) {
		oclass->base.oclass = NVIF_CLASS_PERFDOM;
		oclass->base.minver = 0;
		oclass->base.maxver = 0;
		oclass->ctor = nvkm_perfmon_child_new;
		return 0;
	}
	return -EINVAL;
}

static void *
nvkm_perfmon_dtor(struct nvkm_object *object)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
	struct nvkm_pm *pm = perfmon->pm;
	mutex_lock(&pm->engine.subdev.mutex);
	if (pm->perfmon == &perfmon->object)
		pm->perfmon = NULL;
	mutex_unlock(&pm->engine.subdev.mutex);
	return perfmon;
}

static const struct nvkm_object_func
nvkm_perfmon = {
	.dtor = nvkm_perfmon_dtor,
	.mthd = nvkm_perfmon_mthd,
	.sclass = nvkm_perfmon_child_get,
};

static int
nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
		 void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_perfmon *perfmon;

	if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
	perfmon->pm = pm;
	*pobject = &perfmon->object;
	return 0;
}

/*******************************************************************************
 * PPM engine/subdev functions
 ******************************************************************************/

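/*
 * Create the PERFMON object for a client.  Only one perfmon may own the PM
 * engine at a time; a second client gets -EBUSY until the first one is
 * destroyed.
 */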
static int
nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
		   void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_pm *pm = nvkm_pm(oclass->engine);
	int ret;

	ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
	if (ret)
		return ret;

	mutex_lock(&pm->engine.subdev.mutex);
	if (pm->perfmon == NULL)
		pm->perfmon = *pobject;
	ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
	mutex_unlock(&pm->engine.subdev.mutex);
	return ret;
}

static const struct nvkm_device_oclass
nvkm_pm_oclass = {
	.base.oclass = NVIF_CLASS_PERFMON,
	.base.minver = -1,
	.base.maxver = -1,
	.ctor = nvkm_pm_oclass_new,
};

static int
nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
		   const struct nvkm_device_oclass **class)
{
	if (index == 0) {
		oclass->base = nvkm_pm_oclass.base;
		*class = &nvkm_pm_oclass;
		return index;
	}
	return 1;
}

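/*
 * Build nvkm_perfsrc entries for a signal from its specsrc table.  Sources
 * are de-duplicated by (register address, shift) across all signals; the id
 * stored in sig->source[] is the source's 1-based position in pm->sources.
 */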
static int
nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
		 const struct nvkm_specsrc *spec)
{
	const struct nvkm_specsrc *ssrc;
	const struct nvkm_specmux *smux;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;

	if (!spec) {
		/* No sources are defined for this signal. */
		return 0;
	}

	ssrc = spec;
	while (ssrc->name) {
		smux = ssrc->mux;
		while (smux->name) {
			bool found = false;
			u8 source_id = 0;
			u32 len;

			list_for_each_entry(src, &pm->sources, head) {
				if (src->addr == ssrc->addr &&
				    src->shift == smux->shift) {
					found = true;
					break;
				}
				source_id++;
			}

			if (!found) {
				src = kzalloc(sizeof(*src), GFP_KERNEL);
				if (!src)
					return -ENOMEM;

				src->addr   = ssrc->addr;
				src->mask   = smux->mask;
				src->shift  = smux->shift;
				src->enable = smux->enable;

				len = strlen(ssrc->name) +
				      strlen(smux->name) + 2;
				src->name = kzalloc(len, GFP_KERNEL);
				if (!src->name) {
					kfree(src);
					return -ENOMEM;
				}
				snprintf(src->name, len, "%s_%s", ssrc->name,
					 smux->name);

				list_add_tail(&src->head, &pm->sources);
			}

			sig->source[source_nr++] = source_id + 1;
			smux++;
		}
		ssrc++;
	}

	return 0;
}

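/*
 * Instantiate performance domains from a specdom table.  When 'mask' is
 * non-zero, a copy of the table is created for every unit set in the mask
 * (stepping the base address by 'size_unit'); within a unit, each specdom
 * entry occupies 'size_domain' bytes of register space.
 */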
int
nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
		 u32 base, u32 size_unit, u32 size_domain,
		 const struct nvkm_specdom *spec)
{
	const struct nvkm_specdom *sdom;
	const struct nvkm_specsig *ssig;
	struct nvkm_perfdom *dom;
	int ret, i;

	for (i = 0; i == 0 || mask; i++) {
		u32 addr = base + (i * size_unit);
		if (i && !(mask & (1 << i)))
			continue;

		sdom = spec;
		while (sdom->signal_nr) {
			dom = kzalloc(struct_size(dom, signal, sdom->signal_nr),
				      GFP_KERNEL);
			if (!dom)
				return -ENOMEM;

			if (mask) {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x/%02x", name, i,
					 (int)(sdom - spec));
			} else {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x", name, (int)(sdom - spec));
			}

			list_add_tail(&dom->head, &pm->domains);
			INIT_LIST_HEAD(&dom->list);
			dom->func = sdom->func;
			dom->addr = addr;
			dom->signal_nr = sdom->signal_nr;

			ssig = (sdom++)->signal;
			while (ssig->name) {
				struct nvkm_perfsig *sig =
					&dom->signal[ssig->signal];
				sig->name = ssig->name;
				ret = nvkm_perfsrc_new(pm, sig, ssig->source);
				if (ret)
					return ret;
				ssig++;
			}

			addr += size_domain;
		}

		mask &= ~(1 << i);
	}

	return 0;
}

static int
nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	if (pm->func->fini)
		pm->func->fini(pm);
	return 0;
}

static void *
nvkm_pm_dtor(struct nvkm_engine *engine)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	struct nvkm_perfdom *dom, *next_dom;
	struct nvkm_perfsrc *src, *next_src;

	list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
		list_del(&dom->head);
		kfree(dom);
	}

	list_for_each_entry_safe(src, next_src, &pm->sources, head) {
		list_del(&src->head);
		kfree(src->name);
		kfree(src);
	}

	return pm;
}

static const struct nvkm_engine_func
nvkm_pm = {
	.dtor = nvkm_pm_dtor,
	.fini = nvkm_pm_fini,
	.base.sclass = nvkm_pm_oclass_get,
};

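/*
 * Shared constructor for the chipset-specific PM implementations: hook up
 * the function table, initialize the domain/source lists and register the
 * engine with the device.
 */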
int
nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
	     int index, struct nvkm_pm *pm)
{
	pm->func = func;
	INIT_LIST_HEAD(&pm->domains);
	INIT_LIST_HEAD(&pm->sources);
	return nvkm_engine_ctor(&nvkm_pm, device, index, true, &pm->engine);
}
873