/* $NetBSD: nouveau_nvkm_subdev_clk_base.c,v 1.5 2021/12/18 23:45:39 riastradh Exp $ */

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_clk_base.c,v 1.5 2021/12/18 23:45:39 riastradh Exp $");

#include "priv.h"

#include <subdev/bios.h>
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
#include <subdev/bios/vpstate.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>

#include <core/option.h>

/******************************************************************************
 * misc
 *****************************************************************************/
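/*
 * Clamp (and, if requested, scale) a clock frequency for the given
 * pstate/clock-domain pair against the VBIOS boost table.  If no boost
 * table entry matches, the input frequency is returned unchanged.
 */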
static u32
nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
		u8 pstate, u8 domain, u32 input)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvbios_boostE boostE;
	u8  ver, hdr, cnt, len;
	u32 data;

	data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
	if (data) {
		struct nvbios_boostS boostS;
		u8  idx = 0, sver, shdr;
		u32 subd;

		input = max(boostE.min, input);
		input = min(boostE.max, input);
		do {
			sver = ver;
			shdr = hdr;
			subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
					      cnt, len, &boostS);
			if (subd && boostS.domain == domain) {
				if (adjust)
					input = input * boostS.percent / 100;
				input = max(boostS.min, input);
				input = min(boostS.max, input);
				break;
			}
		} while (subd);
	}

	return input;
}

/******************************************************************************
 * C-States
 *****************************************************************************/
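/*
 * Check whether a cstate is usable at all: every VPSTATE-flagged clock
 * domain must respect the base/boost clock limits for the current boost
 * mode, and, when a voltage controller is present, the cstate's voltage
 * must be attainable below both the requested and the hardware maximum.
 */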
static bool
nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
		  u32 max_volt, int temp)
{
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	int voltage;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) {
			u32 freq = cstate->domain[domain->name];
			switch (clk->boost_mode) {
			case NVKM_CLK_BOOST_NONE:
				if (clk->base_khz && freq > clk->base_khz)
					return false;
				/* fall through */
			case NVKM_CLK_BOOST_BIOS:
				if (clk->boost_khz && freq > clk->boost_khz)
					return false;
			}
		}
		domain++;
	}

	if (!volt)
		return true;

	voltage = nvkm_volt_map(volt, cstate->voltage, temp);
	if (voltage < 0)
		return false;
	return voltage <= min(max_volt, volt->max_uv);
}

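/*
 * Starting from the given cstate, walk backwards through the pstate's
 * cstate list until one passes nvkm_cstate_valid() for the lowest of the
 * max0/max1/max2 voltage maps at the current temperature.
 */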
static struct nvkm_cstate *
nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
		      struct nvkm_cstate *cstate)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_volt *volt = device->volt;
	int max_volt;

	if (!pstate || !cstate)
		return NULL;

	if (!volt)
		return cstate;

	max_volt = volt->max_uv;
	if (volt->max0_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max0_id, clk->temp));
	if (volt->max1_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max1_id, clk->temp));
	if (volt->max2_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max2_id, clk->temp));

	list_for_each_entry_from_reverse(cstate, &pstate->list, head) {
		if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
			break;
	}

	return cstate;
}

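/*
 * Look up a cstate by id within a pstate; NVKM_CLK_CSTATE_HIGHEST selects
 * the last (highest-clocked) entry on the list.
 */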
static struct nvkm_cstate *
nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_cstate *cstate;
	if (cstatei == NVKM_CLK_CSTATE_HIGHEST)
		return list_last_entry(&pstate->list, typeof(*cstate), head);
	else {
		list_for_each_entry(cstate, &pstate->list, head) {
			if (cstate->id == cstatei)
				return cstate;
		}
	}
	return NULL;
}

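/*
 * Program a cstate: raise the fan speed and voltage first, reclock via
 * the implementation's calc/prog/tidy hooks, then drop voltage and fan
 * speed again.  Note that a calc/prog failure is not returned directly;
 * the final return value is that of the last voltage/fan adjustment.
 */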
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_therm *therm = device->therm;
	struct nvkm_volt *volt = device->volt;
	struct nvkm_cstate *cstate;
	int ret;

	if (!list_empty(&pstate->list)) {
		cstate = nvkm_cstate_get(clk, pstate, cstatei);
		cstate = nvkm_cstate_find_best(clk, pstate, cstate);
	} else {
		cstate = &pstate->base;
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
			return ret;
		}
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, clk->temp, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
			return ret;
		}
	}

	ret = clk->func->calc(clk, cstate);
	if (ret == 0) {
		ret = clk->func->prog(clk);
		clk->func->tidy(clk);
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, clk->temp, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
	}

	return ret;
}

static void
nvkm_cstate_del(struct nvkm_cstate *cstate)
{
	list_del(&cstate->head);
	kfree(cstate);
}

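/*
 * Build a cstate from VBIOS cstep table entry @idx and append it to the
 * pstate's list, clamping each core-flagged domain's frequency through
 * the boost table.  Entries whose minimum voltage exceeds the hardware
 * maximum are rejected with -EINVAL.
 */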
static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_cstate *cstate = NULL;
	struct nvbios_cstepX cstepX;
	u8 ver, hdr;
	u32 data;

	data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
	if (!data)
		return -ENOENT;

	if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
		return -EINVAL;

	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (!cstate)
		return -ENOMEM;

	*cstate = pstate->base;
	cstate->voltage = cstepX.voltage;
	cstate->id = idx;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			u32 freq = nvkm_clk_adjust(clk, true, pstate->pstate,
						   domain->bios, cstepX.freq);
			cstate->domain[domain->name] = freq;
		}
		domain++;
	}

	list_add(&cstate->head, &pstate->list);
	return 0;
}

/******************************************************************************
 * P-States
 *****************************************************************************/
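/*
 * Switch to performance state index @pstatei: set the PCIe link, reclock
 * VRAM if the ram implementation supports it (retrying while calc asks
 * for another pass), then program the highest usable cstate.
 */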
static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_fb *fb = subdev->device->fb;
	struct nvkm_pci *pci = subdev->device->pci;
	struct nvkm_pstate *pstate;
	int ret, idx = 0;

	list_for_each_entry(pstate, &clk->states, head) {
		if (idx++ == pstatei)
			break;
	}

	nvkm_debug(subdev, "setting performance state %d\n", pstatei);
	clk->pstate = pstatei;

	nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);

	if (fb && fb->ram && fb->ram->func->calc) {
		struct nvkm_ram *ram = fb->ram;
		int khz = pstate->base.domain[nv_clk_src_mem];
		do {
			ret = ram->func->calc(ram, khz);
			if (ret == 0)
				ret = ram->func->prog(ram);
		} while (ret > 0);
		ram->func->tidy(ram);
	}

	return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST);
}

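/*
 * Deferred reclocking work: pick the target pstate from the user's AC/DC
 * choice (or the automatic state), clamp it between the dynamic minimum
 * (dstate) and the number of available states, and program it if it
 * differs from the current one.  Wakes any nvkm_pstate_calc() waiters.
 */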
static void
nvkm_pstate_work(struct work_struct *work)
{
	struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
	struct nvkm_subdev *subdev = &clk->subdev;
	int pstate;

	spin_lock(&clk->lock);
	if (!atomic_xchg(&clk->waiting, 0)) {
		spin_unlock(&clk->lock);
		return;
	}

	clk->pwrsrc = power_supply_is_system_supplied();

	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n",
		   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
		   clk->astate, clk->temp, clk->dstate);

	pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
	if (clk->state_nr && pstate != -1) {
		pstate = (pstate < 0) ? clk->astate : pstate;
		pstate = min(pstate, clk->state_nr - 1);
		pstate = max(pstate, clk->dstate);
	} else {
		pstate = clk->pstate = -1;
	}

	nvkm_trace(subdev, "-> %d\n", pstate);
	if (pstate != clk->pstate) {
		int ret = nvkm_pstate_prog(clk, pstate);
		if (ret) {
			nvkm_error(subdev, "error setting pstate %d: %d\n",
				   pstate, ret);
		}
	}

#ifdef __NetBSD__
	DRM_SPIN_WAKEUP_ALL(&clk->wait, &clk->lock);
#else
	wake_up_all(&clk->wait);
#endif
	spin_unlock(&clk->lock);
	nvkm_notify_get(&clk->pwrsrc_ntfy);
}

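/*
 * Schedule the reclocking work and, if @wait is set, block until it has
 * run.  On NetBSD the normal wait path is unavailable while the system
 * is still cold, so a bounded DELAY() poll is used there instead.
 */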
static int
nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
{
	atomic_set(&clk->waiting, 1);
	schedule_work(&clk->work);
	if (wait) {
#ifdef __NetBSD__
		if (cold) {
			unsigned timo = 1000;
			while (timo-- > 0) {
				if (atomic_read(&clk->waiting))
					return 0;
				DELAY(100);
			}
			return -ETIMEDOUT;
		}
		int ret;
		spin_lock(&clk->lock);
		DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &clk->wait, &clk->lock,
		    !atomic_read(&clk->waiting));
		spin_unlock(&clk->lock);
		KASSERT(ret == 0);
#else
		wait_event(clk->wait, !atomic_read(&clk->waiting));
#endif
	}
	return 0;
}

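/*
 * Log a summary of a pstate at debug level: for each clock domain, the
 * frequency range spanned by the pstate's cstates, in MHz.
 */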
static void
nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
{
	const struct nvkm_domain *clock = clk->domains - 1;
	struct nvkm_cstate *cstate;
	struct nvkm_subdev *subdev = &clk->subdev;
	char info[3][32] = { "", "", "" };
	char name[4] = "--";
	int i = -1;

	if (pstate->pstate != 0xff)
		snprintf(name, sizeof(name), "%02x", pstate->pstate);

	while ((++clock)->name != nv_clk_src_max) {
		u32 lo = pstate->base.domain[clock->name];
		u32 hi = lo;
		if (hi == 0)
			continue;

		nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
		list_for_each_entry(cstate, &pstate->list, head) {
			u32 freq = cstate->domain[clock->name];
			lo = min(lo, freq);
			hi = max(hi, freq);
			nvkm_debug(subdev, "%10d KHz\n", freq);
		}

		if (clock->mname && ++i < ARRAY_SIZE(info)) {
			lo /= clock->mdiv;
			hi /= clock->mdiv;
			if (lo == hi) {
				snprintf(info[i], sizeof(info[i]), "%s %d MHz",
					 clock->mname, lo);
			} else {
				snprintf(info[i], sizeof(info[i]),
					 "%s %d-%d MHz", clock->mname, lo, hi);
			}
		}
	}

	nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
}

static void
nvkm_pstate_del(struct nvkm_pstate *pstate)
{
	struct nvkm_cstate *cstate, *temp;

	list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
		nvkm_cstate_del(cstate);
	}

	list_del(&pstate->head);
	kfree(pstate);
}

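/*
 * Construct a pstate from VBIOS perf table entry @idx: fill the base
 * cstate from the perf entry's fields, override per-domain clocks from
 * version 0x40 subentries, then attach any cstep-derived cstates.
 * Entries with a pstate id of 0xff are skipped.
 */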
static int
nvkm_pstate_new(struct nvkm_clk *clk, int idx)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	const struct nvkm_domain *domain = clk->domains - 1;
	struct nvkm_pstate *pstate;
	struct nvkm_cstate *cstate;
	struct nvbios_cstepE cstepE;
	struct nvbios_perfE perfE;
	u8 ver, hdr, cnt, len;
	u32 data;

	data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
	if (!data)
		return -EINVAL;
	if (perfE.pstate == 0xff)
		return 0;

	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
	if (!pstate)
		return -ENOMEM;
	cstate = &pstate->base;

	INIT_LIST_HEAD(&pstate->list);

	pstate->pstate = perfE.pstate;
	pstate->fanspeed = perfE.fanspeed;
	pstate->pcie_speed = perfE.pcie_speed;
	pstate->pcie_width = perfE.pcie_width;
	cstate->voltage = perfE.voltage;
	cstate->domain[nv_clk_src_core] = perfE.core;
	cstate->domain[nv_clk_src_shader] = perfE.shader;
	cstate->domain[nv_clk_src_mem] = perfE.memory;
	cstate->domain[nv_clk_src_vdec] = perfE.vdec;
	cstate->domain[nv_clk_src_dom6] = perfE.disp;

	while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
		struct nvbios_perfS perfS;
		u8 sver = ver, shdr = hdr;
		u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
					   &sver, &shdr, cnt, len, &perfS);
		if (perfSe == 0 || sver != 0x40)
			continue;

		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			perfS.v40.freq = nvkm_clk_adjust(clk, false,
							 pstate->pstate,
							 domain->bios,
							 perfS.v40.freq);
		}

		cstate->domain[domain->name] = perfS.v40.freq;
	}

	data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
	if (data) {
		int idx = cstepE.index;
		do {
			nvkm_cstate_new(clk, idx, pstate);
		} while (idx--);
	}

	nvkm_pstate_info(clk, pstate);
	list_add_tail(&pstate->head, &clk->states);
	clk->state_nr++;
	return 0;
}

/******************************************************************************
 * Adjustment triggers
 *****************************************************************************/
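/*
 * Translate a user pstate request into an internal state index.  Requests
 * of -1 (disabled) and -2 (auto) pass through; anything else must match a
 * known BIOS pstate id.  The result is biased by +2 so that the caller
 * can distinguish it from negative error codes.
 */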
static int
nvkm_clk_ustate_update(struct nvkm_clk *clk, int req)
{
	struct nvkm_pstate *pstate;
	int i = 0;

	if (!clk->allow_reclock)
		return -ENOSYS;

	if (req != -1 && req != -2) {
		list_for_each_entry(pstate, &clk->states, head) {
			if (pstate->pstate == req)
				break;
			i++;
		}

		if (pstate->pstate != req)
			return -EINVAL;
		req = i;
	}

	return req + 2;
}

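/*
 * Parse an NvClkMode-style option string: "auto" maps to -2 (if
 * reclocking is allowed), a numeric BIOS pstate id maps to its state
 * index, and "disabled" or anything unparseable maps to -1.
 */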
static int
nvkm_clk_nstate(struct nvkm_clk *clk, const char *mode, int arglen)
{
	int ret = 1;

	if (clk->allow_reclock && !strncasecmpz(mode, "auto", arglen))
		return -2;

	if (strncasecmpz(mode, "disabled", arglen)) {
		char *m = kstrndup(mode, arglen, GFP_KERNEL);
		long v;

		if (m && !kstrtol(m, 0, &v)) {
			ret = nvkm_clk_ustate_update(clk, v);
			if (ret < 0)
				ret = 1;
		}
		kfree(m);
	}

	return ret - 2;
}

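/*
 * Set the user-requested pstate for one power source and trigger a
 * synchronous reclock.  @req is a BIOS pstate id (or -1/-2, see
 * nvkm_clk_ustate_update()); a non-zero @pwr selects the AC state, zero
 * the DC state.  A minimal usage sketch, with a hypothetical pstate id:
 * nvkm_clk_ustate(clk, 0x0f, 1) pins AC operation to BIOS pstate 0f.
 */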
int
nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr)
{
	int ret = nvkm_clk_ustate_update(clk, req);
	if (ret >= 0) {
		ret -= 2;
		if (pwr)
			clk->ustate_ac = ret;
		else
			clk->ustate_dc = ret;
		return nvkm_pstate_calc(clk, true);
	}
	return ret;
}

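/*
 * Set (or nudge, when @rel is non-zero) the automatic pstate, clamped to
 * the valid state range, and trigger a reclock.
 */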
int
nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
{
	if (!rel) clk->astate = req;
	if ( rel) clk->astate += rel;
	clk->astate = min(clk->astate, clk->state_nr - 1);
	clk->astate = max(clk->astate, 0);
	return nvkm_pstate_calc(clk, wait);
}

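/*
 * Record a new temperature reading and reclock asynchronously if it
 * changed; the temperature feeds into the voltage mapping.
 */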
int
nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp)
{
	if (clk->temp == temp)
		return 0;
	clk->temp = temp;
	return nvkm_pstate_calc(clk, false);
}

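/*
 * Set (or nudge) the dynamic minimum pstate, clamped to the valid state
 * range, and trigger a reclock.
 */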
int
nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
{
	if (!rel) clk->dstate = req;
	if ( rel) clk->dstate += rel;
	clk->dstate = min(clk->dstate, clk->state_nr - 1);
	clk->dstate = max(clk->dstate, 0);
	return nvkm_pstate_calc(clk, true);
}

static int
nvkm_clk_pwrsrc(struct nvkm_notify *notify)
{
	struct nvkm_clk *clk =
		container_of(notify, typeof(*clk), pwrsrc_ntfy);
	nvkm_pstate_calc(clk, false);
	return NVKM_NOTIFY_DROP;
}

/******************************************************************************
 * subdev base class implementation
 *****************************************************************************/

int
nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
	return clk->func->read(clk, src);
}

static int
nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	nvkm_notify_put(&clk->pwrsrc_ntfy);
	flush_work(&clk->work);
	if (clk->func->fini)
		clk->func->fini(clk);
	return 0;
}

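/*
 * Record the boot clocks in the boot pstate (bstate) by reading every
 * domain, then either defer to the implementation's init hook or fall
 * back to defaults: the full automatic range, an assumed 90°C, and an
 * initial reclock.
 */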
static int
nvkm_clk_init(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	const struct nvkm_domain *clock = clk->domains;
	int ret;

	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
	INIT_LIST_HEAD(&clk->bstate.list);
	clk->bstate.pstate = 0xff;

	while (clock->name != nv_clk_src_max) {
		ret = nvkm_clk_read(clk, clock->name);
		if (ret < 0) {
			nvkm_error(subdev, "%02x freq unknown\n", clock->name);
			return ret;
		}
		clk->bstate.base.domain[clock->name] = ret;
		clock++;
	}

	nvkm_pstate_info(clk, &clk->bstate);

	if (clk->func->init)
		return clk->func->init(clk);

	clk->astate = clk->state_nr - 1;
	clk->dstate = 0;
	clk->pstate = -1;
	clk->temp = 90; /* reasonable default value */
	nvkm_pstate_calc(clk, true);
	return 0;
}

static void *
nvkm_clk_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	struct nvkm_pstate *pstate, *temp;

	nvkm_notify_fini(&clk->pwrsrc_ntfy);

	/* The pstate list is only dynamically allocated if the pstates
	 * were not provided statically by the implementation. */
	if (!clk->func->pstates) {
		list_for_each_entry_safe(pstate, temp, &clk->states, head) {
			nvkm_pstate_del(pstate);
		}
	}

#ifdef __NetBSD__
	DRM_DESTROY_WAITQUEUE(&clk->wait);
	spin_lock_destroy(&clk->lock);
#endif

	return clk;
}

static const struct nvkm_subdev_func
nvkm_clk = {
	.dtor = nvkm_clk_dtor,
	.init = nvkm_clk_init,
	.fini = nvkm_clk_fini,
};

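/*
 * Common constructor: parse base/boost limits from the VBIOS vpstate
 * table, build the pstate list (from the perf table unless the
 * implementation supplies one statically), hook up the power-source
 * notifier, and honour the NvClkMode/NvClkModeAC/NvClkModeDC/NvBoost
 * config options.
 *
 * Config sketch (values are illustrative): booting with NvClkMode=auto
 * enables automatic reclocking on both power sources, while
 * NvClkModeDC=0f would pin battery operation to BIOS pstate 0f.
 */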
int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      int index, bool allow_reclock, struct nvkm_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_bios *bios = device->bios;
	int ret, idx, arglen;
	const char *mode;
	struct nvbios_vpstate_header h;

	nvkm_subdev_ctor(&nvkm_clk, device, index, subdev);

	if (bios && !nvbios_vpstate_parse(bios, &h)) {
		struct nvbios_vpstate_entry base, boost;
		if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
			clk->boost_khz = boost.clock_mhz * 1000;
		if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
			clk->base_khz = base.clock_mhz * 1000;
	}

	clk->func = func;
	INIT_LIST_HEAD(&clk->states);
	clk->domains = func->domains;
	clk->ustate_ac = -1;
	clk->ustate_dc = -1;
	clk->allow_reclock = allow_reclock;

	INIT_WORK(&clk->work, nvkm_pstate_work);
#ifdef __NetBSD__
	spin_lock_init(&clk->lock);
	DRM_INIT_WAITQUEUE(&clk->wait, "nvclk");
#else
	init_waitqueue_head(&clk->wait);
#endif
	atomic_set(&clk->waiting, 0);

	/* If no pstates are provided, try and fetch them from the BIOS */
	if (!func->pstates) {
		idx = 0;
		do {
			ret = nvkm_pstate_new(clk, idx++);
		} while (ret == 0);
	} else {
		for (idx = 0; idx < func->nr_pstates; idx++)
			list_add_tail(&func->pstates[idx].head, &clk->states);
		clk->state_nr = func->nr_pstates;
	}

	ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
			       NULL, 0, 0, &clk->pwrsrc_ntfy);
	if (ret)
		return ret;

	mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
	if (mode) {
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
	}

	mode = nvkm_stropt(device->cfgopt, "NvClkModeAC", &arglen);
	if (mode)
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);

	mode = nvkm_stropt(device->cfgopt, "NvClkModeDC", &arglen);
	if (mode)
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);

	clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost",
				       NVKM_CLK_BOOST_NONE);
	return 0;
}

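/*
 * Allocate and construct an nvkm_clk; the thin wrapper most
 * implementations call from their own new() functions.
 */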
int
nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      int index, bool allow_reclock, struct nvkm_clk **pclk)
{
	if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
}