1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/sysmacros.h>
29 #include <sys/sunddi.h>
30 #include <sys/esunddi.h>
31 #include <sys/sunndi.h>
32 #include <sys/modctl.h>
33 #include <sys/promif.h>
34 #include <sys/machparam.h>
35 #include <sys/kobj.h>
36 #include <sys/cpuvar.h>
37 #include <sys/mem_cage.h>
38 #include <sys/promif.h>
39 #include <sys/promimpl.h>
40 #include <sys/platform_module.h>
41 #include <sys/errno.h>
42 #include <sys/cpu_sgnblk_defs.h>
43 #include <sys/iosramio.h>
44 #include <sys/domaind.h>
45 #include <sys/starcat.h>
46 #include <sys/machsystm.h>
47 #include <sys/bootconf.h>
48 #include <sys/memnode.h>
49 #include <vm/vm_dep.h>
50 #include <vm/page.h>
51 #include <sys/cheetahregs.h>
52 #include <sys/plat_ecc_unum.h>
53 #include <sys/plat_ecc_dimm.h>
54 #include <sys/lgrp.h>
55 #include <sys/dr.h>
56 #include <sys/post/scat_dcd.h>
57 #include <sys/kdi_impl.h>
58 #include <sys/iosramreg.h>
59 #include <sys/iosramvar.h>
60 #include <sys/mc-us3.h>
61 #include <sys/clock_impl.h>
62
/* Number of spare TSBs to preallocate for DR */
int starcat_tsb_spares = STARCAT_SPARE_TSB_MAX;

/* Maximum number of slot0 + slot1 boards... for DR */
int starcat_boards = STARCAT_BDSET_MAX * STARCAT_BDSET_SLOT_MAX;

/* Maximum number of cpus per board... for DR */
int starcat_cpu_per_board = MAX(STARCAT_SLOT0_CPU_MAX, STARCAT_SLOT1_CPU_MAX);

/* Maximum number of mem-units per board... for DR */
int starcat_mem_per_board = MAX(STARCAT_SLOT0_MEM_MAX, STARCAT_SLOT1_MEM_MAX);

/* Maximum number of io-units (buses) per board... for DR */
int starcat_io_per_board = 2 * MAX(STARCAT_SLOT0_IO_MAX, STARCAT_SLOT1_IO_MAX);

/* Preferred minimum cage size (expressed in pages)... for DR */
pgcnt_t starcat_startup_cage_size = 0;

/*
 * Platform-specific hook used by plat_get_mem_unum() to translate a
 * memory fault address into a unum string.  Presumably registered by
 * the mc-us3 memory driver loaded below -- TODO confirm against mc-us3.
 */
int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);

/* Memory for fcode claims. 16k times # maximum possible schizos */
#define	EFCODE_SIZE	(STARCAT_BDSET_MAX * 4 * 0x4000)
int efcode_size = EFCODE_SIZE;

void sgn_update_all_cpus(ushort_t, uchar_t, uchar_t);

/*
 * The IOSRAM driver is loaded in load_platform_drivers(); any CPU
 * signature usage prior to that time goes through these PROM-based
 * accessors, which load_platform_drivers() later replaces with the
 * driver's iosram_rd/iosram_wr entry points.
 */
static int (*iosram_rdp)(uint32_t key, uint32_t off, uint32_t len,
	caddr_t dptr) = prom_starcat_iosram_read;
static int (*iosram_wrp)(uint32_t key, uint32_t off, uint32_t len,
	caddr_t dptr) = prom_starcat_iosram_write;

/* Per-boardset DIMM serial-id records (see plat_ecc_dimm.h) */
plat_dimm_sid_board_t	domain_dimm_sids[STARCAT_BDSET_MAX];
100
101 /*
102 * set_platform_max_ncpus should return the maximum number of CPUs that the
103 * platform supports. This function is called from check_cpus() to set the
104 * value of max_ncpus [see PSARC 1997/165 CPU Dynamic Reconfiguration].
105 * Data elements which are allocated based upon max_ncpus are all accessed
106 * via cpu_seqid and not physical IDs. Previously, the value of max_ncpus
107 * was being set to the largest physical ID, which led to boot problems on
108 * systems with less than 1.25GB of memory.
109 */
110
111 int
set_platform_max_ncpus(void)112 set_platform_max_ncpus(void)
113 {
114 int n;
115
116 /*
117 * Convert number of slot0 + slot1 boards to number of expander brds
118 * and constrain the value to an architecturally plausible range
119 */
120 n = MAX(starcat_boards, STARCAT_BDSET_MIN * STARCAT_BDSET_SLOT_MAX);
121 n = MIN(n, STARCAT_BDSET_MAX * STARCAT_BDSET_SLOT_MAX);
122 n = (n + STARCAT_BDSET_SLOT_MAX - 1) / STARCAT_BDSET_SLOT_MAX;
123
124 /* return maximum number of cpus possible on N expander boards */
125 return (n * STARCAT_BDSET_CPU_MAX - STARCAT_SLOT1_CPU_MAX);
126 }
127
128 int
set_platform_tsb_spares()129 set_platform_tsb_spares()
130 {
131 return (MIN(starcat_tsb_spares, MAX_UPA));
132 }
133
#pragma weak mmu_init_large_pages

/*
 * Establish Starcat-specific boot-time defaults: the CPU signature
 * update hook, the TOD module name, the TS dispatch table, lgroup-aware
 * TSB placement, large-page MMU setup, and kernel page relocation.
 */
void
set_platform_defaults(void)
{
	extern char *tod_module_name;
	extern int ts_dispatch_extended;
	extern void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
	extern int tsb_lgrp_affinity;
	extern int segkmem_reloc;
	extern void mmu_init_large_pages(size_t);
	extern int ncpunode;	/* number of CPUs detected by OBP */

#ifdef DEBUG
	/* Be verbose about correctable errors in DEBUG kernels */
	ce_verbose_memory = 2;
	ce_verbose_other = 2;
#endif

	/* Set the CPU signature function pointer */
	cpu_sgn_func = cpu_sgn_update;

	/* Set the appropriate tod module for starcat */
	ASSERT(tod_module_name == NULL);
	tod_module_name = "todstarcat";

	/*
	 * Use the alternate TS dispatch table, which is better
	 * tuned for large servers.
	 */
	if (ts_dispatch_extended == -1)
		ts_dispatch_extended = 1;

	/*
	 * Use lgroup-aware TSB allocations on this platform,
	 * since they are a considerable performance win.
	 */
	tsb_lgrp_affinity = 1;

	/*
	 * Initialize large-page MMU support if the ISM page size has
	 * been tuned away from its default.  mmu_init_large_pages is
	 * a weak symbol, so verify it is present before calling.
	 */
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
		if (&mmu_init_large_pages)
			mmu_init_large_pages(mmu_ism_pagesize);
	}

	/*
	 * KPR (kernel page relocation) is supported on this platform,
	 * but only enabled on cage-enabled configs of 32 or more CPUs.
	 */
	if (hat_kpr_enabled && kernel_cage_enable && ncpunode >= 32) {
		segkmem_reloc = 1;
		cmn_err(CE_NOTE, "!Kernel Page Relocation is ENABLED");
	} else {
		cmn_err(CE_NOTE, "!Kernel Page Relocation is DISABLED");
	}
}
188
#ifdef DEBUG
/* Debug override for the preferred cage size, in pages */
pgcnt_t starcat_cage_size_limit;
#endif

/*
 * Size and initialize the DR kernel cage.  The cage grows downward
 * (descending pfns) from the high end of memory.
 */
void
set_platform_cage_params(void)
{
	extern pgcnt_t total_pages;
	extern struct memlist *phys_avail;

	if (kernel_cage_enable) {
		pgcnt_t preferred_cage_size;

		/* At least the tunable, and at least 1/256 of memory */
		preferred_cage_size =
		    MAX(starcat_startup_cage_size, total_pages / 256);

#ifdef DEBUG
		if (starcat_cage_size_limit)
			preferred_cage_size = starcat_cage_size_limit;
#endif
		/*
		 * Note: we are assuming that POST has loaded the
		 * whole show into the high end of memory.  Having
		 * taken this leap, we hand the whole of phys_avail
		 * to kcage_range_init and arrange for the cage to
		 * grow downward (descending pfns).
		 */
		kcage_range_init(phys_avail, KCAGE_DOWN, preferred_cage_size);
	}

	if (kcage_on)
		cmn_err(CE_NOTE, "!DR Kernel Cage is ENABLED");
	else
		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
}
224
225 void
load_platform_modules(void)226 load_platform_modules(void)
227 {
228 if (modload("misc", "pcihp") < 0) {
229 cmn_err(CE_NOTE, "pcihp driver failed to load");
230 }
231 }
232
233 /*
234 * Starcat does not support power control of CPUs from the OS.
235 */
236 /*ARGSUSED*/
237 int
plat_cpu_poweron(struct cpu * cp)238 plat_cpu_poweron(struct cpu *cp)
239 {
240 int (*starcat_cpu_poweron)(struct cpu *) = NULL;
241
242 starcat_cpu_poweron =
243 (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweron", 0);
244
245 if (starcat_cpu_poweron == NULL)
246 return (ENOTSUP);
247 else
248 return ((starcat_cpu_poweron)(cp));
249 }
250
251 /*ARGSUSED*/
252 int
plat_cpu_poweroff(struct cpu * cp)253 plat_cpu_poweroff(struct cpu *cp)
254 {
255 int (*starcat_cpu_poweroff)(struct cpu *) = NULL;
256
257 starcat_cpu_poweroff =
258 (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweroff", 0);
259
260 if (starcat_cpu_poweroff == NULL)
261 return (ENOTSUP);
262 else
263 return ((starcat_cpu_poweroff)(cp));
264 }
265
266 /*
267 * The following are currently private to Starcat DR
268 */
269 int
plat_max_boards()270 plat_max_boards()
271 {
272 return (starcat_boards);
273 }
274
275 int
plat_max_cpu_units_per_board()276 plat_max_cpu_units_per_board()
277 {
278 return (starcat_cpu_per_board);
279 }
280
281 int
plat_max_mc_units_per_board()282 plat_max_mc_units_per_board()
283 {
284 return (starcat_mem_per_board); /* each CPU has a memory controller */
285 }
286
287 int
plat_max_mem_units_per_board()288 plat_max_mem_units_per_board()
289 {
290 return (starcat_mem_per_board);
291 }
292
293 int
plat_max_io_units_per_board()294 plat_max_io_units_per_board()
295 {
296 return (starcat_io_per_board);
297 }
298
/* Maximum number of cpu/memory board sets in the platform */
int
plat_max_cpumem_boards(void)
{
	return (STARCAT_BDSET_MAX);
}
304
305 int
plat_pfn_to_mem_node(pfn_t pfn)306 plat_pfn_to_mem_node(pfn_t pfn)
307 {
308 return (pfn >> mem_node_pfn_shift);
309 }
310
311 #define STARCAT_MC_MEMBOARD_SHIFT 37 /* Boards on 128BG boundary */
312
313 /* ARGSUSED */
314 void
plat_build_mem_nodes(prom_memlist_t * list,size_t nelems)315 plat_build_mem_nodes(prom_memlist_t *list, size_t nelems)
316 {
317 size_t elem;
318 pfn_t basepfn;
319 pgcnt_t npgs;
320
321 /*
322 * Starcat mem slices are always aligned on a 128GB boundary,
323 * fixed, and limited to one slice per expander due to design
324 * of the centerplane ASICs.
325 */
326 mem_node_pfn_shift = STARCAT_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT;
327 mem_node_physalign = 0;
328
329 /*
330 * Boot install lists are arranged <addr, len>, <addr, len>, ...
331 */
332 for (elem = 0; elem < nelems; list++, elem++) {
333 basepfn = btop(list->addr);
334 npgs = btop(list->size);
335 mem_node_add_slice(basepfn, basepfn + npgs - 1);
336 }
337 }
338
/*
 * Find the CPU associated with a slice at boot-time.
 *
 * For each valid bank of the memory controller identified by nodeid,
 * record the start-of-day mapping from the node's expander (the lgroup
 * platform handle) to the 128GB slice (memnode) the bank decodes.
 */
void
plat_fill_mc(pnode_t nodeid)
{
	int len;
	uint64_t mc_addr, mask;
	uint64_t mc_decode[MAX_BANKS_PER_MC];
	uint32_t regs[4];
	int local_mc;
	int portid;
	int expnum;
	int i;

	/*
	 * Memory address decoding registers
	 * (see Chap 9 of SPARCV9 JSP-1 US-III implementation)
	 */
	const uint64_t mc_decode_addr[MAX_BANKS_PER_MC] = {
		0x400028, 0x400010, 0x400018, 0x400020
	};

	/*
	 * Starcat memory controller portid == global CPU id
	 */
	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
	    (portid == -1))
		return;

	expnum = STARCAT_CPUID_TO_EXPANDER(portid);

	/*
	 * The "reg" property returns 4 32-bit values. The first two are
	 * combined to form a 64-bit address. The second two are for a
	 * 64-bit size, but we don't actually need to look at that value.
	 */
	len = prom_getproplen(nodeid, "reg");
	if (len != (sizeof (uint32_t) * 4)) {
		prom_printf("Warning: malformed 'reg' property\n");
		return;
	}
	if (prom_getprop(nodeid, "reg", (caddr_t)regs) < 0)
		return;
	mc_addr = ((uint64_t)regs[0]) << 32;
	mc_addr |= (uint64_t)regs[1];

	/*
	 * Figure out whether the memory controller we are examining
	 * belongs to this CPU/CMP or a different one.
	 */
	if (portid == cpunodes[CPU->cpu_id].portid)
		local_mc = 1;
	else
		local_mc = 0;

	for (i = 0; i < MAX_BANKS_PER_MC; i++) {

		mask = mc_decode_addr[i];

		/*
		 * If the memory controller is local to this CPU, we use
		 * the special ASI to read the decode registers.
		 * Otherwise, we load the values from a magic address in
		 * I/O space.
		 */
		if (local_mc)
			mc_decode[i] = lddmcdecode(mask & MC_OFFSET_MASK);
		else
			mc_decode[i] = lddphysio((mc_addr | mask));

		/* Only banks whose valid bit is set contribute */
		if (mc_decode[i] >> MC_VALID_SHIFT) {
			uint64_t base = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
			int sliceid = (base >> STARCAT_MC_MEMBOARD_SHIFT);

			if (sliceid < max_mem_nodes) {
				/*
				 * Establish start-of-day mappings of
				 * lgroup platform handles to memnodes.
				 * Handle == Expander Number
				 * Memnode == Fixed 128GB Slice
				 */
				plat_assign_lgrphand_to_mem_node(expnum,
				    sliceid);
			}
		}
	}
}
427
428 /*
429 * Starcat support for lgroups.
430 *
431 * On Starcat, an lgroup platform handle == expander number.
432 * For split-slot configurations (e.g. slot 0 and slot 1 boards
433 * in different domains) an MCPU board has only remote memory.
434 *
435 * The centerplane logic provides fixed 128GB memory slices
436 * each of which map to a memnode. The initial mapping of
437 * memnodes to lgroup handles is determined at boot time.
438 * A DR addition of memory adds a new mapping. A DR copy-rename
439 * swaps mappings.
440 */
441
/*
 * Convert board number to expander number.  Each expander hosts two
 * boards (slot 0 and slot 1).  The argument is fully parenthesized so
 * the macro is safe when given a compound expression.
 */
#define	BOARDNUM_2_EXPANDER(b)	((b) >> 1)
446
/*
 * Return the number of slot 0 boards configured with NULL LPA.
 *
 * Returns a value greater than EXP_COUNT when the GDCD cannot be read;
 * callers treat any positive return as a NULL-LPA condition.
 */
static int
check_for_null_lpa(void)
{
	gdcd_t *gdcd;
	uint_t exp, nlpa;

	/*
	 * Read GDCD from IOSRAM.
	 * If this fails indicate a NULL LPA condition.
	 */
	if ((gdcd = kmem_zalloc(sizeof (gdcd_t), KM_NOSLEEP)) == NULL)
		return (EXP_COUNT+1);

	if ((*iosram_rdp)(GDCD_MAGIC, 0, sizeof (gdcd_t), (caddr_t)gdcd) ||
	    (gdcd->h.dcd_magic != GDCD_MAGIC) ||
	    (gdcd->h.dcd_version != DCD_VERSION)) {
		kmem_free(gdcd, sizeof (gdcd_t));
		cmn_err(CE_WARN, "check_for_null_lpa: failed to access GDCD\n");
		return (EXP_COUNT+2);
	}

	/*
	 * Check for NULL LPAs on all slot 0 boards in domain
	 * (i.e. in all expanders marked good for this domain).
	 */
	nlpa = 0;
	for (exp = 0; exp < EXP_COUNT; exp++) {
		if (RSV_GOOD(gdcd->dcd_slot[exp][0].l1ss_rsv) &&
		    (gdcd->dcd_slot[exp][0].l1ss_flags &
		    L1SSFLG_THIS_L1_NULL_PROC_LPA))
			nlpa++;
	}

	kmem_free(gdcd, sizeof (gdcd_t));
	return (nlpa);
}
486
487 /*
488 * Return the platform handle for the lgroup containing the given CPU
489 *
490 * For Starcat, lgroup platform handle == expander.
491 */
492
493 extern int mpo_disabled;
494 extern lgrp_handle_t lgrp_default_handle;
495 int null_lpa_boards = -1;
496
497 lgrp_handle_t
plat_lgrp_cpu_to_hand(processorid_t id)498 plat_lgrp_cpu_to_hand(processorid_t id)
499 {
500 lgrp_handle_t plathand;
501
502 plathand = STARCAT_CPUID_TO_EXPANDER(id);
503
504 /*
505 * Return the real platform handle for the CPU until
506 * such time as we know that MPO should be disabled.
507 * At that point, we set the "mpo_disabled" flag to true,
508 * and from that point on, return the default handle.
509 *
510 * By the time we know that MPO should be disabled, the
511 * first CPU will have already been added to a leaf
512 * lgroup, but that's ok. The common lgroup code will
513 * double check that the boot CPU is in the correct place,
514 * and in the case where mpo should be disabled, will move
515 * it to the root if necessary.
516 */
517 if (mpo_disabled) {
518 /* If MPO is disabled, return the default (UMA) handle */
519 plathand = lgrp_default_handle;
520 } else {
521 if (null_lpa_boards > 0) {
522 /* Determine if MPO should be disabled */
523 mpo_disabled = 1;
524 plathand = lgrp_default_handle;
525 }
526 }
527 return (plathand);
528 }
529
/*
 * Platform specific lgroup initialization: tune the scheduler's
 * lgroup-expansion thresholds for Starcat's remote-latency profile.
 */
void
plat_lgrp_init(void)
{
	extern uint32_t lgrp_expand_proc_thresh;
	extern uint32_t lgrp_expand_proc_diff;

	/*
	 * Set tuneables for Starcat architecture
	 *
	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
	 * this process is currently running on before considering
	 * expanding threads to another lgroup.
	 *
	 * lgrp_expand_proc_diff determines how much less the remote lgroup
	 * must be loaded before expanding to it.
	 *
	 * Since remote latencies can be costly, attempt to keep 3 threads
	 * within the same lgroup before expanding to the next lgroup.
	 */
	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX * 3;
	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
}
555
/*
 * Platform notification of lgroup (re)configuration changes.
 *
 * Maintains the lgroup-handle-to-memnode map across DR memory add
 * and copy-rename operations.  A no-op once MPO is disabled.
 */
/*ARGSUSED*/
void
plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
{
	update_membounds_t *umb;
	lgrp_config_mem_rename_t lmr;
	int sbd, tbd;
	lgrp_handle_t hand, shand, thand;
	int mnode, snode, tnode;

	/* No map to maintain once MPO has been disabled */
	if (mpo_disabled)
		return;

	switch (evt) {

	case LGRP_CONFIG_MEM_ADD:
		/*
		 * Establish the lgroup handle to memnode translation.
		 */
		umb = (update_membounds_t *)arg;

		hand = BOARDNUM_2_EXPANDER(umb->u_board);
		mnode = plat_pfn_to_mem_node(umb->u_base >> MMU_PAGESHIFT);
		plat_assign_lgrphand_to_mem_node(hand, mnode);

		break;

	case LGRP_CONFIG_MEM_DEL:
		/* We don't have to do anything */

		break;

	case LGRP_CONFIG_MEM_RENAME:
		/*
		 * During a DR copy-rename operation, all of the memory
		 * on one board is moved to another board -- but the
		 * addresses/pfns and memnodes don't change. This means
		 * the memory has changed locations without changing identity.
		 *
		 * Source is where we are copying from and target is where we
		 * are copying to. After source memnode is copied to target
		 * memnode, the physical addresses of the target memnode are
		 * renamed to match what the source memnode had. Then target
		 * memnode can be removed and source memnode can take its
		 * place.
		 *
		 * To do this, swap the lgroup handle to memnode mappings for
		 * the boards, so target lgroup will have source memnode and
		 * source lgroup will have empty target memnode which is where
		 * its memory will go (if any is added to it later).
		 *
		 * Then source memnode needs to be removed from its lgroup
		 * and added to the target lgroup where the memory was living
		 * but under a different name/memnode. The memory was in the
		 * target memnode and now lives in the source memnode with
		 * different physical addresses even though it is the same
		 * memory.
		 */
		/* arg packs source board (low 16 bits), target (high 16) */
		sbd = arg & 0xffff;
		tbd = (arg & 0xffff0000) >> 16;
		shand = BOARDNUM_2_EXPANDER(sbd);
		thand = BOARDNUM_2_EXPANDER(tbd);
		snode = plat_lgrphand_to_mem_node(shand);
		tnode = plat_lgrphand_to_mem_node(thand);

		plat_assign_lgrphand_to_mem_node(thand, snode);
		plat_assign_lgrphand_to_mem_node(shand, tnode);

		lmr.lmem_rename_from = shand;
		lmr.lmem_rename_to = thand;

		/*
		 * Remove source memnode of copy rename from its lgroup
		 * and add it to its new target lgroup
		 */
		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
		    (uintptr_t)&lmr);

		break;

	default:
		break;
	}
}
643
644 /*
645 * Return latency between "from" and "to" lgroups
646 *
647 * This latency number can only be used for relative comparison
648 * between lgroups on the running system, cannot be used across platforms,
649 * and may not reflect the actual latency. It is platform and implementation
650 * specific, so platform gets to decide its value. It would be nice if the
651 * number was at least proportional to make comparisons more meaningful though.
652 * NOTE: The numbers below are supposed to be load latencies for uncached
653 * memory divided by 10.
654 */
655 int
plat_lgrp_latency(lgrp_handle_t from,lgrp_handle_t to)656 plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
657 {
658 /*
659 * Return min remote latency when there are more than two lgroups
660 * (root and child) and getting latency between two different lgroups
661 * or root is involved
662 */
663 if (lgrp_optimizations() && (from != to ||
664 from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
665 return (48);
666 else
667 return (28);
668 }
669
670 /*
671 * Return platform handle for root lgroup
672 */
673 lgrp_handle_t
plat_lgrp_root_hand(void)674 plat_lgrp_root_hand(void)
675 {
676 if (mpo_disabled)
677 return (lgrp_default_handle);
678
679 return (LGRP_DEFAULT_HANDLE);
680 }
681
/* No platform-specific page freelist processing on Starcat */
/* ARGSUSED */
void
plat_freelist_process(int mnode)
{
}
687
/*
 * Load and attach the platform drivers Starcat requires: iosram (and
 * switch the IOSRAM accessors from the PROM stubs to the driver), axq,
 * scosmb, dr, mc-us3 and pcisch.  Panics if the IOSRAM tunnel cannot
 * be located, since signature/mailbox traffic depends on it.
 */
void
load_platform_drivers(void)
{
	uint_t		tunnel;
	pnode_t		nodeid;
	dev_info_t	*chosen_devi;
	char		chosen_iosram[MAXNAMELEN];

	/*
	 * Get /chosen node - that's where the tunnel property is
	 */
	nodeid = prom_chosennode();

	/*
	 * Get the iosram property from the chosen node.
	 */
	if (prom_getprop(nodeid, IOSRAM_CHOSEN_PROP, (caddr_t)&tunnel) <= 0) {
		prom_printf("Unable to get iosram property\n");
		cmn_err(CE_PANIC, "Unable to get iosram property\n");
	}

	if (prom_phandle_to_path((phandle_t)tunnel, chosen_iosram,
	    sizeof (chosen_iosram)) < 0) {
		(void) prom_printf("prom_phandle_to_path(0x%x) failed\n",
		    tunnel);
		cmn_err(CE_PANIC, "prom_phandle_to_path(0x%x) failed\n",
		    tunnel);
	}

	/*
	 * Attach all driver instances along the iosram's device path
	 */
	if (i_ddi_attach_hw_nodes("iosram") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "IOSRAM failed to load\n");
	}

	/* Hold/release to force attach of the chosen iosram instance */
	if ((chosen_devi = e_ddi_hold_devi_by_path(chosen_iosram, 0)) == NULL) {
		(void) prom_printf("e_ddi_hold_devi_by_path(%s) failed\n",
		    chosen_iosram);
		cmn_err(CE_PANIC, "e_ddi_hold_devi_by_path(%s) failed\n",
		    chosen_iosram);
	}
	ndi_rele_devi(chosen_devi);

	/*
	 * iosram driver is now loaded so we need to set our read and
	 * write pointers.
	 * NOTE(review): modgetsymvalue results are not checked here --
	 * presumably the driver load above guarantees the symbols exist.
	 */
	iosram_rdp = (int (*)(uint32_t, uint32_t, uint32_t, caddr_t))
	    modgetsymvalue("iosram_rd", 0);
	iosram_wrp = (int (*)(uint32_t, uint32_t, uint32_t, caddr_t))
	    modgetsymvalue("iosram_wr", 0);

	/*
	 * Need to check for null proc LPA after IOSRAM driver is loaded
	 * and before multiple lgroups created (when start_other_cpus() called)
	 */
	null_lpa_boards = check_for_null_lpa();

	/* load and attach the axq driver */
	if (i_ddi_attach_hw_nodes("axq") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "AXQ failed to load\n");
	}

	/* load Starcat Solaris Mailbox Client driver */
	if (modload("misc", "scosmb") < 0) {
		cmn_err(CE_WARN, "SCOSMB failed to load\n");
	}

	/* load the DR driver */
	if (i_ddi_attach_hw_nodes("dr") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "dr failed to load");
	}

	/*
	 * Load the mc-us3 memory driver.
	 */
	if (i_ddi_attach_hw_nodes("mc-us3") != DDI_SUCCESS)
		cmn_err(CE_WARN, "mc-us3 failed to load");
	else
		(void) ddi_hold_driver(ddi_name_to_major("mc-us3"));

	/* Load the schizo pci bus nexus driver. */
	if (i_ddi_attach_hw_nodes("pcisch") != DDI_SUCCESS)
		cmn_err(CE_WARN, "pcisch failed to load");

	plat_ecc_init();
}
776
777
/*
 * No platform drivers on this platform
 */
char *platform_module_list[] = {
	NULL
};
784
785
/* No platform-specific time-of-day fault handling on Starcat */
/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{
}
791
792 /*
793 * Update the signature(s) in the IOSRAM's domain data section.
794 */
795 void
cpu_sgn_update(ushort_t sgn,uchar_t state,uchar_t sub_state,int cpuid)796 cpu_sgn_update(ushort_t sgn, uchar_t state, uchar_t sub_state, int cpuid)
797 {
798 sig_state_t new_sgn;
799 sig_state_t current_sgn;
800
801 /*
802 * If the substate is REBOOT, then check for panic flow
803 */
804 if (sub_state == SIGSUBST_REBOOT) {
805 (*iosram_rdp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET,
806 sizeof (sig_state_t), (caddr_t)¤t_sgn);
807 if (current_sgn.state_t.state == SIGST_EXIT)
808 sub_state = SIGSUBST_PANIC_REBOOT;
809 }
810
811 /*
812 * cpuid == -1 indicates that the operation applies to all cpus.
813 */
814 if (cpuid < 0) {
815 sgn_update_all_cpus(sgn, state, sub_state);
816 return;
817 }
818
819 new_sgn.signature = CPU_SIG_BLD(sgn, state, sub_state);
820 (*iosram_wrp)(DOMD_MAGIC,
821 DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
822 sizeof (sig_state_t), (caddr_t)&new_sgn);
823
824 /*
825 * Under certain conditions we don't update the signature
826 * of the domain_state.
827 */
828 if ((sgn == OS_SIG) &&
829 ((state == SIGST_OFFLINE) || (state == SIGST_DETACHED)))
830 return;
831 (*iosram_wrp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET, sizeof (sig_state_t),
832 (caddr_t)&new_sgn);
833 }
834
835 /*
836 * Update the signature(s) in the IOSRAM's domain data section for all CPUs.
837 */
838 void
sgn_update_all_cpus(ushort_t sgn,uchar_t state,uchar_t sub_state)839 sgn_update_all_cpus(ushort_t sgn, uchar_t state, uchar_t sub_state)
840 {
841 sig_state_t new_sgn;
842 int i = 0;
843
844 new_sgn.signature = CPU_SIG_BLD(sgn, state, sub_state);
845
846 /*
847 * First update the domain_state signature
848 */
849 (*iosram_wrp)(DOMD_MAGIC, DOMD_DSTATE_OFFSET, sizeof (sig_state_t),
850 (caddr_t)&new_sgn);
851
852 for (i = 0; i < NCPU; i++) {
853 if (cpu[i] != NULL && (cpu[i]->cpu_flags &
854 (CPU_EXISTS|CPU_QUIESCED))) {
855 (*iosram_wrp)(DOMD_MAGIC,
856 DOMD_CPUSIGS_OFFSET + i * sizeof (sig_state_t),
857 sizeof (sig_state_t), (caddr_t)&new_sgn);
858 }
859 }
860 }
861
862 ushort_t
get_cpu_sgn(int cpuid)863 get_cpu_sgn(int cpuid)
864 {
865 sig_state_t cpu_sgn;
866
867 (*iosram_rdp)(DOMD_MAGIC,
868 DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
869 sizeof (sig_state_t), (caddr_t)&cpu_sgn);
870
871 return (cpu_sgn.state_t.sig);
872 }
873
874 uchar_t
get_cpu_sgn_state(int cpuid)875 get_cpu_sgn_state(int cpuid)
876 {
877 sig_state_t cpu_sgn;
878
879 (*iosram_rdp)(DOMD_MAGIC,
880 DOMD_CPUSIGS_OFFSET + cpuid * sizeof (sig_state_t),
881 sizeof (sig_state_t), (caddr_t)&cpu_sgn);
882
883 return (cpu_sgn.state_t.state);
884 }
885
886
/*
 * Type of argument passed into plat_get_ecache_cpu via ddi_walk_devs
 * for matching on specific CPU node in device tree
 */

typedef struct {
	char *jnum;	/* output, kmem_alloc'd if successful; caller frees */
	int cpuid;	/* input, to match cpuid/portid/upa-portid */
	uint_t dimm;	/* input, index into ecache-dimm-label */
} plat_ecache_cpu_arg_t;
897
898
899 /*
900 * plat_get_ecache_cpu is called repeatedly by ddi_walk_devs with pointers
901 * to device tree nodes (dip) and to a plat_ecache_cpu_arg_t structure (arg).
902 * Returning DDI_WALK_CONTINUE tells ddi_walk_devs to keep going, returning
903 * DDI_WALK_TERMINATE ends the walk. When the node for the specific CPU
904 * being searched for is found, the walk is done. But before returning to
905 * ddi_walk_devs and plat_get_ecacheunum, we grab this CPU's ecache-dimm-label
906 * property and set the jnum member of the plat_ecache_cpu_arg_t structure to
907 * point to the label corresponding to this specific ecache DIMM. It is up
908 * to plat_get_ecacheunum to kmem_free this string.
909 */
910
/*
 * ddi_walk_devs callback: match the CPU node with the requested id and
 * copy out the ecache-dimm-label entry selected by cpuarg->dimm.
 * Returns DDI_WALK_CONTINUE until the CPU is found, then terminates.
 */
static int
plat_get_ecache_cpu(dev_info_t *dip, void *arg)
{
	char			*devtype;
	plat_ecache_cpu_arg_t	*cpuarg;
	char			**dimm_labels;
	uint_t			numlabels;
	int			portid;

	/*
	 * Check device_type, must be "cpu"
	 */

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device_type", &devtype) != DDI_PROP_SUCCESS)
		return (DDI_WALK_CONTINUE);

	if (strcmp(devtype, "cpu")) {
		ddi_prop_free((void *)devtype);
		return (DDI_WALK_CONTINUE);
	}

	ddi_prop_free((void *)devtype);

	/*
	 * Check cpuid, portid, upa-portid (in that order), must
	 * match the cpuid being sought
	 */

	portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "cpuid", -1);

	if (portid == -1)
		portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "portid", -1);

	if (portid == -1)
		portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "upa-portid", -1);

	cpuarg = (plat_ecache_cpu_arg_t *)arg;

	if (portid != cpuarg->cpuid)
		return (DDI_WALK_CONTINUE);

	/*
	 * Found the right CPU, fetch ecache-dimm-label property
	 */

	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "ecache-dimm-label", &dimm_labels, &numlabels)
	    != DDI_PROP_SUCCESS) {
#ifdef DEBUG
		cmn_err(CE_NOTE, "cpuid=%d missing ecache-dimm-label property",
		    portid);
#endif	/* DEBUG */
		return (DDI_WALK_TERMINATE);
	}

	if (cpuarg->dimm < numlabels) {
		/*
		 * Copy the label out for the caller (plat_get_ecacheunum)
		 * to free.  NOTE(review): kmem_alloc with KM_SLEEP should
		 * not return NULL, so the check below looks purely
		 * defensive -- confirm before removing.
		 */
		cpuarg->jnum = kmem_alloc(strlen(dimm_labels[cpuarg->dimm]) + 1,
		    KM_SLEEP);
		if (cpuarg->jnum != (char *)NULL)
			(void) strcpy(cpuarg->jnum, dimm_labels[cpuarg->dimm]);
#ifdef DEBUG
		else
			cmn_err(CE_WARN,
			    "cannot kmem_alloc for ecache dimm label");
#endif	/* DEBUG */
	}

	ddi_prop_free((void *)dimm_labels);
	return (DDI_WALK_TERMINATE);
}
985
986
/*
 * Bit 4 of physical address indicates ecache 0 or 1
 */

#define	ECACHE_DIMM_MASK	0x10

/*
 * plat_get_ecacheunum is called to generate the unum for an ecache error.
 * After some initialization, nearly all of the work is done by ddi_walk_devs
 * and plat_get_ecache_cpu.
 *
 * Returns 0 on success (buf holds the unum, *ustrlen its length),
 * -1 if no DIMM label could be found for the CPU.
 */
int
plat_get_ecacheunum(int cpuid, unsigned long long physaddr, char *buf,
    int buflen, int *ustrlen)
{
	plat_ecache_cpu_arg_t findcpu;
	uint_t expander, slot, proc;

	findcpu.jnum = (char *)NULL;
	findcpu.cpuid = cpuid;

	/*
	 * Bit 4 of physaddr equal 0 maps to E0 and 1 maps to E1
	 * except for Panther and Jaguar where it indicates the reverse
	 */
	if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation) ||
	    IS_JAGUAR(cpunodes[CPU->cpu_id].implementation))
		findcpu.dimm = (physaddr & ECACHE_DIMM_MASK) ? 0 : 1;
	else
		findcpu.dimm = (physaddr & ECACHE_DIMM_MASK) ? 1 : 0;

	/*
	 * Walk the device tree, find this specific CPU, and get the label
	 * for this ecache, returned here in findcpu.jnum
	 */

	ddi_walk_devs(ddi_root_node(), plat_get_ecache_cpu, (void *)&findcpu);

	if (findcpu.jnum == (char *)NULL)
		return (-1);

	expander = STARCAT_CPUID_TO_EXPANDER(cpuid);
	slot = STARCAT_CPUID_TO_BOARDSLOT(cpuid);

	/*
	 * STARCAT_CPUID_TO_PORTID clears the CoreID bit so that
	 * STARCAT_CPUID_TO_AGENT will return a physical proc (0 - 3).
	 */
	proc = STARCAT_CPUID_TO_AGENT(STARCAT_CPUID_TO_PORTID(cpuid));

	/*
	 * NOTE: Any modifications to the snprintf() call below will require
	 * changing plat_log_fruid_error() as well!
	 */
	(void) snprintf(buf, buflen, "%s%u/P%u/E%u J%s", (slot ? "IO" : "SB"),
	    expander, proc, findcpu.dimm, findcpu.jnum);

	*ustrlen = strlen(buf);

	/* plat_get_ecache_cpu allocated the label; free it here */
	kmem_free(findcpu.jnum, strlen(findcpu.jnum) + 1);

	return (0);
}
1051
1052 /*ARGSUSED*/
1053 int
plat_get_mem_unum(int synd_code,uint64_t flt_addr,int flt_bus_id,int flt_in_memory,ushort_t flt_status,char * buf,int buflen,int * lenp)1054 plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
1055 int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
1056 {
1057 int ret;
1058
1059 /*
1060 * check if it's a Memory or an Ecache error.
1061 */
1062 if (flt_in_memory) {
1063 if (p2get_mem_unum != NULL) {
1064 return (p2get_mem_unum(synd_code, P2ALIGN(flt_addr, 8),
1065 buf, buflen, lenp));
1066 } else {
1067 return (ENOTSUP);
1068 }
1069 } else if (flt_status & ECC_ECACHE) {
1070 if ((ret = plat_get_ecacheunum(flt_bus_id,
1071 P2ALIGN(flt_addr, 8), buf, buflen, lenp)) != 0)
1072 return (EIO);
1073 } else {
1074 return (ENOTSUP);
1075 }
1076
1077 return (ret);
1078 }
1079
1080 static int (*ecc_mailbox_msg_func)(plat_ecc_message_type_t, void *) = NULL;
1081
1082 /*
1083 * To keep OS mailbox handling localized, all we do is forward the call to the
1084 * scosmb module (if it is available).
1085 */
1086 int
plat_send_ecc_mailbox_msg(plat_ecc_message_type_t msg_type,void * datap)1087 plat_send_ecc_mailbox_msg(plat_ecc_message_type_t msg_type, void *datap)
1088 {
1089 /*
1090 * find the symbol for the mailbox sender routine in the scosmb module
1091 */
1092 if (ecc_mailbox_msg_func == NULL)
1093 ecc_mailbox_msg_func = (int (*)(plat_ecc_message_type_t,
1094 void *))modgetsymvalue("scosmb_log_ecc_error", 0);
1095
1096 /*
1097 * If the symbol was found, call it. Otherwise, there is not much
1098 * else we can do and console messages will have to suffice.
1099 */
1100 if (ecc_mailbox_msg_func)
1101 return ((*ecc_mailbox_msg_func)(msg_type, datap));
1102 else
1103 return (ENODEV);
1104 }
1105
/*
 * Compose a FRU cpuid from system board, module, and proc numbers.
 */
int
plat_make_fru_cpuid(int sb, int m, int proc)
{
	int cpuid = MAKE_CPUID(sb, m, proc);

	return (cpuid);
}
1111
/*
 * board number for a given proc
 */
int
plat_make_fru_boardnum(int proc)
{
	int boardnum = STARCAT_CPUID_TO_EXPANDER(proc);

	return (boardnum);
}
1120
1121 /*
1122 * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3
1123 * driver giving each platform the opportunity to add platform
1124 * specific label information to the unum for ECC error logging purposes.
1125 */
1126 void
plat_add_mem_unum_label(char * unum,int mcid,int bank,int dimm)1127 plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
1128 {
1129 char new_unum[UNUM_NAMLEN];
1130 uint_t expander = STARCAT_CPUID_TO_EXPANDER(mcid);
1131 uint_t slot = STARCAT_CPUID_TO_BOARDSLOT(mcid);
1132
1133 /*
1134 * STARCAT_CPUID_TO_PORTID clears the CoreID bit so that
1135 * STARCAT_CPUID_TO_AGENT will return a physical proc (0 - 3).
1136 */
1137 uint_t proc = STARCAT_CPUID_TO_AGENT(STARCAT_CPUID_TO_PORTID(mcid));
1138
1139 /*
1140 * NOTE: Any modifications to the two sprintf() calls below will
1141 * require changing plat_log_fruid_error() as well!
1142 */
1143 if (dimm == -1)
1144 (void) snprintf(new_unum, UNUM_NAMLEN, "%s%u/P%u/B%d %s",
1145 (slot ? "IO" : "SB"), expander, proc, (bank & 0x1), unum);
1146 else
1147 (void) snprintf(new_unum, UNUM_NAMLEN, "%s%u/P%u/B%d/D%d %s",
1148 (slot ? "IO" : "SB"), expander,
1149 proc, (bank & 0x1), (dimm & 0x3), unum);
1150
1151 (void) strcpy(unum, new_unum);
1152 }
1153
/*
 * Return the board unum ("SB<n>" or "IO<n>") for the given cpuid.
 * Fills buf, sets *lenp to the resulting string length, and returns 0;
 * returns ENOSPC if buflen is too small to hold the name.
 */
int
plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
	int expander = STARCAT_CPUID_TO_EXPANDER(cpuid);
	int slot = STARCAT_CPUID_TO_BOARDSLOT(cpuid);

	/* Nonzero board slot means an I/O board, otherwise a system board. */
	if (snprintf(buf, buflen, "%s%d", (slot ? "IO" : "SB"), expander) >=
	    buflen)
		return (ENOSPC);

	*lenp = strlen(buf);
	return (0);
}
1168
1169 /*
1170 * This routine is used by the data bearing mondo (DMV) initialization
1171 * routine to determine the number of hardware and software DMV interrupts
1172 * that a platform supports.
1173 */
1174 void
plat_dmv_params(uint_t * hwint,uint_t * swint)1175 plat_dmv_params(uint_t *hwint, uint_t *swint)
1176 {
1177 *hwint = STARCAT_DMV_HWINT;
1178 *swint = 0;
1179 }
1180
1181 /*
1182 * If provided, this function will be called whenever the nodename is updated.
1183 * To keep OS mailbox handling localized, all we do is forward the call to the
1184 * scosmb module (if it is available).
1185 */
1186 void
plat_nodename_set(void)1187 plat_nodename_set(void)
1188 {
1189 void (*nodename_update_func)(uint64_t) = NULL;
1190
1191 /*
1192 * find the symbol for the nodename update routine in the scosmb module
1193 */
1194 nodename_update_func = (void (*)(uint64_t))
1195 modgetsymvalue("scosmb_update_nodename", 0);
1196
1197 /*
1198 * If the symbol was found, call it. Otherwise, log a note (but not to
1199 * the console).
1200 */
1201 if (nodename_update_func != NULL) {
1202 nodename_update_func(0);
1203 } else {
1204 cmn_err(CE_NOTE,
1205 "!plat_nodename_set: scosmb_update_nodename not found\n");
1206 }
1207 }
1208
1209 caddr_t efcode_vaddr = NULL;
1210 caddr_t efcode_paddr = NULL;
1211 /*
1212 * Preallocate enough memory for fcode claims.
1213 */
1214
1215 caddr_t
efcode_alloc(caddr_t alloc_base)1216 efcode_alloc(caddr_t alloc_base)
1217 {
1218 caddr_t efcode_alloc_base = (caddr_t)roundup((uintptr_t)alloc_base,
1219 MMU_PAGESIZE);
1220 caddr_t vaddr;
1221
1222 /*
1223 * allocate the physical memory schizo fcode.
1224 */
1225 if ((vaddr = (caddr_t)BOP_ALLOC(bootops, efcode_alloc_base,
1226 efcode_size, MMU_PAGESIZE)) == NULL)
1227 cmn_err(CE_PANIC, "Cannot allocate Efcode Memory");
1228
1229 efcode_vaddr = vaddr;
1230
1231 return (efcode_alloc_base + efcode_size);
1232 }
1233
1234 caddr_t
plat_startup_memlist(caddr_t alloc_base)1235 plat_startup_memlist(caddr_t alloc_base)
1236 {
1237 caddr_t tmp_alloc_base;
1238
1239 tmp_alloc_base = efcode_alloc(alloc_base);
1240 tmp_alloc_base = (caddr_t)roundup((uintptr_t)tmp_alloc_base,
1241 ecache_alignsize);
1242 return (tmp_alloc_base);
1243 }
1244
1245 /*
1246 * This is a helper function to determine if a given
1247 * node should be considered for a dr operation according
1248 * to predefined dr names. This is accomplished using
1249 * a function defined in drmach module. The drmach module
1250 * owns the definition of dr allowable names.
1251 * Formal Parameter: The name of a device node.
1252 * Expected Return Value: -1, device node name does not map to a valid dr name.
1253 * A value greater or equal to 0, name is valid.
1254 */
1255 int
starcat_dr_name(char * name)1256 starcat_dr_name(char *name)
1257 {
1258 int (*drmach_name2type)(char *) = NULL;
1259
1260 /* Get a pointer to helper function in the dramch module. */
1261 drmach_name2type =
1262 (int (*)(char *))kobj_getsymvalue("drmach_name2type_idx", 0);
1263
1264 if (drmach_name2type == NULL)
1265 return (-1);
1266
1267 return ((*drmach_name2type)(name));
1268 }
1269
1270 void
startup_platform(void)1271 startup_platform(void)
1272 {
1273 /* set per platform constants for mutex backoff */
1274 mutex_backoff_base = 2;
1275 mutex_cap_factor = 64;
1276 }
1277
1278 /*
1279 * KDI functions - used by the in-situ kernel debugger (kmdb) to perform
1280 * platform-specific operations. These functions execute when the world is
1281 * stopped, and as such cannot make any blocking calls, hold locks, etc.
1282 * promif functions are a special case, and may be used.
1283 */
1284
/*
 * KDI claim hook: runs when the debugger takes over, with the world
 * stopped. Enters lbolt debug mode first, then runs a Forth snippet
 * storing OBP_SIG into the sigblock signature words (presumably to
 * signal the SC that OBP/the debugger owns the domain — NOTE(review):
 * confirm against the sigblock protocol docs). Order matters: the
 * lbolt bracket must surround the prom call; see the release hook.
 */
static void
starcat_system_claim(void)
{
	lbolt_debug_entry();

	prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
}
1292
/*
 * KDI release hook: mirror image of starcat_system_claim(). Restores
 * OS_SIG in the sigblock signature words via a Forth snippet, then
 * leaves lbolt debug mode. The statement order is the exact reverse
 * of the claim path so the lbolt bracket encloses the prom call.
 */
static void
starcat_system_release(void)
{
	prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);

	lbolt_debug_return();
}
1300
1301 void
plat_kdi_init(kdi_t * kdi)1302 plat_kdi_init(kdi_t *kdi)
1303 {
1304 kdi->pkdi_system_claim = starcat_system_claim;
1305 kdi->pkdi_system_release = starcat_system_release;
1306 }
1307
1308 /*
1309 * This function returns 1 if large pages for kernel heap are supported
1310 * and 0 otherwise.
1311 *
1312 * Currently we disable lp kmem support if kpr is going to be enabled
1313 * because in the case of large pages hat_add_callback()/hat_delete_callback()
1314 * cause network performance degradation
1315 */
1316 int
plat_lpkmem_is_supported(void)1317 plat_lpkmem_is_supported(void)
1318 {
1319 extern int segkmem_reloc;
1320
1321 if (hat_kpr_enabled && kernel_cage_enable &&
1322 (ncpunode >= 32 || segkmem_reloc == 1))
1323 return (0);
1324
1325 return (1);
1326 }
1327