xref: /onnv-gate/usr/src/uts/sun4u/seattle/os/seattle.c (revision 920:5061227f5943)
1*920Sjbeloro /*
2*920Sjbeloro  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
3*920Sjbeloro  * Use is subject to license terms.
4*920Sjbeloro  */
5*920Sjbeloro 
6*920Sjbeloro #pragma ident	"%Z%%M%	%I%	%E% SMI"
7*920Sjbeloro 
8*920Sjbeloro #include <sys/param.h>
9*920Sjbeloro #include <sys/systm.h>
10*920Sjbeloro #include <sys/sysmacros.h>
11*920Sjbeloro #include <sys/sunddi.h>
12*920Sjbeloro #include <sys/esunddi.h>
13*920Sjbeloro 
14*920Sjbeloro #include <sys/platform_module.h>
15*920Sjbeloro #include <sys/errno.h>
16*920Sjbeloro #include <sys/cpu_sgnblk_defs.h>
17*920Sjbeloro #include <sys/rmc_comm_dp.h>
18*920Sjbeloro #include <sys/rmc_comm_drvintf.h>
19*920Sjbeloro #include <sys/modctl.h>
20*920Sjbeloro #include <sys/lgrp.h>
21*920Sjbeloro #include <sys/memnode.h>
22*920Sjbeloro #include <sys/promif.h>
23*920Sjbeloro 
24*920Sjbeloro /* Anything related to shared i2c access applies to Seattle only */
25*920Sjbeloro #define	SHARED_MI2CV_PATH "/i2c@1f,530000"
26*920Sjbeloro static dev_info_t *shared_mi2cv_dip;
27*920Sjbeloro static kmutex_t mi2cv_mutex;
28*920Sjbeloro 
29*920Sjbeloro int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);
30*920Sjbeloro static void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
31*920Sjbeloro int (*rmc_req_now)(rmc_comm_msg_t *, uint8_t) = NULL;
32*920Sjbeloro 
/*
 * Platform-specific startup hook, called early in boot.
 * Initializes the mutex that serializes access to the i2c bus
 * shared with OBP (see plat_setprop_enter()/plat_setprop_exit()).
 *
 * NOTE(review): the mutex type argument is NULL; an explicit
 * MUTEX_ADAPTIVE would be clearer and is what comparable platform
 * code uses — confirm against mutex_init(9F).
 */
void
startup_platform(void)
{
	mutex_init(&mi2cv_mutex, NULL, NULL, NULL);
}
38*920Sjbeloro 
/*
 * Return the number of spare TSB (Translation Storage Buffer)
 * entries to reserve for this platform: none are needed.
 */
int
set_platform_tsb_spares(void)
{
	return (0);
}
44*920Sjbeloro 
45*920Sjbeloro void
46*920Sjbeloro set_platform_defaults(void)
47*920Sjbeloro {
48*920Sjbeloro 	extern char *tod_module_name;
49*920Sjbeloro 	/* Set appropriate tod module */
50*920Sjbeloro 	if (tod_module_name == NULL)
51*920Sjbeloro 		tod_module_name = "todm5823";
52*920Sjbeloro 
53*920Sjbeloro 	cpu_sgn_func = cpu_sgn_update;
54*920Sjbeloro }
55*920Sjbeloro 
56*920Sjbeloro /*
57*920Sjbeloro  * Definitions for accessing the pci config space of the isa node
58*920Sjbeloro  * of Southbridge.
59*920Sjbeloro  */
60*920Sjbeloro static ddi_acc_handle_t isa_handle = NULL;	/* handle for isa pci space */
61*920Sjbeloro 
62*920Sjbeloro /*
63*920Sjbeloro  * Definition for accessing rmclomv
64*920Sjbeloro  */
65*920Sjbeloro #define	RMCLOMV_PATHNAME	"/pseudo/rmclomv@0"
66*920Sjbeloro 
/*
 * Load and hold the drivers this platform requires at boot: the CPU
 * ('us') driver, the memory controller, the ALOM GPIO lines, the
 * shared i2c nexus, and the RMC communication/environmental stack.
 * The attach/hold ordering below is deliberate.
 */
void
load_platform_drivers(void)
{
	dev_info_t	*rmclomv_dip;
	/*
	 * It is OK to return error because 'us' driver is not available
	 * in all clusters (e.g. missing in Core cluster).
	 */
	(void) i_ddi_attach_hw_nodes("us");


	/*
	 * mc-us3i must stay loaded for plat_get_mem_unum()
	 */
	if (i_ddi_attach_hw_nodes("mc-us3i") != DDI_SUCCESS)
		cmn_err(CE_WARN, "mc-us3i driver failed to install");
	(void) ddi_hold_driver(ddi_name_to_major("mc-us3i"));

	/*
	 * load the GPIO driver for the ALOM reset and watchdog lines
	 */
	if (i_ddi_attach_hw_nodes("pmugpio") != DDI_SUCCESS)
		cmn_err(CE_WARN, "pmugpio failed to install");
	else {
		extern int watchdog_enable, watchdog_available;
		extern int disable_watchdog_on_exit;

		/*
		 * Disable an active h/w watchdog timer upon exit to OBP.
		 */
		disable_watchdog_on_exit = 1;

		/* The ALOM h/w watchdog is present and armed by default. */
		watchdog_enable = 1;
		watchdog_available = 1;
	}
	(void) ddi_hold_driver(ddi_name_to_major("pmugpio"));

	/*
	 * Figure out which mi2cv dip is shared with OBP for the nvram
	 * device, so the lock can be acquired.
	 */
	shared_mi2cv_dip = e_ddi_hold_devi_by_path(SHARED_MI2CV_PATH, 0);
	/*
	 * Load the environmentals driver (rmclomv)
	 *
	 * We need this driver to handle events from the RMC when state
	 * changes occur in the environmental data.
	 */
	if (i_ddi_attach_hw_nodes("rmc_comm") != DDI_SUCCESS) {
		cmn_err(CE_WARN, "rmc_comm failed to install");
	} else {
		(void) ddi_hold_driver(ddi_name_to_major("rmc_comm"));

		rmclomv_dip = e_ddi_hold_devi_by_path(RMCLOMV_PATHNAME, 0);
		if (rmclomv_dip == NULL) {
			cmn_err(CE_WARN, "Could not install rmclomv driver\n");
		}
	}

	/*
	 * create a handle to the rmc_comm_request_nowait() function
	 * inside the rmc_comm module.
	 *
	 * The Seattle/Boston todm5823 driver will use this handle to
	 * use the rmc_comm_request_nowait() function to send time/date
	 * updates to ALOM.
	 */
	rmc_req_now = (int (*)(rmc_comm_msg_t *, uint8_t))
		modgetsymvalue("rmc_comm_request_nowait", 0);
}
137*920Sjbeloro 
138*920Sjbeloro /*
139*920Sjbeloro  * This routine is needed if a device error or timeout occurs before the
140*920Sjbeloro  * driver is loaded.
141*920Sjbeloro  */
142*920Sjbeloro /*ARGSUSED*/
143*920Sjbeloro int
144*920Sjbeloro plat_ide_chipreset(dev_info_t *dip, int chno)
145*920Sjbeloro {
146*920Sjbeloro 	int	ret = DDI_SUCCESS;
147*920Sjbeloro 
148*920Sjbeloro 	if (isa_handle == NULL) {
149*920Sjbeloro 		return (DDI_FAILURE);
150*920Sjbeloro 	}
151*920Sjbeloro 
152*920Sjbeloro 	/*
153*920Sjbeloro 	 * This will be filled in with the reset logic
154*920Sjbeloro 	 * for the ULI1573 when that becomes available.
155*920Sjbeloro 	 * currently this is just a stub.
156*920Sjbeloro 	 */
157*920Sjbeloro 	return (ret);
158*920Sjbeloro }
159*920Sjbeloro 
160*920Sjbeloro 
/*
 * Dynamic CPU power-on is not supported on this platform.
 */
/*ARGSUSED*/
int
plat_cpu_poweron(struct cpu *cp)
{
	return (ENOTSUP);
}
167*920Sjbeloro 
/*
 * Dynamic CPU power-off is not supported on this platform.
 */
/*ARGSUSED*/
int
plat_cpu_poweroff(struct cpu *cp)
{
	return (ENOTSUP);
}
174*920Sjbeloro 
/*
 * Per-memnode freelist processing hook; no special handling is
 * required on this platform.
 */
/*ARGSUSED*/
void
plat_freelist_process(int mnode)
{
}
180*920Sjbeloro 
/*
 * Modules that must be loaded for this platform (shared i2c nexus
 * and GPIO expander); NULL-terminated.
 */
char *platform_module_list[] = {
	"mi2cv",
	"pca9556",
	NULL
};
186*920Sjbeloro 
/*
 * Time-of-day chip fault notification hook; this platform takes
 * no action on TOD faults.
 */
/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{
}
192*920Sjbeloro 
193*920Sjbeloro /*ARGSUSED*/
194*920Sjbeloro int
195*920Sjbeloro plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
196*920Sjbeloro     int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
197*920Sjbeloro {
198*920Sjbeloro 	if (flt_in_memory && (p2get_mem_unum != NULL))
199*920Sjbeloro 		return (p2get_mem_unum(synd_code, P2ALIGN(flt_addr, 8),
200*920Sjbeloro 		    buf, buflen, lenp));
201*920Sjbeloro 	else
202*920Sjbeloro 		return (ENOTSUP);
203*920Sjbeloro }
204*920Sjbeloro 
/*
 * Return the name of the board holding the given CPU.  On this
 * platform every CPU lives on the motherboard, so the unum is always
 * "MB".  Returns 0 on success with *lenp set to the string length,
 * or ENOSPC if the buffer cannot hold the name.
 */
/*ARGSUSED*/
int
plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
	int n;

	n = snprintf(buf, buflen, "MB");
	if (n >= buflen)
		return (ENOSPC);

	*lenp = strlen(buf);
	return (0);
}
216*920Sjbeloro 
217*920Sjbeloro /*
218*920Sjbeloro  * Our nodename has been set, pass it along to the RMC.
219*920Sjbeloro  */
220*920Sjbeloro void
221*920Sjbeloro plat_nodename_set(void)
222*920Sjbeloro {
223*920Sjbeloro 	rmc_comm_msg_t	req;	/* request */
224*920Sjbeloro 	int (*rmc_req_res)(rmc_comm_msg_t *, rmc_comm_msg_t *, time_t) = NULL;
225*920Sjbeloro 
226*920Sjbeloro 	/*
227*920Sjbeloro 	 * find the symbol for the mailbox routine
228*920Sjbeloro 	 */
229*920Sjbeloro 	rmc_req_res = (int (*)(rmc_comm_msg_t *, rmc_comm_msg_t *, time_t))
230*920Sjbeloro 		modgetsymvalue("rmc_comm_request_response", 0);
231*920Sjbeloro 
232*920Sjbeloro 	if (rmc_req_res == NULL) {
233*920Sjbeloro 		return;
234*920Sjbeloro 	}
235*920Sjbeloro 
236*920Sjbeloro 	/*
237*920Sjbeloro 	 * construct the message telling the RMC our nodename
238*920Sjbeloro 	 */
239*920Sjbeloro 	req.msg_type = DP_SET_CPU_NODENAME;
240*920Sjbeloro 	req.msg_len = strlen(utsname.nodename) + 1;
241*920Sjbeloro 	req.msg_bytes = 0;
242*920Sjbeloro 	req.msg_buf = (caddr_t)utsname.nodename;
243*920Sjbeloro 
244*920Sjbeloro 	/*
245*920Sjbeloro 	 * ship it
246*920Sjbeloro 	 */
247*920Sjbeloro 	(void) (rmc_req_res)(&req, NULL, 2000);
248*920Sjbeloro }
249*920Sjbeloro 
/* Last system-wide "signature" sent to the RMC (see cpu_sgn_update()). */
sig_state_t current_sgn;
251*920Sjbeloro 
252*920Sjbeloro /*
253*920Sjbeloro  * cpu signatures - we're only interested in the overall system
254*920Sjbeloro  * "signature" on this platform - not individual cpu signatures
255*920Sjbeloro  */
256*920Sjbeloro /*ARGSUSED*/
257*920Sjbeloro static void
258*920Sjbeloro cpu_sgn_update(ushort_t sig, uchar_t state, uchar_t sub_state, int cpuid)
259*920Sjbeloro {
260*920Sjbeloro 	dp_cpu_signature_t signature;
261*920Sjbeloro 	rmc_comm_msg_t	req;	/* request */
262*920Sjbeloro 	int (*rmc_req_res)(rmc_comm_msg_t *, rmc_comm_msg_t *, time_t) = NULL;
263*920Sjbeloro 	int (*rmc_req_now)(rmc_comm_msg_t *, uint8_t) = NULL;
264*920Sjbeloro 
265*920Sjbeloro 
266*920Sjbeloro 	/*
267*920Sjbeloro 	 * Differentiate a panic reboot from a non-panic reboot in the
268*920Sjbeloro 	 * setting of the substate of the signature.
269*920Sjbeloro 	 *
270*920Sjbeloro 	 * If the new substate is REBOOT and we're rebooting due to a panic,
271*920Sjbeloro 	 * then set the new substate to a special value indicating a panic
272*920Sjbeloro 	 * reboot, SIGSUBST_PANIC_REBOOT.
273*920Sjbeloro 	 *
274*920Sjbeloro 	 * A panic reboot is detected by a current (previous) signature
275*920Sjbeloro 	 * state of SIGST_EXIT, and a new signature substate of SIGSUBST_REBOOT.
276*920Sjbeloro 	 * The domain signature state SIGST_EXIT is used as the panic flow
277*920Sjbeloro 	 * progresses.
278*920Sjbeloro 	 *
279*920Sjbeloro 	 * At the end of the panic flow, the reboot occurs but we should know
280*920Sjbeloro 	 * one that was involuntary, something that may be quite useful to know
281*920Sjbeloro 	 * at OBP level.
282*920Sjbeloro 	 */
283*920Sjbeloro 	if (state == SIGST_EXIT && sub_state == SIGSUBST_REBOOT) {
284*920Sjbeloro 		if (current_sgn.state_t.state == SIGST_EXIT &&
285*920Sjbeloro 		    current_sgn.state_t.sub_state != SIGSUBST_REBOOT)
286*920Sjbeloro 			sub_state = SIGSUBST_PANIC_REBOOT;
287*920Sjbeloro 	}
288*920Sjbeloro 
289*920Sjbeloro 	/*
290*920Sjbeloro 	 * offline and detached states only apply to a specific cpu
291*920Sjbeloro 	 * so ignore them.
292*920Sjbeloro 	 */
293*920Sjbeloro 	if (state == SIGST_OFFLINE || state == SIGST_DETACHED) {
294*920Sjbeloro 		return;
295*920Sjbeloro 	}
296*920Sjbeloro 
297*920Sjbeloro 	current_sgn.signature = CPU_SIG_BLD(sig, state, sub_state);
298*920Sjbeloro 
299*920Sjbeloro 	/*
300*920Sjbeloro 	 * find the symbol for the mailbox routine
301*920Sjbeloro 	 */
302*920Sjbeloro 	rmc_req_res = (int (*)(rmc_comm_msg_t *, rmc_comm_msg_t *, time_t))
303*920Sjbeloro 		modgetsymvalue("rmc_comm_request_response", 0);
304*920Sjbeloro 	if (rmc_req_res == NULL) {
305*920Sjbeloro 		return;
306*920Sjbeloro 	}
307*920Sjbeloro 
308*920Sjbeloro 	/*
309*920Sjbeloro 	 * find the symbol for the mailbox routine
310*920Sjbeloro 	 */
311*920Sjbeloro 	rmc_req_now = (int (*)(rmc_comm_msg_t *, uint8_t))
312*920Sjbeloro 		modgetsymvalue("rmc_comm_request_nowait", 0);
313*920Sjbeloro 	if (rmc_req_now == NULL) {
314*920Sjbeloro 		return;
315*920Sjbeloro 	}
316*920Sjbeloro 
317*920Sjbeloro 	signature.cpu_id = -1;
318*920Sjbeloro 	signature.sig = sig;
319*920Sjbeloro 	signature.states = state;
320*920Sjbeloro 	signature.sub_state = sub_state;
321*920Sjbeloro 	req.msg_type = DP_SET_CPU_SIGNATURE;
322*920Sjbeloro 	req.msg_len = (int)(sizeof (signature));
323*920Sjbeloro 	req.msg_bytes = 0;
324*920Sjbeloro 	req.msg_buf = (caddr_t)&signature;
325*920Sjbeloro 
326*920Sjbeloro 	/*
327*920Sjbeloro 	 * ship it
328*920Sjbeloro 	 * - note that for panic or reboot need to send with nowait/urgent
329*920Sjbeloro 	 */
330*920Sjbeloro 	if (state == SIGST_EXIT && (sub_state == SIGSUBST_HALT ||
331*920Sjbeloro 	    sub_state == SIGSUBST_REBOOT || sub_state == SIGSUBST_ENVIRON ||
332*920Sjbeloro 	    sub_state == SIGSUBST_PANIC_REBOOT))
333*920Sjbeloro 		(void) (rmc_req_now)(&req, RMC_COMM_DREQ_URGENT);
334*920Sjbeloro 	else
335*920Sjbeloro 		(void) (rmc_req_res)(&req, NULL, 2000);
336*920Sjbeloro }
337*920Sjbeloro 
338*920Sjbeloro /*
339*920Sjbeloro  * Fiesta support for lgroups.
340*920Sjbeloro  *
341*920Sjbeloro  * On fiesta platform, an lgroup platform handle == CPU id
342*920Sjbeloro  */
343*920Sjbeloro 
344*920Sjbeloro /*
345*920Sjbeloro  * Macro for extracting the CPU number from the CPU id
346*920Sjbeloro  */
347*920Sjbeloro #define	CPUID_TO_LGRP(id)	((id) & 0x7)
348*920Sjbeloro #define	PLATFORM_MC_SHIFT	36
349*920Sjbeloro 
350*920Sjbeloro /*
351*920Sjbeloro  * Return the platform handle for the lgroup containing the given CPU
352*920Sjbeloro  */
353*920Sjbeloro void *
354*920Sjbeloro plat_lgrp_cpu_to_hand(processorid_t id)
355*920Sjbeloro {
356*920Sjbeloro 	return ((void *) CPUID_TO_LGRP(id));
357*920Sjbeloro }
358*920Sjbeloro 
359*920Sjbeloro /*
360*920Sjbeloro  * Platform specific lgroup initialization
361*920Sjbeloro  */
362*920Sjbeloro void
363*920Sjbeloro plat_lgrp_init(void)
364*920Sjbeloro {
365*920Sjbeloro 	pnode_t		curnode;
366*920Sjbeloro 	char		tmp_name[MAXSYSNAME];
367*920Sjbeloro 	int		portid;
368*920Sjbeloro 	int		cpucnt = 0;
369*920Sjbeloro 	int		max_portid = -1;
370*920Sjbeloro 	extern uint32_t lgrp_expand_proc_thresh;
371*920Sjbeloro 	extern uint32_t lgrp_expand_proc_diff;
372*920Sjbeloro 	extern pgcnt_t	lgrp_mem_free_thresh;
373*920Sjbeloro 	extern uint32_t lgrp_loadavg_tolerance;
374*920Sjbeloro 	extern uint32_t lgrp_loadavg_max_effect;
375*920Sjbeloro 	extern uint32_t lgrp_load_thresh;
376*920Sjbeloro 	extern lgrp_mem_policy_t  lgrp_mem_policy_root;
377*920Sjbeloro 
378*920Sjbeloro 	/*
379*920Sjbeloro 	 * Count the number of CPUs installed to determine if
380*920Sjbeloro 	 * NUMA optimization should be enabled or not.
381*920Sjbeloro 	 *
382*920Sjbeloro 	 * All CPU nodes reside in the root node and have a
383*920Sjbeloro 	 * device type "cpu".
384*920Sjbeloro 	 */
385*920Sjbeloro 	curnode = prom_rootnode();
386*920Sjbeloro 	for (curnode = prom_childnode(curnode); curnode;
387*920Sjbeloro 	    curnode = prom_nextnode(curnode)) {
388*920Sjbeloro 		bzero(tmp_name, MAXSYSNAME);
389*920Sjbeloro 		if (prom_getproplen(curnode, OBP_NAME) < MAXSYSNAME) {
390*920Sjbeloro 			if (prom_getprop(curnode, OBP_NAME,
391*920Sjbeloro 			    (caddr_t)tmp_name) == -1 || prom_getprop(curnode,
392*920Sjbeloro 			    OBP_DEVICETYPE, tmp_name) == -1 || strcmp(tmp_name,
393*920Sjbeloro 			    "cpu") != 0)
394*920Sjbeloro 			continue;
395*920Sjbeloro 
396*920Sjbeloro 			cpucnt++;
397*920Sjbeloro 			if (prom_getprop(curnode, "portid", (caddr_t)&portid) !=
398*920Sjbeloro 			    -1 && portid > max_portid)
399*920Sjbeloro 				max_portid = portid;
400*920Sjbeloro 		}
401*920Sjbeloro 	}
402*920Sjbeloro 	if (cpucnt <= 1)
403*920Sjbeloro 		max_mem_nodes = 1;
404*920Sjbeloro 	else if (max_portid >= 0 && max_portid < MAX_MEM_NODES)
405*920Sjbeloro 		max_mem_nodes = max_portid + 1;
406*920Sjbeloro 
407*920Sjbeloro 	/*
408*920Sjbeloro 	 * Set tuneables for fiesta architecture
409*920Sjbeloro 	 *
410*920Sjbeloro 	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
411*920Sjbeloro 	 * this process is currently running on before considering
412*920Sjbeloro 	 * expanding threads to another lgroup.
413*920Sjbeloro 	 *
414*920Sjbeloro 	 * lgrp_expand_proc_diff determines how much less the remote lgroup
415*920Sjbeloro 	 * must be loaded before expanding to it.
416*920Sjbeloro 	 *
417*920Sjbeloro 	 * Optimize for memory bandwidth by spreading multi-threaded
418*920Sjbeloro 	 * program to different lgroups.
419*920Sjbeloro 	 */
420*920Sjbeloro 	lgrp_expand_proc_thresh = lgrp_loadavg_max_effect - 1;
421*920Sjbeloro 	lgrp_expand_proc_diff = lgrp_loadavg_max_effect / 2;
422*920Sjbeloro 	lgrp_loadavg_tolerance = lgrp_loadavg_max_effect / 2;
423*920Sjbeloro 	lgrp_mem_free_thresh = 1;	/* home lgrp must have some memory */
424*920Sjbeloro 	lgrp_expand_proc_thresh = lgrp_loadavg_max_effect - 1;
425*920Sjbeloro 	lgrp_mem_policy_root = LGRP_MEM_POLICY_NEXT;
426*920Sjbeloro 	lgrp_load_thresh = 0;
427*920Sjbeloro 
428*920Sjbeloro 	mem_node_pfn_shift = PLATFORM_MC_SHIFT - MMU_PAGESHIFT;
429*920Sjbeloro }
430*920Sjbeloro 
431*920Sjbeloro /*
432*920Sjbeloro  * Return latency between "from" and "to" lgroups
433*920Sjbeloro  *
434*920Sjbeloro  * This latency number can only be used for relative comparison
435*920Sjbeloro  * between lgroups on the running system, cannot be used across platforms,
436*920Sjbeloro  * and may not reflect the actual latency.  It is platform and implementation
437*920Sjbeloro  * specific, so platform gets to decide its value.  It would be nice if the
438*920Sjbeloro  * number was at least proportional to make comparisons more meaningful though.
439*920Sjbeloro  * NOTE: The numbers below are supposed to be load latencies for uncached
440*920Sjbeloro  * memory divided by 10.
441*920Sjbeloro  */
442*920Sjbeloro int
443*920Sjbeloro plat_lgrp_latency(void *from, void *to)
444*920Sjbeloro {
445*920Sjbeloro 	/*
446*920Sjbeloro 	 * Return remote latency when there are more than two lgroups
447*920Sjbeloro 	 * (root and child) and getting latency between two different
448*920Sjbeloro 	 * lgroups or root is involved
449*920Sjbeloro 	 */
450*920Sjbeloro 	if (lgrp_optimizations() && (from != to || from ==
451*920Sjbeloro 	    (void *) LGRP_DEFAULT_HANDLE || to == (void *) LGRP_DEFAULT_HANDLE))
452*920Sjbeloro 		return (17);
453*920Sjbeloro 	else
454*920Sjbeloro 		return (12);
455*920Sjbeloro }
456*920Sjbeloro 
/*
 * Map a physical page frame number to its memory node.  Only called
 * when more than one memory node is configured; the node index is
 * taken from the high-order bits of the pfn (see PLATFORM_MC_SHIFT).
 */
int
plat_pfn_to_mem_node(pfn_t pfn)
{
	ASSERT(max_mem_nodes > 1);
	return (pfn >> mem_node_pfn_shift);
}
463*920Sjbeloro 
464*920Sjbeloro /*
465*920Sjbeloro  * Assign memnode to lgroups
466*920Sjbeloro  */
467*920Sjbeloro void
468*920Sjbeloro plat_fill_mc(pnode_t nodeid)
469*920Sjbeloro {
470*920Sjbeloro 	int		portid;
471*920Sjbeloro 
472*920Sjbeloro 	/*
473*920Sjbeloro 	 * Memory controller portid == global CPU id
474*920Sjbeloro 	 */
475*920Sjbeloro 	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) == -1) ||
476*920Sjbeloro 	    (portid < 0))
477*920Sjbeloro 		return;
478*920Sjbeloro 
479*920Sjbeloro 	if (portid < max_mem_nodes)
480*920Sjbeloro 		plat_assign_lgrphand_to_mem_node((lgrp_handle_t)portid, portid);
481*920Sjbeloro }
482*920Sjbeloro 
483*920Sjbeloro /* ARGSUSED */
484*920Sjbeloro void
485*920Sjbeloro plat_build_mem_nodes(u_longlong_t *list, size_t nelems)
486*920Sjbeloro {
487*920Sjbeloro 	size_t	elem;
488*920Sjbeloro 	pfn_t	basepfn;
489*920Sjbeloro 	pgcnt_t	npgs;
490*920Sjbeloro 
491*920Sjbeloro 	/*
492*920Sjbeloro 	 * Boot install lists are arranged <addr, len>, <addr, len>, ...
493*920Sjbeloro 	 */
494*920Sjbeloro 	for (elem = 0; elem < nelems; elem += 2) {
495*920Sjbeloro 		basepfn = btop(list[elem]);
496*920Sjbeloro 		npgs = btop(list[elem+1]);
497*920Sjbeloro 		mem_node_add_slice(basepfn, basepfn + npgs - 1);
498*920Sjbeloro 	}
499*920Sjbeloro }
500*920Sjbeloro 
/*
 * Common locking enter code: acquire the lock that serializes
 * property updates on the i2c bus shared with OBP.
 */
void
plat_setprop_enter(void)
{
	mutex_enter(&mi2cv_mutex);
}
509*920Sjbeloro 
/*
 * Common locking exit code: release the shared i2c bus lock taken
 * by plat_setprop_enter().
 */
void
plat_setprop_exit(void)
{
	mutex_exit(&mi2cv_mutex);
}
518*920Sjbeloro 
519*920Sjbeloro /*
520*920Sjbeloro  * Called by mi2cv driver
521*920Sjbeloro  */
522*920Sjbeloro void
523*920Sjbeloro plat_shared_i2c_enter(dev_info_t *i2cnexus_dip)
524*920Sjbeloro {
525*920Sjbeloro 	if (i2cnexus_dip == shared_mi2cv_dip) {
526*920Sjbeloro 		plat_setprop_enter();
527*920Sjbeloro 	}
528*920Sjbeloro }
529*920Sjbeloro 
530*920Sjbeloro /*
531*920Sjbeloro  * Called by mi2cv driver
532*920Sjbeloro  */
533*920Sjbeloro void
534*920Sjbeloro plat_shared_i2c_exit(dev_info_t *i2cnexus_dip)
535*920Sjbeloro {
536*920Sjbeloro 	if (i2cnexus_dip == shared_mi2cv_dip) {
537*920Sjbeloro 		plat_setprop_exit();
538*920Sjbeloro 	}
539*920Sjbeloro }
540*920Sjbeloro /*
541*920Sjbeloro  * Called by todm5823 driver
542*920Sjbeloro  */
543*920Sjbeloro void
544*920Sjbeloro plat_rmc_comm_req(struct rmc_comm_msg *request)
545*920Sjbeloro {
546*920Sjbeloro 	if (rmc_req_now)
547*920Sjbeloro 		(void) rmc_req_now(request, 0);
548*920Sjbeloro }
549