/*	$NetBSD: machdep.c,v 1.45 2023/10/06 11:45:16 skrll Exp $	*/

/*-
 * Copyright (c) 2003,2004 Marcel Moolenaar
 * Copyright (c) 2000,2001 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
/* __FBSDID("$FreeBSD: src/sys/ia64/ia64/machdep.c,v 1.203 2005/10/14 12:43:45 davidxu Exp $"); */

#include "opt_modular.h"

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/exec.h>
#include <sys/ksyms.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/systm.h>

#include <machine/ia64_cpu.h>
#include <machine/pal.h>
#include <machine/sal.h>
#include <machine/ssc.h>

#include <machine/md_var.h>
#include <machine/fpu.h>
#include <machine/efi.h>
#include <machine/bootinfo.h>
#include <machine/vmparam.h>

#include <machine/atomic.h>
#include <machine/pte.h>
#include <machine/pcb.h>

#include <uvm/uvm.h>

#include <dev/cons.h>
#include <dev/mm.h>

#ifdef DEBUG
#define DPRINTF(fmt, args...)	printf("%s: " fmt, __func__, ##args)
#else
#define DPRINTF(fmt, args...)	((void)0)
#endif

/* the following is used externally (sysctl_hw) */
char machine[] = MACHINE;		/* from <machine/param.h> */
char machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */

#if NKSYMS || defined(DDB) || defined(MODULAR)
/* start and end of kernel symbol table */
void *ksym_start, *ksym_end;
vaddr_t ia64_unwindtab;
vsize_t ia64_unwindtablen;
#endif

struct vm_map *phys_map = NULL;

void *msgbufaddr;

vaddr_t kernstart, kernend;

/* XXX: Move this stuff to cpu_info */

uint64_t processor_frequency;
uint64_t bus_frequency;
uint64_t itc_frequency;
uint64_t ia64_pal_base;
uint64_t ia64_pal_size;
uint64_t ia64_port_base;

int ia64_sync_icache_needed = 0;

extern uint64_t ia64_gateway_page[];

uint64_t pa_bootinfo;
struct bootinfo bootinfo;

extern vaddr_t kstack, kstack_top;
extern vaddr_t kernel_text, end;

struct fpswa_iface *fpswa_iface;

/*
 * Machine-dependent startup code
 */
void
cpu_startup(void)
{
        vaddr_t minaddr, maxaddr;

        /* XXX: startrtclock(); */
#ifdef PERFMON
        perfmon_init();
#endif
        printf("Detected memory = %ld (%ld MB)\n", ia64_ptob(physmem),
            ptoa(physmem) / 1048576);

        /*
         * Display any holes after the first chunk of extended memory.
         */
        if (bootverbose) {
                int sizetmp, vm_nphysseg;
                uvm_physseg_t upm;

                printf("Physical memory chunk(s):\n");
                for (vm_nphysseg = 0, upm = uvm_physseg_get_first();
                     uvm_physseg_valid_p(upm);
                     vm_nphysseg++, upm = uvm_physseg_get_next(upm)) {
                        sizetmp = uvm_physseg_get_avail_end(upm) -
                            uvm_physseg_get_avail_start(upm);

                        printf("0x%016lx - 0x%016lx, %ld bytes (%d pages)\n",
                            ptoa(uvm_physseg_get_avail_start(upm)),
                            ptoa(uvm_physseg_get_avail_end(upm)) - 1,
                            ptoa(sizetmp), sizetmp);
                }
                printf("Total number of segments: vm_nphysseg = %d\n",
                    vm_nphysseg);
        }

        minaddr = 0;

        /*
         * Allocate a submap for physio
         */
        phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
            VM_PHYS_SIZE, 0, false, NULL);

        /*
         * No need to allocate an mbuf cluster submap.  Mbuf clusters
         * are allocated via the pool allocator, and we use RR7 to
         * map those pages.
         */

        banner();

        if (fpswa_iface == NULL)
                printf("Warning: no FPSWA package supplied\n");
        else
                printf("FPSWA Revision = 0x%lx, Entry = %p\n",
                    (long)fpswa_iface->if_rev, (void *)fpswa_iface->if_fpswa);

        /*
         * Traverse the MADT to discover IOSAPIC and Local SAPIC
         * information.
         */
        ia64_probe_sapics();
}

void
cpu_reboot(int howto, char *bootstr)
{

        efi_reset_system();

        panic("XXX: Reset didn't work?");
        /*NOTREACHED*/
}

bool
cpu_intr_p(void)
{
        return false;
}

/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first PAGE_SIZE of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf(void)
{
        return;
}

void
map_vhpt(uintptr_t vhpt)
{
        pt_entry_t pte;
        uint64_t psr;

        /*
         * XXX read pmap_vhpt_log2size before any memory translation
         * instructions to avoid "Data Nested TLB faults".  Not
         * exactly sure why this is needed with GCC 7.4
         */
        register uint64_t log2size = pmap_vhpt_log2size << 2;

        pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
            PTE_PL_KERN | PTE_AR_RW;
        pte |= vhpt & PTE_PPN_MASK;
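
        /*
         * Pin the VHPT with a data translation register: purge any
         * overlapping translation first, then, with interrupts and
         * interruption collection disabled, load IFA/ITIR and insert
         * the mapping into dtr[3].
         */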
        __asm __volatile("ptr.d %0,%1" :: "r"(vhpt), "r"(log2size));

        __asm __volatile("mov %0=psr" : "=r"(psr));
        __asm __volatile("rsm psr.ic|psr.i");
        ia64_srlz_i();
        ia64_set_ifa(vhpt);
        ia64_set_itir(log2size);
        ia64_srlz_d();
        __asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(pte));
        __asm __volatile("mov psr.l=%0" :: "r" (psr));
        ia64_srlz_i();
}

void
map_pal_code(void)
{
        pt_entry_t pte;
        uint64_t psr;

        if (ia64_pal_base == 0)
                return;

        pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
            PTE_PL_KERN | PTE_AR_RWX;
        pte |= ia64_pal_base & PTE_PPN_MASK;
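
        /*
         * PAL is called with translation enabled, so pin its code
         * with both an instruction and a data translation register
         * (slot 1 of each), covering one identity page in region 7.
         */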
        __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
            "r"(IA64_PHYS_TO_RR7(ia64_pal_base)),
            "r"(IA64_ID_PAGE_SHIFT<<2));

        __asm __volatile("mov %0=psr" : "=r"(psr));
        __asm __volatile("rsm psr.ic|psr.i");
        ia64_srlz_i();
        ia64_set_ifa(IA64_PHYS_TO_RR7(ia64_pal_base));
        ia64_set_itir(IA64_ID_PAGE_SHIFT << 2);
        ia64_srlz_d();
        __asm __volatile("itr.d dtr[%0]=%1" ::
            "r"(1), "r"(*(pt_entry_t *)&pte));
        ia64_srlz_d();
        __asm __volatile("itr.i itr[%0]=%1" ::
            "r"(1), "r"(*(pt_entry_t *)&pte));
        __asm __volatile("mov psr.l=%0" :: "r" (psr));
        ia64_srlz_i();
}

void
map_gateway_page(void)
{
        pt_entry_t pte;
        uint64_t psr;

        pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
            PTE_PL_KERN | PTE_AR_X_RX;
        pte |= (uint64_t)ia64_gateway_page & PTE_PPN_MASK;
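
        /*
         * Pin the gateway page at the top of the user address space
         * in instruction and data translation register slot 3; epc
         * syscalls enter and leave the kernel through this page.
         */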
        __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
            "r"(VM_MAX_ADDRESS), "r"(PAGE_SHIFT << 2));

        __asm __volatile("mov %0=psr" : "=r"(psr));
        __asm __volatile("rsm psr.ic|psr.i");
        ia64_srlz_i();
        ia64_set_ifa(VM_MAX_ADDRESS);
        ia64_set_itir(PAGE_SHIFT << 2);
        ia64_srlz_d();
        __asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(*(pt_entry_t*)&pte));
        ia64_srlz_d();
        __asm __volatile("itr.i itr[%0]=%1" :: "r"(3), "r"(*(pt_entry_t*)&pte));
        __asm __volatile("mov psr.l=%0" :: "r" (psr));
        ia64_srlz_i();

        /* Expose the mapping to userland in ar.k5 */
        ia64_set_k5(VM_MAX_ADDRESS);
}

static void
calculate_frequencies(void)
{
        struct ia64_sal_result sal;
        struct ia64_pal_result pal;
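
        /*
         * SAL_FREQ_BASE gives the platform clock frequency in Hz.
         * PAL_FREQ_RATIOS gives the processor, bus and ITC frequencies
         * as ratios of that clock, each packed as a 32-bit numerator
         * in the upper half and a 32-bit denominator in the lower
         * half of a 64-bit result.
         */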
        sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
        pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);
        if (sal.sal_status == 0 && pal.pal_status == 0) {
                if (bootverbose) {
                        printf("Platform clock frequency %ld Hz\n",
                            sal.sal_result[0]);
                        printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, "
                            "ITC ratio %ld/%ld\n",
                            pal.pal_result[0] >> 32,
                            pal.pal_result[0] & ((1L << 32) - 1),
                            pal.pal_result[1] >> 32,
                            pal.pal_result[1] & ((1L << 32) - 1),
                            pal.pal_result[2] >> 32,
                            pal.pal_result[2] & ((1L << 32) - 1));
                }
                processor_frequency =
                    sal.sal_result[0] * (pal.pal_result[0] >> 32)
                    / (pal.pal_result[0] & ((1L << 32) - 1));
                bus_frequency =
                    sal.sal_result[0] * (pal.pal_result[1] >> 32)
                    / (pal.pal_result[1] & ((1L << 32) - 1));
                itc_frequency =
                    sal.sal_result[0] * (pal.pal_result[2] >> 32)
                    / (pal.pal_result[2] & ((1L << 32) - 1));
        }
}

/* XXX: Don't allocate 'ci' on stack. */
register struct cpu_info *ci __asm__("r13");

struct ia64_init_return
ia64_init(void)
{
        struct ia64_init_return ret;
        paddr_t kernstartpfn, kernendpfn, pfn0, pfn1;
        struct pcb *pcb0;
        struct efi_md *md;
        vaddr_t v;

        /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */
        ia64_set_fpsr(IA64_FPSR_DEFAULT);

        /*
         * Region 6 is direct mapped UC and region 7 is direct mapped
         * WB.  The details of this are controlled by the Alt {I,D}TLB
         * handlers.  Here we just make sure that they have the largest
         * possible page size to minimise TLB usage.
         */
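        /*
         * Region register layout: the region ID occupies bits 8 and
         * up, the preferred page size sits in bits 7:2 and bit 0
         * enables the VHPT walker (left clear here).
         */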
        ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (LOG2_ID_PAGE_SIZE << 2));
        ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (LOG2_ID_PAGE_SIZE << 2));
        ia64_srlz_d();

        /*
         * TODO: Get critical system information (if possible, from the
         * information provided by the boot program).
         */

        /*
         * pa_bootinfo is the physical address of the bootinfo block as
         * passed to us by the loader and set in locore.s.
         */
        bootinfo = *(struct bootinfo *)(IA64_PHYS_TO_RR7(pa_bootinfo));

        if (bootinfo.bi_magic != BOOTINFO_MAGIC || bootinfo.bi_version != 1) {
                memset(&bootinfo, 0, sizeof(bootinfo));
                bootinfo.bi_kernend = (vaddr_t) round_page((vaddr_t)&end);
        }

        /*
         * Look for the I/O ports first - we need them for console
         * probing.
         */
        for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
                switch (md->md_type) {
                case EFI_MD_TYPE_IOPORT:
                        ia64_port_base = IA64_PHYS_TO_RR6(md->md_phys);
                        break;
                case EFI_MD_TYPE_PALCODE:
                        ia64_pal_base = md->md_phys;
                        break;
                }
        }

        /*
         * XXX: We need to figure out whether/how much of the FreeBSD
         * getenv/setenv stuff we need.  The info we get from ski
         * is too trivial to go to the hassle of importing the
         * FreeBSD environment stuff.
         */

        /*
         * Look at arguments passed to us and compute boothowto.
         */
        boothowto = bootinfo.bi_boothowto;

        /* XXX: Debug: Override to verbose */
        boothowto |= AB_VERBOSE;

        /*
         * Initialize the console before we print anything out.
         */
        cninit();

        /* OUTPUT NOW ALLOWED */

        if (ia64_pal_base != 0) {
                ia64_pal_base &= ~IA64_ID_PAGE_MASK;
                /*
                 * We use a TR to map the first 256M of memory - this
                 * might cover the palcode too.
                 */
                if (ia64_pal_base == 0)
                        printf("PAL code mapped by the kernel's TR\n");
        } else
                printf("PAL code not found\n");

        /*
         * Wire things up so we can call the firmware.
         */
        map_pal_code();
        efi_boot_minimal(bootinfo.bi_systab);
        ia64_sal_init();
        calculate_frequencies();

        /*
         * Find the beginning and end of the kernel.
         */
        kernstart = trunc_page((vaddr_t) &kernel_text);
#ifdef DDB
        ia64_unwindtab = (uint64_t)bootinfo.bi_unwindtab;
        ia64_unwindtablen = (uint64_t)bootinfo.bi_unwindtablen;
        ksym_start = (void *)bootinfo.bi_symtab;
        ksym_end = (void *)bootinfo.bi_esymtab;
        kernend = (vaddr_t)round_page((vaddr_t)bootinfo.bi_kernend);
#else
        kernend = (vaddr_t)round_page(bootinfo.bi_kernend);
#endif
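
        /*
         * The kernel image lives in region 7; IA64_RR_MASK() strips
         * the region bits to recover its physical address range.
         */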
        kernstartpfn = atop(IA64_RR_MASK(kernstart));
        kernendpfn = atop(IA64_RR_MASK(kernend));

        /*
         * Find out this system's page size, and initialize
         * PAGE_SIZE-dependent variables.
         */
        uvmexp.pagesize = PAGE_SIZE;
        uvm_md_init();

        /*
         * Find out how much memory is available, by looking at
         * the memory descriptors.
         */
        physmem = 0;

        for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {

                DPRINTF("MD %p: type %d pa 0x%lx cnt 0x%lx\n", md,
                    md->md_type, md->md_phys, md->md_pages);

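                /*
                 * EFI counts pages in fixed 4KiB units, independent
                 * of the kernel's PAGE_SIZE; hence the literal 4096
                 * below.
                 */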
                pfn0 = ia64_btop(round_page(md->md_phys));
                pfn1 = ia64_btop(trunc_page(md->md_phys + md->md_pages * 4096));
                if (pfn1 <= pfn0)
                        continue;

                if (md->md_type != EFI_MD_TYPE_FREE)
                        continue;

                /*
                 * Wimp out for now since we do not DTRT here with
                 * pci bus mastering (no bounce buffering, for example).
                 */
                if (pfn0 >= ia64_btop(0x100000000UL)) {
                        printf("Skipping memory chunk start 0x%lx\n",
                            md->md_phys);
                        continue;
                }
                if (pfn1 >= ia64_btop(0x100000000UL)) {
                        printf("Skipping memory chunk end 0x%lx\n",
                            md->md_phys + md->md_pages * 4096);
                        continue;
                }

                /*
                 * We have a memory descriptor that describes conventional
                 * memory that is for general use.  We must determine if the
                 * loader has put the kernel in this region.
                 */
                physmem += (pfn1 - pfn0);
                if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) {
                        /*
                         * Must compute the location of the kernel
                         * within the segment.
                         */
                        DPRINTF("Descriptor %p contains kernel\n", md);

                        if (pfn0 < kernstartpfn) {
                                /*
                                 * There is a chunk before the kernel.
                                 */
                                DPRINTF("Loading chunk before kernel: "
                                    "0x%lx / 0x%lx\n", pfn0, kernstartpfn);

                                uvm_page_physload(pfn0, kernstartpfn,
                                    pfn0, kernstartpfn, VM_FREELIST_DEFAULT);
                        }
                        if (kernendpfn < pfn1) {
                                /*
                                 * There is a chunk after the kernel.
                                 */
                                DPRINTF("Loading chunk after kernel: "
                                    "0x%lx / 0x%lx\n", kernendpfn, pfn1);

                                uvm_page_physload(kernendpfn, pfn1,
                                    kernendpfn, pfn1, VM_FREELIST_DEFAULT);
                        }
                } else {
                        /*
                         * Just load this cluster as one chunk.
                         */
                        DPRINTF("Loading descriptor %p: 0x%lx / 0x%lx\n",
                            md, pfn0, pfn1);

                        uvm_page_physload(pfn0, pfn1, pfn0, pfn1,
                            VM_FREELIST_DEFAULT);
                }
        }

        if (physmem == 0)
                panic("can't happen: system seems to have no memory!");

        /*
         * Initialize error message buffer (at end of core).
         */
        msgbufaddr = (void *) uvm_pageboot_alloc(MSGBUFSIZE);
        initmsgbuf(msgbufaddr, MSGBUFSIZE);

        /*
         * Init mapping for u page(s) for proc 0.  Use the memory area
         * already set up in locore.S.
         */
        v = (vaddr_t)&kstack;
        uvm_lwp_setuarea(&lwp0, v);

        /*
         * Set the kernel sp, reserving space for an (empty) trapframe,
         * and make lwp0's trapframe pointer point to it for sanity.
         */
        lwp0.l_md.md_tf = (struct trapframe *)(v + UAREA_TF_OFFSET);
        lwp0.l_md.md_tf->tf_length = sizeof(struct trapframe);
        lwp0.l_md.md_tf->tf_flags = FRAME_SYSCALL;

        lwp0.l_md.user_stack = NULL;
        lwp0.l_md.user_stack_size = 0;

        pcb0 = lwp_getpcb(&lwp0);
        pcb0->pcb_special.sp = v + UAREA_SP_OFFSET;
        pcb0->pcb_special.bspstore = v + UAREA_BSPSTORE_OFFSET;

        /*
         * Setup global data for the bootstrap cpu.
         */
        ci = curcpu();

        /*
         * ar.k4 contains the cpu_info pointer to the
         * current cpu.
         */
        ia64_set_k4((uint64_t) ci);
        ci->ci_cpuid = cpu_number();

        /*
         * Initialise process context.  XXX: This should really be in
         * cpu_switchto.
         *
         * No membar needed because we're not switching from a
         * previous lwp, and the idle lwp we're switching to can't be
         * holding locks already; see cpu_switchto.
         */
        ci->ci_curlwp = &lwp0;

        /*
         * Initialize the primary CPU's idle PCB to proc0's.  In a
         * MULTIPROCESSOR configuration, each CPU will later get
         * its own idle PCB when autoconfiguration runs.
         */
        ci->ci_idle_pcb = pcb0;

        /* Indicate that proc0 has a CPU. */
        lwp0.l_cpu = ci;

        ia64_set_tpr(0);
        ia64_srlz_d();

        mutex_init(&pcb0->pcb_fpcpu_slock, MUTEX_DEFAULT, 0);

        /*
         * Save our current context so that we have a known (maybe even
         * sane) context as the initial context for new threads that are
         * forked from us.
         */
        if (savectx(pcb0))
                panic("savectx failed");

        /*
         * Initialize the virtual memory system.
         */
        pmap_bootstrap();

        /*
         * Initialize debuggers, and break into them if appropriate.
         */
#if NKSYMS || defined(DDB) || defined(MODULAR)
        ksyms_addsyms_elf((int)((uint64_t)ksym_end - (uint64_t)ksym_start),
            ksym_start, ksym_end);
#endif

#ifdef DDB
        if (boothowto & RB_KDB)
                Debugger();
#endif

        ret.bspstore = pcb0->pcb_special.bspstore;
        ret.sp = pcb0->pcb_special.sp;

        return (ret);
}

uint64_t
ia64_get_hcdp(void)
{

        return bootinfo.bi_hcdp;
}

/*
 * Set registers on exec.
 */
void
setregs(register struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
        struct trapframe *tf;
        uint64_t *ksttop, *kst, regstkp;
        vaddr_t uv = uvm_lwp_getuarea(l);

        tf = l->l_md.md_tf;
        regstkp = uv + sizeof(struct pcb);

        ksttop =
            (uint64_t*)(regstkp + tf->tf_special.ndirty +
            (tf->tf_special.bspstore & 0x1ffUL));

        /* XXX: tf_special.ndirty on a new stack frame ??? */

        /*
         * We can ignore up to 8KB of dirty registers by masking off the
         * lower 13 bits in exception_restore() or epc_syscall().  This
         * should be enough for a couple of years, but if there are more
         * than 8KB of dirty registers, we lose track of the bottom of
         * the kernel stack.  The solution is to copy the active part of
         * the kernel stack down 1 page (or 2, but not more than that)
         * so that we always have less than 8KB of dirty registers.
         */
        KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0);

        memset(&tf->tf_special, 0, sizeof(tf->tf_special));
        if ((tf->tf_flags & FRAME_SYSCALL) == 0) {      /* break syscalls. */
                memset(&tf->tf_scratch, 0, sizeof(tf->tf_scratch));
                memset(&tf->tf_scratch_fp, 0, sizeof(tf->tf_scratch_fp));
                tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL;
                tf->tf_special.bspstore = IA64_BACKINGSTORE;
                /*
                 * Copy the arguments onto the kernel register stack so that
                 * they get loaded by the loadrs instruction.  Skip over the
                 * NaT collection points.
                 */
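                /*
                 * A collection point occupies every 64th slot, i.e.
                 * offset 0x1f8 within each 512-byte span of the
                 * backing store; it holds NaT bits rather than a
                 * register value, which is what the 0x1f8 tests
                 * below detect.
                 */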
                kst = ksttop - 1;
                if (((uintptr_t)kst & 0x1ff) == 0x1f8)
                        *kst-- = 0;
                *kst-- = stack;			/* in3 = sp */
                if (((uintptr_t)kst & 0x1ff) == 0x1f8)
                        *kst-- = 0;
                *kst-- = l->l_proc->p_psstrp;	/* in2 = ps_strings */
                if (((uintptr_t)kst & 0x1ff) == 0x1f8)
                        *kst-- = 0;
                *kst-- = 0;			/* in1 = *obj */
                if (((uintptr_t)kst & 0x1ff) == 0x1f8)
                        *kst-- = 0;
                *kst = 0;			/* in0 = *cleanup */
                tf->tf_special.ndirty = (ksttop - kst) << 3;
        } else {				/* epc syscalls (default). */
                tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL;
                tf->tf_special.bspstore = IA64_BACKINGSTORE + 24;
                /*
                 * Write values for out0, out1, out2 and out3 to the user's
                 * backing store and arrange for them to be restored into
                 * the user's initial register frame.
                 * Assumes that (bspstore & 0x1f8) < 0x1e0.
                 */

                /* in0 = *cleanup */
                ustore_long((u_long *)(tf->tf_special.bspstore - 32), 0);

                /* in1 = *obj */
                ustore_long((u_long *)(tf->tf_special.bspstore - 24), 0);

                /* in2 = ps_strings */
                ustore_long((u_long *)(tf->tf_special.bspstore - 16),
                    l->l_proc->p_psstrp);

                /* in3 = sp */
                ustore_long((u_long *)(tf->tf_special.bspstore - 8),
                    stack);
        }

        tf->tf_special.iip = pack->ep_entry;
        tf->tf_special.sp = (stack & ~15) - 16;
        tf->tf_special.rsc = 0xf;
        tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
        tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
            IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
            IA64_PSR_CPL_USER;
}

void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
        return;
}

void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
        return;
}

int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
        return EINVAL;
}

int
cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
{
        return EINVAL;
}

int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{

        return 0;	/* TODO: Implement. */
}

void
ia64_sync_icache(vaddr_t va, vsize_t sz)
{
        vaddr_t lim;

        if (!ia64_sync_icache_needed)
                return;

        lim = va + sz;
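        /*
         * Flush one cache line per iteration; the 32-byte stride is
         * presumably the smallest i-cache line size (see the XXX),
         * so this is correct albeit suboptimal on CPUs with larger
         * lines.
         */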
        while (va < lim) {
                ia64_fc_i(va);
                va += 32;	/* XXX */
        }

        ia64_sync_i();
        ia64_srlz_i();
}

/*
 * Construct a PCB from a trapframe.  This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger.  We have the context in the trapframe, but base the trace
 * on the PCB.  The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
        pcb->pcb_special = tf->tf_special;
        pcb->pcb_special.__spare = ~0UL;	/* XXX see unwind.c */
        save_callee_saved(&pcb->pcb_preserved);
        save_callee_saved_fp(&pcb->pcb_preserved_fp);
}