xref: /onnv-gate/usr/src/uts/i86pc/ml/bios_call_src.s (revision 3446:5903aece022d)
1*3446Smrj/*
2*3446Smrj * CDDL HEADER START
3*3446Smrj *
4*3446Smrj * The contents of this file are subject to the terms of the
5*3446Smrj * Common Development and Distribution License (the "License").
6*3446Smrj * You may not use this file except in compliance with the License.
7*3446Smrj *
8*3446Smrj * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*3446Smrj * or http://www.opensolaris.org/os/licensing.
10*3446Smrj * See the License for the specific language governing permissions
11*3446Smrj * and limitations under the License.
12*3446Smrj *
13*3446Smrj * When distributing Covered Code, include this CDDL HEADER in each
14*3446Smrj * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*3446Smrj * If applicable, add the following below this CDDL HEADER, with the
16*3446Smrj * fields enclosed by brackets "[]" replaced with your own identifying
17*3446Smrj * information: Portions Copyright [yyyy] [name of copyright owner]
18*3446Smrj *
19*3446Smrj * CDDL HEADER END
20*3446Smrj */
21*3446Smrj
22*3446Smrj/*
23*3446Smrj * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24*3446Smrj * Use is subject to license terms.
25*3446Smrj */
26*3446Smrj
27*3446Smrj#pragma ident	"%Z%%M%	%I%	%E% SMI"
28*3446Smrj
29*3446Smrj#if defined(__lint)
30*3446Smrj
31*3446Smrjint silence_lint = 0;
32*3446Smrj
33*3446Smrj#else
34*3446Smrj
35*3446Smrj#include <sys/segments.h>
36*3446Smrj#include <sys/controlregs.h>
37*3446Smrj
38*3446Smrj/*
39*3446Smrj * Do a call into BIOS.  This goes down to 16 bit real mode and back again.
40*3446Smrj */
41*3446Smrj
42*3446Smrj/*
43*3446Smrj * instruction prefix to change operand size in instruction
 * (0x66 is the x86 operand-size override prefix, emitted by hand so an
 * instruction assembled for a 32-bit section uses 32-bit operands while
 * executing in the 16-bit code segment below; without it the operands
 * are 16 bit)
44*3446Smrj */
45*3446Smrj#define DATASZ	.byte 0x66;
46*3446Smrj
/*
 * MOVCR(x, y)	copy control register x to memory y via %rax/%eax
 *		(clobbers %rax/%eax)
 * LOAD_XAX(sym)	load the address of sym into %rax/%eax
 */
47*3446Smrj#if defined(__amd64)
48*3446Smrj#define	MOVCR(x, y)	movq  x,%rax; movq  %rax, y
49*3446Smrj#define LOAD_XAX(sym)	leaq	sym, %rax
50*3446Smrj#elif defined(__i386)
51*3446Smrj#define	MOVCR(x, y)	movl  x,%eax; movl  %eax, y
52*3446Smrj#define LOAD_XAX(sym)	leal	sym, %eax
53*3446Smrj#endif
54*3446Smrj
/*
 * Entry point for the BIOS call trampoline.
 *
 * Arguments (visible from the loads below):
 *   i386:  4(%esp) = BIOS interrupt number, 8(%esp) = pointer to an
 *          18-byte register block
 *   amd64: %rdi = BIOS interrupt number, %rsi = pointer to an 18-byte
 *          register block
 *
 * The 18-byte block holds the seven 16-bit GPR values plus %es and %ds
 * (see the pops before the "int" and the pushes after it).  It is copied
 * to a low-memory stack placed just below _start so it stays addressable
 * from 16-bit real mode.
 */
55*3446Smrj	.globl	_start
56*3446Smrj_start:
57*3446Smrj
58*3446Smrj#if defined(__i386)
59*3446Smrj
60*3446Smrj	/*
61*3446Smrj	 * Save caller registers
62*3446Smrj	 */
63*3446Smrj	movl	%ebp, save_ebp
64*3446Smrj	movl	%esp, save_esp
65*3446Smrj	movl	%ebx, save_ebx
66*3446Smrj	movl	%esi, save_esi
67*3446Smrj	movl	%edi, save_edi
68*3446Smrj
	/* caller's stack is still in place: 4/8(%esp) are the C args */
69*3446Smrj	/* get registers argument into esi */
70*3446Smrj	movl	8(%esp), %esi
71*3446Smrj
72*3446Smrj	/* put interrupt number in %bl */
73*3446Smrj	movl	4(%esp), %ebx
74*3446Smrj
	/* stack grows down from _start, below this (copied) code blob */
75*3446Smrj	/* Switch to a low memory stack */
76*3446Smrj	movl	$_start, %esp
77*3446Smrj
78*3446Smrj	/* allocate space for args on stack */
79*3446Smrj	subl	$18, %esp
80*3446Smrj	movl	%esp, %edi
81*3446Smrj
82*3446Smrj#elif defined(__amd64)
83*3446Smrj
84*3446Smrj	/*
85*3446Smrj	 * Save caller registers
86*3446Smrj	 */
87*3446Smrj	movq	%rbp, save_rbp
88*3446Smrj	movq	%rsp, save_rsp
89*3446Smrj	movq	%rbx, save_rbx
90*3446Smrj	movq	%rsi, save_rsi
91*3446Smrj	movq	%r12, save_r12
92*3446Smrj	movq	%r13, save_r13
93*3446Smrj	movq	%r14, save_r14
94*3446Smrj	movq	%r15, save_r15
95*3446Smrj
96*3446Smrj	/* Switch to a low memory stack */
97*3446Smrj	movq	$_start, %rsp
98*3446Smrj
99*3446Smrj	/* put interrupt number in %bl */
100*3446Smrj	movq	%rdi, %rbx
101*3446Smrj
102*3446Smrj	/* allocate space for args on stack */
103*3446Smrj	subq	$18, %rsp
104*3446Smrj	movq	%rsp, %rdi
105*3446Smrj
106*3446Smrj#endif
107*3446Smrj
	/* both paths: %esi/%rsi = source args, %edi/%rdi = low-mem stack */
108*3446Smrj	/* copy args from high memory to stack in low memory */
109*3446Smrj	cld
110*3446Smrj	movl	$18, %ecx
111*3446Smrj	rep
112*3446Smrj	movsb
113*3446Smrj
114*3446Smrj	/*
115*3446Smrj	 * Save system registers
	 * (sidt/sgdt store into the 16-byte save_idt/save_gdt buffers;
	 * MOVCR clobbers %rax/%eax, which is fine here)
116*3446Smrj	 */
117*3446Smrj	sidt	save_idt
118*3446Smrj	sgdt	save_gdt
119*3446Smrj	str	save_tr
120*3446Smrj	movw	%cs, save_cs
121*3446Smrj	movw	%ds, save_ds
122*3446Smrj	movw	%ss, save_ss
123*3446Smrj	movw	%es, save_es
124*3446Smrj	movw	%fs, save_fs
125*3446Smrj	movw	%gs, save_gs
126*3446Smrj	MOVCR(	%cr4, save_cr4)
127*3446Smrj	MOVCR(	%cr3, save_cr3)
128*3446Smrj	MOVCR(	%cr0, save_cr0)
129*3446Smrj
130*3446Smrj#if defined(__amd64)
131*3446Smrj	/*
132*3446Smrj	 * save/clear the extension parts of the fs/gs base registers and cr8
	 * rdmsr/wrmsr use %ecx = MSR number and %edx:%eax = 64-bit value;
	 * the bases are zeroed so no stale 64-bit state survives the drop
	 * out of long mode, and restored on the way back (see .newcs below)
133*3446Smrj	 */
134*3446Smrj	movl	$MSR_AMD_FSBASE, %ecx
135*3446Smrj	rdmsr
136*3446Smrj	movl	%eax, save_fsbase
137*3446Smrj	movl	%edx, save_fsbase + 4
138*3446Smrj	xorl	%eax, %eax
139*3446Smrj	xorl	%edx, %edx
140*3446Smrj	wrmsr
141*3446Smrj
142*3446Smrj	movl	$MSR_AMD_GSBASE, %ecx
143*3446Smrj	rdmsr
144*3446Smrj	movl	%eax, save_gsbase
145*3446Smrj	movl	%edx, save_gsbase + 4
146*3446Smrj	xorl	%eax, %eax
147*3446Smrj	xorl	%edx, %edx
148*3446Smrj	wrmsr
149*3446Smrj
150*3446Smrj	movl	$MSR_AMD_KGSBASE, %ecx
151*3446Smrj	rdmsr
152*3446Smrj	movl	%eax, save_kgsbase
153*3446Smrj	movl	%edx, save_kgsbase + 4
154*3446Smrj	xorl	%eax, %eax
155*3446Smrj	xorl	%edx, %edx
156*3446Smrj	wrmsr
157*3446Smrj
	/* %cr8 (task priority) exists only in 64-bit mode; save it too */
158*3446Smrj	movq	%cr8, %rax
159*3446Smrj	movq	%rax, save_cr8
160*3446Smrj#endif
161*3446Smrj
162*3446Smrj	/*
163*3446Smrj	 * set offsets in 16 bit ljmp instructions below
	 * (the far-jump targets are absolute 16-bit offsets that depend on
	 * where this blob was copied in low memory, so they are patched at
	 * run time into the hand-assembled 0xea instructions)
164*3446Smrj	 */
165*3446Smrj	LOAD_XAX(enter_real)
166*3446Smrj	movw	%ax, enter_real_ljmp
167*3446Smrj
168*3446Smrj	LOAD_XAX(enter_protected)
169*3446Smrj	movw	%ax, enter_protected_ljmp
170*3446Smrj
171*3446Smrj	LOAD_XAX(gdt_info)
172*3446Smrj	movw	%ax, gdt_info_load
173*3446Smrj
174*3446Smrj	/*
175*3446Smrj	 * insert BIOS interrupt number into later instruction
	 * (self-modifying: overwrite the imm8 of "int $0x10" at int_instr;
	 * the jmp flushes any stale prefetched copy of that instruction)
176*3446Smrj	 */
177*3446Smrj	movb    %bl, int_instr+1
178*3446Smrj	jmp     1f
179*3446Smrj1:
180*3446Smrj
181*3446Smrj	/*
182*3446Smrj	 * zero out all the registers to make sure they're 16 bit clean
183*3446Smrj	 */
184*3446Smrj#if defined(__amd64)
185*3446Smrj	xorq	%r8, %r8
186*3446Smrj	xorq	%r9, %r9
187*3446Smrj	xorq	%r10, %r10
188*3446Smrj	xorq	%r11, %r11
189*3446Smrj	xorq	%r12, %r12
190*3446Smrj	xorq	%r13, %r13
191*3446Smrj	xorq	%r14, %r14
192*3446Smrj	xorq	%r15, %r15
193*3446Smrj#endif
194*3446Smrj	xorl	%eax, %eax
195*3446Smrj	xorl	%ebx, %ebx
196*3446Smrj	xorl	%ecx, %ecx
197*3446Smrj	xorl	%edx, %edx
198*3446Smrj	xorl	%ebp, %ebp
199*3446Smrj	xorl	%esi, %esi
200*3446Smrj	xorl	%edi, %edi
201*3446Smrj
202*3446Smrj	/*
203*3446Smrj	 * Load our own GDT/IDT
	 * (gdt_info comes from the included boot_gdt.s; idt_info points
	 * at the real-mode IVT, defined at the bottom of this file)
204*3446Smrj	 */
205*3446Smrj	lgdt	gdt_info
206*3446Smrj	lidt	idt_info
207*3446Smrj
208*3446Smrj#if defined(__amd64)
209*3446Smrj	/*
210*3446Smrj	 * Shut down 64 bit mode. First get into compatibility mode.
	 * iretq pops rip/cs/flags/rsp/ss, so pushing a 32-bit code selector
	 * for %cs lands us at 1: executing 32-bit code
211*3446Smrj	 */
212*3446Smrj	movq	%rsp, %rax
213*3446Smrj	pushq	$B32DATA_SEL
214*3446Smrj	pushq	%rax
215*3446Smrj	pushf
216*3446Smrj	pushq	$B32CODE_SEL
217*3446Smrj	pushq	$1f
218*3446Smrj	iretq
219*3446Smrj1:
220*3446Smrj	.code32
221*3446Smrj
222*3446Smrj	/*
223*3446Smrj	 * disable long mode by:
224*3446Smrj	 * - shutting down paging (bit 31 of cr0)
225*3446Smrj	 * - flushing the TLB
226*3446Smrj	 * - disabling LME (long mode enable) in EFER (extended feature reg)
227*3446Smrj	 */
228*3446Smrj	movl	%cr0, %eax
229*3446Smrj	btcl	$31, %eax		/* disable paging */
230*3446Smrj	movl	%eax, %cr0
231*3446Smrj	ljmp	$B32CODE_SEL, $1f	/* serialize after paging change */
232*3446Smrj1:
233*3446Smrj
234*3446Smrj	xorl	%eax, %eax
235*3446Smrj	movl	%eax, %cr3		/* flushes TLB */
236*3446Smrj
237*3446Smrj	movl	$MSR_AMD_EFER, %ecx	/* Extended Feature Enable */
238*3446Smrj	rdmsr
239*3446Smrj	btcl	$8, %eax		/* bit 8 Long Mode Enable bit */
240*3446Smrj	wrmsr
241*3446Smrj#endif
242*3446Smrj
243*3446Smrj	/*
244*3446Smrj	 * ok.. now enter 16 bit mode, so we can shut down protected mode
245*3446Smrj	 *
246*3446Smrj	 * We'll have to act like we're still in a 32 bit section.
247*3446Smrj	 * So the code from this point has DATASZ in front of it to get 32 bit
248*3446Smrj	 * operands. If DATASZ is missing the operands will be 16 bit.
249*3446Smrj	 *
250*3446Smrj	 * Now shut down paging and protected (ie. segmentation) modes.
251*3446Smrj	 */
252*3446Smrj	ljmp	$B16CODE_SEL, $enter_16_bit
253*3446Smrjenter_16_bit:
254*3446Smrj
255*3446Smrj	/*
256*3446Smrj	 * Make sure hidden parts of segment registers are 16 bit clean
	 * (loading a selector reloads the hidden base/limit/attributes from
	 * the 16-bit data descriptor)
257*3446Smrj	 */
258*3446Smrj	DATASZ	movl	$B16DATA_SEL, %eax
259*3446Smrj		movw    %ax, %ss
260*3446Smrj		movw    %ax, %ds
261*3446Smrj		movw    %ax, %es
262*3446Smrj		movw    %ax, %fs
263*3446Smrj		movw    %ax, %gs
264*3446Smrj
265*3446Smrj
	/* clear CR0.PE; the far jump below completes the switch to real mode */
266*3446Smrj	DATASZ	movl	$0x0, %eax	/* put us in real mode */
267*3446Smrj	DATASZ	movl	%eax, %cr0
	/* hand-assembled 16-bit far jump; offset patched at run time above */
268*3446Smrj	.byte	0xea			/* ljmp */
269*3446Smrjenter_real_ljmp:
270*3446Smrj	.value	0			/* addr (16 bit) */
271*3446Smrj	.value	0x0			/* value for %cs */
272*3446Smrjenter_real:
273*3446Smrj
274*3446Smrj	/*
275*3446Smrj	 * zero out the remaining segment registers
276*3446Smrj	 */
277*3446Smrj	DATASZ	xorl	%eax, %eax
278*3446Smrj		movw    %ax, %ss
279*3446Smrj		movw    %ax, %ds
280*3446Smrj		movw    %ax, %es
281*3446Smrj		movw    %ax, %fs
282*3446Smrj		movw    %ax, %gs
283*3446Smrj
284*3446Smrj	/*
285*3446Smrj	 * load the arguments to the BIOS call from the stack
	 * (this pop order defines the 18-byte regs layout:
	 * ax, bx, cx, dx, si, di, bp, es, ds -- 2 bytes each)
286*3446Smrj	 */
287*3446Smrj	popl	%eax	/* really executes a 16 bit pop */
288*3446Smrj	popl	%ebx
289*3446Smrj	popl	%ecx
290*3446Smrj	popl	%edx
291*3446Smrj	popl	%esi
292*3446Smrj	popl	%edi
293*3446Smrj	popl	%ebp
294*3446Smrj	pop	%es
295*3446Smrj	pop	%ds
296*3446Smrj
297*3446Smrj	/*
298*3446Smrj	 * do the actual BIOS call
299*3446Smrj	 */
300*3446Smrj	sti
301*3446Smrjint_instr:
302*3446Smrj	int	$0x10		/* this int number is overwritten */
303*3446Smrj	cli			/* ensure interrupts remain disabled */
304*3446Smrj
305*3446Smrj	/*
306*3446Smrj	 * save results of the BIOS call
	 * (reverse of the pops above, with the flags word pushed last-first
	 * so it ends up at offset 18, just above the 18-byte register block)
307*3446Smrj	 */
308*3446Smrj	pushf
309*3446Smrj	push	%ds
310*3446Smrj	push	%es
311*3446Smrj	pushl	%ebp		/* still executes as 16 bit */
312*3446Smrj	pushl	%edi
313*3446Smrj	pushl	%esi
314*3446Smrj	pushl	%edx
315*3446Smrj	pushl	%ecx
316*3446Smrj	pushl	%ebx
317*3446Smrj	pushl	%eax
318*3446Smrj
319*3446Smrj	/*
320*3446Smrj	 * Restore protected mode and 32 bit execution
321*3446Smrj	 */
322*3446Smrj	push	$0			/* make sure %ds is zero before lgdt */
323*3446Smrj	pop	%ds
	/* hand-assembled 16-bit "lgdt m16" with its operand patched above */
324*3446Smrj	.byte	0x0f, 0x01, 0x16	/* lgdt */
325*3446Smrjgdt_info_load:
326*3446Smrj	.value	0	/* temp GDT in currently addressable mem */
327*3446Smrj
	/* set CR0.PE to re-enter protected mode (paging still off) */
328*3446Smrj	DATASZ	movl	$0x1, %eax
329*3446Smrj	DATASZ	movl	%eax, %cr0
330*3446Smrj
	/* hand-assembled far jump into a 32-bit code segment; offset patched */
331*3446Smrj	.byte	0xea			/* ljmp */
332*3446Smrjenter_protected_ljmp:
333*3446Smrj	.value	0			/* addr (still in 16 bit) */
334*3446Smrj	.value	B32CODE_SEL		/* %cs value */
335*3446Smrjenter_protected:
336*3446Smrj
337*3446Smrj	/*
338*3446Smrj	 * We are now back in a 32 bit code section, fix data/stack segments
339*3446Smrj	 */
340*3446Smrj	.code32
341*3446Smrj	movw	$B32DATA_SEL, %ax
342*3446Smrj	movw	%ax, %ds
343*3446Smrj	movw	%ax, %ss
344*3446Smrj
345*3446Smrj	/*
346*3446Smrj	 * Re-enable paging. Note we only use 32 bit mov's to restore these
347*3446Smrj	 * control registers. That's OK as the upper 32 bits are always zero.
348*3446Smrj	 */
349*3446Smrj	movl	save_cr4, %eax
350*3446Smrj	movl	%eax, %cr4
351*3446Smrj	movl	save_cr3, %eax
352*3446Smrj	movl	%eax, %cr3
353*3446Smrj
354*3446Smrj#if defined(__amd64)
355*3446Smrj	/*
356*3446Smrj	 * re-enable long mode
	 * (set EFER.LME again before turning paging back on, so setting
	 * CR0.PG below re-activates long mode)
357*3446Smrj	 */
358*3446Smrj	movl	$MSR_AMD_EFER, %ecx
359*3446Smrj	rdmsr
360*3446Smrj	btsl	$8, %eax
361*3446Smrj	wrmsr
362*3446Smrj#endif
363*3446Smrj
	/* restoring cr0 turns paging back on; jmp serializes the change */
364*3446Smrj	movl	save_cr0, %eax
365*3446Smrj	movl	%eax, %cr0
366*3446Smrj	jmp	enter_paging
367*3446Smrjenter_paging:
368*3446Smrj
369*3446Smrj
370*3446Smrj#if defined(__amd64)
371*3446Smrj	/*
372*3446Smrj	 * transition back to 64 bit mode
	 * (far return through a 64-bit code selector)
373*3446Smrj	 */
374*3446Smrj	pushl	$B64CODE_SEL
375*3446Smrj	pushl	$longmode
376*3446Smrj	lret
377*3446Smrjlongmode:
378*3446Smrj	.code64
379*3446Smrj#endif
380*3446Smrj	/*
381*3446Smrj	 * restore caller frame pointer and segment registers
382*3446Smrj	 */
383*3446Smrj	lgdt	save_gdt
384*3446Smrj	lidt	save_idt
385*3446Smrj
386*3446Smrj	/*
387*3446Smrj	 * Before loading the task register we need to reset the busy bit
388*3446Smrj	 * in its corresponding GDT selector. The busy bit is the 2nd bit in
389*3446Smrj	 * the 5th byte of the descriptor (ltr faults on an already-busy TSS).
	 * save_gdt+2 holds the GDT base from sgdt; adding the TR selector
	 * gives the address of the TSS descriptor.
390*3446Smrj	 */
391*3446Smrj#if defined(__i386)
392*3446Smrj	movzwl	save_tr, %eax
393*3446Smrj	addl	save_gdt+2, %eax
394*3446Smrj	btcl	$1, 5(%eax)
395*3446Smrj#elif defined(__amd64)
396*3446Smrj	movzwq	save_tr, %rax
397*3446Smrj	addq	save_gdt+2, %rax
398*3446Smrj	btcl	$1, 5(%rax)
399*3446Smrj#endif
400*3446Smrj	ltr	save_tr
401*3446Smrj	movw	save_ds, %ds
402*3446Smrj	movw	save_ss, %ss
403*3446Smrj	movw	save_es, %es
404*3446Smrj	movw	save_fs, %fs
405*3446Smrj	movw	save_gs, %gs
406*3446Smrj
	/* far return to reload the caller's %cs */
407*3446Smrj#if defined(__i386)
408*3446Smrj	pushl	save_cs
409*3446Smrj	pushl	$.newcs
410*3446Smrj	lret
411*3446Smrj#elif defined(__amd64)
412*3446Smrj	pushq	save_cs
413*3446Smrj	pushq	$.newcs
414*3446Smrj	lretq
415*3446Smrj#endif
416*3446Smrj.newcs:
417*3446Smrj
418*3446Smrj#if defined(__amd64)
419*3446Smrj	/*
420*3446Smrj	 * restore the hidden kernel segment base register values
	 * (the fs/gs/kgs base MSRs saved and zeroed on the way down)
421*3446Smrj	 */
422*3446Smrj	movl	save_fsbase, %eax
423*3446Smrj	movl	save_fsbase + 4, %edx
424*3446Smrj	movl	$MSR_AMD_FSBASE, %ecx
425*3446Smrj	wrmsr
426*3446Smrj
427*3446Smrj	movl	save_gsbase, %eax
428*3446Smrj	movl	save_gsbase + 4, %edx
429*3446Smrj	movl	$MSR_AMD_GSBASE, %ecx
430*3446Smrj	wrmsr
431*3446Smrj
432*3446Smrj	movl	save_kgsbase, %eax
433*3446Smrj	movl	save_kgsbase + 4, %edx
434*3446Smrj	movl	$MSR_AMD_KGSBASE, %ecx
435*3446Smrj	wrmsr
436*3446Smrj
	/* only write %cr8 if the saved value was non-zero */
437*3446Smrj	movq	save_cr8, %rax
438*3446Smrj	cmpq	$0, %rax
439*3446Smrj	je	1f
440*3446Smrj	movq	%rax, %cr8
441*3446Smrj1:
442*3446Smrj#endif
443*3446Smrj
444*3446Smrj	/*
445*3446Smrj	 * copy results to caller's location, then restore remaining registers
	 *
	 * The 18-byte result block (plus the 16-bit flags at offset 18) is
	 * still on the low-memory stack; copy it back over the caller's regs
	 * struct and return the BIOS flags word zero-extended in %eax/%rax.
446*3446Smrj	 */
447*3446Smrj#if defined(__i386)
	/* reload the caller's regs-pointer argument from the saved stack */
448*3446Smrj	movl    save_esp, %edi
449*3446Smrj	movl	8(%edi), %edi
450*3446Smrj	movl	%esp, %esi
451*3446Smrj	movl	$18, %ecx
452*3446Smrj	rep
453*3446Smrj	movsb
	/* return value = 16-bit flags pushed by pushf after the int */
454*3446Smrj	movw	18(%esp), %ax
455*3446Smrj	andl	$0xffff, %eax
456*3446Smrj	movl    save_ebx, %ebx
457*3446Smrj	movl    save_esi, %esi
458*3446Smrj	movl    save_edi, %edi
460*3446Smrj	movl    save_ebp, %ebp
	/* switch back to the caller's stack last, then return on it */
461*3446Smrj	movl    save_esp, %esp
462*3446Smrj	ret
463*3446Smrj
464*3446Smrj#elif defined(__amd64)
	/* destination = caller's regs pointer (original %rsi argument) */
465*3446Smrj	movq    save_rsi, %rdi
466*3446Smrj	movq	%rsp, %rsi
467*3446Smrj	movq	$18, %rcx
468*3446Smrj	rep
469*3446Smrj	movsb
	/* return value = 16-bit flags pushed by pushf after the int */
470*3446Smrj	movw	18(%rsp), %ax
471*3446Smrj	andq	$0xffff, %rax
472*3446Smrj	movq    save_r12, %r12
473*3446Smrj	movq    save_r13, %r13
474*3446Smrj	movq    save_r14, %r14
475*3446Smrj	movq    save_r15, %r15
476*3446Smrj	movq    save_rbx, %rbx
477*3446Smrj	movq    save_rbp, %rbp
478*3446Smrj	movq    save_rsp, %rsp
479*3446Smrj	ret
480*3446Smrj
481*3446Smrj#endif
482*3446Smrj
483*3446Smrj
484*3446Smrj/*
485*3446Smrj * Caller's registers to restore
 * (the .long slots are used only by the __i386 path, the .quad slots
 * only by the __amd64 path; all are written by _start on the way in
 * and read again on the way out)
486*3446Smrj */
487*3446Smrj	.align 4
488*3446Smrjsave_esi:
489*3446Smrj	.long	0
490*3446Smrjsave_edi:
491*3446Smrj	.long	0
492*3446Smrjsave_ebx:
493*3446Smrj	.long	0
494*3446Smrjsave_ebp:
495*3446Smrj	.long	0
496*3446Smrjsave_esp:
497*3446Smrj	.long	0
498*3446Smrj
499*3446Smrj	.align 8
500*3446Smrj#if defined(__amd64)
501*3446Smrjsave_rsi:
502*3446Smrj	.quad	0
503*3446Smrjsave_rbx:
504*3446Smrj	.quad	0
505*3446Smrjsave_rbp:
506*3446Smrj	.quad	0
507*3446Smrjsave_rsp:
508*3446Smrj	.quad	0
509*3446Smrjsave_r12:
510*3446Smrj	.quad	0
511*3446Smrjsave_r13:
512*3446Smrj	.quad	0
513*3446Smrjsave_r14:
514*3446Smrj	.quad	0
515*3446Smrjsave_r15:
516*3446Smrj	.quad	0
/* 64-bit fs/gs/kgs base MSR values, saved/zeroed before leaving long mode */
517*3446Smrjsave_kgsbase:
518*3446Smrj	.quad	0
519*3446Smrjsave_gsbase:
520*3446Smrj	.quad	0
521*3446Smrjsave_fsbase:
522*3446Smrj	.quad	0
523*3446Smrjsave_cr8:
524*3446Smrj	.quad	0
525*3446Smrj#endif	/* __amd64 */
526*3446Smrj
/* 16-byte buffers for sidt/sgdt (2-byte limit + base, padded) */
527*3446Smrjsave_idt:
528*3446Smrj	.quad	0
529*3446Smrj	.quad	0
530*3446Smrj
531*3446Smrjsave_gdt:
532*3446Smrj	.quad	0
533*3446Smrj	.quad	0
534*3446Smrj
/* saved control and segment register state (only low bytes of the
 * .quad fields are meaningful for the segment selectors) */
535*3446Smrjsave_cr0:
536*3446Smrj	.quad	0
537*3446Smrjsave_cr3:
538*3446Smrj	.quad	0
539*3446Smrjsave_cr4:
540*3446Smrj	.quad	0
541*3446Smrjsave_cs:
542*3446Smrj	.quad	0
543*3446Smrjsave_ss:
544*3446Smrj	.value	0
545*3446Smrjsave_ds:
546*3446Smrj	.value	0
547*3446Smrjsave_es:
548*3446Smrj	.value	0
549*3446Smrjsave_fs:
550*3446Smrj	.value	0
551*3446Smrjsave_gs:
552*3446Smrj	.value	0
553*3446Smrjsave_tr:
554*3446Smrj	.value	0
555*3446Smrj
/* IDT "descriptor" for real mode: limit 0x3ff, base 0 = the BIOS IVT */
556*3446Smrjidt_info:
557*3446Smrj	.value 0x3ff
558*3446Smrj	.quad 0
559*3446Smrj
560*3446Smrj
561*3446Smrj/*
562*3446Smrj * We need to trampoline thru a gdt we have in low memory.
563*3446Smrj */
564*3446Smrj#include "../boot/boot_gdt.s"
565*3446Smrj#endif /* __lint */
566