/* sections */


#include <minix/config.h>
#include <minix/const.h>
#include <machine/asm.h>
#include <machine/interrupt.h>
#include <machine/vm.h>
#include "archconst.h"
#include "kernel/const.h"
#include "sconst.h"
#include <machine/multiboot.h>


/* Easy way to make functions */

/* Make a function of the form func(arg) */

#define STACKARG 8(%ebp)

#define ARG_EAX_ACTION(FUNCTION, ACTION)	;\
ENTRY(FUNCTION)				;\
	push	%ebp			;\
	mov	%esp, %ebp		;\
	mov	STACKARG, %eax		;\
	ACTION				;\
	pop	%ebp			;\
	ret

/* Make a function of the form ret = func() */
#define ARG_EAX_RETURN(FUNCTION, EXPR)	;\
ENTRY(FUNCTION)				;\
	push	%ebp			;\
	mov	%esp, %ebp		;\
	mov	EXPR, %eax		;\
	pop	%ebp			;\
	ret

/* Make a function of the form func(arg) that loads arg into DEST */
#define ARG_EAX_SET(FUNCTION, DEST)	;\
ENTRY(FUNCTION)				;\
	push	%ebp			;\
	mov	%esp, %ebp		;\
	mov	STACKARG, %eax		;\
	mov	%eax, DEST		;\
	jmp	0f /* a jump is required for some sets */ ;\
0:	pop	%ebp			;\
	ret

/* Make a function of the form func(arg) that loads the low 16 bits of arg
 * into DEST */
#define ARG_AX_SET(FUNCTION, DEST)	;\
ENTRY(FUNCTION)				;\
	push	%ebp			;\
	mov	%esp, %ebp		;\
	mov	STACKARG, %eax		;\
	mov	%ax, DEST		;\
	jmp	0f /* a jump is required for some sets */ ;\
0:	pop	%ebp			;\
	ret

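/*
 * For example, ARG_EAX_SET(write_cr3, %cr3) further down in this file
 * generates a routine that is callable from C roughly as in this sketch
 * (the prototype and the page-directory variable are illustrative, not
 * declarations taken from the kernel headers):
 *
 *	void write_cr3(u32_t value);
 *	...
 *	write_cr3(pagedir_phys);	// load a new page directory base
 */
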
/*
 * This file contains a number of assembly code utility routines needed by the
 * kernel.
 */

ENTRY(__main)
	ret


/*===========================================================================*/
/*				phys_insw				     */
/*===========================================================================*/
/*
 * PUBLIC void phys_insw(Port_t port, phys_bytes buf, size_t count);
 * Input an array from an I/O port.  Absolute address version of insw().
 */
/* transfer data from (disk controller) port to memory */
ENTRY(phys_insw)
	push	%ebp
	mov	%esp, %ebp
	cld
	push	%edi

	mov	8(%ebp), %edx	/* port to read from */
	mov	12(%ebp), %edi	/* destination addr */
	mov	16(%ebp), %ecx	/* byte count */
	shr	$1, %ecx	/* word count */
	rep insw	/* input many words */
	pop	%edi
	pop	%ebp
	ret


/*===========================================================================*/
/*				phys_insb				     */
/*===========================================================================*/
/*
 * PUBLIC void phys_insb(Port_t port, phys_bytes buf, size_t count);
 * Input an array from an I/O port.  Absolute address version of insb().
 */
/* transfer data from (disk controller) port to memory byte by byte */
ENTRY(phys_insb)
	push	%ebp
	mov	%esp, %ebp
	cld
	push	%edi

	mov	8(%ebp), %edx	/* port to read from */
	mov	12(%ebp), %edi	/* destination addr */
	mov	16(%ebp), %ecx	/* byte count */
	rep insb	/* input many bytes */
	pop	%edi
	pop	%ebp
	ret


/*===========================================================================*/
/*				phys_outsw				     */
/*===========================================================================*/
/*
 * PUBLIC void phys_outsw(Port_t port, phys_bytes buf, size_t count);
 * Output an array to an I/O port.  Absolute address version of outsw().
 */
/* transfer data from memory to (disk controller) port */
ENTRY(phys_outsw)
	push	%ebp
	mov	%esp, %ebp
	cld
	push	%esi

	mov	8(%ebp), %edx	/* port to write to */
	mov	12(%ebp), %esi	/* source addr */
	mov	16(%ebp), %ecx	/* byte count */
	shr	$1, %ecx	/* word count */
	rep outsw	/* output many words */
	pop	%esi
	pop	%ebp
	ret


/*===========================================================================*/
/*				phys_outsb				     */
/*===========================================================================*/
/*
 * PUBLIC void phys_outsb(Port_t port, phys_bytes buf, size_t count);
 * Output an array to an I/O port.  Absolute address version of outsb().
 */
/* transfer data from memory to (disk controller) port byte by byte */
ENTRY(phys_outsb)
	push	%ebp
	mov	%esp, %ebp
	cld
	push	%esi

	mov	8(%ebp), %edx	/* port to write to */
	mov	12(%ebp), %esi	/* source addr */
	mov	16(%ebp), %ecx	/* byte count */
	rep outsb	/* output many bytes */
	pop	%esi
	pop	%ebp
	ret


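/*
 * Usage sketch for the port I/O helpers above (port number and buffer name
 * are illustrative only): an ATA driver reading one 512-byte sector from the
 * primary data port into a physical buffer could do
 *
 *	phys_insw(0x1F0, buf_phys, 512);	// 512 bytes = 256 16-bit words
 */
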
/*===========================================================================*/
/*				phys_copy				     */
/*===========================================================================*/
/*
 * PUBLIC phys_bytes phys_copy(phys_bytes source, phys_bytes destination,
 *			phys_bytes bytecount);
 * Copy a block of data from anywhere to anywhere in physical memory.
 */
/*		es edi esi eip	 src dst len */
ENTRY(phys_copy)
	push	%ebp
	mov	%esp, %ebp

	cld
	push	%esi
	push	%edi

	mov	8(%ebp), %esi
	mov	12(%ebp), %edi
	mov	16(%ebp), %eax

	cmp	$10, %eax	/* avoid align overhead for small counts */
	jb	pc_small
	mov	%esi, %ecx	/* align source, hope target is too */
	neg	%ecx
	and	$3, %ecx	/* count for alignment */
	sub	%ecx, %eax

	rep	movsb (%esi), (%edi)
	mov	%eax, %ecx
	shr	$2, %ecx	/* count of dwords */

	rep	movsl (%esi), (%edi)
	and	$3, %eax
pc_small:
	xchg	%eax, %ecx	/* remainder */

	rep	movsb (%esi), (%edi)

	mov	$0, %eax		/* 0 means: no fault */
LABEL(phys_copy_fault)		/* kernel can send us here */
	pop	%edi
	pop	%esi
	pop	%ebp
	ret

LABEL(phys_copy_fault_in_kernel)	/* kernel can send us here */
	pop	%edi
	pop	%esi
	pop	%ebp
	mov	%cr2, %eax
	ret


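/*
 * Caller-side sketch of the fault protocol (hedged: on the fault path it is
 * the trap handler, not the code above, that decides what ends up in %eax):
 *
 *	phys_bytes faulted = phys_copy(src_phys, dst_phys, count);
 *	if (faulted != 0) {
 *		// the copy did not complete; handle the bad address
 *	}
 */
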
/*===========================================================================*/
/*				copy_msg_from_user			     */
/*===========================================================================*/
/*
 * int copy_msg_from_user(message * user_mbuf, message * dst);
 *
 * Copies a message of 64 bytes from user process space to a kernel buffer.
 * This function assumes that the process address space is installed (cr3
 * loaded).
 *
 * From the caller's point of view, this function either succeeds or returns
 * an error, which gives the caller a chance to respond accordingly. In fact,
 * it either succeeds or, if it generates a page fault, general protection
 * fault or other exception, the trap handler redirects execution to
 * __user_copy_msg_pointer_failure, where the error is reported to the caller
 * without resolving the page fault. It is not the kernel's job to deal with
 * bad pointers from userspace, and the caller should return an error to
 * userspace as if wrong values or a wrong request had been passed to the
 * kernel.
 */
ENTRY(copy_msg_from_user)
	/* load the source pointer */
	mov	4(%esp), %ecx
	/* load the destination pointer */
	mov	8(%esp), %edx

/*	mov	0*4(%ecx), %eax
	mov	%eax, 0*4(%edx) */
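	/*
	 * The first 32-bit word is deliberately not copied: it holds the
	 * message source field, which the kernel fills in itself rather than
	 * trusting a value supplied by userspace.
	 */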
	mov	1*4(%ecx), %eax
	mov	%eax, 1*4(%edx)
	mov	2*4(%ecx), %eax
	mov	%eax, 2*4(%edx)
	mov	3*4(%ecx), %eax
	mov	%eax, 3*4(%edx)
	mov	4*4(%ecx), %eax
	mov	%eax, 4*4(%edx)
	mov	5*4(%ecx), %eax
	mov	%eax, 5*4(%edx)
	mov	6*4(%ecx), %eax
	mov	%eax, 6*4(%edx)
	mov	7*4(%ecx), %eax
	mov	%eax, 7*4(%edx)
	mov	8*4(%ecx), %eax
	mov	%eax, 8*4(%edx)

	mov	 9*4(%ecx), %eax
	mov	%eax,  9*4(%edx)
	mov	10*4(%ecx), %eax
	mov	%eax, 10*4(%edx)
	mov	11*4(%ecx), %eax
	mov	%eax, 11*4(%edx)
	mov	12*4(%ecx), %eax
	mov	%eax, 12*4(%edx)
	mov	13*4(%ecx), %eax
	mov	%eax, 13*4(%edx)
	mov	14*4(%ecx), %eax
	mov	%eax, 14*4(%edx)
	mov	15*4(%ecx), %eax
	mov	%eax, 15*4(%edx)

LABEL(__copy_msg_from_user_end)
	movl	$0, %eax
	ret

/*===========================================================================*/
/*				copy_msg_to_user			     */
/*===========================================================================*/
/*
 * int copy_msg_to_user(message * src, message * user_mbuf);
 *
 * Copies a message of 64 bytes to user process space from a kernel buffer.
 *
 * All the other copy_msg_from_user() comments apply here as well!
 */
ENTRY(copy_msg_to_user)
	/* load the source pointer */
	mov	4(%esp), %ecx
	/* load the destination pointer */
	mov	8(%esp), %edx

	mov	0*4(%ecx), %eax
	mov	%eax, 0*4(%edx)
	mov	1*4(%ecx), %eax
	mov	%eax, 1*4(%edx)
	mov	2*4(%ecx), %eax
	mov	%eax, 2*4(%edx)
	mov	3*4(%ecx), %eax
	mov	%eax, 3*4(%edx)
	mov	4*4(%ecx), %eax
	mov	%eax, 4*4(%edx)
	mov	5*4(%ecx), %eax
	mov	%eax, 5*4(%edx)
	mov	6*4(%ecx), %eax
	mov	%eax, 6*4(%edx)
	mov	7*4(%ecx), %eax
	mov	%eax, 7*4(%edx)
	mov	8*4(%ecx), %eax
	mov	%eax, 8*4(%edx)

	mov	9*4(%ecx), %eax
	mov	%eax, 9*4(%edx)
	mov	10*4(%ecx), %eax
	mov	%eax, 10*4(%edx)
	mov	11*4(%ecx), %eax
	mov	%eax, 11*4(%edx)
	mov	12*4(%ecx), %eax
	mov	%eax, 12*4(%edx)
	mov	13*4(%ecx), %eax
	mov	%eax, 13*4(%edx)
	mov	14*4(%ecx), %eax
	mov	%eax, 14*4(%edx)
	mov	15*4(%ecx), %eax
	mov	%eax, 15*4(%edx)

LABEL(__copy_msg_to_user_end)
	movl	$0, %eax
	ret

/*
 * If a function from a selected set of copies from or to userspace fails, it
 * is because of a wrong pointer supplied by userspace. We have to clean up
 * and return -1 to indicate that something wrong has happened. The place it
 * was called from has to handle this situation. The exception handler
 * redirects us here to continue, clean up and report the error.
 */
ENTRY(__user_copy_msg_pointer_failure)
	movl	$-1, %eax
	ret

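/*
 * Caller-side sketch (illustrative; EFAULT as the error code is an
 * assumption, not taken from this file):
 *
 *	message kmsg;
 *	if (copy_msg_from_user(user_mbuf, &kmsg) != 0)
 *		return EFAULT;	// bad message pointer from userspace
 */
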
/*===========================================================================*/
/*				phys_memset				     */
/*===========================================================================*/
/*
 * PUBLIC phys_bytes phys_memset(phys_bytes dst, unsigned long pattern,
 *	phys_bytes bytecount);
 * Fill a block of physical memory with a pattern. Returns 0 on success or,
 * via memset_fault_in_kernel, the faulting address (as with phys_copy()).
 */
ENTRY(phys_memset)
	push	%ebp
	mov	%esp, %ebp
	push	%edi
	cld

	mov	8(%ebp), %edi
	mov	16(%ebp), %ecx
	mov	12(%ebp), %eax
	shr	$2, %ecx
	rep stosl

/* Any remaining bytes? */
	mov	16(%ebp), %ecx
	and	$3, %ecx
	rep stosb

LABEL(memset_fault)		/* kernel can send us here */
	mov	$0, %eax		/* 0 means: no fault */
	pop	%edi
	pop	%ebp
	ret

LABEL(memset_fault_in_kernel)		/* kernel can send us here */
	pop	%edi
	pop	%ebp
	mov	%cr2, %eax
	ret

/*===========================================================================*/
/*				x86_triplefault				     */
/*===========================================================================*/
/*
 * PUBLIC void x86_triplefault();
 * Reset the system by loading an all-zero IDT descriptor (limit and base 0)
 * and then raising an interrupt, which triple-faults the CPU.
 */
ENTRY(x86_triplefault)
	lidt	idt_zero
	int	$3	/* anything goes, the 386 will not like it */
.data
idt_zero:
.long	0, 0
.text


/*===========================================================================*/
/*			      	halt_cpu				     */
/*===========================================================================*/
/*
 * PUBLIC void halt_cpu(void);
 * Re-enables interrupts and puts the CPU in a halted state. Once an interrupt
 * is handled, execution resumes with interrupts disabled again.
 */
ENTRY(halt_cpu)
	sti
	hlt /* interrupts are enabled only after this instruction executes! */
	/*
	 * interrupt handlers make sure that the interrupts are disabled when we
	 * get here so we take only _one_ interrupt after halting the CPU
	 */
	ret

/*===========================================================================*/
/*			poweroff_vmware_clihlt				     */
/*===========================================================================*/
/*
 * PUBLIC void poweroff_vmware_clihlt(void);
 * VMware detects this peculiar sequence and forces the virtual machine off
 * when the parameter gui.exitOnCLIHLT is set to TRUE.
 * Otherwise this sequence just hangs the CPU, requiring a power-down action.
 */
ENTRY(poweroff_vmware_clihlt)
#ifndef NO_VMWARE_DETECTION
	mov	$1, %eax
	cpuid
	test	$[1<<31], %ecx /* "virtualized" */
	jz	1f	/* always 0 on real hardware */
	mov	$0x40000000, %eax /* select hypervisor-use leaf */
	cpuid
	cmp	$0x61774D56, %ebx /* ASCII "VMwa" */
	jne	1f
	cmp	$0x4D566572, %ecx /* ASCII "reVM" */
	jne	1f
	cmp	$0x65726177, %edx /* ASCII "ware" */
	jne	1f
	/* we are virtualized by some VMware product! */
#endif
	cli
	hlt
1:	ret

/*===========================================================================*/
/*			      read_cpu_flags				     */
/*===========================================================================*/
/*
 * PUBLIC unsigned long read_cpu_flags(void);
 * Read CPU status flags from C.
 */
ENTRY(read_cpu_flags)
	pushf
	mov	(%esp), %eax
	add	$4, %esp
	ret

ENTRY(read_ds)
	mov	$0, %eax
	mov	%ds, %ax
	ret

ENTRY(read_cs)
	mov	$0, %eax
	mov	%cs, %ax
	ret

ENTRY(read_ss)
	mov	$0, %eax
	mov	%ss, %ax
	ret

/*===========================================================================*/
/*                            fpu_routines                                   */
/*===========================================================================*/

/* non-waiting FPU initialization */
ENTRY(fninit)
	fninit
	ret

ENTRY(clts)
	clts
	ret

/* store status word (non-waiting) */
ENTRY(fnstsw)
	xor	%eax, %eax

	/* DO NOT CHANGE THE OPERAND!!! gas2ack does not handle it yet */
	fnstsw	%ax
	ret

/*===========================================================================*/
/*				fxrstor					     */
/*===========================================================================*/
ENTRY(fxrstor)
	mov	4(%esp), %eax
	fxrstor	(%eax)
ENTRY(__fxrstor_end)
	xor	%eax, %eax
	ret

/*===========================================================================*/
/*				frstor					     */
/*===========================================================================*/
ENTRY(frstor)
	mov	4(%esp), %eax
	frstor	(%eax)
ENTRY(__frstor_end)
	xor	%eax, %eax
	ret

/* Shared exception handler for both fxrstor and frstor. */
ENTRY(__frstor_failure)
	mov	$1, %eax
	ret

/* Read/write control registers */
ARG_EAX_RETURN(read_cr0, %cr0);
ARG_EAX_RETURN(read_cr2, %cr2);
ARG_EAX_RETURN(read_cr3, %cr3);
ARG_EAX_RETURN(read_cr4, %cr4);
ARG_EAX_SET(write_cr4, %cr4);
ARG_EAX_SET(write_cr0, %cr0);
ARG_EAX_SET(write_cr3, %cr3);

/* Read/write various descriptor tables */
ARG_EAX_ACTION(x86_ltr, ltr STACKARG );
ARG_EAX_ACTION(x86_lidt, lidtl (%eax));
ARG_EAX_ACTION(x86_lgdt, lgdt (%eax));
ARG_EAX_ACTION(x86_lldt, lldt STACKARG);
ARG_EAX_ACTION(x86_sgdt, sgdt (%eax));
ARG_EAX_ACTION(x86_sidt, sidt (%eax));

/* Load segments */
ARG_AX_SET(x86_load_ds, %ds)
ARG_AX_SET(x86_load_es, %es)
ARG_AX_SET(x86_load_fs, %fs)
ARG_AX_SET(x86_load_gs, %gs)
ARG_AX_SET(x86_load_ss, %ss)

/* FPU */
ARG_EAX_ACTION(fnsave, fnsave (%eax) ; fwait);
ARG_EAX_ACTION(fxsave, fxsave (%eax));
ARG_EAX_ACTION(fnstcw, fnstcw (%eax));

/* invlpg */
ARG_EAX_ACTION(i386_invlpg, invlpg (%eax));

ENTRY(x86_load_kerncs)
	push	%ebp
	mov	%esp, %ebp
	mov	8(%ebp), %eax
	jmp	$KERN_CS_SELECTOR, $newcs
newcs:
	pop	%ebp
	ret

/*
 * Read a Model Specific Register (MSR) on the IA32 architecture
 *
 * void ia32_msr_read(u32_t reg, u32_t * hi, u32_t * lo)
 */
ENTRY(ia32_msr_read)
	push	%ebp
	mov	%esp, %ebp

	mov	8(%ebp), %ecx
	rdmsr
	mov	12(%ebp), %ecx
	mov	%edx, (%ecx)
	mov	16(%ebp), %ecx
	mov	%eax, (%ecx)

	pop	%ebp
	ret

/*
 * Write a Model Specific Register (MSR) on the IA32 architecture
 *
 * void ia32_msr_write(u32_t reg, u32_t hi, u32_t lo)
 */
ENTRY(ia32_msr_write)
	push	%ebp
	mov	%esp, %ebp

	mov	12(%ebp), %edx
	mov	16(%ebp), %eax
	mov	8(%ebp), %ecx
	wrmsr

	pop	%ebp
	ret

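/*
 * Usage sketch: reading the 64-bit time-stamp counter MSR and reassembling
 * it in C (MSR index 0x10 is IA32_TIME_STAMP_COUNTER; the variable names are
 * illustrative):
 *
 *	u32_t hi, lo;
 *	ia32_msr_read(0x10, &hi, &lo);
 *	u64_t tsc = ((u64_t)hi << 32) | lo;
 */
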
/*===========================================================================*/
/*			      __switch_address_space			     */
/*===========================================================================*/
/* PUBLIC void __switch_address_space(struct proc *p, struct proc **ptproc)
 *
 * Sets the %cr3 register to the supplied value unless it already holds that
 * value, in which case reloading it would only cause an unnecessary TLB
 * flush.
 */
ENTRY(__switch_address_space)
	/* read the process pointer */
	mov	4(%esp), %edx
	/* get the new cr3 value */
	movl	P_CR3(%edx), %eax
	/* test if the new cr3 != NULL */
	cmpl	$0, %eax
	je	0f

	/*
	 * test if cr3 is already loaded with the new value to avoid
	 * unnecessary TLB flushes
	 */
	mov	%cr3, %ecx
	cmp	%ecx, %eax
	je	0f
	mov	%eax, %cr3
	/* get ptproc */
	mov	8(%esp), %eax
	mov	%edx, (%eax)
0:
	ret

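/*
 * Equivalent C sketch (P_CR3 is an offset into struct proc defined in
 * sconst.h; the p_seg.p_cr3 field name and helper functions are assumptions
 * based on the kernel headers, not declarations from this file):
 *
 *	void __switch_address_space(struct proc *p, struct proc **ptproc)
 *	{
 *		u32_t new_cr3 = p->p_seg.p_cr3;
 *		if (new_cr3 != 0 && new_cr3 != read_cr3()) {
 *			write_cr3(new_cr3);	// implies a TLB flush
 *			*ptproc = p;
 *		}
 *	}
 */
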
/* acknowledge just the master PIC */
ENTRY(eoi_8259_master)
	movb	$END_OF_INT, %al
	outb	$INT_CTL
	ret

/* we have to acknowledge both PICs */
ENTRY(eoi_8259_slave)
	movb	$END_OF_INT, %al
	outb	$INT_CTL
	outb	$INT2_CTL
	ret

/* in some cases we need to force a TLB update; reloading cr3 does the trick */
ENTRY(refresh_tlb)
	mov	%cr3, %eax
	mov	%eax, %cr3
	ret

#ifdef CONFIG_SMP

/*===========================================================================*/
/*			      smp_get_htt				     */
/*===========================================================================*/
/*  PUBLIC int smp_get_htt(void); */
/*  return true if the processor is hyper-threaded. */
ENTRY(smp_get_htt)
	push	%ebp
	mov	%esp, %ebp
	pushf
	pop	%eax
	mov	%eax, %ebx
	and	$0x200000, %eax
	je	0f
	mov	$0x1, %eax
/* FIXME don't use the byte code */
.byte	0x0f, 0xa2	/*  opcode for cpuid  */
	mov	%edx, %eax
	pop	%ebp
	ret
0:
	xor	%eax, %eax
	pop	%ebp
	ret

/*===========================================================================*/
/*			      smp_get_num_htt				     */
/*===========================================================================*/
/*  PUBLIC int smp_get_num_htt(void); */
/*  Get the number of hyper-threaded processor cores */
ENTRY(smp_get_num_htt)
	push	%ebp
	mov	%esp, %ebp
	pushf
	pop	%eax
	mov	%eax, %ebx
	and	$0x200000, %eax
	je	0f
	mov	$0x1, %eax
/* FIXME don't use the byte code */
.byte	0x0f, 0xa2	/*  opcode for cpuid  */
	mov	%ebx, %eax
	pop	%ebp
	ret
0:
	xor	%eax, %eax
	pop	%ebp
	ret

/*===========================================================================*/
/*			      smp_get_cores				    */
/*===========================================================================*/
/*  PUBLIC int smp_get_cores(void); */
/*  Get the number of cores. */
ENTRY(smp_get_cores)
	push	%ebp
	mov	%esp, %ebp
	pushf
	pop	%eax
	mov	%eax, %ebx
	and	$0x200000, %eax
	je	0f
	push	%ecx
	xor	%ecx, %ecx
	mov	$0x4, %eax
/* FIXME don't use the byte code */
.byte	0x0f, 0xa2	/*  opcode for cpuid  */
	pop	%ecx	/* balance the push above, or ret pops a bad address */
	pop	%ebp
	ret
0:
	xor	%eax, %eax
	pop	%ebp
	ret

/*===========================================================================*/
/*				arch_spinlock_lock				    */
/*===========================================================================*/
/* void arch_spinlock_lock (u32_t  *lock_data)
 * {
 * 	while (test_and_set(lock_data) == 1)
 *		while (*lock_data == 1)
 *			;
 * }
 * eax register is clobbered.
 */
ENTRY(arch_spinlock_lock)
	mov	4(%esp), %eax
	mov	$1, %edx
2:
	mov	$1, %ecx
	xchg	%ecx, (%eax)
	test	%ecx, %ecx
	je	0f

	cmp	$(1 << 16), %edx
	je	1f
	shl	%edx
1:
	mov	%edx, %ecx
3:
	pause
	sub	$1, %ecx
	test	%ecx, %ecx
	jz	2b
	jmp	3b
0:
	mfence
	ret

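/*
 * What the loop above actually implements is the pseudo-code at the top with
 * exponential backoff added (C sketch; xchg() and pause() stand in for the
 * XCHG and PAUSE instructions):
 *
 *	u32_t delay = 1;
 *	while (xchg(lock_data, 1) != 0) {
 *		if (delay < (1 << 16))
 *			delay <<= 1;		// back off exponentially
 *		for (u32_t i = delay; i > 0; i--)
 *			pause();		// spin before retrying
 *	}
 */
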
/*===========================================================================*/
/*				arch_spinlock_unlock			     */
/*===========================================================================*/
/* void arch_spinlock_unlock (unsigned int *lockp) */
/*  spin lock release routine. */
ENTRY(arch_spinlock_unlock)
	mov	4(%esp), %eax
	mov	$0, %ecx
	xchg	%ecx, (%eax)
	mfence
	ret

#endif /* CONFIG_SMP */

/*===========================================================================*/
/*			      mfence					     */
/*===========================================================================*/
/*  PUBLIC void mfence (void); */
/*  architecture specific memory barrier routine. */
ENTRY(mfence)
	mfence
	ret

/*===========================================================================*/
/*			      arch_pause				     */
/*===========================================================================*/
/*  PUBLIC void arch_pause (void); */
/*  architecture specific pause routine. */
ENTRY(arch_pause)
	pause
	ret

/*===========================================================================*/
/*			      read_ebp				     	     */
/*===========================================================================*/
/*  PUBLIC reg_t read_ebp(void) */
ENTRY(read_ebp)
	mov	%ebp, %eax
	ret

ENTRY(interrupts_enable)
	sti
	ret

ENTRY(interrupts_disable)
	cli
	ret


/*
 * void switch_k_stack(void * esp, void (* continuation)(void));
 *
 * sets the current stack pointer to the given value and continues execution at
 * the given address
 */
ENTRY(switch_k_stack)
	/* get the arguments from the stack */
	mov	8(%esp), %eax
	mov	4(%esp), %ecx
	mov	$0, %ebp	/* reset %ebp for stack trace */
	mov	%ecx, %esp	/* set the new stack */
	jmp	*%eax		/* and jump to the continuation */

	/* NOT_REACHABLE */
0:	jmp	0b

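/*
 * Usage sketch (the stack-top symbol and continuation function are
 * hypothetical):
 *
 *	static void restart_scheduling(void);	// must never return
 *	...
 *	switch_k_stack(&k_stack_top, restart_scheduling);
 *	// not reached
 */
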
.data
idt_ptr:
	.short 0x3ff
	.long 0x0

ldtsel:
	.long LDT_SELECTOR