/*	$NetBSD: strmacros.h,v 1.1 2013/03/17 00:42:32 christos Exp $	*/

/*
 * Copyright (c) 1996-2002 Eduardo Horvath
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <machine/asm.h>
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
#define USE_BLOCK_STORE_LOAD	/* enable block load/store ops */
#include "assym.h"
#include <machine/param.h>
#include <machine/ctlreg.h>
#include <machine/psl.h>
#include <machine/frame.h>
#include <machine/intr.h>
#include <machine/locore.h>

#ifdef USE_BLOCK_STORE_LOAD

#define	BLOCK_SIZE	SPARC64_BLOCK_SIZE
#define	BLOCK_ALIGN	SPARC64_BLOCK_ALIGN

/*
 * The following routines allow fpu use in the kernel.
 *
 * They allocate a stack frame and use all local regs.  Extra
 * local storage can be requested by setting the siz parameter,
 * and can be accessed at %sp+CC64FSZ.
 */

/*
 * ENABLE_FPU(siz): carve a block-aligned fpstate out of the new stack
 * frame, save any live FPU state, install our temporary fpstate as the
 * current one, and turn the FPU on (FPRS_FEF).  Undone by RESTORE_FPU.
 *
 * Local register usage (new register window; RESTORE_FPU relies on these):
 *	%l0 = our temporary fpstate (below %fp, BLOCK_ALIGN aligned)
 *	%l1 = %hi(FPLWP)
 *	%l2 = previous fplwp (NULL if FPU was idle)
 *	%l3 = previous fplwp's fpstate, or NULL if nothing was saved
 *	%l4 = scratch (stack-range constants / label addresses)
 *	%l5 = lwp whose L_FPSTATE we replace (curlwp, or lwp0 on the
 *	      interrupt stack or when curlwp is NULL)
 *	%l6 = that lwp's previous L_FPSTATE (restored by RESTORE_FPU)
 *
 * NOTE(review): per this file's convention an instruction indented by one
 * extra space sits in the delay slot of the preceding branch/call.
 */
#define ENABLE_FPU(siz) \
	save	%sp, -(CC64FSZ), %sp;		/* Allocate a stack frame */ \
	sethi	%hi(FPLWP), %l1; \
	add	%fp, STKB-FS_SIZE, %l0;		/* Allocate a fpstate */ \
	LDPTR	[%l1 + %lo(FPLWP)], %l2;	/* Load fplwp */ \
	andn	%l0, BLOCK_ALIGN, %l0;		/* Align it */ \
	clr	%l3;				/* NULL fpstate */ \
	brz,pt	%l2, 1f;			/* fplwp == NULL? */ \
	 add	%l0, -STKB-CC64FSZ-(siz), %sp;	/* Set proper %sp */ \
	LDPTR	[%l2 + L_FPSTATE], %l3; \
	brz,pn	%l3, 1f;			/* Make sure we have an fpstate */ \
	 mov	%l3, %o0; \
	call	_C_LABEL(savefpstate);		/* Save the old fpstate */ \
1: \
	set	EINTSTACK-STKB, %l4;		/* Are we on intr stack? */ \
	cmp	%sp, %l4; \
	bgu,pt	%xcc, 1f; \
	 set	INTSTACK-STKB, %l4; \
	cmp	%sp, %l4; \
	blu	%xcc, 1f; \
0: \
	 sethi	%hi(_C_LABEL(lwp0)), %l4;	/* Yes, use lwp0 */ \
	ba,pt	%xcc, 2f;	/* XXXX needs to change to CPUs idle proc */ \
	 or	%l4, %lo(_C_LABEL(lwp0)), %l5; \
1: \
	sethi	%hi(CURLWP), %l4;		/* Use curlwp */ \
	LDPTR	[%l4 + %lo(CURLWP)], %l5; \
	brz,pn	%l5, 0b; nop;	/* If curlwp is NULL need to use lwp0 */ \
2: \
	LDPTR	[%l5 + L_FPSTATE], %l6;		/* Save old fpstate */ \
	STPTR	%l0, [%l5 + L_FPSTATE];		/* Insert new fpstate */ \
	STPTR	%l5, [%l1 + %lo(FPLWP)];	/* Set new fplwp */ \
	wr	%g0, FPRS_FEF, %fprs		/* Enable FPU */

/*
 * We've saved our possible fpstate, now disable the fpu
 * and continue with life.
 */
#ifdef DEBUG
/*
 * DEBUG sanity check for RESTORE_FPU: verify that the fpstate installed
 * by ENABLE_FPU (%l0) is still the lwp's L_FPSTATE; trap (#1) if not.
 */
#define __CHECK_FPU \
	LDPTR	[%l5 + L_FPSTATE], %l7; \
	cmp	%l7, %l0; \
	tnz	1;
#else
#define	__CHECK_FPU
#endif

/*
 * RESTORE_FPU: undo ENABLE_FPU using the registers it left behind —
 * put back the previous fplwp (%l2) and the lwp's previous L_FPSTATE
 * (%l6), disable the FPU, and reload the saved state (%l3) if any.
 * NOTE(review): the extra-space indent marks a branch/call delay slot.
 */
#define RESTORE_FPU \
	__CHECK_FPU \
	STPTR	%l2, [%l1 + %lo(FPLWP)];	/* Restore old fplwp */ \
	wr	%g0, 0, %fprs;			/* Disable fpu */ \
	brz,pt	%l3, 1f;			/* Skip if no fpstate */ \
	 STPTR	%l6, [%l5 + L_FPSTATE];		/* Restore old fpstate */ \
\
	mov	%l3, %o0; \
	call	_C_LABEL(loadfpstate);		/* Reload orig fpstate */ \
1: \
	membar	#Sync;				/* Finish all FP ops */

#endif	/* USE_BLOCK_STORE_LOAD */

#ifdef USE_BLOCK_STORE_LOAD
/* Block-store ASI: the committing variant is disabled (#if 0). */
#if 0
#define ASI_STORE	ASI_BLK_COMMIT_P
#else
#define ASI_STORE	ASI_BLK_P
#endif
#endif	/* USE_BLOCK_STORE_LOAD */
#endif	/* _KERNEL && !_RUMPKERNEL */