/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

# ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/asm_linkage.h>
#include <sys/vtrace.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/asi.h>
#include <sys/fsr.h>
#include <sys/privregs.h>

#if !defined(lint)
#include "assym.h"
#endif	/* lint */


/*
 * Less than or equal to this number of bytes we will always copy byte-for-byte
 */
#define	SMALL_LIMIT	7

/*
 * LOFAULT_SET : Flag set by kzero and kcopy to indicate that t_lofault
 * handler was set
 */
#define	LOFAULT_SET 2


/*
 * Copy a block of storage, returning an error code if `from' or
 * `to' takes a kernel pagefault which cannot be resolved.
 * Returns errno value on pagefault error, 0 if all ok
 */

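/*
 * Illustrative only: a minimal C sketch of how a caller might use kcopy()
 * given the contract described above (errno value on an unresolvable
 * pagefault, 0 on success).  The wrapper name here is hypothetical and is
 * not part of this file.
 *
 *	int
 *	copy_record(const void *src, void *dst, size_t len)
 *	{
 *		int err;
 *
 *		err = kcopy(src, dst, len);
 *		if (err != 0)
 *			return (err);	(pagefault could not be resolved)
 *		return (0);
 *	}
 */
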
#if defined(lint)

/* ARGSUSED */
int
kcopy(const void *from, void *to, size_t count)
{ return(0); }

#else	/* lint */

	.seg	".text"
	.align	4

	ENTRY(kcopy)

	save	%sp, -SA(MINFRAME), %sp
	set	.copyerr, %l7		! copyerr is lofault value
	ldn	[THREAD_REG + T_LOFAULT], %o5	! save existing handler
	or	%o5, LOFAULT_SET, %o5
	membar	#Sync			! sync error barrier
	b	.do_copy		! common code
	stn	%l7, [THREAD_REG + T_LOFAULT]	! set t_lofault

/*
 * We got here because of a fault during kcopy.
 * Errno value is in %g1.
 */
.copyerr:
	! kcopy() *always* sets a t_lofault handler, and it ORs LOFAULT_SET
	! into %o5 to indicate that it has done so.  The LOFAULT_SET flag must
	! be cleared before restoring the error handler.
	andn	%o5, LOFAULT_SET, %o5
	membar	#Sync			! sync error barrier
	stn	%o5, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	ret
	restore	%g1, 0, %o0

	SET_SIZE(kcopy)
#endif	/* lint */


/*
 * Copy a block of storage - must not overlap (from + len <= to).
 */
#if defined(lint)

/* ARGSUSED */
void
bcopy(const void *from, void *to, size_t count)
{}

#else	/* lint */

	ENTRY(bcopy)

	save	%sp, -SA(MINFRAME), %sp
	clr	%o5			! flag LOFAULT_SET is not set for bcopy

.do_copy:
	mov	%i1, %g5		! save dest addr start

	mov	%i2, %l6		! save size

	cmp	%i2, 12			! for small counts
	blu	%ncc, .bytecp		! just copy bytes
	.empty

	!
	! use aligned transfers where possible
	!
	xor	%i0, %i1, %o4		! xor from and to address
	btst	7, %o4			! if lower three bits zero
	bz	.aldoubcp		! can align on double boundary
	.empty				! assembler complains about label

	xor	%i0, %i1, %o4		! xor from and to address
	btst	3, %o4			! if lower two bits zero
	bz	.alwordcp		! can align on word boundary
	btst	3, %i0			! delay slot, from address unaligned?
	!
	! use aligned reads and writes where possible
	! this differs from wordcp in that it copes
	! with odd alignment between source and destination
	! using word reads and writes with the proper shifts
	! in between to align transfers to and from memory
	! i0 - src address, i1 - dest address, i2 - count
	! i3, i4 - tmps used for generating complete word
	! i5 (word to write)
	! l0 size in bits of upper part of source word (US)
	! l1 size in bits of lower part of source word (LS = 32 - US)
	! l2 size in bits of upper part of destination word (UD)
	! l3 size in bits of lower part of destination word (LD = 32 - UD)
	! l4 number of bytes leftover after aligned transfers complete
	! l5 the number 32
	!
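	! A rough C sketch (illustrative only) of the shift-and-merge idea
	! used below when source and destination are mutually misaligned:
	! aligned source words are read, the leftover bits are carried in
	! 'prev', and each aligned destination word is built from two
	! neighbouring source words.  US and LS correspond to %l0 and %l1;
	! in the real code 'prev' starts out holding the bytes read while
	! aligning, not zero.
	!
	!	const uint32_t *sp = (const uint32_t *)src;
	!	uint32_t *dp = (uint32_t *)dst;
	!	uint32_t prev = 0, word;
	!
	!	while (count >= 4) {
	!		word = *sp++;			(aligned 32-bit read)
	!		*dp++ = prev | (word >> US);	(old high, new low bits)
	!		prev = word << LS;		(LS == 32 - US)
	!		count -= 4;
	!	}
	!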
	mov	32, %l5			! load an oft-needed constant
	bz	.align_dst_only
	btst	3, %i1			! is destination address aligned?
	clr	%i4			! clear registers used in either case
	bz	.align_src_only
	clr	%l0
	!
	! both source and destination addresses are unaligned
	!
1:					! align source
	ldub	[%i0], %i3		! read a byte from source address
	add	%i0, 1, %i0		! increment source address
	or	%i4, %i3, %i4		! or in with previous bytes (if any)
	btst	3, %i0			! is source aligned?
	add	%l0, 8, %l0		! increment size of upper source (US)
	bnz,a	1b
	sll	%i4, 8, %i4		! make room for next byte

	sub	%l5, %l0, %l1		! generate shift left count (LS)
	sll	%i4, %l1, %i4		! prepare to get rest
	ld	[%i0], %i3		! read a word
	add	%i0, 4, %i0		! increment source address
	srl	%i3, %l0, %i5		! upper src bits into lower dst bits
	or	%i4, %i5, %i5		! merge
	mov	24, %l3			! align destination
1:
	srl	%i5, %l3, %i4		! prepare to write a single byte
	stb	%i4, [%i1]		! write a byte
	add	%i1, 1, %i1		! increment destination address
	sub	%i2, 1, %i2		! decrement count
	btst	3, %i1			! is destination aligned?
	bnz,a	1b
	sub	%l3, 8, %l3		! delay slot, decrement shift count (LD)
	sub	%l5, %l3, %l2		! generate shift left count (UD)
	sll	%i5, %l2, %i5		! move leftover into upper bytes
	cmp	%l2, %l0		! cmp # reqd to fill dst w old src left
	bgu	%ncc, .more_needed	! need more to fill than we have
	nop

	sll	%i3, %l1, %i3		! clear upper used byte(s)
	srl	%i3, %l1, %i3
	! get the odd bytes between alignments
	sub	%l0, %l2, %l0		! regenerate shift count
	sub	%l5, %l0, %l1		! generate new shift left count (LS)
	and	%i2, 3, %l4		! must do remaining bytes if count%4 > 0
	andn	%i2, 3, %i2		! # of aligned bytes that can be moved
	srl	%i3, %l0, %i4
	or	%i5, %i4, %i5
	st	%i5, [%i1]		! write a word
	subcc	%i2, 4, %i2		! decrement count
	bz	%ncc, .unalign_out
	add	%i1, 4, %i1		! increment destination address

	b	2f
	sll	%i3, %l1, %i5		! get leftover into upper bits
.more_needed:
	sll	%i3, %l0, %i3		! save remaining byte(s)
	srl	%i3, %l0, %i3
	sub	%l2, %l0, %l1		! regenerate shift count
	sub	%l5, %l1, %l0		! generate new shift left count
	sll	%i3, %l1, %i4		! move to fill empty space
	b	3f
	or	%i5, %i4, %i5		! merge to complete word
	!
	! the source address is aligned and destination is not
	!
.align_dst_only:
	ld	[%i0], %i4		! read a word
	add	%i0, 4, %i0		! increment source address
	mov	24, %l0			! initial shift alignment count
1:
	srl	%i4, %l0, %i3		! prepare to write a single byte
	stb	%i3, [%i1]		! write a byte
	add	%i1, 1, %i1		! increment destination address
	sub	%i2, 1, %i2		! decrement count
	btst	3, %i1			! is destination aligned?
	bnz,a	1b
	sub	%l0, 8, %l0		! delay slot, decrement shift count
.xfer:
	sub	%l5, %l0, %l1		! generate shift left count
	sll	%i4, %l1, %i5		! get leftover
3:
	and	%i2, 3, %l4		! must do remaining bytes if count%4 > 0
	andn	%i2, 3, %i2		! # of aligned bytes that can be moved
2:
	ld	[%i0], %i3		! read a source word
	add	%i0, 4, %i0		! increment source address
	srl	%i3, %l0, %i4		! upper src bits into lower dst bits
	or	%i5, %i4, %i5		! merge with upper dest bits (leftover)
	st	%i5, [%i1]		! write a destination word
	subcc	%i2, 4, %i2		! decrement count
	bz	%ncc, .unalign_out	! check if done
	add	%i1, 4, %i1		! increment destination address
	b	2b			! loop
	sll	%i3, %l1, %i5		! get leftover
.unalign_out:
	tst	%l4			! any bytes leftover?
	bz	%ncc, .cpdone
	.empty				! allow next instruction in delay slot
1:
	sub	%l0, 8, %l0		! decrement shift
	srl	%i3, %l0, %i4		! upper src byte into lower dst byte
	stb	%i4, [%i1]		! write a byte
	subcc	%l4, 1, %l4		! decrement count
	bz	%ncc, .cpdone		! done?
	add	%i1, 1, %i1		! increment destination
	tst	%l0			! any more previously read bytes
	bnz	%ncc, 1b		! we have leftover bytes
	mov	%l4, %i2		! delay slot, mv cnt where dbytecp wants
	b	.dbytecp		! let dbytecp do the rest
	sub	%i0, %i1, %i0		! i0 gets the difference of src and dst
	!
	! the destination address is aligned and the source is not
	!
.align_src_only:
	ldub	[%i0], %i3		! read a byte from source address
	add	%i0, 1, %i0		! increment source address
	or	%i4, %i3, %i4		! or in with previous bytes (if any)
	btst	3, %i0			! is source aligned?
	add	%l0, 8, %l0		! increment shift count (US)
	bnz,a	.align_src_only
	sll	%i4, 8, %i4		! make room for next byte
	b,a	.xfer
	!
	! if the from address is unaligned for double-word moves,
	! move bytes until it is; if count is < 56 it could take
	! longer to align the thing than to do the transfer
	! in word size chunks right away
	!
.aldoubcp:
	cmp	%i2, 56			! if count < 56, use wordcp, it takes
	blu,a	%ncc, .alwordcp		! longer to align doubles than words
	mov	3, %o0			! mask for word alignment
	call	.alignit		! copy bytes until aligned
	mov	7, %o0			! mask for double alignment
	!
	! source and destination are now double-word aligned
	! i3 has aligned count returned by alignit
	!
	and	%i2, 7, %i2		! unaligned leftover count
	sub	%i0, %i1, %i0		! i0 gets the difference of src and dst
5:
	ldx	[%i0+%i1], %o4		! read from address
	stx	%o4, [%i1]		! write at destination address
	subcc	%i3, 8, %i3		! dec count
	bgu	%ncc, 5b
	add	%i1, 8, %i1		! delay slot, inc to address
	cmp	%i2, 4			! see if we can copy a word
	blu	%ncc, .dbytecp		! if 3 or less bytes use bytecp
	.empty
	!
	! for leftover bytes we fall into wordcp, if needed
	!
.wordcp:
	and	%i2, 3, %i2		! unaligned leftover count
5:
	ld	[%i0+%i1], %o4		! read from address
	st	%o4, [%i1]		! write at destination address
	subcc	%i3, 4, %i3		! dec count
	bgu	%ncc, 5b
	add	%i1, 4, %i1		! delay slot, inc to address
	b,a	.dbytecp

	! we come here to align copies on word boundaries
.alwordcp:
	call	.alignit		! go word-align it
	mov	3, %o0			! bits that must be zero to be aligned
	b	.wordcp
	sub	%i0, %i1, %i0		! i0 gets the difference of src and dst

	!
	! byte copy, works with any alignment
	!
.bytecp:
	b	.dbytecp
	sub	%i0, %i1, %i0		! i0 gets difference of src and dst

	!
	! differenced byte copy, works with any alignment
	! assumes dest in %i1 and (source - dest) in %i0
	!
1:
	stb	%o4, [%i1]		! write to address
	inc	%i1			! inc to address
.dbytecp:
	deccc	%i2			! dec count
	bgeu,a	%ncc, 1b		! loop till done
	ldub	[%i0+%i1], %o4		! read from address
.cpdone:
	membar	#Sync			! sync error barrier
	! Restore t_lofault handler, if we came here from kcopy().
	tst	%o5
	bz	%ncc, 1f
	andn	%o5, LOFAULT_SET, %o5
	stn	%o5, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
1:
	mov	%g5, %o0		! copy dest address
	call	sync_icache
	mov	%l6, %o1		! saved size
	ret
	restore	%g0, 0, %o0		! return (0)

/*
 * Common code used to align transfers on word and doubleword
 * boundaries.  Aligns source and destination and returns a count
 * of aligned bytes to transfer in %i3
 */
1:
	inc	%i0			! inc from
	stb	%o4, [%i1]		! write a byte
	inc	%i1			! inc to
	dec	%i2			! dec count
.alignit:
	btst	%o0, %i0		! %o0 is bit mask to check for alignment
	bnz,a	1b
	ldub	[%i0], %o4		! read next byte

	retl
	andn	%i2, %o0, %i3		! return size of aligned bytes
	SET_SIZE(bcopy)

#endif	/* lint */

/*
 * Block copy with possibly overlapped operands.
 */

#if defined(lint)

/*ARGSUSED*/
void
ovbcopy(const void *from, void *to, size_t count)
{}

#else	/* lint */

	ENTRY(ovbcopy)
	tst	%o2			! check count
	bgu,a	%ncc, 1f		! nothing to do or bad arguments
	subcc	%o0, %o1, %o3		! difference of from and to address

	retl				! return
	nop
1:
	bneg,a	%ncc, 2f
	neg	%o3			! if < 0, make it positive
2:	cmp	%o2, %o3		! cmp size and abs(from - to)
	bleu	%ncc, bcopy		! if size <= abs(diff): use bcopy,
	.empty				!   no overlap
	cmp	%o0, %o1		! compare from and to addresses
	blu	%ncc, .ov_bkwd		! if from < to, copy backwards
	nop
	!
	! Copy forwards.
	!
.ov_fwd:
	ldub	[%o0], %o3		! read from address
	inc	%o0			! inc from address
	stb	%o3, [%o1]		! write to address
	deccc	%o2			! dec count
	bgu	%ncc, .ov_fwd		! loop till done
	inc	%o1			! inc to address

	retl				! return
	nop
	!
	! Copy backwards.
	!
.ov_bkwd:
	deccc	%o2			! dec count
	ldub	[%o0 + %o2], %o3	! get byte at end of src
	bgu	%ncc, .ov_bkwd		! loop till done
	stb	%o3, [%o1 + %o2]	! delay slot, store at end of dst

	retl				! return
	nop
	SET_SIZE(ovbcopy)

#endif	/* lint */
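/*
 * Illustrative only: the direction choice ovbcopy() makes above, written as
 * a C sketch.  The assembly above is the real implementation; this is just
 * a restatement of its control flow.
 *
 *	if (count == 0)
 *		return;
 *	diff = (char *)from - (char *)to;
 *	if (diff < 0)
 *		diff = -diff;
 *	if (count <= (size_t)diff) {
 *		bcopy(from, to, count);		(regions cannot overlap)
 *	} else if (from < to) {
 *		while (count-- > 0)		(copy backwards, last byte first)
 *			((char *)to)[count] = ((const char *)from)[count];
 *	} else {
 *		copy forwards a byte at a time, as in .ov_fwd above
 *	}
 */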
/*
 * hwblkpagecopy()
 *
 * Copies exactly one page.  This routine assumes the caller (ppcopy)
 * has already disabled kernel preemption and has checked
 * use_hw_bcopy.
 */
#ifdef lint
/*ARGSUSED*/
void
hwblkpagecopy(const void *src, void *dst)
{ }
#else	/* lint */
	ENTRY(hwblkpagecopy)
	save	%sp, -SA(MINFRAME), %sp

	! %i0 - source address (arg)
	! %i1 - destination address (arg)
	! %i2 - length of region (not arg)

	set	PAGESIZE, %i2
	mov	%i1, %o0		! store destination address for flushing

	/*
	 * Copying exactly one page; PAGESIZE is a multiple of 0x80.
	 */
1:
	ldx	[%i0+0x0], %l0
	ldx	[%i0+0x8], %l1
	ldx	[%i0+0x10], %l2
	ldx	[%i0+0x18], %l3
	ldx	[%i0+0x20], %l4
	ldx	[%i0+0x28], %l5
	ldx	[%i0+0x30], %l6
	ldx	[%i0+0x38], %l7
	stx	%l0, [%i1+0x0]
	stx	%l1, [%i1+0x8]
	stx	%l2, [%i1+0x10]
	stx	%l3, [%i1+0x18]
	stx	%l4, [%i1+0x20]
	stx	%l5, [%i1+0x28]
	stx	%l6, [%i1+0x30]
	stx	%l7, [%i1+0x38]

	ldx	[%i0+0x40], %l0
	ldx	[%i0+0x48], %l1
	ldx	[%i0+0x50], %l2
	ldx	[%i0+0x58], %l3
	ldx	[%i0+0x60], %l4
	ldx	[%i0+0x68], %l5
	ldx	[%i0+0x70], %l6
	ldx	[%i0+0x78], %l7
	stx	%l0, [%i1+0x40]
	stx	%l1, [%i1+0x48]
	stx	%l2, [%i1+0x50]
	stx	%l3, [%i1+0x58]
	stx	%l4, [%i1+0x60]
	stx	%l5, [%i1+0x68]
	stx	%l6, [%i1+0x70]
	stx	%l7, [%i1+0x78]

	add	%i0, 0x80, %i0
	subcc	%i2, 0x80, %i2
	bgu,pt	%xcc, 1b
	add	%i1, 0x80, %i1

	! %o0 contains the dest. address
	set	PAGESIZE, %o1
	call	sync_icache
	nop

	membar	#Sync
	ret
	restore	%g0, 0, %o0
	SET_SIZE(hwblkpagecopy)
#endif	/* lint */


/*
 * Transfer data to and from user space -
 * Note that these routines can cause faults
 * It is assumed that the kernel has nothing at
 * less than KERNELBASE in the virtual address space.
 *
 * Note that copyin(9F) and copyout(9F) are part of the
 * DDI/DKI which specifies that they return '-1' on "errors."
 *
 * Sigh.
 *
 * So there are two extremely similar routines - xcopyin() and xcopyout()
 * which return the errno that we've faithfully computed.  This
 * allows other callers (e.g. uiomove(9F)) to work correctly.
 * Given that these are used pretty heavily, we expand the calling
 * sequences inline for all flavours (rather than making wrappers).
 *
 * There are also stub routines for xcopyout_little and xcopyin_little,
 * which currently are intended to handle requests of <= 16 bytes from
 * do_unaligned. Future enhancement to make them handle 8k pages efficiently
 * is left as an exercise...
 */

/*
 * Copy user data to kernel space (copyOP/xcopyOP/copyOP_noerr)
 *
 * General theory of operation:
 *
 * None of the copyops routines grab a window.
 *
 * Flow:
 *
 * If count == zero return zero.
 *
 * Store the previous lo_fault handler into %g6.
 * Place our secondary lofault handler into %g5.
 * Place the address of our fault handler into %o3.
 *
 * If count is less than or equal to SMALL_LIMIT (7) we
 * always do a byte for byte copy.
 *
 * If count is > SMALL_LIMIT, we check the alignment of the input
 * and output pointers.  We store -count in %o3, and we store the number
 * of chunks (8, 4, 2 or 1 byte) operated on in our basic copy loop
 * in %o2.  Following this we branch to the appropriate copy loop and
 * copy that many chunks.  Since we've been adding the chunk size
 * to %o3 each time through as well as decrementing %o2, we can tell
 * if any data is left to be copied by examining %o3.  If that is
 * zero, we're done and can go home.  If not, we figure out what the
 * largest chunk size left to be copied is and branch to that copy
 * loop unless there's only one byte left.  We load that as we're
 * branching to code that stores it just before we return.
 *
 * Fault handlers are invoked if we reference memory that has no
 * current mapping.  All forms share the same copyio_fault handler.
 * This routine handles fixing up the stack and general housecleaning.
 * Each copy operation has a simple fault handler that is then called
 * to do the work specific to the individual operation.  The handlers
 * for copyOP and xcopyOP are found at the end of the individual functions.
 * The handlers for xcopyOP_little are found at the end of xcopyin_little.
 * The handlers for copyOP_noerr are found at the end of copyin_noerr.
 */
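/*
 * Illustrative only: the negative-offset walk described above, as a C
 * sketch.  Both pointers are advanced to their buffer ends once and a
 * single negative index then walks both buffers, which is what keeps the
 * inner loops below at four instructions.  Variable names here are
 * hypothetical.
 *
 *	ssize_t off = -(ssize_t)count;
 *	const char *send = (const char *)src + count;
 *	char *dend = (char *)dst + count;
 *
 *	do {
 *		dend[off] = send[off];
 *	} while (++off != 0);
 */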

/*
 * Copy kernel data to user space (copyout/xcopyout/xcopyout_little).
 */

#if defined(lint)

/*ARGSUSED*/
int
copyout(const void *kaddr, void *uaddr, size_t count)
{ return (0); }

#else	/* lint */

/*
 * We save the arguments in the following registers in case of a fault:
 *	kaddr - %g2
 *	uaddr - %g3
 *	count - %g4
 */
#define	SAVE_SRC	%g2
#define	SAVE_DST	%g3
#define	SAVE_COUNT	%g4

#define	REAL_LOFAULT	%g5
#define	SAVED_LOFAULT	%g6

/*
 * Generic copyio fault handler.  This is the first line of defense when a
 * fault occurs in (x)copyin/(x)copyout.  In order for this to function
 * properly, the value of the 'real' lofault handler should be in
 * REAL_LOFAULT.  This allows us to share common code for all the flavors
 * of the copy operations, including the _noerr versions.
 *
 * Note that this function will restore the original input parameters before
 * calling REAL_LOFAULT.  So the real handler can vector to the appropriate
 * member of the t_copyop structure, if needed.
 */
	ENTRY(copyio_fault)
	membar	#Sync
	stn	SAVED_LOFAULT, [THREAD_REG + T_LOFAULT]	! restore old t_lofault

	mov	SAVE_SRC, %o0
	mov	SAVE_DST, %o1
	jmp	REAL_LOFAULT
	mov	SAVE_COUNT, %o2
	SET_SIZE(copyio_fault)

	ENTRY(copyout)
	sethi	%hi(.copyout_err), REAL_LOFAULT
	or	REAL_LOFAULT, %lo(.copyout_err), REAL_LOFAULT

.do_copyout:
	!
	! Check the length and bail if zero.
	!
	tst	%o2
	bnz,pt	%ncc, 1f
	nop
	retl
	clr	%o0
1:
	sethi	%hi(copyio_fault), %o3
	ldn	[THREAD_REG + T_LOFAULT], SAVED_LOFAULT
	or	%o3, %lo(copyio_fault), %o3
	membar	#Sync
	stn	%o3, [THREAD_REG + T_LOFAULT]

	mov	%o0, SAVE_SRC
	mov	%o1, SAVE_DST
	mov	%o2, SAVE_COUNT

	!
	! Check to see if we're more than SMALL_LIMIT (7 bytes).
	! Run in leaf mode, using the %o regs as our input regs.
	!
	subcc	%o2, SMALL_LIMIT, %o3
	bgu,a,pt %ncc, .dco_ns
	or	%o0, %o1, %o3

.dcobcp:
	sub	%g0, %o2, %o3		! negate count
	add	%o0, %o2, %o0		! make %o0 point at the end
	add	%o1, %o2, %o1		! make %o1 point at the end
	ba,pt	%ncc, .dcocl
	ldub	[%o0 + %o3], %o4	! load first byte
	!
	! %o0 and %o1 point at the end and remain pointing at the end
	! of their buffers.  We pull things out by adding %o3 (which is
	! the negation of the length) to the buffer end which gives us
	! the current location in the buffers.  By incrementing %o3 we walk
	! through both buffers without having to bump each buffer's
	! pointer.  A very fast 4 instruction loop.
	!
	.align 16
.dcocl:
	stba	%o4, [%o1 + %o3]ASI_USER
	inccc	%o3
	bl,a,pt	%ncc, .dcocl
	ldub	[%o0 + %o3], %o4
	!
	! We're done. Go home.
	!
	membar	#Sync
	stn	SAVED_LOFAULT, [THREAD_REG + T_LOFAULT]
	retl
	clr	%o0
	!
	! Try aligned copies from here.
	!
.dco_ns:
	! %o0 = kernel addr (to be copied from)
	! %o1 = user addr (to be copied to)
	! %o2 = length
	! %o3 = %o1 | %o2 (used for alignment checking)
	! %o4 is alternate lo_fault
	! %o5 is original lo_fault
	!
	! See if we're single byte aligned.  If we are, check the
	! limit for single byte copies.  If we're smaller or equal,
	! bounce to the byte for byte copy loop.  Otherwise do it in
	! HW (if enabled).
	!
	btst	1, %o3
	bz,pt	%icc, .dcoh8
	btst	7, %o3

	ba	.dcobcp
	nop
.dcoh8:
	!
	! 8 byte aligned?
	!
	bnz,a	%ncc, .dcoh4
	btst	3, %o3
.dcos8:
	!
	! Housekeeping for copy loops.  Uses same idea as in the byte for
	! byte copy loop above.
	!
	add	%o0, %o2, %o0
	add	%o1, %o2, %o1
	sub	%g0, %o2, %o3
	ba,pt	%ncc, .dodebc
	srl	%o2, 3, %o2		! Number of 8 byte chunks to copy
	!
	! 4 byte aligned?
	!
.dcoh4:
	bnz,pn	%ncc, .dcoh2
	nop
.dcos4:
	add	%o0, %o2, %o0
	add	%o1, %o2, %o1
	sub	%g0, %o2, %o3
	ba,pt	%ncc, .dodfbc
	srl	%o2, 2, %o2		! Number of 4 byte chunks to copy
	!
	! We must be 2 byte aligned.  Off we go.
	! The check for small copies was done in the
	! delay at .dcoh4
	!
.dcoh2:
.dcos2:
	add	%o0, %o2, %o0
	add	%o1, %o2, %o1
	sub	%g0, %o2, %o3
	ba,pt	%ncc, .dodtbc
	srl	%o2, 1, %o2		! Number of 2 byte chunks to copy

.dodebc:
	ldx	[%o0 + %o3], %o4
	deccc	%o2
	stxa	%o4, [%o1 + %o3]ASI_USER
	bg,pt	%ncc, .dodebc
	addcc	%o3, 8, %o3
	!
	! End of copy loop.  Check to see if we're done.  Most
	! eight byte aligned copies end here.
	!
	bz,pt	%ncc, .dcofh
	nop
	!
	! Something is left - do it byte for byte.
	!
	ba,pt	%ncc, .dcocl
	ldub	[%o0 + %o3], %o4	! load next byte
	!
	! Four byte copy loop.  %o2 is the number of 4 byte chunks to copy.
	!
	.align 32
.dodfbc:
	lduw	[%o0 + %o3], %o4
	deccc	%o2
	sta	%o4, [%o1 + %o3]ASI_USER
	bg,pt	%ncc, .dodfbc
	addcc	%o3, 4, %o3
	!
	! End of copy loop.  Check to see if we're done.  Most
	! four byte aligned copies end here.
	!
	bz,pt	%ncc, .dcofh
	nop
	!
	! Something is left.  Do it byte for byte.
	!
	ba,pt	%ncc, .dcocl
	ldub	[%o0 + %o3], %o4	! load next byte
	!
	! two byte aligned copy loop. %o2 is the number of 2 byte chunks to
	! copy.
	!
	.align 32
.dodtbc:
	lduh	[%o0 + %o3], %o4
	deccc	%o2
	stha	%o4, [%o1 + %o3]ASI_USER
	bg,pt	%ncc, .dodtbc
	addcc	%o3, 2, %o3
	!
	! End of copy loop.  Anything left?
	!
	bz,pt	%ncc, .dcofh
	nop
	!
	! Deal with the last byte
	!
	ldub	[%o0 + %o3], %o4
	stba	%o4, [%o1 + %o3]ASI_USER
.dcofh:
	membar	#Sync
	stn	SAVED_LOFAULT, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	clr	%o0

.copyout_err:
	ldn	[THREAD_REG + T_COPYOPS], %o4
	brz	%o4, 2f
	nop
	ldn	[%o4 + CP_COPYOUT], %g2
	jmp	%g2
	nop
2:
	retl
	mov	-1, %o0
	SET_SIZE(copyout)

#endif	/* lint */


#ifdef lint

/*ARGSUSED*/
int
xcopyout(const void *kaddr, void *uaddr, size_t count)
{ return (0); }

#else	/* lint */

	ENTRY(xcopyout)
	sethi	%hi(.xcopyout_err), REAL_LOFAULT
	b	.do_copyout
	or	REAL_LOFAULT, %lo(.xcopyout_err), REAL_LOFAULT
.xcopyout_err:
	ldn	[THREAD_REG + T_COPYOPS], %o4
	brz	%o4, 2f
	nop
	ldn	[%o4 + CP_XCOPYOUT], %g2
	jmp	%g2
	nop
2:
	retl
	mov	%g1, %o0
	SET_SIZE(xcopyout)

#endif	/* lint */

#ifdef lint

/*ARGSUSED*/
int
xcopyout_little(const void *kaddr, void *uaddr, size_t count)
{ return (0); }

#else	/* lint */

	ENTRY(xcopyout_little)
	sethi	%hi(.little_err), %o4
	ldn	[THREAD_REG + T_LOFAULT], %o5
	or	%o4, %lo(.little_err), %o4
	membar	#Sync			! sync error barrier
	stn	%o4, [THREAD_REG + T_LOFAULT]

	subcc	%g0, %o2, %o3
	add	%o0, %o2, %o0
	bz,pn	%ncc, 2f		! check for zero bytes
	sub	%o2, 1, %o4
	add	%o0, %o4, %o0		! start w/last byte
	add	%o1, %o2, %o1
	ldub	[%o0+%o3], %o4

1:	stba	%o4, [%o1+%o3]ASI_AIUSL
	inccc	%o3
	sub	%o0, 2, %o0		! get next byte
	bcc,a,pt %ncc, 1b
	ldub	[%o0+%o3], %o4

2:	membar	#Sync			! sync error barrier
	stn	%o5, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	mov	%g0, %o0		! return (0)
	SET_SIZE(xcopyout_little)

#endif	/* lint */

/*
 * Copy user data to kernel space (copyin/xcopyin/xcopyin_little)
 */

#if defined(lint)

/*ARGSUSED*/
int
copyin(const void *uaddr, void *kaddr, size_t count)
{ return (0); }

#else	/* lint */

	ENTRY(copyin)
	sethi	%hi(.copyin_err), REAL_LOFAULT
	or	REAL_LOFAULT, %lo(.copyin_err), REAL_LOFAULT

.do_copyin:
	!
	! Check the length and bail if zero.
	!
	tst	%o2
	bnz,pt	%ncc, 1f
	nop
	retl
	clr	%o0
1:
	sethi	%hi(copyio_fault), %o3
	ldn	[THREAD_REG + T_LOFAULT], SAVED_LOFAULT
	or	%o3, %lo(copyio_fault), %o3
	membar	#Sync
	stn	%o3, [THREAD_REG + T_LOFAULT]

	mov	%o0, SAVE_SRC
	mov	%o1, SAVE_DST
	mov	%o2, SAVE_COUNT

	!
	! Check to see if we're more than SMALL_LIMIT.
	!
	subcc	%o2, SMALL_LIMIT, %o3
	bgu,a,pt %ncc, .dci_ns
	or	%o0, %o1, %o3

.dcibcp:
	sub	%g0, %o2, %o3		! setup for copy loop
	add	%o0, %o2, %o0
	add	%o1, %o2, %o1
	ba,pt	%ncc, .dcicl
	lduba	[%o0 + %o3]ASI_USER, %o4
	!
	! %o0 and %o1 point at the end and remain pointing at the end
	! of their buffers.  We pull things out by adding %o3 (which is
	! the negation of the length) to the buffer end which gives us
	! the current location in the buffers.  By incrementing %o3 we walk
	! through both buffers without having to bump each buffer's
	! pointer.  A very fast 4 instruction loop.
	!
	.align 16
.dcicl:
	stb	%o4, [%o1 + %o3]
	inccc	%o3
	bl,a,pt %ncc, .dcicl
	lduba	[%o0 + %o3]ASI_USER, %o4
	!
	! We're done. Go home.
	!
	membar	#Sync
	stn	SAVED_LOFAULT, [THREAD_REG + T_LOFAULT]
	retl
	clr	%o0
	!
	! Try aligned copies from here.
	!
.dci_ns:
	!
	! See if we're single byte aligned.  If we are, check the
	! limit for single byte copies.  If we're smaller, or equal,
	! bounce to the byte for byte copy loop.  Otherwise do it in
	! HW (if enabled).
	!
	btst	1, %o3
	bz,a,pt	%icc, .dcih8
	btst	7, %o3
	ba	.dcibcp
	nop

.dcih8:
	!
	! 8 byte aligned?
	!
	bnz,a	%ncc, .dcih4
	btst	3, %o3
.dcis8:
	!
	! Housekeeping for copy loops.  Uses same idea as in the byte for
	! byte copy loop above.
	!
	add	%o0, %o2, %o0
	add	%o1, %o2, %o1
	sub	%g0, %o2, %o3
	ba,pt	%ncc, .didebc
	srl	%o2, 3, %o2		! Number of 8 byte chunks to copy
	!
	! 4 byte aligned?
	!
.dcih4:
	bnz	%ncc, .dcih2
	nop
.dcis4:
	!
	! Housekeeping for copy loops.  Uses same idea as in the byte
	! for byte copy loop above.
	!
	add	%o0, %o2, %o0
	add	%o1, %o2, %o1
	sub	%g0, %o2, %o3
	ba,pt	%ncc, .didfbc
	srl	%o2, 2, %o2		! Number of 4 byte chunks to copy
.dcih2:
.dcis2:
	add	%o0, %o2, %o0
	add	%o1, %o2, %o1
	sub	%g0, %o2, %o3
	ba,pt	%ncc, .didtbc
	srl	%o2, 1, %o2		! Number of 2 byte chunks to copy

.didebc:
	ldxa	[%o0 + %o3]ASI_USER, %o4
	deccc	%o2
	stx	%o4, [%o1 + %o3]
	bg,pt	%ncc, .didebc
	addcc	%o3, 8, %o3
	!
	! End of copy loop.  Most 8 byte aligned copies end here.
	!
	bz,pt	%ncc, .dcifh
	nop
	!
	! Something is left.  Do it byte for byte.
	!
	ba,pt	%ncc, .dcicl
	lduba	[%o0 + %o3]ASI_USER, %o4
	!
	! 4 byte copy loop.  %o2 is number of 4 byte chunks to copy.
	!
	.align 32
.didfbc:
	lduwa	[%o0 + %o3]ASI_USER, %o4
	deccc	%o2
	st	%o4, [%o1 + %o3]
	bg,pt	%ncc, .didfbc
	addcc	%o3, 4, %o3
	!
	! End of copy loop.  Most 4 byte aligned copies end here.
	!
	bz,pt	%ncc, .dcifh
	nop
	!
	! Something is left.  Do it byte for byte.
	!
	ba,pt	%ncc, .dcicl
	lduba	[%o0 + %o3]ASI_USER, %o4
	!
	! 2 byte aligned copy loop.  %o2 is number of 2 byte chunks to
	! copy.
	!
	.align 32
.didtbc:
	lduha	[%o0 + %o3]ASI_USER, %o4
	deccc	%o2
	sth	%o4, [%o1 + %o3]
	bg,pt	%ncc, .didtbc
	addcc	%o3, 2, %o3
	!
	! End of copy loop.  Most 2 byte aligned copies end here.
	!
	bz,pt	%ncc, .dcifh
	nop
	!
	! Deal with the last byte
	!
	lduba	[%o0 + %o3]ASI_USER, %o4
	stb	%o4, [%o1 + %o3]
.dcifh:
	membar	#Sync
	stn	SAVED_LOFAULT, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	clr	%o0

.copyin_err:
	ldn	[THREAD_REG + T_COPYOPS], %o4
	brz	%o4, 2f
	nop
	ldn	[%o4 + CP_COPYIN], %g2
	jmp	%g2
	nop
2:
	retl
	mov	-1, %o0
	SET_SIZE(copyin)

#endif	/* lint */

#ifdef lint

/*ARGSUSED*/
int
xcopyin(const void *uaddr, void *kaddr, size_t count)
{ return (0); }

#else	/* lint */

	ENTRY(xcopyin)
	sethi	%hi(.xcopyin_err), REAL_LOFAULT
	b	.do_copyin
	or	REAL_LOFAULT, %lo(.xcopyin_err), REAL_LOFAULT
.xcopyin_err:
	ldn	[THREAD_REG + T_COPYOPS], %o4
	brz	%o4, 2f
	nop
	ldn	[%o4 + CP_XCOPYIN], %g2
	jmp	%g2
	nop
2:
	retl
	mov	%g1, %o0
	SET_SIZE(xcopyin)

#endif	/* lint */

#ifdef lint

/*ARGSUSED*/
int
xcopyin_little(const void *uaddr, void *kaddr, size_t count)
{ return (0); }

#else	/* lint */

	ENTRY(xcopyin_little)
	sethi	%hi(.little_err), %o4
	ldn	[THREAD_REG + T_LOFAULT], %o5
	or	%o4, %lo(.little_err), %o4
	membar	#Sync			! sync error barrier
	stn	%o4, [THREAD_REG + T_LOFAULT]

	subcc	%g0, %o2, %o3
	add	%o0, %o2, %o0
	bz,pn	%ncc, 2f		! check for zero bytes
	sub	%o2, 1, %o4
	add	%o0, %o4, %o0		! start w/last byte
	add	%o1, %o2, %o1
	lduba	[%o0+%o3]ASI_AIUSL, %o4

1:	stb	%o4, [%o1+%o3]
	inccc	%o3
	sub	%o0, 2, %o0		! get next byte
	bcc,a,pt %ncc, 1b
	lduba	[%o0+%o3]ASI_AIUSL, %o4

2:	membar	#Sync			! sync error barrier
	stn	%o5, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	mov	%g0, %o0		! return (0)

.little_err:
	membar	#Sync			! sync error barrier
	stn	%o5, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	mov	%g1, %o0
	SET_SIZE(xcopyin_little)

#endif	/* lint */


/*
 * Copy a block of storage - must not overlap (from + len <= to).
 * No fault handler installed (to be called under on_fault())
 */
#if defined(lint)

/* ARGSUSED */
void
copyin_noerr(const void *ufrom, void *kto, size_t count)
{}

#else	/* lint */

	ENTRY(copyin_noerr)
	sethi	%hi(.copyio_noerr), REAL_LOFAULT
	b	.do_copyin
	or	REAL_LOFAULT, %lo(.copyio_noerr), REAL_LOFAULT
.copyio_noerr:
	jmp	SAVED_LOFAULT
	nop
	SET_SIZE(copyin_noerr)

#endif	/* lint */

/*
 * Copy a block of storage - must not overlap (from + len <= to).
 * No fault handler installed (to be called under on_fault())
 */

#if defined(lint)

/* ARGSUSED */
void
copyout_noerr(const void *kfrom, void *uto, size_t count)
{}

#else	/* lint */

	ENTRY(copyout_noerr)
	sethi	%hi(.copyio_noerr), REAL_LOFAULT
	b	.do_copyout
	or	REAL_LOFAULT, %lo(.copyio_noerr), REAL_LOFAULT
	SET_SIZE(copyout_noerr)

#endif	/* lint */

#if defined(lint)

int use_hw_bcopy = 1;
int use_hw_bzero = 1;

#else	/* !lint */

	.align	4
	DGDEF(use_hw_bcopy)
	.word	1
	DGDEF(use_hw_bzero)
	.word	1

	.align	64
	.section ".text"
#endif	/* !lint */


/*
 * hwblkclr - clears block-aligned, block-multiple-sized regions that are
 * longer than 256 bytes in length.  For the generic module we simply
 * call bzero and return 1 to indicate that the pages in the cache should
 * be flushed to ensure integrity.
 * Caller is responsible for ensuring use_hw_bzero is true and that
 * kpreempt_disable() has been called.
 */
#ifdef lint
/*ARGSUSED*/
int
hwblkclr(void *addr, size_t len)
{
	return(0);
}
#else	/* lint */
	! %i0 - start address
	! %i1 - length of region (multiple of 64)

	ENTRY(hwblkclr)
	save	%sp, -SA(MINFRAME), %sp

	! Simply call bzero and notify the caller that bzero was used
	mov	%i0, %o0
	call	bzero
	mov	%i1, %o1
	ret
	restore	%g0, 1, %o0	! return (1) - did not use block operations

	SET_SIZE(hwblkclr)
#endif	/* lint */
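/*
 * Illustrative only: how a caller might honour the hwblkclr() contract
 * described above (use_hw_bzero checked and preemption disabled by the
 * caller; a nonzero return means block operations were not used and the
 * pages in the cache should be flushed).  The flush routine named here is
 * a placeholder, not part of this file.
 *
 *	kpreempt_disable();
 *	if (use_hw_bzero && hwblkclr(addr, len) != 0)
 *		flush_cached_range(addr, len);	(placeholder)
 *	kpreempt_enable();
 */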
#ifdef lint
/* Copy 32 bytes of data from src to dst using physical addresses */
/*ARGSUSED*/
void
hw_pa_bcopy32(uint64_t src, uint64_t dst)
{}
#else /*!lint */

	/*
	 * Copy 32 bytes of data from src (%o0) to dst (%o1)
	 * using physical addresses.
	 */
	ENTRY_NP(hw_pa_bcopy32)
	rdpr	%pstate, %g1		! save current %pstate
	andn	%g1, PSTATE_IE, %g2
	wrpr	%g0, %g2, %pstate	! disable interrupts during the copy

	ldxa	[%o0]ASI_MEM, %o2	! load four doublewords from the
	add	%o0, 8, %o0		! physical source address
	ldxa	[%o0]ASI_MEM, %o3
	add	%o0, 8, %o0
	ldxa	[%o0]ASI_MEM, %o4
	add	%o0, 8, %o0
	ldxa	[%o0]ASI_MEM, %o5
	stxa	%o2, [%o1]ASI_MEM	! store them to the physical
	add	%o1, 8, %o1		! destination address
	stxa	%o3, [%o1]ASI_MEM
	add	%o1, 8, %o1
	stxa	%o4, [%o1]ASI_MEM
	add	%o1, 8, %o1
	stxa	%o5, [%o1]ASI_MEM

	membar	#Sync
	retl
	wrpr	%g0, %g1, %pstate	! restore interrupt state
	SET_SIZE(hw_pa_bcopy32)
#endif /* lint */

/*
 * Zero a block of storage.
 *
 * uzero is used by the kernel to zero a block in user address space.
 */


#if defined(lint)

/* ARGSUSED */
int
kzero(void *addr, size_t count)
{ return(0); }

/* ARGSUSED */
void
uzero(void *addr, size_t count)
{}

#else /* lint */

	ENTRY(uzero)
	!
	! Set a new lo_fault handler only if we came in with one
	! already specified.
	!
	wr	%g0, ASI_USER, %asi
	ldn	[THREAD_REG + T_LOFAULT], %o5
	tst	%o5
	bz,pt	%ncc, .do_zero
	sethi	%hi(.zeroerr), %o2
	or	%o2, %lo(.zeroerr), %o2
	membar	#Sync
	ba,pt	%ncc, .do_zero
	stn	%o2, [THREAD_REG + T_LOFAULT]

	ENTRY(kzero)
	!
	! Always set a lo_fault handler
	!
	wr	%g0, ASI_P, %asi
	ldn	[THREAD_REG + T_LOFAULT], %o5
	sethi	%hi(.zeroerr), %o2
	or	%o5, LOFAULT_SET, %o5
	or	%o2, %lo(.zeroerr), %o2
	membar	#Sync
	ba,pt	%ncc, .do_zero
	stn	%o2, [THREAD_REG + T_LOFAULT]
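/*
 * uzero(), kzero() and bzero() all funnel into .do_zero and share the
 * .zeroerr fault handler that follows, but they treat t_lofault
 * differently: kzero() always installs .zeroerr and tags the saved value
 * with LOFAULT_SET, while uzero() and bzero() install it only when a
 * handler was already present.  A rough C rendering of that protocol,
 * offered only as a sketch ("saved" plays the role of %o5):
 *
 *	saved = curthread->t_lofault;
 *	if (is_kzero)
 *		saved |= LOFAULT_SET;
 *	if (is_kzero || curthread->t_lofault != 0)
 *		curthread->t_lofault = address of .zeroerr;
 *	// ... clear the block ...
 *	// On a fault, .zeroerr strips LOFAULT_SET from saved, writes the
 *	// result back to t_lofault, and then either returns the errno left
 *	// in %g1 (old handler was zero) or jumps to the old handler.
 */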
/*
 * We got here because of a fault during kzero or if
 * uzero or bzero was called with t_lofault non-zero.
 * Otherwise we've already run screaming from the room.
 * Errno value is in %g1.  Note that we're here iff
 * we did set t_lofault.
 */
.zeroerr:
	!
	! Undo asi register setting. Just set it to be the
	! kernel default without checking.
	!
	wr	%g0, ASI_P, %asi

	!
	! We did set t_lofault. It may well have been zero coming in.
	!
1:
	tst	%o5
	membar	#Sync
	bne,pn	%ncc, 3f
	andncc	%o5, LOFAULT_SET, %o5
2:
	!
	! Old handler was zero. Just return the error.
	!
	retl				! return
	mov	%g1, %o0		! error code from %g1
3:
	!
	! We're here because %o5 was non-zero. It was non-zero
	! because either LOFAULT_SET was present, a previous fault
	! handler was present or both. In all cases we need to reset
	! T_LOFAULT to the value of %o5 after clearing LOFAULT_SET
	! before we either simply return the error or we invoke the
	! previously specified handler.
	!
	be	%ncc, 2b
	stn	%o5, [THREAD_REG + T_LOFAULT]
	jmp	%o5			! goto real handler
	nop
	SET_SIZE(kzero)
	SET_SIZE(uzero)

#endif /* lint */

/*
 * Zero a block of storage.
 */

#if defined(lint)

/* ARGSUSED */
void
bzero(void *addr, size_t count)
{}

#else /* lint */

	ENTRY(bzero)
	wr	%g0, ASI_P, %asi

	ldn	[THREAD_REG + T_LOFAULT], %o5	! save old vector
	tst	%o5
	bz,pt	%ncc, .do_zero
	sethi	%hi(.zeroerr), %o2
	or	%o2, %lo(.zeroerr), %o2
	membar	#Sync				! sync error barrier
	stn	%o2, [THREAD_REG + T_LOFAULT]	! install new vector
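/*
 * .do_zero below picks a clearing strategy from the count in %o1:
 * counts under 7 are cleared a byte at a time, counts under 15 are
 * word-aligned and cleared by words, and larger counts are aligned to
 * 8 and then 64 bytes so the bulk can be cleared in 256- and 64-byte
 * chunks, with doublewords and bytes mopping up the tail.  A compact C
 * sketch of that cascade; the clear_* helpers are hypothetical and are
 * assumed to advance addr as they go:
 *
 *	if (count < 7)  { clear_bytes(addr, count); return; }
 *	if (count < 15) { clear_words(addr, count); return; }
 *	pad = -(uintptr_t)addr & 7;		// bytes up to an 8-byte boundary
 *	clear_bytes(addr, pad);  count -= pad;
 *	if (count >= 128) {
 *		pad = -(uintptr_t)addr & 63;	// doubles up to a 64-byte boundary
 *		clear_doubles(addr, pad);  count -= pad;
 *		clear_blocks(addr, count & ~63); // 256- then 64-byte chunks
 *		count &= 63;
 *	}
 *	clear_doubles(addr, count & ~7);
 *	clear_bytes(addr, count & 7);
 */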
.do_zero:
	cmp	%o1, 7
	blu,pn	%ncc, .byteclr
	nop

	cmp	%o1, 15
	blu,pn	%ncc, .wdalign
	nop

	andcc	%o0, 7, %o3		! is addr aligned on an 8 byte bound
	bz,pt	%ncc, .blkalign		! already double aligned
	sub	%o3, 8, %o3		! -(bytes till double aligned)
	add	%o1, %o3, %o1		! update o1 with new count

1:
	stba	%g0, [%o0]%asi
	inccc	%o3
	bl,pt	%ncc, 1b
	inc	%o0

	! Now address is double aligned
.blkalign:
	cmp	%o1, 0x80		! check if there are 128 bytes to set
	blu,pn	%ncc, .bzero_small
	mov	%o1, %o3

	andcc	%o0, 0x3f, %o3		! is block aligned?
	bz,pt	%ncc, .bzero_blk
	sub	%o3, 0x40, %o3		! -(bytes till block aligned)
	add	%o1, %o3, %o1		! o1 is the remainder

	! Clear -(%o3) bytes till block aligned
1:
	stxa	%g0, [%o0]%asi
	addcc	%o3, 8, %o3
	bl,pt	%ncc, 1b
	add	%o0, 8, %o0

.bzero_blk:
	and	%o1, 0x3f, %o3		! calc bytes left after blk clear
	andn	%o1, 0x3f, %o4		! calc size of blocks in bytes

	cmp	%o4, 0x100		! 256 bytes or more
	blu,pn	%ncc, 3f
	nop

2:
	stxa	%g0, [%o0+0x0]%asi
	stxa	%g0, [%o0+0x40]%asi
	stxa	%g0, [%o0+0x80]%asi
	stxa	%g0, [%o0+0xc0]%asi

	stxa	%g0, [%o0+0x8]%asi
	stxa	%g0, [%o0+0x10]%asi
	stxa	%g0, [%o0+0x18]%asi
	stxa	%g0, [%o0+0x20]%asi
	stxa	%g0, [%o0+0x28]%asi
	stxa	%g0, [%o0+0x30]%asi
	stxa	%g0, [%o0+0x38]%asi

	stxa	%g0, [%o0+0x48]%asi
	stxa	%g0, [%o0+0x50]%asi
	stxa	%g0, [%o0+0x58]%asi
	stxa	%g0, [%o0+0x60]%asi
	stxa	%g0, [%o0+0x68]%asi
	stxa	%g0, [%o0+0x70]%asi
	stxa	%g0, [%o0+0x78]%asi

	stxa	%g0, [%o0+0x88]%asi
	stxa	%g0, [%o0+0x90]%asi
	stxa	%g0, [%o0+0x98]%asi
	stxa	%g0, [%o0+0xa0]%asi
	stxa	%g0, [%o0+0xa8]%asi
	stxa	%g0, [%o0+0xb0]%asi
	stxa	%g0, [%o0+0xb8]%asi

	stxa	%g0, [%o0+0xc8]%asi
	stxa	%g0, [%o0+0xd0]%asi
	stxa	%g0, [%o0+0xd8]%asi
	stxa	%g0, [%o0+0xe0]%asi
	stxa	%g0, [%o0+0xe8]%asi
	stxa	%g0, [%o0+0xf0]%asi
	stxa	%g0, [%o0+0xf8]%asi

	sub	%o4, 0x100, %o4
	cmp	%o4, 0x100
	bgu,pt	%ncc, 2b
	add	%o0, 0x100, %o0
3:
	cmp	%o4, 0x40		! check if 64 bytes to set
	blu	%ncc, .bzero_blk_done
	nop

4:
	stxa	%g0, [%o0+0x0]%asi
	stxa	%g0, [%o0+0x8]%asi
	stxa	%g0, [%o0+0x10]%asi
	stxa	%g0, [%o0+0x18]%asi
	stxa	%g0, [%o0+0x20]%asi
	stxa	%g0, [%o0+0x28]%asi
	stxa	%g0, [%o0+0x30]%asi
	stxa	%g0, [%o0+0x38]%asi

	subcc	%o4, 0x40, %o4
	bgu,pt	%ncc, 3b
	add	%o0, 0x40, %o0

.bzero_blk_done:
	membar	#Sync

.bzero_small:
	! Set the remaining doubles
	subcc	%o3, 8, %o3		! Can we store any doubles?
	blu,pn	%ncc, .byteclr
	and	%o1, 7, %o1		! calc bytes left after doubles

.dbclr:
	stxa	%g0, [%o0]%asi		! Clear the doubles
	subcc	%o3, 8, %o3
	bgeu,pt	%ncc, .dbclr
	add	%o0, 8, %o0

	ba	.byteclr
	nop

.wdalign:
	andcc	%o0, 3, %o3		! is addr aligned on a word boundary
	bz,pn	%ncc, .wdclr
	andn	%o1, 3, %o3		! create word sized count in %o3

	dec	%o1			! decrement count
	stba	%g0, [%o0]%asi		! clear a byte
	ba	.wdalign
	inc	%o0			! next byte

.wdclr:
	sta	%g0, [%o0]%asi		! 4-byte clearing loop
	subcc	%o3, 4, %o3
	bnz,pt	%ncc, .wdclr
	inc	4, %o0

	and	%o1, 3, %o1		! leftover count, if any

.byteclr:
	! Set the leftover bytes
	brz	%o1, .bzero_exit
	nop

7:
	deccc	%o1			! byte clearing loop
	stba	%g0, [%o0]%asi
	bgu,pt	%ncc, 7b
	inc	%o0

.bzero_exit:
	!
	! We're just concerned with whether t_lofault was set
	! when we came in. We end up here from kzero(), uzero()
	! or bzero(). kzero() *always* sets a lofault handler.
	! It ors LOFAULT_SET into %o5 to indicate it has done
	! this even if the value of %o5 is otherwise zero.
	! bzero() and uzero() set a lofault handler *only* if one
	! was previously set. Accordingly we need to examine
	! %o5 and if it is non-zero be sure to clear LOFAULT_SET
	! before resetting the error handler.
	!
	tst	%o5
	bz	%ncc, 1f
	andn	%o5, LOFAULT_SET, %o5
	membar	#Sync				! sync error barrier
	stn	%o5, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
1:
	retl
	clr	%o0			! return (0)

	SET_SIZE(bzero)
#endif	/* lint */
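/*
 * As the lint prototypes above indicate, kzero() reports a fault to its
 * caller while bzero() and uzero() do not.  A minimal usage sketch, with
 * buf and len as placeholder names:
 *
 *	int err;
 *
 *	if ((err = kzero(buf, len)) != 0)
 *		return (err);		// errno from the faulting access
 *	bzero(buf, len);		// no return value; a fault reaches
 *					// .zeroerr only if t_lofault was set
 */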