	TEXT	memcmp(SB), $0
#define	BDNZ	BC	16,0,
	MOVW	R3, s1+0(FP)			/* R3 is pointer1 */

/*
 * performance:
 *	67mb/sec aligned; 16mb/sec unaligned
 */

	MOVW	n+8(FP), R4			/* R4 is count */
	MOVW	s2+4(FP), R5			/* R5 is pointer2 */

/*
 * let LSW do the work for 4 characters or less; aligned and unaligned
 */
	CMP	R4, $0
	BLE	eq
	CMP	R4, $4
	BLE	out

	XOR	R3, R5, R9
	ANDCC	$3, R9
	BNE	l4				/* pointers misaligned; use LSW loop */

/*
 * do enough bytes to align pointers
 */
	ANDCC	$3, R3, R9
	BEQ	l2
	SUBC	R9, $4, R9			/* R9 = 4 - (pointer1 & 3): bytes to the word boundary */
	MOVW	R9, XER				/* LSW takes its byte count from XER */
	LSW	(R3), R10
	ADD	R9, R3
	LSW	(R5), R14
	ADD	R9, R5
	SUB	R9, R4
	CMPU	R10, R14
	BNE	ne

/*
 * compare 16 at a time
 */
l2:
	SRAWCC	$4, R4, R9
	BLE	l4
	MOVW	R9, CTR
	SUB	$4, R3				/* bias for MOVWU pre-increment */
	SUB	$4, R5
l3:
	MOVWU	4(R3), R10			/* loads interleaved with compares to hide latency */
	MOVWU	4(R5), R12
	MOVWU	4(R3), R11
	MOVWU	4(R5), R13
	CMPU	R10, R12
	BNE	ne
	MOVWU	4(R3), R10
	MOVWU	4(R5), R12
	CMPU	R11, R13
	BNE	ne
	MOVWU	4(R3), R11
	MOVWU	4(R5), R13
	CMPU	R10, R12
	BNE	ne
	CMPU	R11, R13
	BNE	ne
	BDNZ	l3
	ADD	$4, R3				/* undo the pre-increment bias */
	ADD	$4, R5
	RLWNMCC	$0, R4, $15, R4			/* residue */
	BEQ	eq

/*
 * do remaining words with LSW; also does unaligned case
 */
l4:
	SRAWCC	$2, R4, R9
	BLE	out
	MOVW	R9, CTR
l5:
	LSW	(R3), $4, R10
	ADD	$4, R3
	LSW	(R5), $4, R11
	ADD	$4, R5
	CMPU	R10, R11
	BNE	ne
	BDNZ	l5
	RLWNMCC	$0, R4, $3, R4			/* residue */
	BEQ	eq

/*
 * do remaining bytes with final LSW
 */
out:
	MOVW	R4, XER
	LSW	(R3), R10
	LSW	(R5), R11
	CMPU	R10, R11
	BNE	ne

eq:
	MOVW	$0, R3
	RETURN

ne:
	MOVW	$1, R3				/* CR0 still holds the failing unsigned compare */
	BGE	ret
	MOVW	$-1, R3
ret:
	RETURN
	END
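
/*
 * Reference sketch, not assembled: the same three-phase strategy in
 * portable C, kept here only to document the code above.  refmemcmp is
 * a hypothetical name, the memcpy word loads are an illustrative
 * stand-in for the aligned MOVWU loads, and the early return from the
 * word compare assumes a big-endian machine (as the PowerPC above is),
 * since only then does an unsigned word compare rank bytes in memcmp
 * order.
 *
 *	#include <string.h>
 *	#include <stdint.h>
 *
 *	int
 *	refmemcmp(const void *a1, const void *a2, size_t n)
 *	{
 *		const unsigned char *s1 = a1, *s2 = a2;
 *		uint32_t w1, w2;
 *
 *		// phase 1: if the pointers are mutually aligned, step
 *		// byte-wise to a word boundary (the assembly does this
 *		// with a single LSW byte-stream load instead)
 *		if((((uintptr_t)s1 ^ (uintptr_t)s2) & 3) == 0){
 *			while(n > 0 && ((uintptr_t)s1 & 3) != 0){
 *				if(*s1 != *s2)
 *					return *s1 < *s2 ? -1 : 1;
 *				s1++, s2++, n--;
 *			}
 *			// phase 2: compare a word at a time; big-endian
 *			// byte order makes the unsigned compare correct
 *			while(n >= 4){
 *				memcpy(&w1, s1, 4);
 *				memcpy(&w2, s2, 4);
 *				if(w1 != w2)
 *					return w1 < w2 ? -1 : 1;
 *				s1 += 4, s2 += 4, n -= 4;
 *			}
 *		}
 *		// phase 3: residue bytes, and the mutually misaligned
 *		// case; the assembly stays word-wise even here because
 *		// LSW gathers unaligned bytes into registers
 *		while(n > 0){
 *			if(*s1 != *s2)
 *				return *s1 < *s2 ? -1 : 1;
 *			s1++, s2++, n--;
 *		}
 *		return 0;
 *	}
 */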