// RUN: %clang_cc1 -triple aarch64 -target-feature +v8a -verify -S %s -o -
// REQUIRES: aarch64-registered-target

// Sema regression test: the various spellings of the "crypto" feature in the
// target attribute must enable the features required by the crypto NEON
// intrinsics (vaeseq_u8 needs 'aes', vsha1su1q_u32 needs 'sha2').  The RUN
// line compiles with only +v8a, so without a target attribute supplying a
// crypto feature the intrinsic calls must be diagnosed (see test_errors).
// All functions except test_errors expect NO diagnostics.

#include <arm_neon.h>

// "+crypto" spelling of the feature inside the target attribute.
__attribute__((target("+crypto")))
void test_crypto(uint8x16_t data, uint8x16_t key)
{
  vaeseq_u8(data, key);
  vsha1su1q_u32(data, key);
}

// Bare "crypto" spelling (no leading '+') must behave the same.
__attribute__((target("crypto")))
void test_pluscrypto(uint8x16_t data, uint8x16_t key)
{
  vaeseq_u8(data, key);
  vsha1su1q_u32(data, key);
}

// "crypto" appended to an arch= specification.
__attribute__((target("arch=armv8.2-a+crypto")))
void test_archcrypto(uint8x16_t data, uint8x16_t key)
{
  vaeseq_u8(data, key);
  vsha1su1q_u32(data, key);
}

// "crypto" appended to a cpu= specification (v8.2-class core).
// FIXME: This shouldn't need +crypto to be consistent with -mcpu options.
__attribute__((target("cpu=cortex-a55+crypto")))
void test_a55crypto(uint8x16_t data, uint8x16_t key)
{
  vaeseq_u8(data, key);
  vsha1su1q_u32(data, key);
}

// "crypto" appended to a cpu= specification (v9-class core).
__attribute__((target("cpu=cortex-a510+crypto")))
void test_a510crypto(uint8x16_t data, uint8x16_t key)
{
  vaeseq_u8(data, key);
  vsha1su1q_u32(data, key);
}

// Requesting the underlying features directly, without the "crypto"
// umbrella feature, must also satisfy both intrinsics.
__attribute__((target("+sha2+aes")))
void test_sha2aes(uint8x16_t data, uint8x16_t key)
{
  vaeseq_u8(data, key);
  vsha1su1q_u32(data, key);
}

// No target attribute: only +v8a from the RUN line is in effect, so both
// intrinsics must be rejected with the feature each one requires.
void test_errors(uint8x16_t data, uint8x16_t key)
{
  vaeseq_u8(data, key); // expected-error {{always_inline function 'vaeseq_u8' requires target feature 'aes'}}
  vsha1su1q_u32(data, key); // expected-error {{always_inline function 'vsha1su1q_u32' requires target feature 'sha2'}}
}