; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes='lower-matrix-intrinsics' -S < %s | FileCheck %s

; A 3x3 column-major strided load is lowered to one <3 x double> load per
; column (at stride multiples 0, 1, 2), with the columns recombined into a
; <9 x double> via shufflevectors.
define <9 x double> @strided_load_3x3(ptr %in, i64 %stride) {
; CHECK-LABEL: @strided_load_3x3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, ptr [[IN:%.*]], i64 [[VEC_START]]
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <3 x double>, ptr [[VEC_GEP]], align 8
; CHECK-NEXT:    [[VEC_START1:%.*]] = mul i64 1, [[STRIDE]]
; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr double, ptr [[IN]], i64 [[VEC_START1]]
; CHECK-NEXT:    [[COL_LOAD4:%.*]] = load <3 x double>, ptr [[VEC_GEP2]], align 8
; CHECK-NEXT:    [[VEC_START5:%.*]] = mul i64 2, [[STRIDE]]
; CHECK-NEXT:    [[VEC_GEP6:%.*]] = getelementptr double, ptr [[IN]], i64 [[VEC_START5]]
; CHECK-NEXT:    [[COL_LOAD8:%.*]] = load <3 x double>, ptr [[VEC_GEP6]], align 8
; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <3 x double> [[COL_LOAD]], <3 x double> [[COL_LOAD4]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <3 x double> [[COL_LOAD8]], <3 x double> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <6 x double> [[TMP0]], <6 x double> [[TMP1]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
; CHECK-NEXT:    ret <9 x double> [[TMP2]]
;
entry:
  %load = call <9 x double> @llvm.matrix.column.major.load.v9f64.i64(ptr %in, i64 %stride, i1 false, i32 3, i32 3)
  ret <9 x double> %load
}

declare <9 x double> @llvm.matrix.column.major.load.v9f64.i64(ptr, i64, i1, i32, i32)

; A single-column 9x1 load needs no shuffles: it lowers to one <9 x double>
; load from the column-0 address.
define <9 x double> @strided_load_9x1(ptr %in, i64 %stride) {
; CHECK-LABEL: @strided_load_9x1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, ptr [[IN:%.*]], i64 [[VEC_START]]
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <9 x double>, ptr [[VEC_GEP]], align 8
; CHECK-NEXT:    ret <9 x double> [[COL_LOAD]]
;
entry:
  %load = call <9 x double> @llvm.matrix.column.major.load.v9f64.i64(ptr %in, i64 %stride, i1 false, i32 9, i32 1)
  ret <9 x double> %load
}

declare <8 x double> @llvm.matrix.column.major.load.v8f64.i64(ptr, i64, i1, i32, i32)

; A 4x2 load lowers to two <4 x double> column loads combined by a single
; shufflevector.
define <8 x double> @strided_load_4x2(ptr %in, i64 %stride) {
; CHECK-LABEL: @strided_load_4x2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, ptr [[IN:%.*]], i64 [[VEC_START]]
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <4 x double>, ptr [[VEC_GEP]], align 8
; CHECK-NEXT:    [[VEC_START1:%.*]] = mul i64 1, [[STRIDE]]
; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr double, ptr [[IN]], i64 [[VEC_START1]]
; CHECK-NEXT:    [[COL_LOAD4:%.*]] = load <4 x double>, ptr [[VEC_GEP2]], align 8
; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <4 x double> [[COL_LOAD]], <4 x double> [[COL_LOAD4]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    ret <8 x double> [[TMP0]]
;
entry:
  %load = call <8 x double> @llvm.matrix.column.major.load.v8f64.i64(ptr %in, i64 %stride, i1 false, i32 4, i32 2)
  ret <8 x double> %load
}

declare <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr, i32, i1, i32, i32)

; Same as @strided_load_4x2, but with an i32 stride: the stride arithmetic and
; GEP indices stay i32.
define <8 x double> @strided_load_4x2_stride_i32(ptr %in, i32 %stride) {
; CHECK-LABEL: @strided_load_4x2_stride_i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VEC_START:%.*]] = mul i32 0, [[STRIDE:%.*]]
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, ptr [[IN:%.*]], i32 [[VEC_START]]
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <4 x double>, ptr [[VEC_GEP]], align 8
; CHECK-NEXT:    [[VEC_START1:%.*]] = mul i32 1, [[STRIDE]]
; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr double, ptr [[IN]], i32 [[VEC_START1]]
; CHECK-NEXT:    [[COL_LOAD4:%.*]] = load <4 x double>, ptr [[VEC_GEP2]], align 8
; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <4 x double> [[COL_LOAD]], <4 x double> [[COL_LOAD4]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    ret <8 x double> [[TMP0]]
;
entry:
  %load = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr %in, i32 %stride, i1 false, i32 4, i32 2)
  ret <8 x double> %load
}