1// RUN: mlir-opt -pass-pipeline="builtin.module(finalize-memref-to-llvm{use-generic-functions=1})" -split-input-file %s \ 2// RUN: | FileCheck %s --check-prefix="CHECK-NOTALIGNED" 3 4// RUN: mlir-opt -pass-pipeline="builtin.module(finalize-memref-to-llvm{use-generic-functions=1 use-aligned-alloc=1})" -split-input-file %s \ 5// RUN: | FileCheck %s --check-prefix="CHECK-ALIGNED" 6 7// CHECK-LABEL: func @alloc() 8func.func @zero_d_alloc() -> memref<f32> { 9// CHECK-NOTALIGNED: llvm.call @_mlir_memref_to_llvm_alloc(%{{.*}}) : (i64) -> !llvm.ptr 10// CHECK-ALIGNED: llvm.call @_mlir_memref_to_llvm_aligned_alloc(%{{.*}}, %{{.*}}) : (i64, i64) -> !llvm.ptr 11 %0 = memref.alloc() : memref<f32> 12 return %0 : memref<f32> 13} 14 15// ----- 16 17// CHECK-LABEL: func @dealloc() 18func.func @dealloc(%arg0: memref<f32>) { 19// CHECK-NOTALIGNED: llvm.call @_mlir_memref_to_llvm_free(%{{.*}}) : (!llvm.ptr) -> () 20// CHECK-ALIGNED: llvm.call @_mlir_memref_to_llvm_free(%{{.*}}) : (!llvm.ptr) -> () 21 memref.dealloc %arg0 : memref<f32> 22 return 23} 24