xref: /llvm-project/llvm/lib/Analysis/InteractiveModelRunner.cpp (revision 795910c2d9cc73625ef09fdf1238d27ec41ecbc3)
1 //===- InteractiveModelRunner.cpp - noop ML model runner   ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // A runner that communicates with an external agent via 2 file descriptors.
10 //===----------------------------------------------------------------------===//
#include "llvm/Analysis/InteractiveModelRunner.h"
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/TensorSpec.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
18 
19 using namespace llvm;
20 
// Expands to one clEnumValN entry per supported tensor type; used only to
// populate the value list of the option below, then undefined.
#define _IMR_CL_VALS(T, N) clEnumValN(TensorType::N, #T, #T),

// Debugging aid: when set to a concrete tensor type, the raw advice bytes
// received from the host are also dumped to stderr, interpreted as that
// type. The default, TensorType::Invalid ("disable"), turns echoing off.
static cl::opt<TensorType> DebugReply(
    "interactive-model-runner-echo-type", cl::init(TensorType::Invalid),
    cl::Hidden,
    cl::desc("The InteractiveModelRunner will echo back to stderr "
             "the data received "
             "from the host as the specified type (for debugging purposes)."),
    cl::values(SUPPORTED_TENSOR_TYPES(_IMR_CL_VALS)
                   clEnumValN(TensorType::Invalid, "disable", "Don't echo")));

#undef _IMR_CL_VALS
33 
// Establishes the two channels to the external agent: an inbound file we
// read advice from (opened in the member-init list so InEC reflects the
// result) and an outbound file the Logger writes observations to. On any
// open failure an error is emitted on the context and construction stops
// early, leaving the runner unusable.
InteractiveModelRunner::InteractiveModelRunner(
    LLVMContext &Ctx, const std::vector<TensorSpec> &Inputs,
    const TensorSpec &Advice, StringRef OutboundName, StringRef InboundName)
    : MLModelRunner(Ctx, MLModelRunner::Kind::Interactive, Inputs.size()),
      InputSpecs(Inputs), OutputSpec(Advice),
      InEC(sys::fs::openFileForRead(InboundName, Inbound)),
      OutputBuffer(OutputSpec.getTotalTensorBufferSize()) {
  if (InEC) {
    Ctx.emitError("Cannot open inbound file: " + InEC.message());
    return;
  }
  {
    auto OutStream = std::make_unique<raw_fd_ostream>(OutboundName, OutEC);
    if (OutEC) {
      Ctx.emitError("Cannot open outbound file: " + OutEC.message());
      return;
    }
    // No reward is exchanged in the interactive protocol (IncludeReward is
    // false), so Advice is passed both in the reward-spec position and as
    // the advice spec — NOTE(review): presumably the reward spec is unused
    // when IncludeReward=false; confirm against Logger's constructor.
    Log = std::make_unique<Logger>(std::move(OutStream), InputSpecs, Advice,
                                   /*IncludeReward=*/false, Advice);
  }
  // Just like in the no inference case, this will allocate an appropriately
  // sized buffer.
  for (size_t I = 0; I < InputSpecs.size(); ++I)
    setUpBufferForTensor(I, InputSpecs[I], nullptr);
  // Flush whatever the Logger emitted at construction so the agent on the
  // other end of the outbound channel sees it immediately.
  Log->flush();
}
60 
61 InteractiveModelRunner::~InteractiveModelRunner() {
62   sys::fs::file_t FDAsOSHandle = sys::fs::convertFDToNativeFile(Inbound);
63   sys::fs::closeFile(FDAsOSHandle);
64 }
65 
66 void *InteractiveModelRunner::evaluateUntyped() {
67   Log->startObservation();
68   for (size_t I = 0; I < InputSpecs.size(); ++I)
69     Log->logTensorValue(I, reinterpret_cast<const char *>(getTensorUntyped(I)));
70   Log->endObservation();
71   Log->flush();
72 
73   size_t InsPoint = 0;
74   char *Buff = OutputBuffer.data();
75   const size_t Limit = OutputBuffer.size();
76   while (InsPoint < Limit) {
77     auto ReadOrErr = ::sys::fs::readNativeFile(
78         sys::fs::convertFDToNativeFile(Inbound),
79         {Buff + InsPoint, OutputBuffer.size() - InsPoint});
80     if (ReadOrErr.takeError()) {
81       Ctx.emitError("Failed reading from inbound file");
82       break;
83     }
84     InsPoint += *ReadOrErr;
85   }
86   if (DebugReply != TensorType::Invalid)
87     dbgs() << tensorValueToString(OutputBuffer.data(), OutputSpec);
88   return OutputBuffer.data();
89 }
90