/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This test only tests internal APIs, and has dependencies on internal header
// files, including NN API HIDL definitions.
// It is not part of CTS.

#include <android-base/file.h>
#include <android-base/logging.h>
#include <android/sharedmem.h>
#include <gtest/gtest.h>
#include <sys/mman.h>
#include <unistd.h>

#include <cerrno>
#include <cstring>
#include <fstream>
#include <string>

#include "Manager.h"
#include "Memory.h"
#include "TestMemory.h"
#include "TestNeuralNetworksWrapper.h"

using WrapperCompilation = ::android::nn::test_wrapper::Compilation;
using WrapperExecution = ::android::nn::test_wrapper::Execution;
using WrapperMemory = ::android::nn::test_wrapper::Memory;
using WrapperModel = ::android::nn::test_wrapper::Model;
using WrapperOperandType = ::android::nn::test_wrapper::OperandType;
using WrapperResult = ::android::nn::test_wrapper::Result;
using WrapperType = ::android::nn::test_wrapper::Type;

namespace {

// Tests to ensure that various kinds of memory leaks do not occur.
//
// The fixture checks that no anonymous shared memory regions are leaked by
// comparing the count of /dev/ashmem mappings in SetUp and TearDown. This could
// break if the test or framework starts lazily instantiating something that
// creates a mapping - at that point the way the test works needs to be
// reinvestigated. The filename /dev/ashmem is a documented part of the Android
// kernel interface (see
// https://source.android.com/devices/architecture/kernel/reqs-interfaces).
//
// (We can also get very unlucky and mask a memory leak by unrelated unmapping
// somewhere else. This seems unlikely enough to not deal with.)
class MemoryLeakTest : public ::testing::Test {
   protected:
    void SetUp() override;
    void TearDown() override;

   private:
    size_t GetAshmemMappingsCount();

    size_t mStartingMapCount = 0;
    bool mIsCpuOnly;
};

void MemoryLeakTest::SetUp() {
    mIsCpuOnly = android::nn::DeviceManager::get()->getUseCpuOnly();
    mStartingMapCount = GetAshmemMappingsCount();
}

void MemoryLeakTest::TearDown() {
    android::nn::DeviceManager::get()->setUseCpuOnly(mIsCpuOnly);
    const size_t endingMapCount = GetAshmemMappingsCount();
    ASSERT_EQ(mStartingMapCount, endingMapCount);
}

size_t MemoryLeakTest::GetAshmemMappingsCount() {
    std::ifstream mappingsStream("/proc/self/maps");
    if (!mappingsStream.good()) {
        // errno is set by std::ifstream on Linux
        ADD_FAILURE() << "Failed to open /proc/self/maps: " << std::strerror(errno);
        return 0;
    }
    std::string line;
    size_t mapCount = 0;
    while (std::getline(mappingsStream, line)) {
        if (line.find("/dev/ashmem") != std::string::npos) {
            ++mapCount;
        }
    }
    return mapCount;
}

// As well as serving as a functional test for ASharedMemory, this also
// serves as a regression test for http://b/69685100 "RunTimePoolInfo
// leaks shared memory regions".
//
// TODO: test non-zero offset.
TEST_F(MemoryLeakTest, TestASharedMemory) {
    // Layout of the memory we'll allocate: where to place matrix2 and matrix3.
    // We leave gaps to check that nothing assumes the data is contiguous.
    constexpr uint32_t offsetForMatrix2 = 20;
    constexpr uint32_t offsetForMatrix3 = offsetForMatrix2 + sizeof(matrix2) + 30;
    constexpr uint32_t weightsSize = offsetForMatrix3 + sizeof(matrix3) + 60;

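    // On Android, back the weights with ASharedMemory; elsewhere, fall back to
    // a plain temporary file grown to the same size with ftruncate().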
#ifdef __ANDROID__
    int weightsFd = ASharedMemory_create("weights", weightsSize);
#else   // __ANDROID__
    TemporaryFile tmpWeightsFile;
    int weightsFd = tmpWeightsFile.release();
    CHECK_EQ(ftruncate(weightsFd, weightsSize), 0);
#endif  // __ANDROID__
    ASSERT_GT(weightsFd, -1);
    uint8_t* weightsData =
            (uint8_t*)mmap(nullptr, weightsSize, PROT_READ | PROT_WRITE, MAP_SHARED, weightsFd, 0);
    ASSERT_NE(weightsData, MAP_FAILED);
    memcpy(weightsData + offsetForMatrix2, matrix2, sizeof(matrix2));
    memcpy(weightsData + offsetForMatrix3, matrix3, sizeof(matrix3));
    WrapperMemory weights(weightsSize, PROT_READ | PROT_WRITE, weightsFd, 0);
    ASSERT_TRUE(weights.isValid());

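    // Build a two-operation model: b = a + c and d = b + e, where a and e are
    // constant matrices read from the shared weights memory, c is the model
    // input, and d is the model output.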
    WrapperModel model;
    WrapperOperandType matrixType(WrapperType::TENSOR_FLOAT32, {3, 4});
    WrapperOperandType scalarType(WrapperType::INT32, {});
    int32_t activation(0);
    auto a = model.addOperand(&matrixType);
    auto b = model.addOperand(&matrixType);
    auto c = model.addOperand(&matrixType);
    auto d = model.addOperand(&matrixType);
    auto e = model.addOperand(&matrixType);
    auto f = model.addOperand(&scalarType);

    model.setOperandValueFromMemory(e, &weights, offsetForMatrix2, sizeof(Matrix3x4));
    model.setOperandValueFromMemory(a, &weights, offsetForMatrix3, sizeof(Matrix3x4));
    model.setOperandValue(f, &activation, sizeof(activation));
    model.addOperation(ANEURALNETWORKS_ADD, {a, c, f}, {b});
    model.addOperation(ANEURALNETWORKS_ADD, {b, e, f}, {d});
    model.identifyInputsAndOutputs({c}, {d});
    ASSERT_TRUE(model.isValid());
    model.finish();

    // Test the two-node model.
    constexpr uint32_t offsetForMatrix1 = 20;
    constexpr size_t inputSize = offsetForMatrix1 + sizeof(Matrix3x4);
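    // The input matrix lives at a non-zero offset inside its own shared memory
    // region, allocated the same way as the weights above.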
#ifdef __ANDROID__
    int inputFd = ASharedMemory_create("input", inputSize);
#else   // __ANDROID__
    TemporaryFile tmpInputFile;
    int inputFd = tmpInputFile.release();
    CHECK_EQ(ftruncate(inputFd, inputSize), 0);
#endif  // __ANDROID__
    ASSERT_GT(inputFd, -1);
    uint8_t* inputData =
            (uint8_t*)mmap(nullptr, inputSize, PROT_READ | PROT_WRITE, MAP_SHARED, inputFd, 0);
    ASSERT_NE(inputData, MAP_FAILED);
    memcpy(inputData + offsetForMatrix1, matrix1, sizeof(Matrix3x4));
    WrapperMemory input(inputSize, PROT_READ, inputFd, 0);
    ASSERT_TRUE(input.isValid());

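    // The output is written to a third shared memory region, again at a
    // non-zero offset; the region is zeroed first so stale data cannot pass as
    // a correct result.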
    constexpr uint32_t offsetForActual = 32;
    constexpr size_t outputSize = offsetForActual + sizeof(Matrix3x4);
#ifdef __ANDROID__
    int outputFd = ASharedMemory_create("output", outputSize);
#else   // __ANDROID__
    TemporaryFile tmpOutputFile;
    int outputFd = tmpOutputFile.release();
    CHECK_EQ(ftruncate(outputFd, outputSize), 0);
#endif  // __ANDROID__
    ASSERT_GT(outputFd, -1);
    uint8_t* outputData =
            (uint8_t*)mmap(nullptr, outputSize, PROT_READ | PROT_WRITE, MAP_SHARED, outputFd, 0);
    ASSERT_NE(outputData, MAP_FAILED);
    memset(outputData, 0, outputSize);
    WrapperMemory actual(outputSize, PROT_READ | PROT_WRITE, outputFd, 0);
    ASSERT_TRUE(actual.isValid());

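    // Compile the model and run it once, binding the input and output through
    // the shared memory regions rather than user buffers.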
    WrapperCompilation compilation2(&model);
    ASSERT_EQ(compilation2.finish(), WrapperResult::NO_ERROR);

    WrapperExecution execution2(&compilation2);
    ASSERT_EQ(execution2.setInputFromMemory(0, &input, offsetForMatrix1, sizeof(Matrix3x4)),
              WrapperResult::NO_ERROR);
    ASSERT_EQ(execution2.setOutputFromMemory(0, &actual, offsetForActual, sizeof(Matrix3x4)),
              WrapperResult::NO_ERROR);
    ASSERT_EQ(execution2.compute(), WrapperResult::NO_ERROR);
    ASSERT_EQ(
            CompareMatrices(expected3, *reinterpret_cast<Matrix3x4*>(outputData + offsetForActual)),
            0);

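    // Unmap and close all three regions; TearDown() then checks that no
    // /dev/ashmem mappings were leaked.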
    munmap(weightsData, weightsSize);
    munmap(inputData, inputSize);
    munmap(outputData, outputSize);
    close(weightsFd);
    close(inputFd);
    close(outputFd);
}

#ifndef NNTEST_ONLY_PUBLIC_API
// Regression test for http://b/73663843, conv_2d trying to allocate too much memory.
TEST_F(MemoryLeakTest, convTooLarge) {
    android::nn::DeviceManager::get()->setUseCpuOnly(true);
    WrapperModel model;

    // This kernel/input size will make convQuant8 try to allocate
    // 12 * 13 * 13 * 128 * 92 * 92, which is just outside of signed int range
    // (0x82F56000). The computation fails due to CPU implementation limitations.
    WrapperOperandType type3(WrapperType::INT32, {});
    WrapperOperandType type2(WrapperType::TENSOR_INT32, {128}, 0.25, 0);
    WrapperOperandType type0(WrapperType::TENSOR_QUANT8_ASYMM, {12, 104, 104, 128}, 0.5, 0);
    WrapperOperandType type4(WrapperType::TENSOR_QUANT8_ASYMM, {12, 92, 92, 128}, 1.0, 0);
    WrapperOperandType type1(WrapperType::TENSOR_QUANT8_ASYMM, {128, 13, 13, 128}, 0.5, 0);

    // Operands
    auto op1 = model.addOperand(&type0);
    auto op2 = model.addOperand(&type1);
    auto op3 = model.addOperand(&type2);
    auto pad0 = model.addOperand(&type3);
    auto act = model.addOperand(&type3);
    auto stride = model.addOperand(&type3);
    auto op4 = model.addOperand(&type4);

    // Operations
    uint8_t op2_init[128 * 13 * 13 * 128] = {};
    model.setOperandValue(op2, op2_init, sizeof(op2_init));
    int32_t op3_init[128] = {};
    model.setOperandValue(op3, op3_init, sizeof(op3_init));
    int32_t pad0_init[] = {0};
    model.setOperandValue(pad0, pad0_init, sizeof(pad0_init));
    int32_t act_init[] = {0};
    model.setOperandValue(act, act_init, sizeof(act_init));
    int32_t stride_init[] = {1};
    model.setOperandValue(stride, stride_init, sizeof(stride_init));
    model.addOperation(ANEURALNETWORKS_CONV_2D,
                       {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
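    // With a 104x104 input, a 13x13 filter, zero padding, and stride 1, the
    // output spatial size is (104 - 13) / 1 + 1 = 92, matching type4's
    // {12, 92, 92, 128}.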

    // Inputs and outputs
    model.identifyInputsAndOutputs({op1}, {op4});
    ASSERT_TRUE(model.isValid());
    model.finish();

    // Compilation
    WrapperCompilation compilation(&model);
    ASSERT_EQ(WrapperResult::NO_ERROR, compilation.finish());
    WrapperExecution execution(&compilation);

    // Set input and outputs
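    // The buffers are static so these multi-megabyte arrays live in
    // zero-initialized static storage rather than on the stack.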
    static uint8_t input[12 * 104 * 104 * 128] = {};
    ASSERT_EQ(WrapperResult::NO_ERROR, execution.setInput(0, input, sizeof(input)));
    static uint8_t output[12 * 92 * 92 * 128] = {};
    ASSERT_EQ(WrapperResult::NO_ERROR, execution.setOutput(0, output, sizeof(output)));

    // This shouldn't segfault
    WrapperResult r = execution.compute();

    ASSERT_EQ(WrapperResult::OP_FAILED, r);
}
#endif  // NNTEST_ONLY_PUBLIC_API

}  // end namespace