# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Grappler Constant Folding."""

import numpy as np

from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test


class ConstantFoldingTest(test.TestCase):

  # See b/76008022.
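  # Builds a while loop whose body scans over all-zero constant tensors and
  # checks that the loop output is still all zeros once Grappler has run.
  # The test body only executes when a CUDA GPU is available.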
  def testScanInsideWhile(self):

    def loop_cond(idx_step, *unused_args):
      return idx_step < 1

    def loop_body(idx_step, y):
      x = array_ops.zeros([10, 20, 30], dtype=dtypes.float32)
      x = functional_ops.scan(
          math_ops.add,
          x,
          initializer=array_ops.zeros([20, 30], dtype=dtypes.float32),
          back_prop=False,
          parallel_iterations=1)

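      # The identity below is pinned to the CPU, so the loop output is
      # produced on the CPU even when the rest of the loop runs on the GPU.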
      with ops.device('/cpu:0'):
        y = array_ops.identity(x)

        return idx_step + 1, y

    if test.is_gpu_available(cuda_only=True):
      init_y = array_ops.zeros([10, 20, 30], dtype=dtypes.float32)
      _, y = control_flow_ops.while_loop(
          loop_cond,
          loop_body,
          loop_vars=[0, init_y],
          back_prop=False,
          parallel_iterations=1)

      y_v = self.evaluate(y)
      self.assertAllEqual(np.zeros([10, 20, 30]), y_v)

  # See b/159753857.
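  # Because the loss multiplies x by zeros_like(x), the gradient with respect
  # to x is a constant zero; Grappler should be able to fold away the update
  # of x and leave only the AssignAddVariableOp for y in the optimized graph.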
  def testGradientGraphOptimization(self):

    @def_function.function
    def f(x, y):
      with backprop.GradientTape() as tape:
        z = math_ops.mul(x, array_ops.zeros_like(x))
        l = math_ops.add(z, y)
        l = math_ops.reduce_sum(l)

      gx, gy = tape.gradient(l, [x, y])
      x.assign_add(gx)
      y.assign_add(gy)
      return x + y

    # XLA completely optimizes away the variable reads and
    # assignments, so skip the test.
    if test_util.is_xla_enabled():
      self.skipTest('Not relevant for XLA')
    with context.eager_mode():
      x = resource_variable_ops.ResourceVariable(
          np.random.uniform(size=[2, 2]), dtype=dtypes.float32)
      y = resource_variable_ops.ResourceVariable(
          np.random.uniform(size=[2, 2]), dtype=dtypes.float32)
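      # collect_graphs(optimized=True) records the post-Grappler function
      # graphs, so the test can inspect which ops survived optimization.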
      with context.collect_graphs(optimized=True) as graphs:
        f(x, y).numpy()
    self.assertLen(graphs, 1)
    assign_count = 0
    for node in graphs[0].node:
      if node.op == 'AssignAddVariableOp':
        self.assertEqual(node.input[0], 'y')
        assign_count += 1

    # Make sure that the only variable update that remains after
    # grappler optimization is that of y.
    self.assertEqual(assign_count, 1)
    self.assertLen(graphs[0].node, 11)


if __name__ == '__main__':
  test.main()