/aosp_15_r20/external/pytorch/test/jit/

  test_cuda.py
    138  s0 = torch.cuda.current_stream(device)
    139  s1 = torch.cuda.current_stream(torch.device("cuda:1"))
    140  s2 = torch.cuda.current_stream(torch.device("cuda:0"))
    156  s0 = torch.cuda.current_stream(device_index)
    157  s1 = torch.cuda.current_stream(1)
    158  s2 = torch.cuda.current_stream(0)
    197  s2 = torch.cuda.current_stream(torch.device("cuda:0"))
    204  s3 = torch.cuda.current_stream(d)
    245  current_stream = torch.cuda.current_stream(device)
    253  torch.cuda.current_stream(device).id() == current_stream.id()
    [all …]
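Note: these JIT tests exercise the different device arguments that torch.cuda.current_stream accepts. A minimal sketch of that surface, not taken from the test file and assuming device 0 is the current CUDA device:

    import torch

    if torch.cuda.is_available():
        d = torch.device("cuda:0")
        s0 = torch.cuda.current_stream()     # no argument: current device
        s1 = torch.cuda.current_stream(d)    # a torch.device
        s2 = torch.cuda.current_stream(0)    # a bare device index
        # All three name the same stream when device 0 is the current device.
        assert s0 == s1 == s2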
/aosp_15_r20/external/pytorch/test/

  test_cpp_extensions_mtia_backend.py
     84  default_stream = torch.get_device_module(device).current_stream()
     93  default_stream = torch.mtia.current_stream()
     95  self.assertEqual(torch.mtia.current_stream(), default_stream)
    101  self.assertEqual(torch.mtia.current_stream(), user_stream)
    113  current_stream = torch.mtia.current_stream()
    115  self.assertTrue(current_stream == mtia_stream_0, msg=msg)
    118  current_stream = torch.mtia.current_stream()
    120  self.assertTrue(current_stream == mtia_stream_1, msg=msg)
    132  current_stream = torch.mtia.current_stream()
    135  self.assertTrue(current_stream == mtia_stream_0, msg=msg)
    [all …]
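Note: the MTIA backend test reaches the stream API through torch.get_device_module, which returns the per-backend module (torch.cuda, torch.xpu, torch.mtia, ...) so the same assertions run on any accelerator. A rough sketch of that dispatch, using CUDA as the stand-in backend since an MTIA device is rarely available:

    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    mod = torch.get_device_module(device)        # e.g. torch.cuda for a CUDA device
    if device.type == "cuda":
        default_stream = mod.current_stream()    # same stream torch.cuda.current_stream() returns
        assert default_stream == torch.cuda.current_stream()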
  test_cuda_multigpu.py
    506  s0 = torch.cuda.current_stream()
    507  s1 = torch.cuda.current_stream(device=1)
    508  s2 = torch.cuda.current_stream(device=0)
    516  s0 = torch.cuda.current_stream()
    517  s1 = torch.cuda.current_stream(1)
    518  s2 = torch.cuda.current_stream(d0)
    526  torch.cuda.current_stream(torch.device("cpu"))
    551  self.assertEqual(torch.cuda.current_stream(), s0)
    554  self.assertEqual(torch.cuda.current_stream(), s1)
    568  s0 = torch.cuda.current_stream()
    [all …]
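Note: test_cuda_multigpu.py checks that current_stream is per device and that asking for the stream of a non-CUDA device fails. An illustrative sketch, not the test itself; the exception type is assumed to be ValueError, as in the builds these tests target:

    import torch

    if torch.cuda.device_count() >= 2:
        s0 = torch.cuda.current_stream(device=0)   # stream bound to cuda:0
        s1 = torch.cuda.current_stream(device=1)   # stream bound to cuda:1
        assert s0.device != s1.device
        try:
            torch.cuda.current_stream(torch.device("cpu"))
        except ValueError:
            pass                                   # a CPU device is rejected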
  test_xpu.py
    136  s1 = torch.xpu.current_stream()
    142  self.assertEqual(s0, torch.xpu.current_stream())
    143  self.assertEqual(s2, torch.xpu.current_stream())
    158  s = torch.xpu.current_stream()
    166  stream = torch.xpu.current_stream()
    182  self.assertNotEqual(stream.stream_id, torch.xpu.current_stream().stream_id)
    192  self.assertEqual(stream.stream_id, torch.xpu.current_stream().stream_id)
    478  event._as_parameter_.value, torch.xpu.current_stream().sycl_queue
    488  event._as_parameter_.value, torch.xpu.current_stream().sycl_queue
  test_cuda.py
    346  stream = torch.cuda.current_stream()
    373  self.assertEqual(torch.cuda.current_stream().query(), True)
    607  default_stream = torch.cuda.current_stream()
    609  self.assertEqual(torch.cuda.current_stream(), default_stream)
    614  self.assertEqual(torch.cuda.current_stream(), user_stream)
    622  s = torch.cuda.current_stream()
    630  stream = torch.cuda.current_stream()
    651  self.assertNotEqual(stream.stream_id, torch.cuda.current_stream().stream_id)
    661  self.assertEqual(stream.stream_id, torch.cuda.current_stream().stream_id)
    686  torch.cuda.current_stream().wait_stream(stream)
    [all …]
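Note: lines 607-614 above revolve around torch.cuda.stream(), which swaps the current stream for the duration of a with-block. A small sketch of that behaviour, not copied from the test:

    import torch

    if torch.cuda.is_available():
        default_stream = torch.cuda.current_stream()
        user_stream = torch.cuda.Stream()
        with torch.cuda.stream(user_stream):
            # inside the block the user stream is the current stream
            assert torch.cuda.current_stream() == user_stream
        # on exit the previous stream becomes current again
        assert torch.cuda.current_stream() == default_stream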
/aosp_15_r20/external/libconfig/lib/

  scanctx.c
     59  if(frame->current_stream)   in libconfig_scanctx_cleanup()
     60  fclose(frame->current_stream);   in libconfig_scanctx_cleanup()
    110  frame->current_stream = NULL;   in libconfig_scanctx_push_include()
    140  if(include_frame->current_stream)   in libconfig_scanctx_next_include_file()
    142  fclose(include_frame->current_stream);   in libconfig_scanctx_next_include_file()
    143  include_frame->current_stream = NULL;   in libconfig_scanctx_next_include_file()
    149  include_frame->current_stream = fopen(*(include_frame->current_file), "rt");   in libconfig_scanctx_next_include_file()
    150  if(!include_frame->current_stream)   in libconfig_scanctx_next_include_file()
    153  return(include_frame->current_stream);   in libconfig_scanctx_next_include_file()
    170  if(frame->current_stream)   in libconfig_scanctx_pop_include()
    [all …]
/aosp_15_r20/external/pytorch/c10/test/core/impl/

  InlineStreamGuard_test.cpp
     33  ASSERT_EQ(g.current_stream(), stream(1, 2));   in TEST()
     51  ASSERT_EQ(g.current_stream(), stream(0, 3));   in TEST()
     69  ASSERT_EQ(g.current_stream(), stream(1, 3));   in TEST()
     89  ASSERT_EQ(g.current_stream(), stream(2, 3));   in TEST()
    113  ASSERT_EQ(g.current_stream(), std::make_optional(stream(1, 2)));   in TEST()
    124  ASSERT_EQ(g.current_stream(), std::make_optional(stream(1, 2)));   in TEST()
    150  ASSERT_EQ(g.current_stream(), std::make_optional(stream(1, 3)));   in TEST()
    168  ASSERT_EQ(g.current_stream(), std::make_optional(stream(2, 3)));   in TEST()
/aosp_15_r20/external/pytorch/torch/distributed/fsdp/

  _runtime_utils.py
     252  state._default_stream = state._device_handle.current_stream()
     421  current_stream = state._device_handle.current_stream()
     423  current_stream.wait_event(state._unshard_event)
     426  current_stream.wait_stream(state._unshard_stream)
     579  state._device_handle.current_stream(),
     674  state._device_handle.current_stream().wait_stream(state._unshard_stream)
     741  state._device_handle.current_stream()
    1044  low_prec_grad_data, state._device_handle.current_stream()
    1089  current_stream = state._device_handle.current_stream()
    1093  current_stream.wait_stream(root_state._post_backward_stream)
    [all …]
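Note: FSDP's runtime utilities make the current stream wait on events recorded in its unshard and post-backward streams. A generic sketch of that event-based hand-off; the names here are illustrative, not FSDP's internals:

    import torch

    if torch.cuda.is_available():
        x = torch.randn(1024, 1024, device="cuda")
        side = torch.cuda.Stream()
        side.wait_stream(torch.cuda.current_stream())   # let x's producer finish first
        with torch.cuda.stream(side):
            y = x @ x                                    # work queued on the side stream
            done = side.record_event()                   # mark its completion with an event
        torch.cuda.current_stream().wait_event(done)     # consumers block only on that event
        loss = y.sum()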
/aosp_15_r20/external/pytorch/c10/core/

  StreamGuard.h
     66  Stream current_stream() const {   in current_stream() function
     67  return guard_.current_stream();   in current_stream()
    133  std::optional<Stream> current_stream() const {   in current_stream() function
    134  return guard_.current_stream();
/aosp_15_r20/external/pytorch/test/dynamo/

  test_ctx_manager.py
    169  current_stream = torch.cuda.current_stream()
    170  s.wait_stream(current_stream)
    173  current_stream.wait_stream(s)
    198  current_stream = torch.cuda.current_stream()
    199  s.wait_stream(current_stream)
    204  current_stream.wait_stream(s)
    225  current_stream = torch.cuda.current_stream()
    226  s.wait_stream(current_stream)
    231  current_stream.wait_stream(s)
    232  with torch.cuda.stream(current_stream):
    [all …]
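Note: the Dynamo tests repeat the standard side-stream round trip: the side stream first waits on the current stream, runs its kernels, and the current stream then waits on the side stream before using the results. A plain eager-mode sketch of that pattern:

    import torch

    if torch.cuda.is_available():
        x = torch.randn(64, 64, device="cuda")
        s = torch.cuda.Stream()
        cur = torch.cuda.current_stream()
        s.wait_stream(cur)              # s sees everything already queued on cur
        with torch.cuda.stream(s):
            y = torch.mm(x, x)          # queued on s
        cur.wait_stream(s)              # cur will not pass this point before s drains
        print(y.sum().item())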
/aosp_15_r20/external/pytorch/torch/distributed/_symmetric_memory/

  __init__.py
    160  backend_stream.wait_stream(torch.cuda.current_stream())
    173  torch.cuda.current_stream().wait_stream(backend_stream)
    180  stream = torch.cuda.current_stream()
    191  torch.cuda.current_stream().wait_stream(backend_stream)
    216  backend_stream.wait_stream(torch.cuda.current_stream())
    234  stream = torch.cuda.current_stream()
    240  other_stream = torch.cuda.current_stream()
    253  torch.cuda.current_stream().wait_stream(backend_stream)
    871  _get_backend_stream().wait_stream(torch.cuda.current_stream())
    909  _get_backend_stream().wait_stream(torch.cuda.current_stream())
    [all …]
/aosp_15_r20/external/pytorch/torch/_dynamo/

  device_interface.py
     95  def current_stream():   member in DeviceInterface
    197  current_stream = staticmethod(torch.cuda.current_stream)   variable in CudaInterface
    268  current_stream = staticmethod(torch.xpu.current_stream)   variable in XpuInterface
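Note: device_interface.py gives Dynamo one DeviceInterface shape and lets each backend rebind current_stream (and friends) as a staticmethod pointing at its own module-level function. A stripped-down sketch of that idea; the classes below are illustrative, not Dynamo's actual implementation:

    import torch

    class DeviceInterface:
        """Backend-agnostic hooks; concrete backends bind the real functions."""
        current_stream: staticmethod

    class CudaInterface(DeviceInterface):
        current_stream = staticmethod(torch.cuda.current_stream)

    if torch.cuda.is_available():
        # callers hold an interface class and never touch torch.cuda directly
        print(CudaInterface.current_stream())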
/aosp_15_r20/external/pytorch/c10/cuda/

  CUDAGuard.h
    184  CUDAStream current_stream() const {   in current_stream() function
    185  return CUDAStream(CUDAStream::UNCHECKED, guard_.current_stream());   in current_stream()
    254  std::optional<CUDAStream> current_stream() const {   in current_stream() function
    255  auto r = guard_.current_stream();   in current_stream()
/aosp_15_r20/external/pytorch/torch/distributed/

  utils.py
    133  current_stream = device_mod.current_stream()
    135  current_stream.wait_stream(stream)
    139  output.data.record_stream(current_stream)  # type: ignore[arg-type]
    142  output.record_stream(current_stream)  # type: ignore[arg-type]
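Note: utils.py pairs wait_stream with Tensor.record_stream so memory produced on one stream is not handed back to the caching allocator while the current stream may still be reading it. A rough sketch of that idiom; sizes and streams are illustrative:

    import torch

    if torch.cuda.is_available():
        copy_stream = torch.cuda.Stream()
        src = torch.randn(1 << 20, pin_memory=True)
        with torch.cuda.stream(copy_stream):
            dst = src.to("cuda", non_blocking=True)   # async H2D copy on copy_stream
        cur = torch.cuda.current_stream()
        cur.wait_stream(copy_stream)                  # order the copy before later kernels
        dst.record_stream(cur)                        # tell the allocator cur also uses dst
        out = dst * 2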
/aosp_15_r20/external/pytorch/torch/distributed/_composable/fsdp/

  _fsdp_state.py
    130  current_stream = torch.cuda.current_stream()
    131  self._comm_ctx.all_gather_copy_in_stream.wait_stream(current_stream)
    132  self._comm_ctx.all_gather_stream.wait_stream(current_stream)
    294  torch.cuda.current_stream().wait_event(
  _fsdp_param_group.py
     81  current_stream = torch.cuda.current_stream()
     82  return current_stream, current_stream
    384  torch.cuda.current_stream().wait_event(
    413  torch.cuda.current_stream().wait_event(self._post_reduce_event)
  _fsdp_collectives.py
    247  torch.cuda.current_stream().wait_event(all_gather_event)
    321  current_stream = torch.cuda.current_stream()
    324  reduce_scatter_stream.wait_stream(current_stream)
/aosp_15_r20/external/pytorch/c10/core/impl/

  InlineStreamGuard.h
    103  Stream current_stream() const {   in current_stream() function
    184  std::optional<Stream> current_stream() const {   in current_stream() function
    185  return guard_.has_value() ? std::make_optional(guard_->current_stream())   in current_stream()
/aosp_15_r20/external/pytorch/aten/src/ATen/hip/impl/

  HIPGuardImplMasqueradingAsCUDA.h
    305  HIPStreamMasqueradingAsCUDA current_stream() const {   in current_stream() function
    306  return HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, guard_.current_stream());   in current_stream()
    337  std::optional<HIPStreamMasqueradingAsCUDA> current_stream() const {   in current_stream() function
    338  auto r = guard_.current_stream();   in current_stream()
/aosp_15_r20/hardware/google/camera/devices/EmulatedCamera/hwl/

  EmulatedLogicalRequestState.cpp
    338  auto& current_stream = current_pipeline.streams.at(output_buffer.stream_id);   in UpdateRequestForDynamicStreams() local
    339  if (current_stream.group_id == -1) continue;   in UpdateRequestForDynamicStreams()
    350  stream_ids_for_camera->second.find(current_stream.group_id);   in UpdateRequestForDynamicStreams()
    355  __FUNCTION__, current_stream.group_id, current_physical_camera_);   in UpdateRequestForDynamicStreams()
/aosp_15_r20/external/pytorch/torch/mtia/

  __init__.py
    132  def current_stream(device: Optional[_device_t] = None) -> Stream:   function
    250  self.src_prev_stream = torch.mtia.current_stream(None)
    256  self.dst_prev_stream = torch.mtia.current_stream(cur_stream.device)
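Note: torch.mtia.current_stream mirrors the CUDA/XPU entry points, and the surrounding StreamContext records the previously current stream so it can be restored on exit. A generic save/restore sketch written against torch.cuda (an MTIA device is rarely available); this is not the real StreamContext implementation:

    import torch

    class simple_stream_ctx:
        """Make `stream` current inside the block, then restore the old stream."""

        def __init__(self, stream):
            self.stream = stream
            self.prev = None

        def __enter__(self):
            self.prev = torch.cuda.current_stream(self.stream.device)
            torch.cuda.set_stream(self.stream)
            return self.stream

        def __exit__(self, *exc):
            torch.cuda.set_stream(self.prev)

    if torch.cuda.is_available():
        s = torch.cuda.Stream()
        with simple_stream_ctx(s):
            assert torch.cuda.current_stream() == s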
/aosp_15_r20/external/pytorch/test/distributed/_composable/fsdp/

  test_fully_shard_overlap.py
     62  comm_stream.wait_stream(torch.cuda.current_stream())
     65  torch.cuda.current_stream().wait_stream(comm_stream)
    177  post_optim_event = torch.cuda.current_stream().record_event()
/aosp_15_r20/external/libdrm/etnaviv/

  etnaviv_cmd_stream.c
    153  if (bo->current_stream == stream) {   in bo2idx()
    164  bo->current_stream = stream;   in bo2idx()
    216  bo->current_stream = NULL;   in flush()
/aosp_15_r20/external/pytorch/aten/src/ATen/test/

  cuda_stream_test.cpp
    371  ASSERT_EQ_CUDA(outerGuard.current_stream(), myFirstStream);   in TEST()
    376  ASSERT_EQ_CUDA(innerGuard.current_stream(), mySecondStream);   in TEST()
    380  ASSERT_EQ_CUDA(outerGuard.current_stream(), myFirstStream);   in TEST()
    384  ASSERT_EQ_CUDA(outerGuard.current_stream(), mySecondStream);   in TEST()
/aosp_15_r20/external/pytorch/torch/xpu/

  __init__.py
    299  self.src_prev_stream = torch.xpu.current_stream(None)
    304  self.dst_prev_stream = torch.xpu.current_stream(cur_stream.device)
    360  def current_stream(device: Optional[_device_t] = None) -> Stream:   function