/aosp_15_r20/external/pytorch/torch/csrc/distributed/c10d/

ProcessGroupWrapper.cpp
  504: c10::intrusive_ptr<Work> ProcessGroupWrapper::alltoall(   [in alltoall() function in c10d::ProcessGroupWrapper]
  510: return backend_->alltoall(outputTensors, inputTensors, opts);   [in alltoall()]

ProcessGroupWrapper.hpp
  77: c10::intrusive_ptr<Work> alltoall(

FakeProcessGroup.hpp
  153: c10::intrusive_ptr<Work> alltoall(   [in alltoall() function in c10d::FakeProcessGroup]

Backend.hpp
  250: virtual c10::intrusive_ptr<Work> alltoall(   [in alltoall() function in c10d::Backend]
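The Backend.hpp hit above is the virtual that every concrete backend indexed below (MPI, UCC, Gloo, NCCL, the wrapper and fake groups) overrides. Below is a minimal sketch of such an override, assuming only the signature shown in these hits; the class name MyBackend and its body are hypothetical, not part of the indexed sources.

#include <torch/csrc/distributed/c10d/Backend.hpp>

namespace c10d {

// Hypothetical backend, shown only to illustrate the override point.
class MyBackend : public Backend {
 public:
  using Backend::Backend;

  const std::string getBackendName() const override {
    return "my_backend"; // illustrative name
  }

  c10::intrusive_ptr<Work> alltoall(
      std::vector<at::Tensor>& outputTensors,
      std::vector<at::Tensor>& inputTensors,
      const AllToAllOptions& opts = AllToAllOptions()) override {
    // Contract implied by the call sites below: inputTensors[i] is sent to
    // rank i, and outputTensors[j] receives the tensor contributed by rank j.
    TORCH_CHECK(
        outputTensors.size() == inputTensors.size(),
        "alltoall expects one input and one output tensor per rank");
    // A real backend would enqueue transport work here and return a Work
    // handle that completes once every exchange has finished.
    TORCH_CHECK(false, "MyBackend::alltoall: sketch only, not implemented");
  }
};

} // namespace c10d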
ProcessGroupMPI.hpp
  209: c10::intrusive_ptr<Work> alltoall(

ProcessGroupUCC.hpp
  242: c10::intrusive_ptr<Work> alltoall(

ProcessGroupGloo.cpp
  2641: void alltoall(at::Tensor& outputTensor, at::Tensor& inputTensor) {   [in alltoall() function in c10d::(anonymous namespace)::AsyncAlltoallWork]
  2649: gloo::alltoall(opts);   [in alltoall()]
  2671: alltoall(outputTensor, inputTensor);   [in run()]
  2711: alltoall(cpuOutput, cpuInput);   [in run()]
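The ProcessGroupGloo.cpp hits show c10d's AsyncAlltoallWork delegating to gloo::alltoall(opts). A hedged sketch of that underlying Gloo call follows, assuming Gloo's options-object pattern (AlltoallOptions with setInput/setOutput over raw pointers); the helper name and the float element type are illustrative.

#include <gloo/alltoall.h>
#include <gloo/context.h>

#include <memory>

// Illustrative helper: each rank contributes total_elements floats, split
// evenly across ranks; input and output must both hold total_elements values.
void glooAlltoallFloats(
    const std::shared_ptr<gloo::Context>& context,
    float* input,
    float* output,
    size_t total_elements) {
  gloo::AlltoallOptions opts(context);
  opts.setInput(input, total_elements);
  opts.setOutput(output, total_elements);
  gloo::alltoall(opts); // blocking collective, as in the .cpp hit above
}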
Ops.cpp
  397: auto work = process_group->getBackend(c10::DeviceType::DEV) -> alltoall( \

Functional.cpp
  630: auto work = group->alltoall(outputs, inputs);   [in shard_dim_alltoall()]

ProcessGroup.hpp
  443: virtual c10::intrusive_ptr<Work> alltoall(   [in alltoall() function in c10d::ProcessGroup]

ProcessGroupNCCL.hpp
  620: c10::intrusive_ptr<Work> alltoall(

ProcessGroupMPI.cpp
  779: c10::intrusive_ptr<Work> ProcessGroupMPI::alltoall(   [in alltoall() function in c10d::ProcessGroupMPI]

init.cpp
  2041: &::c10d::ProcessGroup::alltoall,   [in c10d_init()]
  2487: &::c10d::Backend::alltoall,   [in c10d_init()]

ProcessGroupUCC.cpp
  1038: c10::intrusive_ptr<Work> ProcessGroupUCC::alltoall(   [in alltoall() function in c10d::ProcessGroupUCC]

ProcessGroupNCCL.cpp
  4257: c10::intrusive_ptr<Work> ProcessGroupNCCL::alltoall(   [in alltoall() function in c10d::ProcessGroupNCCL]
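Taken together, the Ops.cpp and Functional.cpp hits show the dispatch pattern: resolve the device-specific backend from the process group, issue alltoall, and synchronize on the returned Work. A minimal sketch of that pattern, assuming an already-initialized process group; the helper name and the CPU device choice are illustrative.

#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>

// Illustrative helper mirroring the call sites indexed above.
c10::intrusive_ptr<c10d::Work> runAlltoall(
    const c10::intrusive_ptr<c10d::ProcessGroup>& pg,
    std::vector<at::Tensor>& outputs,
    std::vector<at::Tensor>& inputs) {
  // One tensor per peer rank in each list: inputs[i] goes to rank i,
  // outputs[j] is filled with the tensor received from rank j.
  auto work = pg->getBackend(c10::DeviceType::CPU)
                  ->alltoall(outputs, inputs, c10d::AllToAllOptions());
  work->wait(); // block until the collective completes
  return work;
}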
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/

layout_assignment_test.cc
  1454: const HloInstruction* alltoall = FindInstruction(m.get(), "alltoall");   [in TEST_F(), local]
  1455: ExpectTupleLayoutIs(alltoall->shape(), {{1, 0}, {1, 0}});   [in TEST_F()]
  1456: ExpectLayoutIs(alltoall->operand(0)->shape(), {1, 0});   [in TEST_F()]
  1457: ExpectLayoutIs(alltoall->operand(1)->shape(), {1, 0});   [in TEST_F()]
/aosp_15_r20/external/pytorch/torch/_C/

_distributed_c10d.pyi
  475: def alltoall(
  482: def alltoall(
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/mlir_hlo/stablehlo/tests/

infer_stablehlo.mlir
  115: // CHECK-LABEL: func @alltoall
  116: func.func @alltoall(%data: tensor<4x16xf32>) -> tensor<16x4xindex> {

ops_stablehlo.mlir
  212: // CHECK-LABEL: func @alltoall
  213: func.func @alltoall(%data: tensor<4x16xf32>) -> tensor<16x4xf32> {

/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/mlir_hlo/tests/Dialect/mhlo/

mhlo_infer_shape_type_methods.mlir
  115: // CHECK-LABEL: func @alltoall
  116: func.func @alltoall(%data: tensor<4x16xf32>) -> tensor<16x4xindex> {

ops.mlir
  212: // CHECK-LABEL: func @alltoall
  213: func.func @alltoall(%data: tensor<4x16xf32>) -> tensor<16x4xf32> {
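All four .mlir tests above exercise the same shape rule: splitting a 4x16 operand into 4 pieces along dimension 1 and concatenating the received pieces along dimension 0 yields 16x4. A sketch of the equivalent operation built through the XlaBuilder C++ API rather than MLIR; the function name is illustrative.

#include "tensorflow/compiler/xla/client/xla_builder.h"
#include "tensorflow/compiler/xla/shape_util.h"

// Builds the same alltoall the .mlir tests check: 4x16xf32 -> 16x4xf32.
xla::XlaOp BuildAllToAll(xla::XlaBuilder* b) {
  xla::XlaOp data = xla::Parameter(
      b, 0, xla::ShapeUtil::MakeShape(xla::F32, {4, 16}), "data");
  // split_dimension=1, concat_dimension=0, split_count=4: each of the 4
  // replicas sends one 4x4 column block to every peer and concatenates the
  // received blocks along dimension 0.
  return xla::AllToAll(data, /*split_dimension=*/1,
                       /*concat_dimension=*/0, /*split_count=*/4);
}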
/aosp_15_r20/external/pytorch/torch/testing/_internal/distributed/

multi_threaded_pg.py
  330: def alltoall(self, output_tensor_list, input_tensor_list, opts=AllToAllOptions()):   [member of ProcessLocalGroup]

/aosp_15_r20/external/pytorch/torch/distributed/

distributed_c10d.py
  4114: work = group.alltoall(output_tensor_list, input_tensor_list, opts)
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/client/

xla_builder.cc
  3214: XlaOp alltoall = this->AllToAllTuple(slices, replica_groups, layout);   [in AllToAllTuple(), local]
  3220: received.push_back(this->GetTupleElement(alltoall, i));   [in AllToAllTuple()]
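The xla_builder.cc hits show how the array form of alltoall is lowered: one AllToAllTuple over the per-replica slices, then one GetTupleElement per received piece. A hedged sketch of that unpacking step using the public builder API; the helper name is illustrative.

#include <optional>
#include <vector>

#include "tensorflow/compiler/xla/client/xla_builder.h"

// Illustrative helper: run a tuple-shaped alltoall over `slices` and
// unpack the result, mirroring the two hits above.
std::vector<xla::XlaOp> AllToAllAndUnpack(
    absl::Span<const xla::XlaOp> slices,
    absl::Span<const xla::ReplicaGroup> replica_groups) {
  // One collective over all slices; the result is a tuple holding one
  // element per slice.
  xla::XlaOp alltoall =
      xla::AllToAllTuple(slices, replica_groups, /*layout=*/std::nullopt);
  std::vector<xla::XlaOp> received;
  received.reserve(slices.size());
  for (int64_t i = 0; i < static_cast<int64_t>(slices.size()); ++i) {
    received.push_back(xla::GetTupleElement(alltoall, i));
  }
  return received;
}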
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/g3doc/

operation_semantics.md
  176: XlaBuilder b("alltoall");