Provide full path for buck hipification (pytorch#30746)
Summary:
Pull Request resolved: pytorch#30746

This diff should be safe as long as the open source build succeeds, and it should have no impact on CUDA.

Differential Revision: D18811302

fbshipit-source-id: a7adab993816cba51842701898fac5019438b664
Serhat Yilmaz authored and facebook-github-bot committed Dec 5, 2019
1 parent f2a2fec commit c4e9748
Showing 5 changed files with 10 additions and 10 deletions.
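
Background, for reading the diffs below: PyTorch's hipify step translates CUDA-flavored sources for ROCm by rewriting names and include paths (THC becomes THH, cuda becomes hip). Every change in this commit replaces a short hipified include such as <THH/THH.h> with its full repository-root-relative form <aten/src/THH/THH.h>, presumably because the internal Buck build resolves includes from the repository root. The Python sketch below illustrates that style of rewrite under this assumption about Buck's include resolution; hipify_include and its two rewrite rules are hypothetical simplifications, not the actual torch/utils/hipify implementation.

import re

def hipify_include(line: str) -> str:
    """Rewrite one CUDA-flavored #include into its full-path HIP form.

    Illustrative only: real hipification handles many more renames.
    """
    m = re.match(r"#include <([^>]+)>", line)
    if m is None:
        return line
    path = m.group(1)
    # HIP spellings of the CUDA-flavored path components (simplified).
    hip_path = path.replace("THC", "THH").replace("/cuda/", "/hip/")
    if hip_path == path:
        return line  # nothing to hipify; leave the include alone
    # Prepend the repo-root-relative prefix so Buck can resolve it.
    return "#include <aten/src/{}>".format(hip_path)

# These mirror the substitutions visible in the diffs below.
print(hipify_include("#include <THC/THCDeviceUtils.cuh>"))
# -> #include <aten/src/THH/THHDeviceUtils.cuh>
print(hipify_include("#include <ATen/native/cuda/DeviceSqrt.cuh>"))
# -> #include <aten/src/ATen/native/hip/DeviceSqrt.cuh>

The diffs that follow are the before/after of exactly that prefix change, applied to the emitted HIP includes (and, in the hand-written HIPGuardImplMasqueradingAsCUDA.h, made directly).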
2 changes: 1 addition & 1 deletion aten/src/ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h
@@ -1,6 +1,6 @@
 #pragma once
 
-#include <ATen/hip/HIPConfig.h>
+#include <aten/src/ATen/hip/HIPConfig.h>
 
 // The includes of HIPGuard.h
 #include <c10/hip/impl/HIPGuardImpl.h>
2 changes: 1 addition & 1 deletion aten/src/ATen/miopen/Utils.h
@@ -1,7 +1,7 @@
 #pragma once
 
 #include <ATen/ATen.h>
-#include <THH/THH.h>
+#include <aten/src/THH/THH.h>
 #include <ATen/miopen/miopen-wrapper.h>
 #include <ATen/miopen/Handle.h>
 
4 changes: 2 additions & 2 deletions aten/src/ATen/native/SharedReduceOps.h
@@ -9,8 +9,8 @@
 #include <THC/THCDeviceUtils.cuh>
 #include <ATen/native/cuda/DeviceSqrt.cuh>
 #elif defined(__HIPCC__)
-#include <THH/THHDeviceUtils.cuh>
-#include <ATen/native/hip/DeviceSqrt.cuh>
+#include <aten/src/THH/THHDeviceUtils.cuh>
+#include <aten/src/ATen/native/hip/DeviceSqrt.cuh>
 #endif
 #if defined(__CUDACC__) || defined(__HIPCC__)
 #include <thrust/tuple.h>
2 changes: 1 addition & 1 deletion aten/src/ATen/native/miopen/Conv_miopen.cpp
@@ -105,7 +105,7 @@ std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_depthwise_convolution_backwa
 
 #else // AT_ROCM_ENABLED
 
-#include <THH/THH.h>
+#include <aten/src/THH/THH.h>
 
 #include <ATen/miopen/miopen-wrapper.h>
 #include <ATen/miopen/Descriptors.h>
10 changes: 5 additions & 5 deletions aten/src/ATen/native/miopen/RNN_miopen.cpp
@@ -27,7 +27,7 @@ namespace at { namespace native {
 const Tensor& input, TensorList weight, int64_t weight_stride0, const Tensor& weight_buf, const Tensor& hx, const Tensor& cx,
 const Tensor& output, const Tensor& grad_output_r, const Tensor& grad_hy_r,
 const Tensor& grad_cy_r, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first,
-double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor& dropout_state, 
+double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor& dropout_state,
 const Tensor& reserve, std::array<bool, 4> output_mask
 ) {
 AT_ERROR("miopen_rnn_backward: ATen not compiled with MIOpen support.");
@@ -37,7 +37,7 @@ namespace at { namespace native {
 
 #else // AT_ROCM_ENABLED()
 
-#include <THH/THH.h>
+#include <aten/src/THH/THH.h>
 
 #include <ATen/miopen/miopen-wrapper.h>
 #include <ATen/miopen/Descriptors.h>
@@ -517,7 +517,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> miopen_rnn(
 w_desc.desc(), weight_buf.data_ptr(),
 y_descs_arr.data(), y.data_ptr(),
 descs.hy_desc.desc(), hy.data_ptr(),
-descs.cy_desc.desc(), cy.defined() ? cy.data_ptr() : nullptr, 
+descs.cy_desc.desc(), cy.defined() ? cy.data_ptr() : nullptr,
 workspace.data_ptr(), workspace_size, reserve.data_ptr(), reserver_size ));
 } else { //Inference.
 reserve = at::empty({0}, input.options().dtype(kByte));
@@ -751,7 +751,7 @@ std::tuple<Tensor, Tensor, Tensor, std::vector<Tensor>> miopen_rnn_backward(
 const Tensor& input, TensorList weight, int64_t weight_stride0, const Tensor& weight_buf, const Tensor& hx, const Tensor& cx,
 const Tensor& output, const Tensor& grad_output_r, const Tensor& grad_hy_r,
 const Tensor& grad_cy_r, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first,
-double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor& dropout_state, 
+double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor& dropout_state,
 const Tensor& reserve, std::array<bool, 4> output_mask
 ) {
 auto grad_output = grad_output_r.defined() ? grad_output_r : at::zeros_like(output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
@@ -894,4 +894,4 @@ REGISTER_CUDA_DISPATCH(lstm_packed_miopen_stub, &lstm_packed_miopen);
 } // anonymous namepsace
 }} //namespace native.
 
-#endif
\ No newline at end of file
+#endif
