@colesbury
Created December 28, 2017 19:43
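Diff against the generated ATen dispatch file CPUFloatType.cpp. It makes three related changes, all visible in the hunks below: the in-place activation methods (elu_, hardtanh_, leaky_relu_, threshold_, and their *_forward_ variants) now return Tensor & and write the result through self_->tensor instead of allocating a fresh output tensor and returning it by value; the old rrelu/rrelu_forward/rrelu_backward family, which allocated its noise tensor internally, is removed; and an rrelu_with_noise family is added in its place that takes noise as an explicit argument, including in-place variants that also write directly into self.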
--- CPUFloatType.cpp 2017-12-28 10:54:32.998921256 -0800
+++ ./torch/lib/build/aten/src/ATen/ATen/CPUFloatType.cpp 2017-12-28 11:30:40.333573856 -0800
@@ -4393,23 +4393,19 @@
grad_input_->maybeScalar(grad_output_->isScalar() && output_->isScalar());
return grad_input;
}
-Tensor CPUFloatType::elu_(Tensor & self, Scalar alpha, Scalar scale) const {
+Tensor & CPUFloatType::elu_(Tensor & self, Scalar alpha, Scalar scale) const {
auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
auto alpha_ = alpha.toDouble();
auto scale_ = scale.toDouble();
- auto output_ = new CPUFloatTensor(context);
- auto output = Tensor(output_, false);
- THNN_FloatELU_updateOutput(context->thc_state, self_->tensor, output_->tensor, alpha_, scale_, true);
- return output;
+ THNN_FloatELU_updateOutput(context->thc_state, self_->tensor, self_->tensor, alpha_, scale_, true);
+ return self;
}
-Tensor CPUFloatType::elu_forward_(Tensor & self, Scalar alpha, Scalar scale) const {
+Tensor & CPUFloatType::elu_forward_(Tensor & self, Scalar alpha, Scalar scale) const {
auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
auto alpha_ = alpha.toDouble();
auto scale_ = scale.toDouble();
- auto output_ = new CPUFloatTensor(context);
- auto output = Tensor(output_, false);
- THNN_FloatELU_updateOutput(context->thc_state, self_->tensor, output_->tensor, alpha_, scale_, true);
- return output;
+ THNN_FloatELU_updateOutput(context->thc_state, self_->tensor, self_->tensor, alpha_, scale_, true);
+ return self;
}
Tensor & CPUFloatType::glu_out(Tensor & output, const Tensor & self, int64_t dim) const {
auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
@@ -4576,23 +4572,19 @@
grad_input_->maybeScalar(grad_output_->isScalar() && self_->isScalar());
return grad_input;
}
-Tensor CPUFloatType::hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) const {
+Tensor & CPUFloatType::hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) const {
auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
auto min_val_ = min_val.toDouble();
auto max_val_ = max_val.toDouble();
- auto output_ = new CPUFloatTensor(context);
- auto output = Tensor(output_, false);
- THNN_FloatHardTanh_updateOutput(context->thc_state, self_->tensor, output_->tensor, min_val_, max_val_, true);
- return output;
+ THNN_FloatHardTanh_updateOutput(context->thc_state, self_->tensor, self_->tensor, min_val_, max_val_, true);
+ return self;
}
-Tensor CPUFloatType::hardtanh_forward_(Tensor & self, Scalar min_val, Scalar max_val) const {
+Tensor & CPUFloatType::hardtanh_forward_(Tensor & self, Scalar min_val, Scalar max_val) const {
auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
auto min_val_ = min_val.toDouble();
auto max_val_ = max_val.toDouble();
- auto output_ = new CPUFloatTensor(context);
- auto output = Tensor(output_, false);
- THNN_FloatHardTanh_updateOutput(context->thc_state, self_->tensor, output_->tensor, min_val_, max_val_, true);
- return output;
+ THNN_FloatHardTanh_updateOutput(context->thc_state, self_->tensor, self_->tensor, min_val_, max_val_, true);
+ return self;
}
Tensor & CPUFloatType::leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope) const {
auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
@@ -4647,21 +4639,17 @@
grad_input_->maybeScalar(grad_output_->isScalar() && self_->isScalar());
return grad_input;
}
-Tensor CPUFloatType::leaky_relu_(Tensor & self, Scalar negative_slope) const {
+Tensor & CPUFloatType::leaky_relu_(Tensor & self, Scalar negative_slope) const {
auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
auto negative_slope_ = negative_slope.toDouble();
- auto output_ = new CPUFloatTensor(context);
- auto output = Tensor(output_, false);
- THNN_FloatLeakyReLU_updateOutput(context->thc_state, self_->tensor, output_->tensor, negative_slope_, true);
- return output;
+ THNN_FloatLeakyReLU_updateOutput(context->thc_state, self_->tensor, self_->tensor, negative_slope_, true);
+ return self;
}
-Tensor CPUFloatType::leaky_relu_forward_(Tensor & self, Scalar negative_slope) const {
+Tensor & CPUFloatType::leaky_relu_forward_(Tensor & self, Scalar negative_slope) const {
auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
auto negative_slope_ = negative_slope.toDouble();
- auto output_ = new CPUFloatTensor(context);
- auto output = Tensor(output_, false);
- THNN_FloatLeakyReLU_updateOutput(context->thc_state, self_->tensor, output_->tensor, negative_slope_, true);
- return output;
+ THNN_FloatLeakyReLU_updateOutput(context->thc_state, self_->tensor, self_->tensor, negative_slope_, true);
+ return self;
}
Tensor & CPUFloatType::log_sigmoid_out(Tensor & output, const Tensor & self) const {
auto buffer_ = new CPUFloatTensor(context);
@@ -4843,100 +4831,6 @@
if (grad_weight_) grad_weight_->maybeScalar(maybe_scalar);
return std::tuple<Tensor, Tensor>(grad_input, grad_weight);
}
-Tensor & CPUFloatType::rrelu_out(Tensor & output, const Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const {
- auto noise_ = new CPUFloatTensor(context);
- auto noise = Tensor(noise_, false);
- auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
- auto lower_ = lower.toDouble();
- auto upper_ = upper.toDouble();
- auto generator_ = check_generator<CPUGenerator>(generator, &context->defaultGenerator(backend()));
- auto output_ = checked_cast_tensor<CPUFloatTensor>(output.pImpl,"output",5, false);
- THNN_FloatRReLU_updateOutput(context->thc_state, self_->tensor, output_->tensor, noise_->tensor, lower_, upper_, training, false, generator_->generator);
- output_->maybeScalar(self_->isScalar());
- return output;
-}
-Tensor CPUFloatType::rrelu(const Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const {
- auto noise_ = new CPUFloatTensor(context);
- auto noise = Tensor(noise_, false);
- auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
- auto lower_ = lower.toDouble();
- auto upper_ = upper.toDouble();
- auto generator_ = check_generator<CPUGenerator>(generator, &context->defaultGenerator(backend()));
- auto output_ = new CPUFloatTensor(context);
- auto output = Tensor(output_, false);
- THNN_FloatRReLU_updateOutput(context->thc_state, self_->tensor, output_->tensor, noise_->tensor, lower_, upper_, training, false, generator_->generator);
- output_->maybeScalar(self_->isScalar());
- return output;
-}
-Tensor & CPUFloatType::rrelu_forward_out(Tensor & output, const Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator, const Tensor & noise) const {
- auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
- auto lower_ = lower.toDouble();
- auto upper_ = upper.toDouble();
- auto generator_ = check_generator<CPUGenerator>(generator, &context->defaultGenerator(backend()));
- auto noise_ = checked_cast_tensor<CPUFloatTensor>(noise.pImpl,"noise",6, false);
- auto output_ = checked_cast_tensor<CPUFloatTensor>(output.pImpl,"output",6, false);
- THNN_FloatRReLU_updateOutput(context->thc_state, self_->tensor, output_->tensor, noise_->tensor, lower_, upper_, training, false, generator_->generator);
- output_->maybeScalar(self_->isScalar() && noise_->isScalar());
- return output;
-}
-Tensor CPUFloatType::rrelu_forward(const Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator, const Tensor & noise) const {
- auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
- auto lower_ = lower.toDouble();
- auto upper_ = upper.toDouble();
- auto generator_ = check_generator<CPUGenerator>(generator, &context->defaultGenerator(backend()));
- auto noise_ = checked_cast_tensor<CPUFloatTensor>(noise.pImpl,"noise",6, false);
- auto output_ = new CPUFloatTensor(context);
- auto output = Tensor(output_, false);
- THNN_FloatRReLU_updateOutput(context->thc_state, self_->tensor, output_->tensor, noise_->tensor, lower_, upper_, training, false, generator_->generator);
- output_->maybeScalar(self_->isScalar() && noise_->isScalar());
- return output;
-}
-Tensor & CPUFloatType::rrelu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lower, Scalar upper, bool training, const Tensor & noise) const {
- auto grad_output_ = checked_cast_tensor<CPUFloatTensor>(grad_output.pImpl,"grad_output",1, false);
- auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",2, false);
- auto lower_ = lower.toDouble();
- auto upper_ = upper.toDouble();
- auto noise_ = checked_cast_tensor<CPUFloatTensor>(noise.pImpl,"noise",6, false);
- auto grad_input_ = checked_cast_tensor<CPUFloatTensor>(grad_input.pImpl,"grad_input",6, false);
- THNN_FloatRReLU_updateGradInput(context->thc_state, self_->tensor, grad_output_->tensor, grad_input_->tensor, noise_->tensor, lower_, upper_, training, false);
- grad_input_->maybeScalar(grad_output_->isScalar() && self_->isScalar() && noise_->isScalar());
- return grad_input;
-}
-Tensor CPUFloatType::rrelu_backward(const Tensor & grad_output, const Tensor & self, Scalar lower, Scalar upper, bool training, const Tensor & noise) const {
- auto grad_output_ = checked_cast_tensor<CPUFloatTensor>(grad_output.pImpl,"grad_output",1, false);
- auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",2, false);
- auto lower_ = lower.toDouble();
- auto upper_ = upper.toDouble();
- auto noise_ = checked_cast_tensor<CPUFloatTensor>(noise.pImpl,"noise",6, false);
- auto grad_input_ = new CPUFloatTensor(context);
- auto grad_input = Tensor(grad_input_, false);
- THNN_FloatRReLU_updateGradInput(context->thc_state, self_->tensor, grad_output_->tensor, grad_input_->tensor, noise_->tensor, lower_, upper_, training, false);
- grad_input_->maybeScalar(grad_output_->isScalar() && self_->isScalar() && noise_->isScalar());
- return grad_input;
-}
-Tensor CPUFloatType::rrelu_(Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const {
- auto noise_ = new CPUFloatTensor(context);
- auto noise = Tensor(noise_, false);
- auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
- auto lower_ = lower.toDouble();
- auto upper_ = upper.toDouble();
- auto generator_ = check_generator<CPUGenerator>(generator, &context->defaultGenerator(backend()));
- auto output_ = new CPUFloatTensor(context);
- auto output = Tensor(output_, false);
- THNN_FloatRReLU_updateOutput(context->thc_state, self_->tensor, output_->tensor, noise_->tensor, lower_, upper_, training, true, generator_->generator);
- return output;
-}
-Tensor CPUFloatType::rrelu_forward_(Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator, const Tensor & noise) const {
- auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
- auto lower_ = lower.toDouble();
- auto upper_ = upper.toDouble();
- auto generator_ = check_generator<CPUGenerator>(generator, &context->defaultGenerator(backend()));
- auto noise_ = checked_cast_tensor<CPUFloatTensor>(noise.pImpl,"noise",6, false);
- auto output_ = new CPUFloatTensor(context);
- auto output = Tensor(output_, false);
- THNN_FloatRReLU_updateOutput(context->thc_state, self_->tensor, output_->tensor, noise_->tensor, lower_, upper_, training, true, generator_->generator);
- return output;
-}
Tensor & CPUFloatType::softmax_out(Tensor & output, const Tensor & self, int64_t dim) const {
auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
dim = maybe_wrap_dim(dim, self_);
@@ -5106,6 +5000,93 @@
grad_input_->maybeScalar(grad_output_->isScalar() && self_->isScalar());
return grad_input;
}
+Tensor & CPUFloatType::rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
+ auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
+ auto noise_ = checked_cast_tensor<CPUFloatTensor>(noise.pImpl,"noise",2, false);
+ auto lower_ = lower.toDouble();
+ auto upper_ = upper.toDouble();
+ auto generator_ = check_generator<CPUGenerator>(generator, &context->defaultGenerator(backend()));
+ auto output_ = checked_cast_tensor<CPUFloatTensor>(output.pImpl,"output",6, false);
+ THNN_FloatRReLU_updateOutput(context->thc_state, self_->tensor, output_->tensor, noise_->tensor, lower_, upper_, training, false, generator_->generator);
+ output_->maybeScalar(self_->isScalar() && noise_->isScalar());
+ return output;
+}
+Tensor CPUFloatType::rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
+ auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
+ auto noise_ = checked_cast_tensor<CPUFloatTensor>(noise.pImpl,"noise",2, false);
+ auto lower_ = lower.toDouble();
+ auto upper_ = upper.toDouble();
+ auto generator_ = check_generator<CPUGenerator>(generator, &context->defaultGenerator(backend()));
+ auto output_ = new CPUFloatTensor(context);
+ auto output = Tensor(output_, false);
+ THNN_FloatRReLU_updateOutput(context->thc_state, self_->tensor, output_->tensor, noise_->tensor, lower_, upper_, training, false, generator_->generator);
+ output_->maybeScalar(self_->isScalar() && noise_->isScalar());
+ return output;
+}
+Tensor & CPUFloatType::rrelu_with_noise_forward_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
+ auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
+ auto noise_ = checked_cast_tensor<CPUFloatTensor>(noise.pImpl,"noise",2, false);
+ auto lower_ = lower.toDouble();
+ auto upper_ = upper.toDouble();
+ auto generator_ = check_generator<CPUGenerator>(generator, &context->defaultGenerator(backend()));
+ auto output_ = checked_cast_tensor<CPUFloatTensor>(output.pImpl,"output",6, false);
+ THNN_FloatRReLU_updateOutput(context->thc_state, self_->tensor, output_->tensor, noise_->tensor, lower_, upper_, training, false, generator_->generator);
+ output_->maybeScalar(self_->isScalar() && noise_->isScalar());
+ return output;
+}
+Tensor CPUFloatType::rrelu_with_noise_forward(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
+ auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
+ auto noise_ = checked_cast_tensor<CPUFloatTensor>(noise.pImpl,"noise",2, false);
+ auto lower_ = lower.toDouble();
+ auto upper_ = upper.toDouble();
+ auto generator_ = check_generator<CPUGenerator>(generator, &context->defaultGenerator(backend()));
+ auto output_ = new CPUFloatTensor(context);
+ auto output = Tensor(output_, false);
+ THNN_FloatRReLU_updateOutput(context->thc_state, self_->tensor, output_->tensor, noise_->tensor, lower_, upper_, training, false, generator_->generator);
+ output_->maybeScalar(self_->isScalar() && noise_->isScalar());
+ return output;
+}
+Tensor & CPUFloatType::rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const {
+ auto grad_output_ = checked_cast_tensor<CPUFloatTensor>(grad_output.pImpl,"grad_output",1, false);
+ auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",2, false);
+ auto noise_ = checked_cast_tensor<CPUFloatTensor>(noise.pImpl,"noise",3, false);
+ auto lower_ = lower.toDouble();
+ auto upper_ = upper.toDouble();
+ auto grad_input_ = checked_cast_tensor<CPUFloatTensor>(grad_input.pImpl,"grad_input",6, false);
+ THNN_FloatRReLU_updateGradInput(context->thc_state, self_->tensor, grad_output_->tensor, grad_input_->tensor, noise_->tensor, lower_, upper_, training, false);
+ grad_input_->maybeScalar(grad_output_->isScalar() && self_->isScalar() && noise_->isScalar());
+ return grad_input;
+}
+Tensor CPUFloatType::rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const {
+ auto grad_output_ = checked_cast_tensor<CPUFloatTensor>(grad_output.pImpl,"grad_output",1, false);
+ auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",2, false);
+ auto noise_ = checked_cast_tensor<CPUFloatTensor>(noise.pImpl,"noise",3, false);
+ auto lower_ = lower.toDouble();
+ auto upper_ = upper.toDouble();
+ auto grad_input_ = new CPUFloatTensor(context);
+ auto grad_input = Tensor(grad_input_, false);
+ THNN_FloatRReLU_updateGradInput(context->thc_state, self_->tensor, grad_output_->tensor, grad_input_->tensor, noise_->tensor, lower_, upper_, training, false);
+ grad_input_->maybeScalar(grad_output_->isScalar() && self_->isScalar() && noise_->isScalar());
+ return grad_input;
+}
+Tensor & CPUFloatType::rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
+ auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
+ auto noise_ = checked_cast_tensor<CPUFloatTensor>(noise.pImpl,"noise",2, false);
+ auto lower_ = lower.toDouble();
+ auto upper_ = upper.toDouble();
+ auto generator_ = check_generator<CPUGenerator>(generator, &context->defaultGenerator(backend()));
+ THNN_FloatRReLU_updateOutput(context->thc_state, self_->tensor, self_->tensor, noise_->tensor, lower_, upper_, training, true, generator_->generator);
+ return self;
+}
+Tensor & CPUFloatType::rrelu_with_noise_forward_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
+ auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
+ auto noise_ = checked_cast_tensor<CPUFloatTensor>(noise.pImpl,"noise",2, false);
+ auto lower_ = lower.toDouble();
+ auto upper_ = upper.toDouble();
+ auto generator_ = check_generator<CPUGenerator>(generator, &context->defaultGenerator(backend()));
+ THNN_FloatRReLU_updateOutput(context->thc_state, self_->tensor, self_->tensor, noise_->tensor, lower_, upper_, training, true, generator_->generator);
+ return self;
+}
Tensor & CPUFloatType::threshold_out(Tensor & output, const Tensor & self, Scalar threshold, Scalar value) const {
auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
auto threshold_ = threshold.toDouble();
@@ -5165,23 +5146,19 @@
grad_input_->maybeScalar(grad_output_->isScalar() && self_->isScalar());
return grad_input;
}
-Tensor CPUFloatType::threshold_(Tensor & self, Scalar threshold, Scalar value) const {
+Tensor & CPUFloatType::threshold_(Tensor & self, Scalar threshold, Scalar value) const {
auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
auto threshold_ = threshold.toDouble();
auto value_ = value.toDouble();
- auto output_ = new CPUFloatTensor(context);
- auto output = Tensor(output_, false);
- THNN_FloatThreshold_updateOutput(context->thc_state, self_->tensor, output_->tensor, threshold_, value_, true);
- return output;
+ THNN_FloatThreshold_updateOutput(context->thc_state, self_->tensor, self_->tensor, threshold_, value_, true);
+ return self;
}
-Tensor CPUFloatType::threshold_forward_(Tensor & self, Scalar threshold, Scalar value) const {
+Tensor & CPUFloatType::threshold_forward_(Tensor & self, Scalar threshold, Scalar value) const {
auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
auto threshold_ = threshold.toDouble();
auto value_ = value.toDouble();
- auto output_ = new CPUFloatTensor(context);
- auto output = Tensor(output_, false);
- THNN_FloatThreshold_updateOutput(context->thc_state, self_->tensor, output_->tensor, threshold_, value_, true);
- return output;
+ THNN_FloatThreshold_updateOutput(context->thc_state, self_->tensor, self_->tensor, threshold_, value_, true);
+ return self;
}
Tensor & CPUFloatType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntList output_size) const {
auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
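The sketch below illustrates the calling convention after this change. It is a minimal, hypothetical usage example assuming a 2017-era ATen build with <ATen/ATen.h> on the include path; it is not code from the diff itself, and the rrelu_with_noise_ call is shown commented out since the diff only confirms it as a Type method, not necessarily a Tensor method.

#include <ATen/ATen.h>

int main() {
  // Type-based factory API of 2017-era ATen.
  at::Tensor t = at::CPU(at::kFloat).ones({2, 3});

  // After this diff, the in-place variants mutate t's storage and return
  // Tensor &, so no temporary output tensor is allocated and calls chain:
  t.hardtanh_(/*min_val=*/-1.0, /*max_val=*/1.0)
   .leaky_relu_(/*negative_slope=*/0.01);

  // rrelu_with_noise_ takes the noise tensor explicitly instead of
  // allocating it internally, as the removed rrelu_ did. The call shape
  // mirrors the Type method signature in the diff:
  // at::Tensor noise = at::CPU(at::kFloat).zeros({2, 3});
  // t.rrelu_with_noise_(noise, /*lower=*/0.125, /*upper=*/0.333,
  //                     /*training=*/true, /*generator=*/nullptr);
  return 0;
}

Returning Tensor & rather than Tensor also matches the usual C++ convention for mutating operations: the previous code allocated a new CPUFloatTensor inside each in-place method and returned it by value, so the "in-place" variants were neither allocation-free nor actually writing into self.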