temporary updates to compute graph structure (#14)
Not passing (or compiling).
Dando18 committed Aug 9, 2019
1 parent: 55b479a · commit: 959736d
Showing 50 changed files with 370 additions and 914 deletions.
58 changes: 0 additions & 58 deletions include/compute/add/addop.h

This file was deleted.

72 changes: 0 additions & 72 deletions include/compute/add/geadd_internal.h

This file was deleted.

36 changes: 16 additions & 20 deletions include/compute/batchnorm/batchnormop.h
@@ -2,6 +2,7 @@
 #pragma once
 
 #include <vector>
+#include "compute/compute_graph.h"
 #include "compute/operation.h"
 #include "math/batchnorm.h"
 #include "tensor/tensor.h"
@@ -13,31 +14,27 @@
 namespace magmadnn {
 namespace op {
 
-template <typename T>
-class BatchNormOp : public Operation<T> {
+class BatchNormOp : public Operation {
    public:
-    BatchNormOp(Operation<T> *input, bool needs_grad = true);
+    BatchNormOp(Operation *input);
 
     virtual ~BatchNormOp();
 
-    std::string to_string() { return "BatchNorm(" + input->to_string() + ")"; }
+    std::string to_string() const override { return "BatchNorm(" + input->to_string() + ")"; }
 
    protected:
-    Tensor<T> *_eval(bool recompute);
-    Tensor<T> *_grad(Operation<T> *consumer, Operation<T> *var, Tensor<T> *grad);
+    Tensor &_eval(bool recompute) override;
+    Tensor &_grad(Operation *consumer, Operation *var, const Tensor &grad) override;
 
-    Operation<T> *input;
-    Tensor<T> *input_tensor;
+    Operation *input;
 
     unsigned int num_calls;
-    Tensor<T> *bn_scale;
-    Tensor<T> *bn_scale_diff;
-    Tensor<T> *bn_bias;
-    Tensor<T> *bn_bias_diff;
-    Tensor<T> *running_mean;
-    Tensor<T> *running_variance;
-    Tensor<T> *saved_mean;
-    Tensor<T> *saved_variance;
+    Tensor bn_scale;
+    Tensor bn_scale_diff;
+    Tensor bn_bias;
+    Tensor bn_bias_diff;
+    Tensor running_mean;
+    Tensor running_variance;
+    Tensor saved_mean;
+    Tensor saved_variance;
 
 #if defined(_HAS_CUDA_)
     void init_settings();
@@ -48,8 +45,7 @@ class BatchNormOp : public Operation<T> {
     bool copy;
 };
 
-template <typename T>
-BatchNormOp<T> *batchnorm(Operation<T> *input, bool needs_grad = true);
+inline Operation *batchnorm(Operation *input) { return default_graph.add_operation<BatchNormOp>(input); }
 
 } // namespace op
 } // namespace magmadnn
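
For readers tracking the refactor, here is a before/after sketch of a call site. This is illustrative only: no construction of `input` appears in this commit, and the old usage is reconstructed from the deleted lines above.

    // Old API: the element type is a template parameter and the factory
    // returns a typed pointer the caller manages.
    //     op::Operation<float> *bn = op::batchnorm(input, /*needs_grad=*/true);

    // New API: dtype is a runtime property of the tensors, and default_graph
    // owns the node; the caller receives a non-owning Operation*.
    op::Operation *bn = op::batchnorm(input);
    Tensor &out = bn->eval(/*recompute=*/true);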
62 changes: 62 additions & 0 deletions include/compute/binaryop/binaryop.h
@@ -0,0 +1,62 @@
+/**
+ * @file binaryop.h
+ * @author Daniel Nichols
+ * @version 0.1
+ * @date 2019-08-08
+ *
+ * @copyright Copyright (c) 2019
+ */
+#pragma once
+
+#include "compute/compute_graph.h"
+#include "compute/operation.h"
+
+#include "magmadnn_device_types.h"
+
+#include "math/binary_math_operations.h"
+#include "math/launch_math_kernel.h"
+
+namespace magmadnn {
+namespace op {
+
+template <typename BinaryOpType>
+class BinaryOp : public Operation {
+   public:
+    BinaryOp(Operation *x, Operation *y) {
+        this->use_tensor_settings(x, true);
+
+        this->output_tensor_ = Tensor(this->output_shape_, this->dtype_, {NONE}, this->mem_type_);
+    }
+
+    std::string to_string() const override { return "BIN_OP(" + x->to_string() + ", " + y->to_string() + ")"; }
+
+   protected:
+    Tensor &_eval(bool recompute = true) override {
+        Tensor &x_tensor = x->eval(recompute);
+        Tensor &y_tensor = y->eval(recompute);
+
+        FOR_ALL_DEVICE_TYPES(getDeviceType(this->mem_type_), DEV_TYPE, {
+            ::magmadnn::math::ParallelLauncher<DEV_TYPE, BinaryOpType>(x_tensor, y_tensor, this->output_tensor_);
+        })
+    }
+
+    Tensor &_grad(Operation *consumer, Operation *var, const Tensor &grad) override {}
+
+    Operation *x, *y;
+};
+
+#define MAKE_BINARY(name)                                                                 \
+    inline Operation *name(Operation *a, Operation *b) {                                  \
+        return default_graph.add_operation<BinaryOp<::magmadnn::math::name##_map>>(a, b); \
+    }
+
+MAKE_BINARY(add)
+MAKE_BINARY(sub)
+MAKE_BINARY(product)
+MAKE_BINARY(div)
+MAKE_BINARY(pow)
+
+#undef MAKE_BINARY
+
+} // namespace op
+} // namespace magmadnn
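
Written out by hand, `MAKE_BINARY(add)` expands to the factory below; the other four invocations differ only in the function name and in which `*_map` functor is selected at compile time:

    inline Operation *add(Operation *a, Operation *b) {
        return default_graph.add_operation<BinaryOp<::magmadnn::math::add_map>>(a, b);
    }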
2 changes: 1 addition & 1 deletion include/compute/compute_graph.h
@@ -31,7 +31,7 @@ class Graph {
 template <typename op_type, typename... Args>
 inline Operation* Graph::add_operation(Args... args) {
     // std::unique_ptr<Operation> tmp_ptr{new op_type(args)};
-    std::unique_ptr<Operation> tmp_ptr = ::magmadnn::internal::make_unique<op_type>(args);
+    std::unique_ptr<Operation> tmp_ptr = ::magmadnn::internal::make_unique<op_type>(args...);
 
     /* use std::move to transfer ownership */
     this->nodes.push_back(std::move(tmp_ptr));
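
The one-token change above is the substantive fix: `args` names a parameter pack, and an unexpanded pack cannot appear in an expression, so the old line was ill-formed; expanding it with `...` forwards each argument to the operation's constructor. Below is a self-contained sketch of the ownership pattern this header implements, with `std::make_unique` standing in for `::magmadnn::internal::make_unique` (assumed to be a C++11 polyfill) and a raw-pointer return assumed for the lines hidden below the fold:

    #include <memory>
    #include <utility>
    #include <vector>

    struct Operation {
        virtual ~Operation() = default;
    };

    struct Graph {
        // The graph owns every node; callers hold non-owning raw pointers.
        std::vector<std::unique_ptr<Operation>> nodes;

        template <typename op_type, typename... Args>
        Operation *add_operation(Args... args) {
            std::unique_ptr<Operation> tmp_ptr = std::make_unique<op_type>(args...);
            nodes.push_back(std::move(tmp_ptr));
            return nodes.back().get();
        }
    };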
13 changes: 0 additions & 13 deletions include/compute/conv2dforward/conv2dforward_internal.h

This file was deleted.

27 changes: 14 additions & 13 deletions include/compute/conv2dforward/conv2dforwardop.h
@@ -1,33 +1,32 @@
 
 #pragma once
 
-#include "compute/conv2dforward/conv2dforward_internal.h"
+#include "compute/compute_graph.h"
 #include "compute/operation.h"
 #include "math/conv2d.h"
 #include "tensor/tensor.h"
 
 namespace magmadnn {
 namespace op {
 
-template <typename T>
-class Conv2DForwardOp : public Operation<T> {
+class Conv2DForwardOp : public Operation {
    public:
-    Conv2DForwardOp(Operation<T> *input, Operation<T> *filter, int pad_h = 0, int pad_w = 0, int vertical_stride = 1,
+    Conv2DForwardOp(Operation *input, Operation *filter, int pad_h = 0, int pad_w = 0, int vertical_stride = 1,
                     int horizontal_stride = 1, int dilation_h = 1, int dilation_w = 1,
                     bool use_cross_correlation = true, bool needs_grad = true);
     ~Conv2DForwardOp();
 
-    std::string to_string() { return "Conv2DForward(" + input->to_string() + ")"; }
+    std::string to_string() const override { return "Conv2DForward(" + input->to_string() + ")"; }
 
    protected:
-    Tensor<T> *_eval(bool recompute);
-    Tensor<T> &_grad(Operation<T> *consumer, Operation<T> *var, const Tensor<T> &grad);
+    Tensor &_eval(bool recompute) override;
+    Tensor &_grad(Operation *consumer, Operation *var, const Tensor &grad) override;
 
     void init_settings();
     void calculate_and_set_output_shape();
 
-    Operation<T> *input, *filter;
-    Tensor<T> *input_tensor, *filter_tensor;
+    Operation *input, *filter;
+    // Tensor *input_tensor, *filter_tensor;
 
     int pad_h, pad_w, vertical_stride, horizontal_stride, dilation_h, dilation_w;
     bool use_cross_correlation;
@@ -37,10 +36,12 @@ class Conv2DForwardOp : public Operation<T> {
 #endif
 };
 
-template <typename T>
-Conv2DForwardOp<T> *conv2dforward(Operation<T> *input, Operation<T> *filter, int pad_h = 0, int pad_w = 0,
-                                  int vertical_stride = 1, int horizontal_stride = 1, int dilation_h = 1,
-                                  int dilation_w = 1, bool use_cross_correlation = true, bool needs_grad = true);
+inline Operation *conv2dforward(Operation *input, Operation *filter, int pad_h = 0, int pad_w = 0,
+                                int vertical_stride = 1, int horizontal_stride = 1, int dilation_h = 1,
+                                int dilation_w = 1, bool use_cross_correlation = true, bool needs_grad = true) {
+    return default_graph.add_operation<Conv2DForwardOp>(input, filter, pad_h, pad_w, vertical_stride, horizontal_stride,
+                                                        dilation_h, dilation_w, use_cross_correlation, needs_grad);
+}
 
 } // namespace op
 } // namespace magmadnn
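
A hypothetical call site for the new inline factory, with `input` and `filter` assumed to be image and filter nodes already registered on `default_graph`; the trailing parameters keep the defaults from the signature above:

    // 1-pixel padding and stride 2 in both dimensions; dilation,
    // cross-correlation mode, and needs_grad keep their default values.
    op::Operation *conv = op::conv2dforward(input, filter,
                                            /*pad_h=*/1, /*pad_w=*/1,
                                            /*vertical_stride=*/2,
                                            /*horizontal_stride=*/2);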
29 changes: 0 additions & 29 deletions include/compute/crossentropy/crossentropy_internal.h

This file was deleted.
