feat/serialization: add deserialization
CLOSES #14
hobofan committed Apr 12, 2016
1 parent cb1a1b4 commit 31812f8
Showing 10 changed files with 317 additions and 12 deletions.
33 changes: 24 additions & 9 deletions .travis.yml
@@ -28,16 +28,31 @@ script:
     travis-cargo --only stable doc -- --no-default-features --features $FEATURES
 addons:
   apt:
     sources:
+      - ubuntu-toolchain-r-test
     packages:
-      - libcurl4-openssl-dev
-      - libelf-dev
-      - libdw-dev
-      - libblas-dev
-      - fglrx
-      - opencl-headers
-      - binutils-dev
-      - nvidia-opencl-dev
+      - libcurl4-openssl-dev
+      - libelf-dev
+      - libdw-dev
+      - libblas-dev
+      - fglrx
+      - opencl-headers
+      - binutils-dev
+      - nvidia-opencl-dev
+      - gcc-4.8
+      - g++-4.8
+install:
+  - git clone https://github.com/kentonv/capnproto.git
+  - cd capnproto
+  - git checkout tags/v0.5.3
+  - cd c++
+  - ./setup-autotools.sh
+  - autoreconf -i
+  - ./configure --disable-shared
+  - make -j5
+  - export PATH="$PATH:$(pwd)"
+  - export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$(pwd)"
+  - cd ../..
 after_success:
   - travis-cargo doc-upload
   - travis-cargo coveralls --no-sudo --verify
8 changes: 8 additions & 0 deletions src/capnp_util.rs
@@ -7,3 +7,11 @@ pub trait CapnpWrite<'a> {
/// Write the struct into the message that is being built by the Builder.
fn write_capnp(&self, builder: &mut Self::Builder);
}

pub trait CapnpRead<'a> {
/// The Reader that was autogenerated by capnp.
type Reader;

/// Read the struct from the Reader.
fn read_capnp(reader: Self::Reader) -> Self;
}
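
For illustration, the two traits are designed to be implemented as a pair. Below is a minimal sketch for an imaginary config type; DummyConfig and the capnp_dummy Builder/Reader stand in for capnp-generated code and are not part of this commit:

    struct DummyConfig {
        size: usize,
    }

    impl<'a> CapnpWrite<'a> for DummyConfig {
        type Builder = capnp_dummy::Builder<'a>;

        // Mirror the struct into the capnp builder.
        fn write_capnp(&self, builder: &mut Self::Builder) {
            builder.set_size(self.size as u64);
        }
    }

    impl<'a> CapnpRead<'a> for DummyConfig {
        type Reader = capnp_dummy::Reader<'a>;

        // Rebuild the struct from the capnp reader.
        fn read_capnp(reader: Self::Reader) -> Self {
            DummyConfig { size: reader.get_size() as usize }
        }
    }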
131 changes: 128 additions & 3 deletions src/layer.rs
@@ -10,7 +10,7 @@ use std::fmt;
use std::cmp;
use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::io;
use std::io::{self, BufReader};
use std::path::Path;
use std::rc::Rc;
use std::sync::{Arc, RwLock};
@@ -455,7 +455,7 @@ impl<B: IBackend> Layer<B> {
// reshape input tensor to the reshaped shape
let old_shape = self.input_blobs_data[input_i].read().unwrap().desc().clone();
if old_shape.size() != reshaped_shape.size() {
panic!("The provided input does not have the expected shape");
panic!("The provided input does not have the expected shape of {:?}", reshaped_shape);
}
self.input_blobs_data[input_i].write().unwrap().reshape(&reshaped_shape).unwrap();
}
@@ -597,6 +597,58 @@ impl<B: IBackend> Layer<B> {
Ok(())
}

/// Read a Cap'n Proto file at the specified path and deserialize the Layer inside it.
///
/// You can find the capnp schema [here](../../../../capnp/leaf.capnp).
pub fn load<LB: IBackend + LayerOps<f32> + 'static, P: AsRef<Path>>(backend: Rc<LB>, path: P) -> io::Result<Layer<LB>> {
let path = path.as_ref();
let ref mut file = try!(File::open(path));
let mut reader = BufReader::new(file);

let message_reader = ::capnp::serialize_packed::read_message(&mut reader,
::capnp::message::ReaderOptions::new()).unwrap();
let read_layer = message_reader.get_root::<capnp_layer::Reader>().unwrap();

let name = read_layer.get_name().unwrap().to_owned();
let layer_config = LayerConfig::read_capnp(read_layer.get_config().unwrap());
let mut layer = Layer::from_config(backend, &layer_config);
layer.name = name;

let read_weights = read_layer.get_weights_data().unwrap();

let names = layer.learnable_weights_names();
let weights_data = layer.learnable_weights_data();

let native_backend = Backend::<Native>::default().unwrap();
for (name, weight) in names.iter().zip(weights_data) {
for j in 0..read_weights.len() {
let capnp_weight = read_weights.get(j);
if capnp_weight.get_name().unwrap() != name {
continue
}

let mut weight_lock = weight.write().unwrap();
weight_lock.sync(native_backend.device()).unwrap();

let capnp_tensor = capnp_weight.get_tensor().unwrap();
let mut shape = Vec::new();
let capnp_shape = capnp_tensor.get_shape().unwrap();
for k in 0..capnp_shape.len() {
shape.push(capnp_shape.get(k) as usize)
}
weight_lock.reshape(&shape).unwrap();

let native_slice = weight_lock.get_mut(native_backend.device()).unwrap().as_mut_native().unwrap().as_mut_slice::<f32>();
let data = capnp_tensor.get_data().unwrap();
for k in 0..data.len() {
native_slice[k as usize] = data.get(k);
}
}
}

Ok(layer)
}

/// Sets whether the layer should compute gradients w.r.t. a
/// weight at a particular index given by `weight_id`.
///
@@ -672,6 +724,9 @@ impl<B: IBackend> Layer<B> {
}
}

#[allow(unsafe_code)]
unsafe impl<B: IBackend> Send for Layer<B> {}

impl<'a, B: IBackend> CapnpWrite<'a> for Layer<B> {
type Builder = capnp_layer::Builder<'a>;

@@ -1269,6 +1324,31 @@ impl<'a> CapnpWrite<'a> for LayerType {
}
}

impl<'a> CapnpRead<'a> for LayerType {
type Reader = capnp_layer_type::Reader<'a>;

fn read_capnp(reader: Self::Reader) -> Self {
match reader.which().unwrap() {
#[cfg(all(feature="cuda", not(feature="native")))]
capnp_layer_type::Which::Convolution(read_config) => { let config = ConvolutionConfig::read_capnp(read_config.unwrap()); LayerType::Convolution(config) },
#[cfg(not(all(feature="cuda", not(feature="native"))))]
capnp_layer_type::Which::Convolution(_) => { panic!("Cannot load Network because the Convolution layer is not supported with the enabled feature flags.") },
capnp_layer_type::Which::Linear(read_config) => { let config = LinearConfig::read_capnp(read_config.unwrap()); LayerType::Linear(config) },
capnp_layer_type::Which::LogSoftmax(_) => { LayerType::LogSoftmax },
#[cfg(all(feature="cuda", not(feature="native")))]
capnp_layer_type::Which::Pooling(read_config) => { let config = PoolingConfig::read_capnp(read_config.unwrap()); LayerType::Pooling(config) },
#[cfg(not(all(feature="cuda", not(feature="native"))))]
capnp_layer_type::Which::Pooling(_) => { panic!("Cannot load Network because the Pooling layer is not supported with the enabled feature flags.") },
capnp_layer_type::Which::Sequential(read_config) => { let config = SequentialConfig::read_capnp(read_config.unwrap()); LayerType::Sequential(config) },
capnp_layer_type::Which::Softmax(_) => { LayerType::Softmax },
capnp_layer_type::Which::Relu(_) => { LayerType::ReLU },
capnp_layer_type::Which::Sigmoid(_) => { LayerType::Sigmoid },
capnp_layer_type::Which::NegativeLogLikelihood(read_config) => { let config = NegativeLogLikelihoodConfig::read_capnp(read_config.unwrap()); LayerType::NegativeLogLikelihood(config) },
capnp_layer_type::Which::Reshape(read_config) => { let config = ReshapeConfig::read_capnp(read_config.unwrap()); LayerType::Reshape(config) },
}
}
}

impl LayerConfig {
/// Creates a new LayerConfig
pub fn new<L: Into<LayerType>>(name: &str, layer_type: L) -> LayerConfig {
@@ -1338,9 +1418,13 @@ impl LayerConfig {
Err("propagate_down config must be specified either 0 or inputs_len times")
}
}
}

impl<'a> CapnpWrite<'a> for LayerConfig {
type Builder = capnp_layer_config::Builder<'a>;

/// Write the LayerConfig into a capnp message.
- pub fn write_capnp(&self, builder: &mut capnp_layer_config::Builder) {
+ fn write_capnp(&self, builder: &mut Self::Builder) {
builder.set_name(&self.name);
{
let mut layer_type = builder.borrow().init_layer_type();
@@ -1373,3 +1457,44 @@
}
}
}

impl<'a> CapnpRead<'a> for LayerConfig {
type Reader = capnp_layer_config::Reader<'a>;

fn read_capnp(reader: Self::Reader) -> Self {
let name = reader.get_name().unwrap().to_owned();
let layer_type = LayerType::read_capnp(reader.get_layer_type());

let read_outputs = reader.get_outputs().unwrap();
let mut outputs = Vec::new();
for i in 0..read_outputs.len() {
outputs.push(read_outputs.get(i).unwrap().to_owned())
}
let read_inputs = reader.get_inputs().unwrap();
let mut inputs = Vec::new();
for i in 0..read_inputs.len() {
inputs.push(read_inputs.get(i).unwrap().to_owned())
}

let read_params = reader.get_params().unwrap();
let mut params = Vec::new();
for i in 0..read_params.len() {
params.push(WeightConfig::read_capnp(read_params.get(i)))
}

let read_propagate_down = reader.get_propagate_down().unwrap();
let mut propagate_down = Vec::new();
for i in 0..read_propagate_down.len() {
propagate_down.push(read_propagate_down.get(i))
}

LayerConfig {
name: name,
layer_type: layer_type,
outputs: outputs,
inputs: inputs,
params: params,
propagate_down: propagate_down,
}
}
}
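
For context, a hedged usage sketch of the new Layer::load entry point. The file name is made up, and it assumes the native backend implements the required LayerOps; neither detail is taken from this commit:

    use std::rc::Rc;

    // Sketch: restore a Layer that was previously serialized with the
    // CapnpWrite impl above. The turbofish pins the backend type of the
    // returned Layer, since it cannot be inferred from load's arguments.
    let backend = Rc::new(Backend::<Native>::default().unwrap());
    let layer = Layer::<Backend<Native>>::load(backend, "my_layer.capnp").unwrap();
    println!("restored layer: {}", layer.name);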
31 changes: 31 additions & 0 deletions src/layers/common/convolution.rs
@@ -264,6 +264,37 @@ impl<'a> CapnpWrite<'a> for ConvolutionConfig {
}
}

impl<'a> CapnpRead<'a> for ConvolutionConfig {
type Reader = capnp_config::Reader<'a>;

fn read_capnp(reader: Self::Reader) -> Self {
let num_output = reader.get_num_output() as usize;

let read_filter_shape = reader.get_filter_shape().unwrap();
let mut filter_shape = Vec::new();
for i in 0..read_filter_shape.len() {
filter_shape.push(read_filter_shape.get(i) as usize)
}
let read_stride = reader.get_stride().unwrap();
let mut stride = Vec::new();
for i in 0..read_stride.len() {
stride.push(read_stride.get(i) as usize)
}
let read_padding = reader.get_padding().unwrap();
let mut padding = Vec::new();
for i in 0..read_padding.len() {
padding.push(read_padding.get(i) as usize)
}

ConvolutionConfig {
num_output: num_output,
filter_shape: filter_shape,
stride: stride,
padding: padding,
}
}
}
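
A possible cleanup, not part of the commit: the index-loop-to-Vec pattern above also appears in the pooling and sequential impls below and could be factored into a small helper. read_usize_list is a hypothetical name, and it assumes the schema stores these lists as UInt64:

    use capnp::primitive_list;

    // Collect a capnp primitive list of u64 values into a Vec<usize>.
    fn read_usize_list<'a>(reader: primitive_list::Reader<'a, u64>) -> Vec<usize> {
        (0..reader.len()).map(|i| reader.get(i) as usize).collect()
    }

With that in place, filter_shape above would reduce to read_usize_list(reader.get_filter_shape().unwrap()).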

#[cfg(test)]
mod tests {
use co::*;
12 changes: 12 additions & 0 deletions src/layers/common/linear.rs
@@ -195,6 +195,18 @@ impl<'a> CapnpWrite<'a> for LinearConfig {
}
}

impl<'a> CapnpRead<'a> for LinearConfig {
type Reader = capnp_config::Reader<'a>;

fn read_capnp(reader: Self::Reader) -> Self {
let output_size = reader.get_output_size() as usize;

LinearConfig {
output_size: output_size
}
}
}

impl Into<LayerType> for LinearConfig {
fn into(self) -> LayerType {
LayerType::Linear(self)
39 changes: 39 additions & 0 deletions src/layers/common/pooling.rs
@@ -193,6 +193,37 @@ impl<'a> CapnpWrite<'a> for PoolingConfig {
}
}

impl<'a> CapnpRead<'a> for PoolingConfig {
type Reader = capnp_config::Reader<'a>;

fn read_capnp(reader: Self::Reader) -> Self {
let mode = PoolingMode::from_capnp(reader.get_mode().unwrap());

let read_filter_shape = reader.get_filter_shape().unwrap();
let mut filter_shape = Vec::new();
for i in 0..read_filter_shape.len() {
filter_shape.push(read_filter_shape.get(i) as usize)
}
let read_stride = reader.get_stride().unwrap();
let mut stride = Vec::new();
for i in 0..read_stride.len() {
stride.push(read_stride.get(i) as usize)
}
let read_padding = reader.get_padding().unwrap();
let mut padding = Vec::new();
for i in 0..read_padding.len() {
padding.push(read_padding.get(i) as usize)
}

PoolingConfig {
mode: mode,
filter_shape: filter_shape,
stride: stride,
padding: padding,
}
}
}

#[derive(Debug, Copy, Clone)]
/// The different modes of pooling that can be calculated.
pub enum PoolingMode {
@@ -209,4 +240,12 @@ impl PoolingMode {
PoolingMode::Max => CapnpPoolingMode::Max,
}
}

/// Return the enum value for a Cap'n Proto value.
fn from_capnp(value: CapnpPoolingMode) -> Self {
match value {
CapnpPoolingMode::Max => PoolingMode::Max,
CapnpPoolingMode::Average => unimplemented!(),
}
}
}
34 changes: 34 additions & 0 deletions src/layers/common/sequential.rs
@@ -422,6 +422,40 @@ impl<'a> CapnpWrite<'a> for SequentialConfig {
}
}

impl<'a> CapnpRead<'a> for SequentialConfig {
type Reader = capnp_config::Reader<'a>;

fn read_capnp(reader: Self::Reader) -> Self {
let read_layers = reader.get_layers().unwrap();
let mut layers = Vec::new();
for i in 0..read_layers.len() {
layers.push(LayerConfig::read_capnp(read_layers.get(i)))
}

let read_inputs = reader.get_inputs().unwrap();
let mut inputs = Vec::new();
for i in 0..read_inputs.len() {
let input = read_inputs.get(i);

let name = input.get_name().unwrap().to_owned();
let mut shape = Vec::new();
let read_shape = input.get_shape().unwrap();
for j in 0..read_shape.len() {
shape.push(read_shape.get(j) as usize)
}

inputs.push((name, shape))
}
let force_backward = reader.get_force_backward();

SequentialConfig {
layers: layers,
inputs: inputs,
force_backward: force_backward,
}
}
}

impl Into<LayerType> for SequentialConfig {
fn into(self) -> LayerType {
LayerType::Sequential(self)
12 changes: 12 additions & 0 deletions src/layers/loss/negative_log_likelihood.rs
@@ -134,6 +134,18 @@ impl<'a> CapnpWrite<'a> for NegativeLogLikelihoodConfig {
}
}

impl<'a> CapnpRead<'a> for NegativeLogLikelihoodConfig {
type Reader = capnp_config::Reader<'a>;

fn read_capnp(reader: Self::Reader) -> Self {
let num_classes = reader.get_num_classes() as usize;

NegativeLogLikelihoodConfig {
num_classes: num_classes
}
}
}

impl Into<LayerType> for NegativeLogLikelihoodConfig {
fn into(self) -> LayerType {
LayerType::NegativeLogLikelihood(self)