
Commit

Bug fix for Python 3 compatibility
- Reorganize Python backend persistence to avoid the chance of accidental garbage collection
apark263 committed Oct 13, 2016
1 parent 0653cf0 commit 959513d
Showing 10 changed files with 71 additions and 106 deletions.
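For context, the hazard this reorganization removes, illustrated by a minimal standalone snippet (not aeon code): CPython frees an object the moment its last strong reference disappears, so a raw PyObject* stashed on the native side is only safe while something holds a strong reference on its behalf.

# Minimal sketch (not aeon code) of the garbage-collection hazard.
# CPython collects an object as soon as its refcount reaches zero,
# even if native code still holds a raw pointer to it.
import weakref

class Backend(object):
    def consume(self, buf_index, hostlist, devlist):
        pass

ref = weakref.ref(Backend())  # no strong reference survives this statement
assert ref() is None          # the Backend instance is already collected

After this commit the C++ python_backend, which takes a Py_INCREF on the backend object in its constructor, lives as a member of the loader for the loader's whole lifetime instead of being created in start() and destroyed in stop(), so that strong reference can no longer lapse mid-run.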
5 changes: 5 additions & 0 deletions aeon/__init__.py
@@ -1,2 +1,7 @@
 from .dataloader import DataLoader, LoaderRuntimeError
 
+try:
+    from .protobackends import gen_backend
+except ImportError:
+    pass
+

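A note on this guard: the pycuda imports this commit moves into the GPU class bodies (see aeon/protobackends.py below) still execute at module import time, since Python runs class bodies at definition time. On a machine without pycuda, importing protobackends therefore raises ImportError, the except clause above swallows it, and aeon itself still imports with gen_backend simply absent. A hypothetical caller can feature-test:

# Hypothetical caller-side pattern enabled by the try/except guard.
import aeon

class FallbackBackend(object):
    # Hypothetical stand-in: DataLoader._start() only requires an object
    # with a callable 'consume' attribute.
    def consume(self, buf_index, hostlist, devlist):
        pass

if hasattr(aeon, 'gen_backend'):
    backend = aeon.gen_backend('cpu')
else:
    backend = FallbackBackend()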
2 changes: 1 addition & 1 deletion aeon/dataloader.py
@@ -115,7 +115,7 @@ def _start(self, config, backend):
             raise TypeError('backend must have a callable consume attr')
 
         loader = self.loaderlib.start(
-            ct.c_char_p(config),
+            ct.c_char_p(config.encode(encoding='utf-8')),
             ct.py_object(backend)
         )
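This one-line change is the actual Python 3 fix named in the commit title: under Python 3, ctypes.c_char_p accepts bytes (or an integer address) but rejects str, whereas under Python 2 the str config passed through unchanged. A standalone snippet (not aeon code) demonstrating the rule:

# Why the encode() is needed under Python 3: c_char_p takes bytes, not str.
import ctypes as ct

config = '{"type": "image,label"}'   # an arbitrary JSON string for illustration

try:
    ct.c_char_p(config)              # TypeError on Python 3
except TypeError as err:
    print('str rejected:', err)

ct.c_char_p(config.encode('utf-8'))  # bytes are accepted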
21 changes: 0 additions & 21 deletions aeon/example.py

This file was deleted.

21 changes: 19 additions & 2 deletions aeon/protobackends.py
@@ -1,10 +1,21 @@
-import pycuda.driver as drv
-from pycuda.gpuarray import GPUArray
 import numpy as np
 
+def gen_backend(backend_name='cpu'):
+    backend_name = backend_name.lower()
+    assert backend_name in ('cpu', 'gpu', 'mgpu'), backend_name + ' is invalid backend'
+    if backend_name == 'cpu':
+        return CpuBackend()
+    elif backend_name == 'gpu':
+        return GpuBackend()
+    elif backend_name == 'mgpu':
+        return MultiGpuBackend()
+    else:
+        raise TypeError('invalid backend type')
+
 class CpuBackend(object):
     def __init__(self):
         self.use_pinned_mem = False
+        self.rng_seed = 0
 
     def consume(self, buf_index, hostlist, devlist):
         assert 0 <= buf_index < 2, 'Can only double buffer'
@@ -18,11 +29,14 @@ def get_ary(self, cpu_array):
         return cpu_array
 
 class GpuBackend(object):
+    import pycuda.driver as drv
+    from pycuda.gpuarray import GPUArray
     '''
     Note that GpuBackend can actually just be instantiated as a special case of MultiGpuBackend
     '''
     def __init__(self, device_id=0):
         self.use_pinned_mem = True
+        self.rng_seed = 0
         self.device_id = device_id
         drv.init()
         self.ctx = drv.Device(device_id).make_context()
@@ -51,11 +65,14 @@ def __del__(self):
 
 
 class MultiGpuBackend(object):
+    import pycuda.driver as drv
+    from pycuda.gpuarray import GPUArray
     '''
     Defines the stubs that are necessary for a backend object
     '''
     def __init__(self, num_dev=1):
         self.use_pinned_mem = True
+        self.rng_seed = 0
         drv.init()
         assert(num_dev <= drv.Device.count())
 
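The new gen_backend factory is what the tests below import in place of neon's backend generator. A hypothetical usage sketch (config handling is elided; use_pinned_mem and rng_seed are the attributes the loader side is assumed to read):

# Hypothetical usage of the new factory. The name is case-insensitive, and
# an unknown name trips the assert before the else/TypeError is ever reached.
from aeon import gen_backend

backend = gen_backend('CPU')           # lower()ed -> CpuBackend()
assert backend.use_pinned_mem is False
assert backend.rng_seed == 0

# gpu_backend = gen_backend('gpu')     # GpuBackend(device_id=0); needs pycuda
# dl = DataLoader(config_json, backend)  # config_json elided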
55 changes: 0 additions & 55 deletions aeon/provider_configs.py

This file was deleted.

6 changes: 3 additions & 3 deletions loader/src/loader.cpp
@@ -271,8 +271,8 @@ void read_thread_pool::work(int id)
 
 
 loader::loader(const char* cfg_string, PyObject *py_obj_backend)
-: _py_obj_backend(py_obj_backend)
 {
+    _python_backend = make_shared<python_backend>(py_obj_backend);
     _lcfg_json = nlohmann::json::parse(cfg_string);
     loader_config lcfg(_lcfg_json);
 
@@ -359,7 +359,7 @@ int loader::start()
     }
 
     // Bind the python backend here
-    _python_backend = make_shared<python_backend>(_py_obj_backend, oshapes, _batchSize);
+    _python_backend->setup_buffers(oshapes, _batchSize);
     // These are fixed size output buffers (need batchSize for stride)
     _decode_buffers = make_shared<buffer_pool_out>(write_sizes,
                                                    (size_t)_batchSize,
@@ -400,7 +400,7 @@ void loader::stop()
     _read_thread_pool = nullptr;
     _decode_buffers = nullptr;
     _decode_thread_pool = nullptr;
-    _python_backend = nullptr;
+    _python_backend->clear_buffers();
 }
 
 int loader::reset()
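Read together, these hunks move the python_backend, and with it the strong reference on the Python object, from a start()/stop()-scoped lifetime to the loader's own lifetime; start() now only sizes the buffers and stop() only clears them. A hedged Python-side sketch of the pattern this protects, assuming reset() cycles stop()/start() internally:

# Sketch of a caller that repeatedly resets the loader. Before this commit
# each cycle destroyed and rebuilt the C++ python_backend (dropping and
# retaking the reference on 'backend'); now the reference is held for the
# DataLoader's whole life.
from aeon import DataLoader, gen_backend

def run_epochs(config, n_epochs=3):   # config: JSON string (assumed)
    backend = gen_backend('cpu')
    dl = DataLoader(config, backend)
    for _ in range(n_epochs):
        for batch in dl:
            pass
        dl.reset()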
1 change: 0 additions & 1 deletion loader/src/loader.hpp
@@ -207,6 +207,5 @@ class nervana::loader {
 
     int _batchSize;
     nlohmann::json _lcfg_json;
-    PyObject* _py_obj_backend;
     std::shared_ptr<python_backend> _python_backend;
 };
31 changes: 26 additions & 5 deletions loader/src/python_backend.cpp
@@ -20,22 +20,35 @@
 using namespace nervana;
 using namespace std;
 
-python_backend::python_backend(PyObject* py_obj_backend,
-                               const vector<nervana::shape_type>& oshape_types,
-                               int batchSize)
-: _oshape_types(oshape_types), _batchSize(batchSize), _py_obj_backend(py_obj_backend)
+python_backend::python_backend(PyObject* py_obj_backend)
+: _py_obj_backend(py_obj_backend)
 {
+    PyGILState_STATE gstate;
+    gstate = PyGILState_Ensure();
+
     if (_py_obj_backend == NULL) {
         throw std::runtime_error("Python Backend object does not exist");
     }
 
     Py_INCREF(_py_obj_backend);
     _f_consume = PyObject_GetAttrString(_py_obj_backend, "consume");
 
+    if (_f_consume == NULL) {
+        throw std::runtime_error("Backend has no 'consume' attribute");
+    }
+
     if (!PyCallable_Check(_f_consume)) {
         throw std::runtime_error("Backend 'consume' function does not exist or is not callable");
     }
 
+    PyGILState_Release(gstate);
+}
+
+void python_backend::setup_buffers(const vector<nervana::shape_type>& oshape_types, int batchSize)
+{
+    _oshape_types = oshape_types;
+    _batchSize = batchSize;
+
     PyGILState_STATE gstate;
     gstate = PyGILState_Ensure();
     PyOS_sighandler_t sighandler = PyOS_getsig(SIGINT);
@@ -89,7 +102,7 @@ PyObject* python_backend::initPyList(int length)
     return pylist;
 }
 
-python_backend::~python_backend()
+void python_backend::clear_buffers()
 {
     PyGILState_STATE gstate;
     gstate = PyGILState_Ensure();
@@ -99,7 +112,15 @@ python_backend::~python_backend()
     for (auto d: _dev_lists) {
         Py_XDECREF(d);
     }
+    _host_lists.clear();
+    _dev_lists.clear();
     PyGILState_Release(gstate);
 }
 
+python_backend::~python_backend()
+{
+    PyGILState_STATE gstate;
+    gstate = PyGILState_Ensure();
     Py_XDECREF(_f_consume);
     Py_XDECREF(_py_obj_backend);
     PyGILState_Release(gstate);
6 changes: 4 additions & 2 deletions loader/src/python_backend.hpp
@@ -29,14 +29,16 @@ namespace nervana {
 
 class nervana::python_backend {
 public:
-    python_backend(PyObject*, const std::vector<nervana::shape_type>&, int);
+    python_backend(PyObject*);
     ~python_backend();
+    void setup_buffers(const std::vector<nervana::shape_type>& oshape_types, int batchSize);
+    void clear_buffers();
 
     bool use_pinned_memory();
     void call_backend_transfer(nervana::buffer_out_array &outBuf, int bufIdx);
    PyObject* get_host_tuple(int bufIdx);
     PyObject* get_shapes();
-    const std::vector<nervana::shape_type>& _oshape_types;
+    std::vector<nervana::shape_type> _oshape_types;
     int _batchSize;
 private:
     python_backend() = delete;
29 changes: 13 additions & 16 deletions test/test_dataloader.py
@@ -6,11 +6,9 @@
 import struct
 import pytest
 
-from aeon import DataLoader, LoaderRuntimeError
+from aeon import DataLoader, LoaderRuntimeError, gen_backend
 from mock_data import random_manifest, generic_config, invalid_image
 
-from neon.backends import gen_backend
-from neon.models import Model
+# from neon.backends import gen_backend
 
 
 def test_loader_invalid_config_type():
@@ -20,7 +18,7 @@ def test_loader_invalid_config_type():
     config['type'] = 'invalid type name'
 
     with pytest.raises(Exception) as ex:
-        dl = DataLoader(config, gen_backend(backend='cpu'))
+        dl = DataLoader(config, gen_backend('cpu'))
 
     #assert 'is not supported' in str(ex)
 
@@ -32,7 +30,7 @@ def test_loader_missing_config_field():
     del config['image']
 
     with pytest.raises(Exception) as ex:
-        dl = DataLoader(config, gen_backend(backend='cpu'))
+        dl = DataLoader(config, gen_backend('cpu'))
 
     assert 'image' in str(ex)
 
@@ -41,23 +39,23 @@ def test_loader_non_existant_manifest():
     config = generic_config('/this_manifest_file_does_not_exist')
 
     with pytest.raises(Exception):
-        dl = DataLoader(config, gen_backend(backend='cpu'))
+        dl = DataLoader(config, gen_backend('cpu'))
 
 
 def test_loader_invalid_manifest():
     filename = tempfile.mkstemp()[1]
     config = generic_config(invalid_image(filename))
 
     with pytest.raises(Exception):
-        dl = DataLoader(config, gen_backend(backend='cpu'))
+        dl = DataLoader(config, gen_backend('cpu'))
 
 
 def test_loader():
     # NOTE: manifest needs to stay in scope until DataLoader has read it.
     manifest = random_manifest(10)
     config = generic_config(manifest.name)
 
-    dl = DataLoader(config, gen_backend(backend='cpu'))
+    dl = DataLoader(config, gen_backend('cpu'))
 
     assert len(list(iter(dl))) == 5
 
@@ -67,7 +65,7 @@ def test_loader_repeat_iter():
     manifest = random_manifest(10)
     config = generic_config(manifest.name)
 
-    dl = DataLoader(config, gen_backend(backend='cpu'))
+    dl = DataLoader(config, gen_backend('cpu'))
 
     assert len(list(iter(dl))) == 5
     assert len(list(iter(dl))) == 5
@@ -78,7 +76,7 @@ def test_loader_exception_next():
     manifest = random_manifest(10, 2)
     config = generic_config(manifest.name)
 
-    dl = DataLoader(config, gen_backend(backend='cpu'))
+    dl = DataLoader(config, gen_backend('cpu'))
     dl.next()
     with pytest.raises(LoaderRuntimeError):
         dl.next()
@@ -89,7 +87,7 @@ def test_loader_exception_iter():
     manifest = random_manifest(10, 2)
     config = generic_config(manifest.name)
 
-    dl = DataLoader(config, gen_backend(backend='cpu'))
+    dl = DataLoader(config, gen_backend('cpu'))
 
     assert len(list(iter(dl))) == 4
 
@@ -98,13 +96,12 @@ def test_loader_reset():
     # NOTE: manifest needs to stay in scope until DataLoader has read it.
     manifest = random_manifest(10)
     config = generic_config(manifest.name)
-
-    dl = DataLoader(config, gen_backend(backend='cpu'))
+    dl = DataLoader(config, gen_backend('cpu'))
 
     assert len(list(iter(dl))) == 5
     dl.reset()
     assert len(list(iter(dl))) == 5
 
 
 if __name__ == '__main__':
-    pytest.main()
+    test_loader_reset()
+    # pytest.main()
