modifier_context.py
# @package modifier_context
# Module caffe2.python.modifier_context


DEFAULT_MODIFIER = 'DEFAULT'


class ModifierContext(object):
    """
    provide context to allow param_info to have different modifiers
    """

    def __init__(self):
        self._modifiers = {}
        self._modifiers_list = []

    def _rebuild_modifiers(self):
        self._modifiers = {}
        for m in self._modifiers_list:
            self._modifiers.update(m)

    def _has_modifier(self, name):
        return name in self._modifiers

    def _get_modifier(self, name):
        return self._modifiers.get(name)

    def push_modifiers(self, modifiers):
        # modifier override is allowed
        self._modifiers_list.append(modifiers)
        self._modifiers.update(modifiers)

    def pop_modifiers(self):
        assert len(self._modifiers_list) > 0
        self._modifiers_list.pop()
        self._rebuild_modifiers()


class UseModifierBase(object):
    '''
    context class to allow setting the current context.
    Example usage with layer:
        modifiers = {'modifier1': modifier1, 'modifier2': modifier2}
        with Modifiers(modifiers):
            modifier = ModifierContext.current().get_modifier('modifier1')
            layer(modifier=modifier)
    '''

    def __init__(self, modifier_or_dict):
        if isinstance(modifier_or_dict, dict):
            self._modifiers = modifier_or_dict
        else:
            self._modifiers = {DEFAULT_MODIFIER: modifier_or_dict}

    def _context_class(self):
        raise NotImplementedError

    def __enter__(self):
        self._context_class().current().push_modifiers(self._modifiers)
        return self

    def __exit__(self, type, value, traceback):
        self._context_class().current().pop_modifiers()
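

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file). UseModifierBase expects
# _context_class() to return a class whose current() yields the active
# ModifierContext; in caffe2 that classmethod is provided by concrete subclasses
# elsewhere in the codebase. The names MyModifierContext, UseMyModifier and
# scale_by_half below are hypothetical, and a module-level singleton stands in
# for the real context machinery, purely to show how push/pop is driven by the
# with-statement.

class MyModifierContext(ModifierContext):
    _instance = None

    @classmethod
    def current(cls):
        # Return the process-wide context, creating it on first use.
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance


class UseMyModifier(UseModifierBase):
    def _context_class(self):
        # Tell UseModifierBase which context's current() to push onto / pop from.
        return MyModifierContext


def scale_by_half(x):
    return x * 0.5


# Entering the with-block pushes {DEFAULT_MODIFIER: scale_by_half}; leaving it
# pops that dict and rebuilds the merged modifier map.
with UseMyModifier(scale_by_half):
    ctx = MyModifierContext.current()
    assert ctx._has_modifier(DEFAULT_MODIFIER)
    modifier = ctx._get_modifier(DEFAULT_MODIFIER)
    print(modifier(10.0))  # prints 5.0

# After exit, the default modifier is no longer registered.
assert not MyModifierContext.current()._has_modifier(DEFAULT_MODIFIER)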