From 86dc144fc72e14d173d7c56249bf4a0751fdd2e6 Mon Sep 17 00:00:00 2001 From: oserikov Date: Wed, 2 Sep 2020 02:22:26 +0300 Subject: [PATCH 001/151] wip implement masking for formfilling --- .../go_bot/nlg/mock_json_nlg_manager.py | 9 +++++- deeppavlov/models/go_bot/nlg/nlg_manager.py | 9 +++++- .../go_bot/nlg/nlg_manager_interface.py | 8 +++++ .../go_bot/tracker/dialogue_state_tracker.py | 30 +++++++++++++++---- .../go_bot/tracker/featurized_tracker.py | 23 ++++++++++++-- 5 files changed, 70 insertions(+), 9 deletions(-) diff --git a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py index 188c19889f..8f98221913 100644 --- a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py +++ b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py @@ -1,7 +1,7 @@ import json from itertools import combinations from pathlib import Path -from typing import Union, Dict +from typing import Union, Dict, List from deeppavlov.core.commands.utils import expand_path from deeppavlov.core.common.registry import register @@ -127,3 +127,10 @@ def num_of_known_actions(self) -> int: the number of actions known to the NLG module """ return len(self.action_tuples2ids.keys()) + + def known_actions(self) -> List: + """ + Returns: + the list of actions known to the NLG module + """ + return list(self.action_tuples2ids.keys()) \ No newline at end of file diff --git a/deeppavlov/models/go_bot/nlg/nlg_manager.py b/deeppavlov/models/go_bot/nlg/nlg_manager.py index 61a62ce693..74f393aa17 100644 --- a/deeppavlov/models/go_bot/nlg/nlg_manager.py +++ b/deeppavlov/models/go_bot/nlg/nlg_manager.py @@ -1,7 +1,7 @@ import re from logging import getLogger from pathlib import Path -from typing import Union +from typing import Union, List from deeppavlov.core.commands.utils import expand_path import deeppavlov.models.go_bot.nlg.templates.templates as go_bot_templates @@ -106,3 +106,10 @@ def num_of_known_actions(self) -> int: the number of actions known to the NLG module """ return len(self.templates) + + def known_actions(self) -> List: + """ + Returns: + the list of actions known to the NLG module + """ + return self.templates.actions diff --git a/deeppavlov/models/go_bot/nlg/nlg_manager_interface.py b/deeppavlov/models/go_bot/nlg/nlg_manager_interface.py index 2298e15403..0e060a31c1 100644 --- a/deeppavlov/models/go_bot/nlg/nlg_manager_interface.py +++ b/deeppavlov/models/go_bot/nlg/nlg_manager_interface.py @@ -1,4 +1,5 @@ from abc import ABCMeta, abstractmethod +from typing import List from deeppavlov.models.go_bot.dto.dataset_features import BatchDialoguesFeatures from deeppavlov.models.go_bot.nlg.dto.nlg_response_interface import NLGResponseInterface @@ -42,3 +43,10 @@ def num_of_known_actions(self) -> int: the number of actions known to the NLG module """ pass + + @abstractmethod + def known_actions(self) -> List: + """ + Returns: + the list of actions known to the NLG module + """ diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 214af3bdfc..293bc14c23 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -43,7 +43,7 @@ def __init__(self, self.database = database self.n_actions = n_actions self.api_call_id = api_call_id - + self.formfilling_action_ids2slots_ids: Dict[int, List[int]] = dict() self.reset_state() @staticmethod @@ -51,10 +51,24 @@ def from_gobot_params(parent_tracker: 
FeaturizedTracker, nlg_manager: NLGManagerInterface, policy_network_params: PolicyNetworkParams, database: Component): - return DialogueStateTracker(parent_tracker.slot_names, - nlg_manager.num_of_known_actions(), nlg_manager.get_api_call_action_id(), - policy_network_params.hidden_size, - database) + dialogue_state_tracker = DialogueStateTracker(parent_tracker.slot_names, nlg_manager.num_of_known_actions(), + nlg_manager.get_api_call_action_id(), + policy_network_params.hidden_size, + database) + + # region set formfilling info + act2act_id = {a_text: nlg_manager.get_action_id(a_text) for a_text in nlg_manager.known_actions()} + action_id2slots_ids = dict() + for act in nlg_manager.known_actions(): + act_id = act2act_id[act] + action_id2slots_ids[act_id] = np.zeros(len(dialogue_state_tracker.slot_names), dtype=np.float32) + for slot_name_i, slot_name in enumerate(parent_tracker.action_names2required_slots[act]): + slot_ix_in_tracker = dialogue_state_tracker.slot_names.index(slot_name) + action_id2slots_ids[act_id][slot_ix_in_tracker] = 1. + + dialogue_state_tracker.formfilling_action_ids2slots_ids = action_id2slots_ids + # endregion set formfilling info + return dialogue_state_tracker def reset_state(self): super().reset_state() @@ -111,6 +125,12 @@ def calc_action_mask(self) -> np.ndarray: if prev_act_id == self.api_call_id: mask[prev_act_id] = 0. + for act_id in range(self.n_actions): + required_slots_mask = self.formfilling_action_ids2slots_ids[act_id] + act_slots_fulfilled = required_slots_mask * self._binary_features() == required_slots_mask + if not act_slots_fulfilled: + mask[act_id] = 0. + return mask def calc_context_features(self): diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index 0bac194878..243b833a23 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -1,7 +1,10 @@ -from typing import List, Iterator +import json +from pathlib import Path +from typing import List, Iterator, Union, Optional, Dict import numpy as np +from deeppavlov.core.commands.utils import expand_path from deeppavlov.core.common.registry import register from deeppavlov.models.go_bot.nlu.dto.nlu_response import NLUResponse from deeppavlov.models.go_bot.tracker.dto.tracker_knowledge_interface import TrackerKnowledgeInterface @@ -18,14 +21,17 @@ class FeaturizedTracker(TrackerInterface): Parameters: slot_names: list of slots that should be tracked. + actions_required_slots_path: (optional) path to json-file with mapping + of actions to slots that should be filled to allow for action to be executed """ def get_current_knowledge(self) -> TrackerKnowledgeInterface: raise NotImplementedError("Featurized tracker lacks get_current_knowledge() method. " "To be improved in future versions.") - def __init__(self, slot_names: List[str]) -> None: + def __init__(self, slot_names: List[str], actions_required_slots_path: Optional[Union[str, Path]]=None) -> None: self.slot_names = list(slot_names) + self.action_names2required_slots = self._load_actions2slots_formfilling_info(actions_required_slots_path) self.history = [] self.current_features = None @@ -105,3 +111,16 @@ def _new_features(self, state) -> np.ndarray: feats[i] = 1. 
return feats + + def _load_actions2slots_formfilling_info(self, actions_required_slots_path: Optional[Union[str, Path]]=None)\ + -> Dict[str, List[str]]: + """ + loads the formfilling mapping of actions onto the required slots from the json of the following structure: + {action1: [required_slot_name_1], action2: [required_slot_name_21, required_slot_name_22], ..} + Returns: + the dictionary represented by the passed json + """ + actions2required_slots_json_path = expand_path(actions_required_slots_path) + with open(actions2required_slots_json_path, encoding="utf-8") as actions2slots_json_f: + actions2required_slots = json.load(actions2slots_json_f) + return actions2required_slots \ No newline at end of file From cfed8d4a860a2467d8eef9a727ee895b2e64768b Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 1 Oct 2020 13:56:09 +0300 Subject: [PATCH 002/151] wip implement masking for choosing only informative actions --- .../go_bot/tracker/dialogue_state_tracker.py | 29 +++++++++++++------ .../go_bot/tracker/featurized_tracker.py | 27 ++++++++++------- 2 files changed, 36 insertions(+), 20 deletions(-) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 293bc14c23..abf0ce4037 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -13,7 +13,7 @@ # limitations under the License. from logging import getLogger -from typing import Dict, Any +from typing import Dict, Any, List import numpy as np @@ -43,7 +43,7 @@ def __init__(self, self.database = database self.n_actions = n_actions self.api_call_id = api_call_id - self.formfilling_action_ids2slots_ids: Dict[int, List[int]] = dict() + self.ffill_act_ids2req_slots_ids: Dict[int, List[int]] = dict() self.reset_state() @staticmethod @@ -58,15 +58,23 @@ def from_gobot_params(parent_tracker: FeaturizedTracker, # region set formfilling info act2act_id = {a_text: nlg_manager.get_action_id(a_text) for a_text in nlg_manager.known_actions()} - action_id2slots_ids = dict() + action_id2aqd_slots_ids = dict() # aqd stands for acquired + action_id2req_slots_ids = dict() for act in nlg_manager.known_actions(): act_id = act2act_id[act] - action_id2slots_ids[act_id] = np.zeros(len(dialogue_state_tracker.slot_names), dtype=np.float32) + + action_id2req_slots_ids[act_id] = np.zeros(len(dialogue_state_tracker.slot_names), dtype=np.float32) + action_id2aqd_slots_ids[act_id] = np.zeros(len(dialogue_state_tracker.slot_names), dtype=np.float32) + for slot_name_i, slot_name in enumerate(parent_tracker.action_names2required_slots[act]): slot_ix_in_tracker = dialogue_state_tracker.slot_names.index(slot_name) - action_id2slots_ids[act_id][slot_ix_in_tracker] = 1. + action_id2req_slots_ids[act_id][slot_ix_in_tracker] = 1. + for slot_name_i, slot_name in enumerate(parent_tracker.action_names2acquired_slots[act]): + slot_ix_in_tracker = dialogue_state_tracker.slot_names.index(slot_name) + action_id2aqd_slots_ids[act_id][slot_ix_in_tracker] = 1. - dialogue_state_tracker.formfilling_action_ids2slots_ids = action_id2slots_ids + dialogue_state_tracker.ffill_act_ids2req_slots_ids = action_id2req_slots_ids + dialogue_state_tracker.ffill_act_ids2aqd_slots_ids = action_id2aqd_slots_ids # endregion set formfilling info return dialogue_state_tracker @@ -126,9 +134,12 @@ def calc_action_mask(self) -> np.ndarray: mask[prev_act_id] = 0. 
for act_id in range(self.n_actions): - required_slots_mask = self.formfilling_action_ids2slots_ids[act_id] - act_slots_fulfilled = required_slots_mask * self._binary_features() == required_slots_mask - if not act_slots_fulfilled: + required_slots_mask = self.ffill_act_ids2req_slots_ids[act_id] + acquired_slots_mask = self.ffill_act_ids2aqd_slots_ids[act_id] + act_req_slots_fulfilled = (required_slots_mask * self._binary_features()) == required_slots_mask + act_requirements_not_fulfilled = not act_req_slots_fulfilled + act_nothing_new_to_knew = (acquired_slots_mask * self._binary_features()) == acquired_slots_mask + if act_requirements_not_fulfilled or act_nothing_new_to_knew: mask[act_id] = 0. return mask diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index 243b833a23..efe18b44d0 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -1,6 +1,6 @@ import json from pathlib import Path -from typing import List, Iterator, Union, Optional, Dict +from typing import List, Iterator, Union, Optional, Dict, Tuple import numpy as np @@ -21,7 +21,7 @@ class FeaturizedTracker(TrackerInterface): Parameters: slot_names: list of slots that should be tracked. - actions_required_slots_path: (optional) path to json-file with mapping + actions_required_acquired_slots_path: (optional) path to json-file with mapping of actions to slots that should be filled to allow for action to be executed """ @@ -29,9 +29,9 @@ def get_current_knowledge(self) -> TrackerKnowledgeInterface: raise NotImplementedError("Featurized tracker lacks get_current_knowledge() method. " "To be improved in future versions.") - def __init__(self, slot_names: List[str], actions_required_slots_path: Optional[Union[str, Path]]=None) -> None: + def __init__(self, slot_names: List[str], actions_required_acquired_slots_path: Optional[Union[str, Path]]=None) -> None: self.slot_names = list(slot_names) - self.action_names2required_slots = self._load_actions2slots_formfilling_info(actions_required_slots_path) + self.action_names2required_slots, self.action_names2acquired_slots = self._load_actions2slots_formfilling_info(actions_required_acquired_slots_path) self.history = [] self.current_features = None @@ -112,15 +112,20 @@ def _new_features(self, state) -> np.ndarray: return feats - def _load_actions2slots_formfilling_info(self, actions_required_slots_path: Optional[Union[str, Path]]=None)\ - -> Dict[str, List[str]]: + def _load_actions2slots_formfilling_info(self, + actions_required_acquired_slots_path: Optional[Union[str, Path]] = None)\ + -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]: """ loads the formfilling mapping of actions onto the required slots from the json of the following structure: - {action1: [required_slot_name_1], action2: [required_slot_name_21, required_slot_name_22], ..} + {action1: {"required": [required_slot_name_1], "acquired": [acquired_slot_name_1, acquired_slot_name_2]}, + action2: {"required": [required_slot_name_21, required_slot_name_22], "acquired": [acquired_slot_name_21]}, + ..} Returns: the dictionary represented by the passed json """ - actions2required_slots_json_path = expand_path(actions_required_slots_path) - with open(actions2required_slots_json_path, encoding="utf-8") as actions2slots_json_f: - actions2required_slots = json.load(actions2slots_json_f) - return actions2required_slots \ No newline at end of file + actions_required_acquired_slots_path = 
expand_path(actions_required_acquired_slots_path) + with open(actions_required_acquired_slots_path, encoding="utf-8") as actions2slots_json_f: + actions2slots = json.load(actions2slots_json_f) + actions2required_slots = {act: act_slots["required"] for act, act_slots in actions2slots.items()} + actions2acquired_slots = {act: act_slots["acquired"] for act, act_slots in actions2slots.items()} + return actions2required_slots, actions2acquired_slots \ No newline at end of file From 80cb0a61e18d1dda04cdd4e2d24438ad396f1d2c Mon Sep 17 00:00:00 2001 From: oserikov Date: Tue, 6 Oct 2020 02:00:26 +0300 Subject: [PATCH 003/151] wip implement masking for choosing only informative actions --- .../go_bot/nlg/mock_json_nlg_manager.py | 6 ++- .../models/go_bot/policy/policy_network.py | 2 +- .../go_bot/tracker/dialogue_state_tracker.py | 42 +++++++++++++------ .../go_bot/tracker/dto/dst_knowledge.py | 3 +- .../go_bot/tracker/featurized_tracker.py | 8 +++- 5 files changed, 42 insertions(+), 19 deletions(-) diff --git a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py index e942878f1b..16526725d9 100644 --- a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py +++ b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py @@ -105,8 +105,10 @@ def get_action_id(self, action_text: str) -> int: Returns: an ID corresponding to the passed action text """ - - actions_tuple = tuple(action_text.split('+')) + if isinstance(action_text, str): + actions_tuple = tuple(action_text.split('+')) + elif isinstance(action_text, tuple): + actions_tuple = action_text return self.action_tuples2ids[actions_tuple] # todo unhandled exception when not found def decode_response(self, diff --git a/deeppavlov/models/go_bot/policy/policy_network.py b/deeppavlov/models/go_bot/policy/policy_network.py index a7b285ef82..0cfcf56a74 100644 --- a/deeppavlov/models/go_bot/policy/policy_network.py +++ b/deeppavlov/models/go_bot/policy/policy_network.py @@ -217,7 +217,7 @@ def digitize_features(self, tracker_knowledge: DSTKnowledge) -> DigitizedPolicyFeatures: attn_key = self.calc_attn_key(nlu_response, tracker_knowledge) concat_feats = self.stack_features(nlu_response, tracker_knowledge) - action_mask = self.calc_action_mask(tracker_knowledge) + action_mask = tracker_knowledge.action_mask # self.calc_action_mask(tracker_knowledge) return DigitizedPolicyFeatures(attn_key, concat_feats, action_mask) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index abf0ce4037..0ba240a925 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -13,7 +13,8 @@ # limitations under the License. 
from logging import getLogger -from typing import Dict, Any, List +from pathlib import Path +from typing import List, Iterator, Union, Optional, Dict, Tuple, Any import numpy as np @@ -33,12 +34,19 @@ def get_current_knowledge(self) -> DSTKnowledge: knowledge = DSTKnowledge(self.prev_action, state_features, context_features, self.api_call_id, - self.n_actions) + self.n_actions, + self.calc_action_mask()) return knowledge def __init__(self, - slot_names, n_actions: int, api_call_id: int, hidden_size: int, database: Component = None) -> None: - super().__init__(slot_names) + slot_names, + n_actions: int, + api_call_id: int, + hidden_size: int, + database: Component = None, + actions_required_acquired_slots_path: Optional[Union[str, Path]]=None, + **kwargs) -> None: + super().__init__(slot_names, actions_required_acquired_slots_path, **kwargs) self.hidden_size = hidden_size self.database = database self.n_actions = n_actions @@ -54,7 +62,8 @@ def from_gobot_params(parent_tracker: FeaturizedTracker, dialogue_state_tracker = DialogueStateTracker(parent_tracker.slot_names, nlg_manager.num_of_known_actions(), nlg_manager.get_api_call_action_id(), policy_network_params.hidden_size, - database) + database, + parent_tracker.actions_required_acquired_slots_path) # region set formfilling info act2act_id = {a_text: nlg_manager.get_action_id(a_text) for a_text in nlg_manager.known_actions()} @@ -66,12 +75,18 @@ def from_gobot_params(parent_tracker: FeaturizedTracker, action_id2req_slots_ids[act_id] = np.zeros(len(dialogue_state_tracker.slot_names), dtype=np.float32) action_id2aqd_slots_ids[act_id] = np.zeros(len(dialogue_state_tracker.slot_names), dtype=np.float32) - for slot_name_i, slot_name in enumerate(parent_tracker.action_names2required_slots[act]): - slot_ix_in_tracker = dialogue_state_tracker.slot_names.index(slot_name) - action_id2req_slots_ids[act_id][slot_ix_in_tracker] = 1. - for slot_name_i, slot_name in enumerate(parent_tracker.action_names2acquired_slots[act]): - slot_ix_in_tracker = dialogue_state_tracker.slot_names.index(slot_name) - action_id2aqd_slots_ids[act_id][slot_ix_in_tracker] = 1. + if isinstance(act, tuple): + acts = act + else: + acts = [act] + + for act in acts: + for slot_name_i, slot_name in enumerate(parent_tracker.action_names2required_slots.get(act, [])): + slot_ix_in_tracker = dialogue_state_tracker.slot_names.index(slot_name) + action_id2req_slots_ids[act_id][slot_ix_in_tracker] = 1. + for slot_name_i, slot_name in enumerate(parent_tracker.action_names2acquired_slots.get(act, [])): + slot_ix_in_tracker = dialogue_state_tracker.slot_names.index(slot_name) + action_id2aqd_slots_ids[act_id][slot_ix_in_tracker] = 1. 
dialogue_state_tracker.ffill_act_ids2req_slots_ids = action_id2req_slots_ids dialogue_state_tracker.ffill_act_ids2aqd_slots_ids = action_id2aqd_slots_ids @@ -137,9 +152,10 @@ def calc_action_mask(self) -> np.ndarray: required_slots_mask = self.ffill_act_ids2req_slots_ids[act_id] acquired_slots_mask = self.ffill_act_ids2aqd_slots_ids[act_id] act_req_slots_fulfilled = (required_slots_mask * self._binary_features()) == required_slots_mask - act_requirements_not_fulfilled = not act_req_slots_fulfilled + act_requirements_not_fulfilled = not act_req_slots_fulfilled if act_req_slots_fulfilled != [] else np.array([]) act_nothing_new_to_knew = (acquired_slots_mask * self._binary_features()) == acquired_slots_mask - if act_requirements_not_fulfilled or act_nothing_new_to_knew: + + if any(np.logical_or(act_requirements_not_fulfilled, act_nothing_new_to_knew)): mask[act_id] = 0. return mask diff --git a/deeppavlov/models/go_bot/tracker/dto/dst_knowledge.py b/deeppavlov/models/go_bot/tracker/dto/dst_knowledge.py index da8f2e71bb..6fe0837a77 100644 --- a/deeppavlov/models/go_bot/tracker/dto/dst_knowledge.py +++ b/deeppavlov/models/go_bot/tracker/dto/dst_knowledge.py @@ -3,9 +3,10 @@ # todo naming class DSTKnowledge(TrackerKnowledgeInterface): - def __init__(self, tracker_prev_action, state_features, context_features, api_call_id, n_actions): + def __init__(self, tracker_prev_action, state_features, context_features, api_call_id, n_actions, action_mask): self.tracker_prev_action = tracker_prev_action self.state_features = state_features self.context_features = context_features self.api_call_id = api_call_id self.n_actions = n_actions + self.action_mask = action_mask diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index efe18b44d0..84e8ee00a7 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -29,9 +29,13 @@ def get_current_knowledge(self) -> TrackerKnowledgeInterface: raise NotImplementedError("Featurized tracker lacks get_current_knowledge() method. 
" "To be improved in future versions.") - def __init__(self, slot_names: List[str], actions_required_acquired_slots_path: Optional[Union[str, Path]]=None) -> None: + def __init__(self, + slot_names: List[str], + actions_required_acquired_slots_path: Optional[Union[str, Path]]=None, + **kwargs) -> None: self.slot_names = list(slot_names) - self.action_names2required_slots, self.action_names2acquired_slots = self._load_actions2slots_formfilling_info(actions_required_acquired_slots_path) + self.actions_required_acquired_slots_path = actions_required_acquired_slots_path + self.action_names2required_slots, self.action_names2acquired_slots = self._load_actions2slots_formfilling_info(self.actions_required_acquired_slots_path) self.history = [] self.current_features = None From 637eea8c11190acf80c0d67d8738c7a76a2d53ad Mon Sep 17 00:00:00 2001 From: oserikov Date: Tue, 6 Oct 2020 08:18:39 +0300 Subject: [PATCH 004/151] wip implement masking for choosing only informative actions --- .../models/go_bot/tracker/dialogue_state_tracker.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 0ba240a925..db3574cd10 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -151,9 +151,9 @@ def calc_action_mask(self) -> np.ndarray: for act_id in range(self.n_actions): required_slots_mask = self.ffill_act_ids2req_slots_ids[act_id] acquired_slots_mask = self.ffill_act_ids2aqd_slots_ids[act_id] - act_req_slots_fulfilled = (required_slots_mask * self._binary_features()) == required_slots_mask - act_requirements_not_fulfilled = not act_req_slots_fulfilled if act_req_slots_fulfilled != [] else np.array([]) - act_nothing_new_to_knew = (acquired_slots_mask * self._binary_features()) == acquired_slots_mask + act_req_slots_fulfilled = np.equal((required_slots_mask * self._binary_features()), required_slots_mask) + act_requirements_not_fulfilled = np.invert(act_req_slots_fulfilled)# if act_req_slots_fulfilled != [] else np.array([]) + act_nothing_new_to_knew = np.equal((acquired_slots_mask * self._binary_features()), acquired_slots_mask) if any(np.logical_or(act_requirements_not_fulfilled, act_nothing_new_to_knew)): mask[act_id] = 0. 
@@ -232,7 +232,8 @@ def init_new_tracker(self, user_id: int, tracker_entity: DialogueStateTracker) - tracker_entity.n_actions, tracker_entity.api_call_id, tracker_entity.hidden_size, - tracker_entity.database + tracker_entity.database, + tracker_entity.actions_required_acquired_slots_path ) self._ids_to_trackers[user_id] = tracker @@ -245,3 +246,4 @@ def reset(self, user_id: int = None) -> None: self._ids_to_trackers[user_id].reset_state() else: self._ids_to_trackers.clear() +nm \ No newline at end of file From 7ee94f590c0d6726937db8c95a7b104315464f80 Mon Sep 17 00:00:00 2001 From: oserikov Date: Tue, 6 Oct 2020 15:25:08 +0300 Subject: [PATCH 005/151] wip implement masking for choosing only informative actions --- deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py | 1 - 1 file changed, 1 deletion(-) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index db3574cd10..26798c3037 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -246,4 +246,3 @@ def reset(self, user_id: int = None) -> None: self._ids_to_trackers[user_id].reset_state() else: self._ids_to_trackers.clear() -nm \ No newline at end of file From 9a77ca8e940ee0c16ce00f506bed748e8dc223e8 Mon Sep 17 00:00:00 2001 From: oserikov Date: Tue, 13 Oct 2020 22:43:40 -0700 Subject: [PATCH 006/151] wip formfilling, added some todos --- .../go_bot/tracker/dialogue_state_tracker.py | 41 ++++++++++++------- deeppavlov/models/slotfill/slotfill.py | 2 + 2 files changed, 29 insertions(+), 14 deletions(-) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 26798c3037..dc086f5358 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -59,21 +59,35 @@ def from_gobot_params(parent_tracker: FeaturizedTracker, nlg_manager: NLGManagerInterface, policy_network_params: PolicyNetworkParams, database: Component): - dialogue_state_tracker = DialogueStateTracker(parent_tracker.slot_names, nlg_manager.num_of_known_actions(), + slot_names = parent_tracker.slot_names + + # region set formfilling info + act2act_id = {a_text: nlg_manager.get_action_id(a_text) for a_text in nlg_manager.known_actions()} + action_id2aqd_slots_ids, action_id2req_slots_ids = DialogueStateTracker.extract_reqiured_acquired_slots_ids_mapping( + act2act_id, slot_names, nlg_manager, parent_tracker) + + # todo why so ugly and duplicated in multiple users tracker + dialogue_state_tracker = DialogueStateTracker(slot_names, nlg_manager.num_of_known_actions(), nlg_manager.get_api_call_action_id(), policy_network_params.hidden_size, database, parent_tracker.actions_required_acquired_slots_path) - # region set formfilling info - act2act_id = {a_text: nlg_manager.get_action_id(a_text) for a_text in nlg_manager.known_actions()} + dialogue_state_tracker.ffill_act_ids2req_slots_ids = action_id2req_slots_ids + dialogue_state_tracker.ffill_act_ids2aqd_slots_ids = action_id2aqd_slots_ids + + # endregion set formfilling info + return dialogue_state_tracker + + @staticmethod + def extract_reqiured_acquired_slots_ids_mapping(act2act_id, slot_names, nlg_manager, parent_tracker): action_id2aqd_slots_ids = dict() # aqd stands for acquired action_id2req_slots_ids = dict() for act in nlg_manager.known_actions(): act_id = act2act_id[act] - 
action_id2req_slots_ids[act_id] = np.zeros(len(dialogue_state_tracker.slot_names), dtype=np.float32) - action_id2aqd_slots_ids[act_id] = np.zeros(len(dialogue_state_tracker.slot_names), dtype=np.float32) + action_id2req_slots_ids[act_id] = np.zeros(len(slot_names), dtype=np.float32) + action_id2aqd_slots_ids[act_id] = np.zeros(len(slot_names), dtype=np.float32) if isinstance(act, tuple): acts = act @@ -82,16 +96,12 @@ def from_gobot_params(parent_tracker: FeaturizedTracker, for act in acts: for slot_name_i, slot_name in enumerate(parent_tracker.action_names2required_slots.get(act, [])): - slot_ix_in_tracker = dialogue_state_tracker.slot_names.index(slot_name) + slot_ix_in_tracker = slot_names.index(slot_name) action_id2req_slots_ids[act_id][slot_ix_in_tracker] = 1. for slot_name_i, slot_name in enumerate(parent_tracker.action_names2acquired_slots.get(act, [])): - slot_ix_in_tracker = dialogue_state_tracker.slot_names.index(slot_name) + slot_ix_in_tracker = slot_names.index(slot_name) action_id2aqd_slots_ids[act_id][slot_ix_in_tracker] = 1. - - dialogue_state_tracker.ffill_act_ids2req_slots_ids = action_id2req_slots_ids - dialogue_state_tracker.ffill_act_ids2aqd_slots_ids = action_id2aqd_slots_ids - # endregion set formfilling info - return dialogue_state_tracker + return action_id2aqd_slots_ids, action_id2req_slots_ids def reset_state(self): super().reset_state() @@ -153,9 +163,9 @@ def calc_action_mask(self) -> np.ndarray: acquired_slots_mask = self.ffill_act_ids2aqd_slots_ids[act_id] act_req_slots_fulfilled = np.equal((required_slots_mask * self._binary_features()), required_slots_mask) act_requirements_not_fulfilled = np.invert(act_req_slots_fulfilled)# if act_req_slots_fulfilled != [] else np.array([]) - act_nothing_new_to_knew = np.equal((acquired_slots_mask * self._binary_features()), acquired_slots_mask) + ack_slot_is_already_known = np.equal((acquired_slots_mask * self._binary_features()), acquired_slots_mask) - if any(np.logical_or(act_requirements_not_fulfilled, act_nothing_new_to_knew)): + if any(act_requirements_not_fulfilled) or (all(ack_slot_is_already_known) and any(acquired_slots_mask)): mask[act_id] = 0. return mask @@ -213,6 +223,7 @@ def get_user_tracker(self, user_id: int) -> DialogueStateTracker: return tracker def new_tracker(self): + # todo deprecated and never used? 
tracker = DialogueStateTracker(self.base_tracker.slot_names, self.base_tracker.n_actions, self.base_tracker.api_call_id, self.base_tracker.hidden_size, self.base_tracker.database) @@ -235,6 +246,8 @@ def init_new_tracker(self, user_id: int, tracker_entity: DialogueStateTracker) - tracker_entity.database, tracker_entity.actions_required_acquired_slots_path ) + tracker.ffill_act_ids2req_slots_ids = tracker_entity.ffill_act_ids2req_slots_ids + tracker.ffill_act_ids2aqd_slots_ids = tracker_entity.ffill_act_ids2aqd_slots_ids self._ids_to_trackers[user_id] = tracker diff --git a/deeppavlov/models/slotfill/slotfill.py b/deeppavlov/models/slotfill/slotfill.py index f72db51aa9..2a8b4c047d 100644 --- a/deeppavlov/models/slotfill/slotfill.py +++ b/deeppavlov/models/slotfill/slotfill.py @@ -66,7 +66,9 @@ def ner2slot(self, input_entity, slot): entities = [] normalized_slot_vals = [] for entity_name in self._slot_vals[slot]: + # todo log missing keys for entity in self._slot_vals[slot][entity_name]: + # todo log missing keys entities.append(entity) normalized_slot_vals.append(entity_name) best_match, score = process.extract(input_entity, entities, limit=2 ** 20)[0] From 03c670bd48952b03b166a9a944abf2edb17c9bb2 Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 19 Oct 2020 05:14:45 -0700 Subject: [PATCH 007/151] now rasa config reader parses forms --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 6043c2bc62..5dad4af13f 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -42,6 +42,7 @@ class DomainKnowledge: known_slots: Dict response_templates: Dict session_config: Dict + forms: Dict def __init__(self, domain_knowledge_di: Dict): self.known_entities = domain_knowledge_di.get("entities", []) @@ -50,6 +51,7 @@ def __init__(self, domain_knowledge_di: Dict): self.known_slots = domain_knowledge_di.get("slots", {}) self.response_templates = domain_knowledge_di.get("responses", {}) self.session_config = domain_knowledge_di.get("session_config", {}) + self.forms = domain_knowledge_di.get("forms", {}) @register('md_yaml_dialogs_reader') From 998364209c323384adb948818493f3e9da79d5ed Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 19 Oct 2020 05:20:03 -0700 Subject: [PATCH 008/151] added from_yaml reader for domain knowledge --- .../dataset_readers/md_yaml_dialogs_reader.py | 13 +++++++- .../go_bot/tracker/featurized_tracker.py | 30 +++++++++++++++++-- 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 5dad4af13f..7d90421c58 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -53,6 +53,16 @@ def __init__(self, domain_knowledge_di: Dict): self.session_config = domain_knowledge_di.get("session_config", {}) self.forms = domain_knowledge_di.get("forms", {}) + @classmethod + def from_yaml(cls, domain_yml_fpath: Union[str, Path] = "domain.yml"): + """ + Parses domain.yml domain config file into the DomainKnowledge object + :param domain_yml_fpath: path to the domain config file, defaults to domain.yml + :return: the loaded DomainKnowledge obect + """ + return cls(read_yaml(domain_yml_fpath)) + + @register('md_yaml_dialogs_reader') class 
MD_YAML_DialogsDatasetReader(DatasetReader): @@ -106,7 +116,8 @@ def read(cls, data_path: str, dialogs: bool = False) -> Dict[str, List]: log.error(f"INSIDE MLU_MD_DialogsDatasetReader.read(): " f"{required_fname} not found with path {required_path}") - domain_knowledge = DomainKnowledge(read_yaml(Path(data_path, domain_fname))) + domain_path = Path(data_path, domain_fname) + domain_knowledge = DomainKnowledge(read_yaml(domain_path)) intent2slots2text, slot_name2text2value = cls._read_intent2text_mapping(Path(data_path, nlu_fname), domain_knowledge) diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index 84e8ee00a7..7acdfb88b5 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -35,7 +35,7 @@ def __init__(self, **kwargs) -> None: self.slot_names = list(slot_names) self.actions_required_acquired_slots_path = actions_required_acquired_slots_path - self.action_names2required_slots, self.action_names2acquired_slots = self._load_actions2slots_formfilling_info(self.actions_required_acquired_slots_path) + self.action_names2required_slots, self.action_names2acquired_slots = self._load_actions2slots_formfilling_info_from_json(self.actions_required_acquired_slots_path) self.history = [] self.current_features = None @@ -116,14 +116,38 @@ def _new_features(self, state) -> np.ndarray: return feats - def _load_actions2slots_formfilling_info(self, - actions_required_acquired_slots_path: Optional[Union[str, Path]] = None)\ + def _load_actions2slots_formfilling_info_from_json(self, + actions_required_acquired_slots_path: Optional[Union[str, Path]] = None)\ -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]: """ loads the formfilling mapping of actions onto the required slots from the json of the following structure: {action1: {"required": [required_slot_name_1], "acquired": [acquired_slot_name_1, acquired_slot_name_2]}, action2: {"required": [required_slot_name_21, required_slot_name_22], "acquired": [acquired_slot_name_21]}, ..} + Returns: + the dictionary represented by the passed json + """ + actions_required_acquired_slots_path = expand_path(actions_required_acquired_slots_path) + with open(actions_required_acquired_slots_path, encoding="utf-8") as actions2slots_json_f: + actions2slots = json.load(actions2slots_json_f) + actions2required_slots = {act: act_slots["required"] for act, act_slots in actions2slots.items()} + actions2acquired_slots = {act: act_slots["acquired"] for act, act_slots in actions2slots.items()} + return actions2required_slots, actions2acquired_slots + + def _load_actions2slots_formfilling_info_from(self, + actions_required_acquired_slots_path: Optional[Union[str, Path]] = None)\ + -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]: + """ + loads the formfilling mapping of actions onto the required slots from the domain.yml form description: + + restaurant_form: + cuisine: + - type: from_entity + entity: cuisine + num_people: + - type: from_entity + entity: number + Returns: the dictionary represented by the passed json """ From 0a3d49380d8f0069bd61bde3754c9f8f279a89a0 Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 19 Oct 2020 05:58:46 -0700 Subject: [PATCH 009/151] added from_yaml reader for domain knowledge, added type hints for domain knowledge --- .../dataset_readers/md_yaml_dialogs_reader.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git 
a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 7d90421c58..4e5fe4dfca 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -45,13 +45,13 @@ class DomainKnowledge: forms: Dict def __init__(self, domain_knowledge_di: Dict): - self.known_entities = domain_knowledge_di.get("entities", []) - self.known_intents = domain_knowledge_di.get("intents", []) - self.known_actions = domain_knowledge_di.get("actions", []) - self.known_slots = domain_knowledge_di.get("slots", {}) - self.response_templates = domain_knowledge_di.get("responses", {}) - self.session_config = domain_knowledge_di.get("session_config", {}) - self.forms = domain_knowledge_di.get("forms", {}) + self.known_entities: List = domain_knowledge_di.get("entities", []) + self.known_intents: List = domain_knowledge_di.get("intents", []) + self.known_actions: List = domain_knowledge_di.get("actions", []) + self.known_slots: Dict = domain_knowledge_di.get("slots", {}) + self.response_templates: Dict = domain_knowledge_di.get("responses", {}) + self.session_config: Dict = domain_knowledge_di.get("session_config", {}) + self.forms: Dict = domain_knowledge_di.get("forms", {}) @classmethod def from_yaml(cls, domain_yml_fpath: Union[str, Path] = "domain.yml"): @@ -117,7 +117,7 @@ def read(cls, data_path: str, dialogs: bool = False) -> Dict[str, List]: f"{required_fname} not found with path {required_path}") domain_path = Path(data_path, domain_fname) - domain_knowledge = DomainKnowledge(read_yaml(domain_path)) + domain_knowledge = DomainKnowledge.from_yaml(domain_path) intent2slots2text, slot_name2text2value = cls._read_intent2text_mapping(Path(data_path, nlu_fname), domain_knowledge) From f9e28ba515137e25226049eb54855c55ecd88a7f Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 19 Oct 2020 06:24:37 -0700 Subject: [PATCH 010/151] added reading slots mapping from domain.yml todo: augment stories with get_slot_calls. 
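A minimal sketch of what this loader consumes and produces, assuming the
restaurant_form example from the docstring added below (form, slot and entity
names are illustrative only):

    # domain.yml fragment: one form acquiring two slots via from_entity
    forms:
      restaurant_form:
        cuisine:
          - type: from_entity
            entity: cuisine
        num_people:
          - type: from_entity
            entity: number

    # _load_actions2slots_formfilling_info_from() is then expected to yield roughly
    #   action_names2acquired_slots == {"restaurant_form": ["cuisine", "num_people"]}
    # and to map every api/db action met in the stories onto the union of slots
    # acquired by the forms preceding it (action_names2required_slots).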
--- .../go_bot/tracker/dialogue_state_tracker.py | 11 ++-- .../go_bot/tracker/featurized_tracker.py | 59 +++++++++++++++---- 2 files changed, 56 insertions(+), 14 deletions(-) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index dc086f5358..f9c64cf605 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -44,9 +44,10 @@ def __init__(self, api_call_id: int, hidden_size: int, database: Component = None, - actions_required_acquired_slots_path: Optional[Union[str, Path]]=None, + domain_yml_path: Optional[Union[str, Path]]=None, + stories_yml_path: Optional[Union[str, Path]]=None, **kwargs) -> None: - super().__init__(slot_names, actions_required_acquired_slots_path, **kwargs) + super().__init__(slot_names, domain_yml_path, stories_yml_path, **kwargs) self.hidden_size = hidden_size self.database = database self.n_actions = n_actions @@ -71,7 +72,8 @@ def from_gobot_params(parent_tracker: FeaturizedTracker, nlg_manager.get_api_call_action_id(), policy_network_params.hidden_size, database, - parent_tracker.actions_required_acquired_slots_path) + parent_tracker.domain_yml_path, + parent_tracker.stories_path) dialogue_state_tracker.ffill_act_ids2req_slots_ids = action_id2req_slots_ids dialogue_state_tracker.ffill_act_ids2aqd_slots_ids = action_id2aqd_slots_ids @@ -244,7 +246,8 @@ def init_new_tracker(self, user_id: int, tracker_entity: DialogueStateTracker) - tracker_entity.api_call_id, tracker_entity.hidden_size, tracker_entity.database, - tracker_entity.actions_required_acquired_slots_path + tracker_entity.domain_yml_path, + tracker_entity.stories_path ) tracker.ffill_act_ids2req_slots_ids = tracker_entity.ffill_act_ids2req_slots_ids tracker.ffill_act_ids2aqd_slots_ids = tracker_entity.ffill_act_ids2aqd_slots_ids diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index 7acdfb88b5..4671c6747a 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -5,7 +5,9 @@ import numpy as np from deeppavlov.core.commands.utils import expand_path +from deeppavlov.core.common.file import read_yaml from deeppavlov.core.common.registry import register +from deeppavlov.dataset_readers.md_yaml_dialogs_reader import DomainKnowledge from deeppavlov.models.go_bot.nlu.dto.nlu_response import NLUResponse from deeppavlov.models.go_bot.tracker.dto.tracker_knowledge_interface import TrackerKnowledgeInterface from deeppavlov.models.go_bot.tracker.tracker_interface import TrackerInterface @@ -31,11 +33,16 @@ def get_current_knowledge(self) -> TrackerKnowledgeInterface: def __init__(self, slot_names: List[str], - actions_required_acquired_slots_path: Optional[Union[str, Path]]=None, + # actions_required_acquired_slots_path: Optional[Union[str, Path]]=None, + domain_yml_path: Optional[Union[str, Path]]=None, + stories_yml_path: Optional[Union[str, Path]]=None, **kwargs) -> None: self.slot_names = list(slot_names) - self.actions_required_acquired_slots_path = actions_required_acquired_slots_path - self.action_names2required_slots, self.action_names2acquired_slots = self._load_actions2slots_formfilling_info_from_json(self.actions_required_acquired_slots_path) + self.domain_yml_path = domain_yml_path + self.stories_path = stories_yml_path + self.action_names2required_slots, self.action_names2acquired_slots =\ + 
self._load_actions2slots_formfilling_info_from(domain_yml_path, stories_yml_path) + # self._load_actions2slots_formfilling_info_from_json(self.actions_required_acquired_slots_path) self.history = [] self.current_features = None @@ -135,7 +142,8 @@ def _load_actions2slots_formfilling_info_from_json(self, return actions2required_slots, actions2acquired_slots def _load_actions2slots_formfilling_info_from(self, - actions_required_acquired_slots_path: Optional[Union[str, Path]] = None)\ + domain_yml_path: Optional[Union[str, Path]], + stories_yml_path: Optional[Union[str, Path]])\ -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]: """ loads the formfilling mapping of actions onto the required slots from the domain.yml form description: @@ -151,9 +159,40 @@ def _load_actions2slots_formfilling_info_from(self, Returns: the dictionary represented by the passed json """ - actions_required_acquired_slots_path = expand_path(actions_required_acquired_slots_path) - with open(actions_required_acquired_slots_path, encoding="utf-8") as actions2slots_json_f: - actions2slots = json.load(actions2slots_json_f) - actions2required_slots = {act: act_slots["required"] for act, act_slots in actions2slots.items()} - actions2acquired_slots = {act: act_slots["acquired"] for act, act_slots in actions2slots.items()} - return actions2required_slots, actions2acquired_slots \ No newline at end of file + domain_yml_path = expand_path(domain_yml_path) + domain_knowledge: DomainKnowledge = DomainKnowledge.from_yaml(domain_yml_path) + potential_api_or_db_actions = domain_knowledge.known_actions + forms = domain_knowledge.forms + form_names = list(forms.keys()) + + stories_yml_path = expand_path(stories_yml_path) + stories_yml_di = read_yaml(stories_yml_path) + prev_forms = [] + action2forms = {} + for story in stories_yml_di["stories"]: + story_name = story["story"] + story_steps = story["steps"] + for step in story_steps: + if "action" not in step.keys(): + continue + + curr_action = step["action"] + if curr_action in form_names: + prev_forms.append(curr_action) + if curr_action in potential_api_or_db_actions: + action2forms[curr_action] = prev_forms + prev_forms = [] + + actions2acquired_slots = {form_name: self._get_form_acquired_slots(form) for form_name, form in forms.items()} + actions2required_slots = {act: {slot + for form in forms + for slot in actions2acquired_slots[form]} + for act, forms in action2forms.items()} + + return actions2required_slots, actions2acquired_slots + + def _get_form_acquired_slots(self, form): + acquired_slots = [slot_name + for slot_name, slot_info_li in form.items() + if slot_info_li and slot_info_li[0].get("type", '') == "from_entity"] + return acquired_slots From e0ae6be55af886dabc2020933c492e469def5163 Mon Sep 17 00:00:00 2001 From: oserikov Date: Wed, 21 Oct 2020 23:55:48 -0700 Subject: [PATCH 011/151] added augment stories with get_slot_calls. 
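For reference, a sketch of the story augmentation this patch aims at (the intent
name request_restaurant is made up for illustration; the ask/inform names follow
the utter_ask_{form}_{slot} / utter_ask_{slot} and inform_{slot} conventions the
reader looks up, with exception raising on missing names still left as a todo):

    * request_restaurant
    - form{"name": "restaurant_form"}

  is unrolled into explicit ask/inform turns, roughly

    * request_restaurant
    - utter_ask_restaurant_form_cuisine      (falls back to utter_ask_cuisine)
    * inform_cuisine
    - utter_ask_restaurant_form_num_people   (falls back to utter_ask_num_people)
    * inform_num_people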
--- .../dataset_readers/md_yaml_dialogs_reader.py | 180 ++++++++++++------ 1 file changed, 126 insertions(+), 54 deletions(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 4e5fe4dfca..edb066cde8 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -36,14 +36,6 @@ class DomainKnowledge: """the DTO-like class to store the domain knowledge from the domain yaml config.""" - known_entities: List - known_intents: List - known_actions: List - known_slots: Dict - response_templates: Dict - session_config: Dict - forms: Dict - def __init__(self, domain_knowledge_di: Dict): self.known_entities: List = domain_knowledge_di.get("entities", []) self.known_intents: List = domain_knowledge_di.get("intents", []) @@ -247,62 +239,70 @@ def _read_story(cls, curr_story_title = None curr_story_utters = None - curr_story_bad = False - for line in open(story_fpath): + nonlocal_curr_story_bad = False # can be modified as a nonlocal variable + + def process_user_utter(line): + nonlocal intent2slots2text, slot_name2text2value, curr_story_utters, nonlocal_curr_story_bad + try: + user_utter = cls.augment_user_turn(intent2slots2text, line, slot_name2text2value) + # dialogs MUST start with system replics + utters_to_append = [default_system_start] if not curr_story_utters else [] + [user_utter] + except KeyError as e: + log.debug(f"INSIDE MLU_MD_DialogsDatasetReader._read_story(): " + f"Skipping story w. line {line} because of no NLU candidates found") + nonlocal_curr_story_bad = True + utters_to_append = [] + return utters_to_append + + def process_system_utter(line): + nonlocal intent2slots2text, domain_knowledge, curr_story_utters, nonlocal_curr_story_bad + system_action = cls.parse_system_turn(domain_knowledge, line) + system_action_name = system_action.get("dialog_acts")[0].get("act") + + utters_to_append = [] + if cls.last_turn_is_systems_turn(curr_story_utters): + # deal with consecutive system actions by inserting the last user replics in between + utters_to_append.append(cls.get_last_users_turn(curr_story_utters)) + + if system_action_name.startswith("form"): + form_name = system_action_name + augmented_utters = cls.augment_form(form_name, domain_knowledge, intent2slots2text) + augmented_utters_parsed = [processed_line + for aug_line in augmented_utters + for processed_line in process_story_line(aug_line)] + utters_to_append.extend(augmented_utters_parsed) + utters_to_append.append(system_action) + return utters_to_append + + def process_story_line(line): + if line.startswith('*'): + utters_to_extend_with = process_user_utter(line) + elif line.startswith('-'): + utters_to_extend_with = process_system_utter(line) + else: + # todo raise an exception + utters_to_extend_with = [] + return utters_to_extend_with + + story_file = open(story_fpath) + for line in story_file: line = line.strip() if line.startswith('#'): # #... 
marks the beginning of new story if curr_story_utters and curr_story_utters[-1]["speaker"] == cls._USER_SPEAKER_ID: curr_story_utters.append(default_system_goodbye) # dialogs MUST end with system replics - if not curr_story_bad: + if not nonlocal_curr_story_bad: stories_parsed[curr_story_title] = curr_story_utters curr_story_title = line.strip('#') curr_story_utters = [] - curr_story_bad = False - elif line.startswith('*'): - # user actions are started in dataset with * - user_action, slots_dstc2formatted = cls._parse_user_intent(line) - slots_actual_values = cls._clarify_slots_values(slot_name2text2value, slots_dstc2formatted) - try: - slots_to_exclude, slots_used_values, action_for_text = cls._choose_slots_for_whom_exists_text( - intent2slots2text, slots_actual_values, - user_action) - except KeyError as e: - log.debug(f"INSIDE MLU_MD_DialogsDatasetReader._read_story(): " - f"Skipping story w. line {line} because of no NLU candidates found") - curr_story_bad = True - continue - user_response_info = cls._user_action2text(intent2slots2text, action_for_text, slots_used_values) - user_utter = {"speaker": cls._USER_SPEAKER_ID, - "text": user_response_info["text"], - "dialog_acts": [{"act": user_action, "slots": user_response_info["slots"]}], - "slots to exclude": slots_to_exclude} - - if not curr_story_utters: - curr_story_utters.append(default_system_start) # dialogs MUST start with system replics - curr_story_utters.append(user_utter) - elif line.startswith('-'): - # system actions are started in dataset with - - - system_action_name = line.strip('-').strip() - curr_action_text = cls._system_action2text(domain_knowledge, system_action_name) - system_action = {"speaker": cls._SYSTEM_SPEAKER_ID, - "text": curr_action_text, - "dialog_acts": [{"act": system_action_name, "slots": []}]} - if system_action_name.startswith("action"): - system_action["db_result"] = {} - - if curr_story_utters and curr_story_utters[-1]["speaker"] == cls._SYSTEM_SPEAKER_ID: - # deal with consecutive system actions by inserting the last user replics in between - last_user_utter = [u for u in reversed(curr_story_utters) - if u["speaker"] == cls._USER_SPEAKER_ID][0] - curr_story_utters.append(last_user_utter) - - curr_story_utters.append(system_action) + nonlocal_curr_story_bad = False + else: + curr_story_utters.extend(process_story_line(line)) + story_file.close() - if not curr_story_bad: + if not nonlocal_curr_story_bad: stories_parsed[curr_story_title] = curr_story_utters stories_parsed.pop(None) @@ -325,6 +325,77 @@ def _read_story(cls, return gobot_formatted_stories + @classmethod + def augment_form(cls, form_name:str, domain_knowledge: DomainKnowledge, intent2slots2text: Dict): + form = domain_knowledge.forms[form_name] # todo handle keyerr + augmended_story = [] + for slot_name, slot_info_li in form.items(): + if slot_info_li and slot_info_li[0].get("type", '') == "from_entity": + # we only handle this sort of slots + known_responses = list(domain_knowledge.response_templates) + known_intents = list(intent2slots2text.keys()) + augmended_story.extend(cls.augment_slot(known_responses, known_intents, slot_name, form_name)) + return augmended_story + + @classmethod + def augment_slot(cls, known_responses: List[str], known_intents: List[str], slot_name: str, form_name: str): + ask_slot_act_name_hypothesis1 = f"utter_ask_{form_name}_{slot_name}" + ask_slot_act_name_hypothesis2 = f"utter_ask_{slot_name}" + if ask_slot_act_name_hypothesis1 in known_responses: + ask_slot_act_name = ask_slot_act_name_hypothesis1 + elif 
ask_slot_act_name_hypothesis2 in known_responses: + ask_slot_act_name = ask_slot_act_name_hypothesis2 + else: + # todo raise an exception + pass + + + + inform_slot_user_utter_hypothesis = f"inform_{slot_name}" + if inform_slot_user_utter_hypothesis in known_intents: + inform_slot_user_utter = inform_slot_user_utter_hypothesis + else: + # todo raise an exception + pass + + return [f"- {ask_slot_act_name}", f"* {inform_slot_user_utter}"] + + @classmethod + def get_last_users_turn(cls, curr_story_utters): + *_, last_user_utter = filter(curr_story_utters, lambda x: x["speaker"] == cls._USER_SPEAKER_ID) + return last_user_utter + + @classmethod + def last_turn_is_systems_turn(cls, curr_story_utters): + return curr_story_utters and curr_story_utters[-1]["speaker"] == cls._SYSTEM_SPEAKER_ID + + @classmethod + def parse_system_turn(cls, domain_knowledge, line): + # system actions are started in dataset with - + system_action_name = line.strip('-').strip() + curr_action_text = cls._system_action2text(domain_knowledge, system_action_name) + system_action = {"speaker": cls._SYSTEM_SPEAKER_ID, + "text": curr_action_text, + "dialog_acts": [{"act": system_action_name, "slots": []}]} + if system_action_name.startswith("action"): + system_action["db_result"] = {} + return system_action + + @classmethod + def augment_user_turn(cls, intent2slots2text, line, slot_name2text2value): + # user actions are started in dataset with * + user_action, slots_dstc2formatted = cls._parse_user_intent(line) + slots_actual_values = cls._clarify_slots_values(slot_name2text2value, slots_dstc2formatted) + slots_to_exclude, slots_used_values, action_for_text = cls._choose_slots_for_whom_exists_text( + intent2slots2text, slots_actual_values, + user_action) + user_response_info = cls._user_action2text(intent2slots2text, action_for_text, slots_used_values) + user_utter = {"speaker": cls._USER_SPEAKER_ID, + "text": user_response_info["text"], + "dialog_acts": [{"act": user_action, "slots": user_response_info["slots"]}], + "slots to exclude": slots_to_exclude} + return user_utter + @staticmethod def _choose_slots_for_whom_exists_text(intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], slots_actual_values: SLOT2VALUE_PAIRS_TUPLE, @@ -383,7 +454,8 @@ def _user_action2text(intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, slots_li: Optional[SLOT2VALUE_PAIRS_TUPLE] = None) -> str: if slots_li is None: slots_li = tuple() - return intent2slots2text[user_action][slots_li][0] + res = intent2slots2text[user_action][slots_li][0] + return res @staticmethod def _system_action2text(domain_knowledge: DomainKnowledge, system_action: str) -> str: From 2a1536ec898f00bc31c4af690b030c8264947918 Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 22 Oct 2020 00:04:38 -0700 Subject: [PATCH 012/151] added proper form name parsing in story md --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index edb066cde8..d6afa7e25f 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -264,8 +264,15 @@ def process_system_utter(line): # deal with consecutive system actions by inserting the last user replics in between utters_to_append.append(cls.get_last_users_turn(curr_story_utters)) + def parse_form_name(story_line): + form_name = None + if story_line.startswith("form"): + form_di = 
json.loads(story_line[len("form"):]) + form_name = form_di["name"] + return form_name + if system_action_name.startswith("form"): - form_name = system_action_name + form_name = parse_form_name(system_action_name) augmented_utters = cls.augment_form(form_name, domain_knowledge, intent2slots2text) augmented_utters_parsed = [processed_line for aug_line in augmented_utters From 317cc39297be78ffc440014abfbf0ba9e344366a Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 22 Oct 2020 10:30:00 +0300 Subject: [PATCH 013/151] fix: typo --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index d6afa7e25f..dcf22c0155 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -238,7 +238,7 @@ def _read_story(cls, stories_parsed = {} curr_story_title = None - curr_story_utters = None + curr_story_utters = [] nonlocal_curr_story_bad = False # can be modified as a nonlocal variable def process_user_utter(line): From 3c3949d699074de895eb4889c08a7038ace952f8 Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 22 Oct 2020 10:39:04 +0300 Subject: [PATCH 014/151] fix: typo --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index dcf22c0155..7b090b4144 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -369,7 +369,7 @@ def augment_slot(cls, known_responses: List[str], known_intents: List[str], slot @classmethod def get_last_users_turn(cls, curr_story_utters): - *_, last_user_utter = filter(curr_story_utters, lambda x: x["speaker"] == cls._USER_SPEAKER_ID) + *_, last_user_utter = filter(lambda x: x["speaker"] == cls._USER_SPEAKER_ID, curr_story_utters) return last_user_utter @classmethod From c4c99d999fad5ec4e4625b2cf619079035ac972e Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 22 Oct 2020 10:44:50 +0300 Subject: [PATCH 015/151] fix: typo --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 7b090b4144..bc789efb99 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -246,7 +246,7 @@ def process_user_utter(line): try: user_utter = cls.augment_user_turn(intent2slots2text, line, slot_name2text2value) # dialogs MUST start with system replics - utters_to_append = [default_system_start] if not curr_story_utters else [] + [user_utter] + utters_to_append = ([default_system_start] if not curr_story_utters else []) + [user_utter] except KeyError as e: log.debug(f"INSIDE MLU_MD_DialogsDatasetReader._read_story(): " f"Skipping story w. 
line {line} because of no NLU candidates found") From 91dc9370725216a987797ae5d8e76cab64d6a7a7 Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 22 Oct 2020 11:22:49 +0300 Subject: [PATCH 016/151] fix: typo --- .../go_bot/tracker/featurized_tracker.py | 33 +++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index 4671c6747a..8db3a3e332 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -165,8 +165,37 @@ def _load_actions2slots_formfilling_info_from(self, forms = domain_knowledge.forms form_names = list(forms.keys()) - stories_yml_path = expand_path(stories_yml_path) - stories_yml_di = read_yaml(stories_yml_path) + # todo migrate to rasa2.0 + # stories_yml_path = expand_path(stories_yml_path) + # stories_yml_di = read_yaml(stories_yml_path) + def read_md_story(story_path): + story_f = open(story_path, 'r') + stories_li = [] + curr_story = None + for line in story_f: + line = line.strip() + if not line: continue; + if line.startswith("#"): + if curr_story is not None: + stories_li.append(curr_story) + story_name = line.strip('#').strip() + curr_story = {"story": story_name, "steps": []} + elif line.startswith("*"): + # user turn + step = {"intent": line.strip('*').strip()} + curr_story["steps"].append(step) + elif line.startswith('-'): + # system turn + step = {"action": line.strip('-').strip()} + curr_story["steps"].append(step) + if curr_story is not None: + stories_li.append(curr_story) + story_f.close() + stories_di = {"stories": stories_li} + return stories_di + + stories_md_path = expand_path(stories_yml_path) + stories_yml_di = read_md_story(stories_md_path) prev_forms = [] action2forms = {} for story in stories_yml_di["stories"]: From b61fff83ce2d7640265aa998df5ee82bae78eeb4 Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 22 Oct 2020 12:43:53 +0300 Subject: [PATCH 017/151] fix: typo --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index bc789efb99..1146492951 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -278,7 +278,8 @@ def parse_form_name(story_line): for aug_line in augmented_utters for processed_line in process_story_line(aug_line)] utters_to_append.extend(augmented_utters_parsed) - utters_to_append.append(system_action) + else: + utters_to_append.append(system_action) return utters_to_append def process_story_line(line): From 72f8aee96abd519ef1d75bab8344a3a539a3dff3 Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 22 Oct 2020 13:41:39 +0300 Subject: [PATCH 018/151] fix: typo --- .../dataset_readers/md_yaml_dialogs_reader.py | 31 ++++++++++++------- .../go_bot/tracker/featurized_tracker.py | 25 ++++++++++++--- 2 files changed, 41 insertions(+), 15 deletions(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 1146492951..ac7f874780 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -347,26 +347,35 @@ def augment_form(cls, form_name:str, domain_knowledge: DomainKnowledge, intent2s @classmethod def augment_slot(cls, known_responses: List[str], 
known_intents: List[str], slot_name: str, form_name: str): - ask_slot_act_name_hypothesis1 = f"utter_ask_{form_name}_{slot_name}" - ask_slot_act_name_hypothesis2 = f"utter_ask_{slot_name}" - if ask_slot_act_name_hypothesis1 in known_responses: - ask_slot_act_name = ask_slot_act_name_hypothesis1 - elif ask_slot_act_name_hypothesis2 in known_responses: - ask_slot_act_name = ask_slot_act_name_hypothesis2 - else: - # todo raise an exception - pass - + ask_slot_act_name = cls.get_augmented_ask_slot_utter(form_name, known_responses, slot_name) + inform_slot_user_utter = cls.get_augmented_ask_intent_utter(known_intents, slot_name) + return [f"- {ask_slot_act_name}", f"* {inform_slot_user_utter}"] + @classmethod + def get_augmented_ask_intent_utter(cls, known_intents, slot_name): inform_slot_user_utter_hypothesis = f"inform_{slot_name}" if inform_slot_user_utter_hypothesis in known_intents: inform_slot_user_utter = inform_slot_user_utter_hypothesis else: # todo raise an exception + inform_slot_user_utter = None pass + return inform_slot_user_utter - return [f"- {ask_slot_act_name}", f"* {inform_slot_user_utter}"] + @classmethod + def get_augmented_ask_slot_utter(cls, form_name, known_responses, slot_name): + ask_slot_act_name_hypothesis1 = f"utter_ask_{form_name}_{slot_name}" + ask_slot_act_name_hypothesis2 = f"utter_ask_{slot_name}" + if ask_slot_act_name_hypothesis1 in known_responses: + ask_slot_act_name = ask_slot_act_name_hypothesis1 + elif ask_slot_act_name_hypothesis2 in known_responses: + ask_slot_act_name = ask_slot_act_name_hypothesis2 + else: + # todo raise an exception + ask_slot_act_name = None + pass + return ask_slot_act_name @classmethod def get_last_users_turn(cls, curr_story_utters): diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index 8db3a3e332..c80479fe9e 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -7,7 +7,7 @@ from deeppavlov.core.commands.utils import expand_path from deeppavlov.core.common.file import read_yaml from deeppavlov.core.common.registry import register -from deeppavlov.dataset_readers.md_yaml_dialogs_reader import DomainKnowledge +from deeppavlov.dataset_readers.md_yaml_dialogs_reader import DomainKnowledge, MD_YAML_DialogsDatasetReader from deeppavlov.models.go_bot.nlu.dto.nlu_response import NLUResponse from deeppavlov.models.go_bot.tracker.dto.tracker_knowledge_interface import TrackerKnowledgeInterface from deeppavlov.models.go_bot.tracker.tracker_interface import TrackerInterface @@ -206,18 +206,35 @@ def read_md_story(story_path): continue curr_action = step["action"] + if curr_action.startswith("form"): + curr_action = json.loads(curr_action[len("form"):])["name"] + print(curr_action) if curr_action in form_names: prev_forms.append(curr_action) if curr_action in potential_api_or_db_actions: action2forms[curr_action] = prev_forms prev_forms = [] - actions2acquired_slots = {form_name: self._get_form_acquired_slots(form) for form_name, form in forms.items()} + def get_slot(system_utter, form_name): + if system_utter.startswith(f"utter_ask_{form_name}_"): + slot_name = system_utter[len(f"utter_ask_{form_name}_"):] + elif system_utter.startswith(f"utter_ask_"): + slot_name = system_utter[len(f"utter_ask_"):] + else: + # todo: raise an exception + pass + return slot_name + + actions2acquired_slots = {utter.strip('-').strip(): get_slot(utter.strip('-').strip(), form_name) + for form_name, form in 
forms.items() + for utter in + MD_YAML_DialogsDatasetReader.augment_form(form_name, domain_knowledge, {}) + if utter.strip().startswith("-")} + forms2acquired_slots = {form_name: self._get_form_acquired_slots(form) for form_name, form in forms.items()} actions2required_slots = {act: {slot for form in forms - for slot in actions2acquired_slots[form]} + for slot in forms2acquired_slots[form]} for act, forms in action2forms.items()} - return actions2required_slots, actions2acquired_slots def _get_form_acquired_slots(self, form): From 41100ffc39b5c6d58c4217b8d95a4b35aefb40a4 Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 22 Oct 2020 13:48:33 +0300 Subject: [PATCH 019/151] fix: typo --- deeppavlov/models/go_bot/tracker/featurized_tracker.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index c80479fe9e..dda7bffb28 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -215,15 +215,16 @@ def read_md_story(story_path): action2forms[curr_action] = prev_forms prev_forms = [] - def get_slot(system_utter, form_name): + def get_slots(system_utter, form_name): + slots = [] if system_utter.startswith(f"utter_ask_{form_name}_"): - slot_name = system_utter[len(f"utter_ask_{form_name}_"):] + slots.append(system_utter[len(f"utter_ask_{form_name}_"):]) elif system_utter.startswith(f"utter_ask_"): - slot_name = system_utter[len(f"utter_ask_"):] + slots.append(system_utter[len(f"utter_ask_"):]) else: # todo: raise an exception pass - return slot_name + return slots actions2acquired_slots = {utter.strip('-').strip(): get_slot(utter.strip('-').strip(), form_name) for form_name, form in forms.items() From 87b6b597c0373adcaa51ac9c11007404b9ddcbb8 Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 22 Oct 2020 13:56:27 +0300 Subject: [PATCH 020/151] fix: typo --- deeppavlov/models/go_bot/tracker/featurized_tracker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index dda7bffb28..cb8ed96951 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -226,7 +226,7 @@ def get_slots(system_utter, form_name): pass return slots - actions2acquired_slots = {utter.strip('-').strip(): get_slot(utter.strip('-').strip(), form_name) + actions2acquired_slots = {utter.strip('-').strip(): get_slots(utter.strip('-').strip(), form_name) for form_name, form in forms.items() for utter in MD_YAML_DialogsDatasetReader.augment_form(form_name, domain_knowledge, {}) From b52a558f2d715ba74836f010df9e3965b7777cf7 Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 20 Nov 2020 04:37:20 +0300 Subject: [PATCH 021/151] merge deeppavlov/dataset_readers/md_yaml_dialogs_reader from master --- .../dataset_readers/md_yaml_dialogs_reader.py | 129 +++++++++++------- 1 file changed, 78 insertions(+), 51 deletions(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index ac7f874780..bb47f6390d 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -28,6 +28,7 @@ from deeppavlov.core.data.dataset_reader import DatasetReader from deeppavlov.dataset_readers.dstc2_reader import DSTC2DatasetReader + 
SLOT2VALUE_PAIRS_TUPLE = Tuple[Tuple[str, Any], ...] log = getLogger(__name__) @@ -77,6 +78,9 @@ class MD_YAML_DialogsDatasetReader(DatasetReader): VALID_DATATYPES = ('trn', 'val', 'tst') + NLU_FNAME = "nlu.md" + DOMAIN_FNAME = "domain.yml" + @classmethod def _data_fname(cls, datatype: str) -> str: assert datatype in cls.VALID_DATATYPES, f"wrong datatype name: {datatype}" @@ -84,12 +88,13 @@ def _data_fname(cls, datatype: str) -> str: @classmethod @overrides - def read(cls, data_path: str, dialogs: bool = False) -> Dict[str, List]: + def read(cls, data_path: str, dialogs: bool = False, ignore_slots: bool = False) -> Dict[str, List]: """ Parameters: data_path: path to read dataset from dialogs: flag which indicates whether to output list of turns or list of dialogs + ignore_slots: whether to ignore slots information provided in stories.md or not Returns: dictionary that contains @@ -98,8 +103,8 @@ def read(cls, data_path: str, dialogs: bool = False) -> Dict[str, List]: ``'test'`` field with dialogs from ``'stories-tst.md'``. Each field is a list of tuples ``(x_i, y_i)``. """ - domain_fname = "domain.yml" - nlu_fname = "nlu.md" + domain_fname = cls.DOMAIN_FNAME + nlu_fname = cls.NLU_FNAME stories_fnames = tuple(cls._data_fname(dt) for dt in cls.VALID_DATATYPES) required_fnames = stories_fnames + (nlu_fname, domain_fname) for required_fname in required_fnames: @@ -111,7 +116,7 @@ def read(cls, data_path: str, dialogs: bool = False) -> Dict[str, List]: domain_path = Path(data_path, domain_fname) domain_knowledge = DomainKnowledge.from_yaml(domain_path) intent2slots2text, slot_name2text2value = cls._read_intent2text_mapping(Path(data_path, nlu_fname), - domain_knowledge) + domain_knowledge, ignore_slots) short2long_subsample_name = {"trn": "train", "val": "valid", @@ -119,13 +124,14 @@ def read(cls, data_path: str, dialogs: bool = False) -> Dict[str, List]: data = {short2long_subsample_name[subsample_name_short]: cls._read_story(Path(data_path, cls._data_fname(subsample_name_short)), - dialogs, domain_knowledge, intent2slots2text, slot_name2text2value) + dialogs, domain_knowledge, intent2slots2text, slot_name2text2value, + ignore_slots=ignore_slots) for subsample_name_short in cls.VALID_DATATYPES} return data @classmethod - def _read_intent2text_mapping(cls, nlu_fpath: Path, domain_knowledge: DomainKnowledge) \ + def _read_intent2text_mapping(cls, nlu_fpath: Path, domain_knowledge: DomainKnowledge, ignore_slots: bool = False) \ -> Tuple[Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], Dict[str, Dict[str, str]]]: @@ -137,7 +143,7 @@ def _read_intent2text_mapping(cls, nlu_fpath: Path, domain_knowledge: DomainKnow r"\)" intent2slots2text = defaultdict(lambda: defaultdict(list)) - slot_name2text2value = defaultdict(dict) + slot_name2text2value = defaultdict(lambda: defaultdict(list)) curr_intent_name = None @@ -151,6 +157,8 @@ def _read_intent2text_mapping(cls, nlu_fpath: Path, domain_knowledge: DomainKnow # lines starting with - are listing the examples of intent texts of the current intent type intent_text_w_markup = line.strip().strip('-').strip() line_slots_found = re.finditer(slots_markup_pattern, intent_text_w_markup) + if ignore_slots: + line_slots_found = [] curr_char_ix = 0 intent_text_without_markup = '' @@ -177,19 +185,17 @@ def _read_intent2text_mapping(cls, nlu_fpath: Path, domain_knowledge: DomainKnow # so we should remove brackets and the parentheses content intent_text_without_markup += slot_value_text - cleaned_text_slots.append({"slot_value": slot_value, - "slot_text": 
slot_value_text, - "slot_name": slot_name, - "span": (slot_value_new_l_span, slot_value_new_r_span)}) + cleaned_text_slots.append((slot_name, slot_value)) - slot_name2text2value[slot_name][slot_value_text] = slot_value + slot_name2text2value[slot_name][slot_value_text].append(slot_value) curr_char_ix = line_slot_r_span intent_text_without_markup += intent_text_w_markup[curr_char_ix: len(intent_text_w_markup)] - slots_key = tuple(sorted((slot["slot_name"], slot["slot_value"]) for slot in cleaned_text_slots)) + slots_key = tuple(sorted((slot[0], slot[1]) for slot in cleaned_text_slots)) intent2slots2text[curr_intent_name][slots_key].append({"text": intent_text_without_markup, - "slots": cleaned_text_slots}) + "slots_di": cleaned_text_slots, + "slots": slots_key}) # defaultdict behavior is no more needed intent2slots2text = {k: dict(v) for k, v in intent2slots2text.items()} @@ -203,7 +209,8 @@ def _read_story(cls, dialogs: bool, domain_knowledge: DomainKnowledge, intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], - slot_name2text2value: Dict[str, Dict[str, str]]) \ + slot_name2text2value: Dict[str, Dict[str, str]], + ignore_slots: bool = False) \ -> Union[List[List[Tuple[Dict[str, bool], Dict[str, Any]]]], List[Tuple[Dict[str, bool], Dict[str, Any]]]]: """ Reads stories from the specified path converting them to go-bot format on the fly. @@ -238,31 +245,38 @@ def _read_story(cls, stories_parsed = {} curr_story_title = None - curr_story_utters = [] + curr_story_utters_batch = [] nonlocal_curr_story_bad = False # can be modified as a nonlocal variable def process_user_utter(line): - nonlocal intent2slots2text, slot_name2text2value, curr_story_utters, nonlocal_curr_story_bad + nonlocal intent2slots2text, slot_name2text2value, curr_story_utters_batch, nonlocal_curr_story_bad try: - user_utter = cls.augment_user_turn(intent2slots2text, line, slot_name2text2value) + possible_user_utters = cls.augment_user_turn(intent2slots2text, line, slot_name2text2value) # dialogs MUST start with system replics - utters_to_append = ([default_system_start] if not curr_story_utters else []) + [user_utter] + for curr_story_utters in curr_story_utters_batch: + if not curr_story_utters: + curr_story_utters.append(default_system_start) + + utters_to_append_batch = [] + for user_utter in possible_user_utters: + utters_to_append_batch.append([user_utter]) + except KeyError as e: log.debug(f"INSIDE MLU_MD_DialogsDatasetReader._read_story(): " f"Skipping story w. 
line {line} because of no NLU candidates found") nonlocal_curr_story_bad = True - utters_to_append = [] - return utters_to_append + utters_to_append_batch = [] + return utters_to_append_batch def process_system_utter(line): - nonlocal intent2slots2text, domain_knowledge, curr_story_utters, nonlocal_curr_story_bad + nonlocal intent2slots2text, domain_knowledge, curr_story_utters_batch, nonlocal_curr_story_bad system_action = cls.parse_system_turn(domain_knowledge, line) system_action_name = system_action.get("dialog_acts")[0].get("act") - utters_to_append = [] - if cls.last_turn_is_systems_turn(curr_story_utters): - # deal with consecutive system actions by inserting the last user replics in between - utters_to_append.append(cls.get_last_users_turn(curr_story_utters)) + for curr_story_utters in curr_story_utters_batch: + if cls.last_turn_is_systems_turn(curr_story_utters): + # deal with consecutive system actions by inserting the last user replics in between + curr_story_utters.append(cls.get_last_users_turn(curr_story_utters)) def parse_form_name(story_line): form_name = None @@ -274,40 +288,49 @@ def parse_form_name(story_line): if system_action_name.startswith("form"): form_name = parse_form_name(system_action_name) augmented_utters = cls.augment_form(form_name, domain_knowledge, intent2slots2text) - augmented_utters_parsed = [processed_line - for aug_line in augmented_utters - for processed_line in process_story_line(aug_line)] - utters_to_append.extend(augmented_utters_parsed) + utters_to_append_batch = [[processed_line for processed_line in processed_line_batch] + for aug_line in augmented_utters + for processed_line_batch in process_story_line(aug_line)] else: - utters_to_append.append(system_action) - return utters_to_append + utters_to_append_batch = [[system_action]] + return utters_to_append_batch def process_story_line(line): if line.startswith('*'): - utters_to_extend_with = process_user_utter(line) + utters_to_extend_with_batch = process_user_utter(line) elif line.startswith('-'): - utters_to_extend_with = process_system_utter(line) + utters_to_extend_with_batch = process_system_utter(line) else: # todo raise an exception - utters_to_extend_with = [] - return utters_to_extend_with + utters_to_extend_with_batch = [] + return utters_to_extend_with_batch story_file = open(story_fpath) for line in story_file: line = line.strip() if line.startswith('#'): # #... 
marks the beginning of new story - if curr_story_utters and curr_story_utters[-1]["speaker"] == cls._USER_SPEAKER_ID: - curr_story_utters.append(default_system_goodbye) # dialogs MUST end with system replics + if curr_story_utters_batch and curr_story_utters_batch[0] and curr_story_utters_batch[0][-1]["speaker"] == cls._USER_SPEAKER_ID: + for curr_story_utters in curr_story_utters_batch: + curr_story_utters.append(default_system_goodbye) # dialogs MUST end with system replics if not nonlocal_curr_story_bad: - stories_parsed[curr_story_title] = curr_story_utters + for curr_story_utters_ix, curr_story_utters in enumerate(curr_story_utters_batch): + stories_parsed[curr_story_title+f"_{curr_story_utters_ix}"] = curr_story_utters curr_story_title = line.strip('#') - curr_story_utters = [] + curr_story_utters_batch = [[]] nonlocal_curr_story_bad = False else: - curr_story_utters.extend(process_story_line(line)) + new_curr_story_utters_batch = [] + possible_extensions = process_story_line(line) + for curr_story_utters in curr_story_utters_batch: + for user_utter in possible_extensions: + new_curr_story_utters = curr_story_utters.copy() + new_curr_story_utters.append(user_utter) + new_curr_story_utters_batch.append(new_curr_story_utters) + curr_story_utters_batch = new_curr_story_utters_batch + # curr_story_utters.extend(process_story_line(line)) story_file.close() if not nonlocal_curr_story_bad: @@ -406,12 +429,15 @@ def augment_user_turn(cls, intent2slots2text, line, slot_name2text2value): slots_to_exclude, slots_used_values, action_for_text = cls._choose_slots_for_whom_exists_text( intent2slots2text, slots_actual_values, user_action) - user_response_info = cls._user_action2text(intent2slots2text, action_for_text, slots_used_values) - user_utter = {"speaker": cls._USER_SPEAKER_ID, - "text": user_response_info["text"], - "dialog_acts": [{"act": user_action, "slots": user_response_info["slots"]}], - "slots to exclude": slots_to_exclude} - return user_utter + possible_user_response_infos = cls._user_action2text(intent2slots2text, action_for_text, slots_used_values) + possible_user_utters = [] + for user_response_info in possible_user_response_infos: + user_utter = {"speaker": cls._USER_SPEAKER_ID, + "text": user_response_info["text"], + "dialog_acts": [{"act": user_action, "slots": user_response_info["slots"]}], + "slots to exclude": slots_to_exclude} + possible_user_utters.append(user_utter) + return possible_user_utters @staticmethod def _choose_slots_for_whom_exists_text(intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], @@ -456,23 +482,24 @@ def _clarify_slots_values(slot_name2text2value: Dict[str, Dict[str, Any]], return slots_key @staticmethod - def _parse_user_intent(line: str) -> Tuple[str, List[List]]: + def _parse_user_intent(line: str, ignore_slots=False) -> Tuple[str, List[List]]: intent = line.strip('*').strip() if '{' not in intent: intent = intent + "{}" # the prototypical intent is "intent_name{slot1: value1, slotN: valueN}" user_action, slots_info = intent.split('{', 1) slots_info = json.loads('{' + slots_info) slots_dstc2formatted = [[slot_name, slot_value] for slot_name, slot_value in slots_info.items()] + if ignore_slots: + slots_dstc2formatted = dict() return user_action, slots_dstc2formatted @staticmethod def _user_action2text(intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], user_action: str, - slots_li: Optional[SLOT2VALUE_PAIRS_TUPLE] = None) -> str: + slots_li: Optional[SLOT2VALUE_PAIRS_TUPLE] = None) -> List[str]: if slots_li is None: 
slots_li = tuple() - res = intent2slots2text[user_action][slots_li][0] - return res + return intent2slots2text[user_action][slots_li] @staticmethod def _system_action2text(domain_knowledge: DomainKnowledge, system_action: str) -> str: From 75866b1b4fe27dcab1aad2d36f3a55dd80c2c0e9 Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 20 Nov 2020 05:29:16 +0300 Subject: [PATCH 022/151] fix: append -> extend --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index bb47f6390d..4dc29f000c 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -327,15 +327,15 @@ def process_story_line(line): for curr_story_utters in curr_story_utters_batch: for user_utter in possible_extensions: new_curr_story_utters = curr_story_utters.copy() - new_curr_story_utters.append(user_utter) + new_curr_story_utters.extend(user_utter) new_curr_story_utters_batch.append(new_curr_story_utters) curr_story_utters_batch = new_curr_story_utters_batch # curr_story_utters.extend(process_story_line(line)) story_file.close() if not nonlocal_curr_story_bad: - stories_parsed[curr_story_title] = curr_story_utters - stories_parsed.pop(None) + for curr_story_utters_ix, curr_story_utters in enumerate(curr_story_utters_batch): + stories_parsed[curr_story_title + f"_{curr_story_utters_ix}"] = curr_story_utters tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w', encoding="utf-8") for story_id, story in stories_parsed.items(): From 0921bbfb7540c801277b5e1b67417110351533c4 Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 26 Nov 2020 06:14:31 -0800 Subject: [PATCH 023/151] wip forms debugging --- .../dataset_readers/md_yaml_dialogs_reader.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 4dc29f000c..dfff65ccf8 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -288,9 +288,21 @@ def parse_form_name(story_line): if system_action_name.startswith("form"): form_name = parse_form_name(system_action_name) augmented_utters = cls.augment_form(form_name, domain_knowledge, intent2slots2text) - utters_to_append_batch = [[processed_line for processed_line in processed_line_batch] - for aug_line in augmented_utters - for processed_line_batch in process_story_line(aug_line)] + + utters_to_append_batch = [[]] + for user_utter in augmented_utters: + new_curr_story_utters_batch = [[]] + for curr_story_utters in utters_to_append_batch: + possible_extensions = process_story_line(user_utter) + for possible_extension in possible_extensions: + new_curr_story_utters = curr_story_utters.copy() + new_curr_story_utters.extend(possible_extension) + new_curr_story_utters_batch.append(new_curr_story_utters) + utters_to_append_batch = new_curr_story_utters_batch + + # utters_to_append_batch = [[processed_line for processed_line in processed_line_batch] + # for aug_line in augmented_utters + # for processed_line_batch in process_story_line(aug_line)] else: utters_to_append_batch = [[system_action]] return utters_to_append_batch From e68e912e9f10d32c02e980e61d84363bcbcdd7e9 Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 26 Nov 2020 06:55:41 -0800 Subject: [PATCH 024/151] fix: forms 
augmentation was poorly handled --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index dfff65ccf8..91c613428f 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -291,7 +291,7 @@ def parse_form_name(story_line): utters_to_append_batch = [[]] for user_utter in augmented_utters: - new_curr_story_utters_batch = [[]] + new_curr_story_utters_batch = [] for curr_story_utters in utters_to_append_batch: possible_extensions = process_story_line(user_utter) for possible_extension in possible_extensions: @@ -320,6 +320,8 @@ def process_story_line(line): story_file = open(story_fpath) for line in story_file: line = line.strip() + if not line: + continue if line.startswith('#'): # #... marks the beginning of new story if curr_story_utters_batch and curr_story_utters_batch[0] and curr_story_utters_batch[0][-1]["speaker"] == cls._USER_SPEAKER_ID: From 65e4b284bbc51a9117929b185904c6a96f3df8d6 Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 27 Nov 2020 04:03:28 -0800 Subject: [PATCH 025/151] fix: do not load formfilling info when no rasa formfilling data provided --- deeppavlov/models/go_bot/tracker/featurized_tracker.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index cb8ed96951..01c4c5ff03 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -159,6 +159,9 @@ def _load_actions2slots_formfilling_info_from(self, Returns: the dictionary represented by the passed json """ + if domain_yml_path is None or stories_yml_path is None: + return {}, {} + domain_yml_path = expand_path(domain_yml_path) domain_knowledge: DomainKnowledge = DomainKnowledge.from_yaml(domain_yml_path) potential_api_or_db_actions = domain_knowledge.known_actions From 2d7896b7bad06635e5812c85c3a15f7a12585e4d Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 30 Nov 2020 13:35:17 +0300 Subject: [PATCH 026/151] fix: added field initialization in constructor --- deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index f9c64cf605..33d0a8b56a 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -53,6 +53,7 @@ def __init__(self, self.n_actions = n_actions self.api_call_id = api_call_id self.ffill_act_ids2req_slots_ids: Dict[int, List[int]] = dict() + self.ffill_act_ids2aqd_slots_ids: Dict[int, List[int]] = dict() self.reset_state() @staticmethod From b2ef5c7d8a5658537e39dbc1485b251c26109671 Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 30 Nov 2020 14:13:16 +0300 Subject: [PATCH 027/151] added docstrings and type hints --- .../dataset_readers/md_yaml_dialogs_reader.py | 143 ++++++++++++++++-- 1 file changed, 133 insertions(+), 10 deletions(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 91c613428f..e981a8f9ba 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -248,7 
+248,15 @@ def _read_story(cls, curr_story_utters_batch = [] nonlocal_curr_story_bad = False # can be modified as a nonlocal variable - def process_user_utter(line): + def process_user_utter(line: str) -> List[List[Dict[str, Any]]]: + """ + given the stories.md user line, returns the batch of all the dstc2 ways to represent it + Args: + line: the system line to generate dstc2 versions for + + Returns: + all the possible dstc2 versions of the passed story line + """ nonlocal intent2slots2text, slot_name2text2value, curr_story_utters_batch, nonlocal_curr_story_bad try: possible_user_utters = cls.augment_user_turn(intent2slots2text, line, slot_name2text2value) @@ -268,7 +276,15 @@ def process_user_utter(line): utters_to_append_batch = [] return utters_to_append_batch - def process_system_utter(line): + def process_system_utter(line: str) -> List[List[Dict[str, Any]]]: + """ + given the stories.md system line, returns the batch of all the dstc2 ways to represent it + Args: + line: the system line to generate dstc2 versions for + + Returns: + all the possible dstc2 versions of the passed story line + """ nonlocal intent2slots2text, domain_knowledge, curr_story_utters_batch, nonlocal_curr_story_bad system_action = cls.parse_system_turn(domain_knowledge, line) system_action_name = system_action.get("dialog_acts")[0].get("act") @@ -307,7 +323,15 @@ def parse_form_name(story_line): utters_to_append_batch = [[system_action]] return utters_to_append_batch - def process_story_line(line): + def process_story_line(line: str) -> List[List[Dict[str, Any]]]: + """ + given the stories.md line, returns the batch of all the dstc2 ways to represent it + Args: + line: the line to generate dstc2 versions + + Returns: + all the possible dstc2 versions of the passed story line + """ if line.startswith('*'): utters_to_extend_with_batch = process_user_utter(line) elif line.startswith('-'): @@ -371,26 +395,58 @@ def process_story_line(line): return gobot_formatted_stories @classmethod - def augment_form(cls, form_name:str, domain_knowledge: DomainKnowledge, intent2slots2text: Dict): + def augment_form(cls, form_name: str, domain_knowledge: DomainKnowledge, intent2slots2text: Dict) -> List[str]: + """ + Replaced the form mention in stories.md with the actual turns relevant to the form + Args: + form_name: the name of form to generate turns for + domain_knowledge: the domain knowledge (see domain.yml in RASA) relevant to the processed config + intent2slots2text: the mapping of intents and particular slots onto text + + Returns: + the story turns relevant to the passed form + """ form = domain_knowledge.forms[form_name] # todo handle keyerr augmended_story = [] for slot_name, slot_info_li in form.items(): if slot_info_li and slot_info_li[0].get("type", '') == "from_entity": - # we only handle this sort of slots + # we only handle from_entity slots known_responses = list(domain_knowledge.response_templates) known_intents = list(intent2slots2text.keys()) augmended_story.extend(cls.augment_slot(known_responses, known_intents, slot_name, form_name)) return augmended_story @classmethod - def augment_slot(cls, known_responses: List[str], known_intents: List[str], slot_name: str, form_name: str): + def augment_slot(cls, known_responses: List[str], known_intents: List[str], slot_name: str, form_name: str) \ + -> List[str]: + """ + Given the slot name, generates a sequence of system turn asking for a slot and user' turn providing this slot + + Args: + known_responses: responses known to the system from domain.yml + 
known_intents: intents known to the system from domain.yml + slot_name: the name of the slot to augment for + form_name: the name of the form for which the turn is augmented + + Returns: + the list of stories.md alike turns + """ ask_slot_act_name = cls.get_augmented_ask_slot_utter(form_name, known_responses, slot_name) inform_slot_user_utter = cls.get_augmented_ask_intent_utter(known_intents, slot_name) return [f"- {ask_slot_act_name}", f"* {inform_slot_user_utter}"] @classmethod - def get_augmented_ask_intent_utter(cls, known_intents, slot_name): + def get_augmented_ask_intent_utter(cls, known_intents: List[str], slot_name: str) -> Optional[str]: + """ + if the system knows the inform_{slot} intent, return this intent name, otherwise return None + Args: + known_intents: intents known to the system + slot_name: the slot to look inform intent for + + Returns: + the slot informing intent or None + """ inform_slot_user_utter_hypothesis = f"inform_{slot_name}" if inform_slot_user_utter_hypothesis in known_intents: inform_slot_user_utter = inform_slot_user_utter_hypothesis @@ -401,7 +457,17 @@ def get_augmented_ask_intent_utter(cls, known_intents, slot_name): return inform_slot_user_utter @classmethod - def get_augmented_ask_slot_utter(cls, form_name, known_responses, slot_name): + def get_augmented_ask_slot_utter(cls, form_name: str, known_responses: List[str], slot_name: str): + """ + if the system knows the ask_{slot} action, return this action name, otherwise return None + Args: + form_name: the name of the currently processed form + known_responses: actions known to the system + slot_name: the slot to look asking action for + + Returns: + the slot asking action or None + """ ask_slot_act_name_hypothesis1 = f"utter_ask_{form_name}_{slot_name}" ask_slot_act_name_hypothesis2 = f"utter_ask_{slot_name}" if ask_slot_act_name_hypothesis1 in known_responses: @@ -424,7 +490,16 @@ def last_turn_is_systems_turn(cls, curr_story_utters): return curr_story_utters and curr_story_utters[-1]["speaker"] == cls._SYSTEM_SPEAKER_ID @classmethod - def parse_system_turn(cls, domain_knowledge, line): + def parse_system_turn(cls, domain_knowledge: DomainKnowledge, line: str) -> Dict: + """ + Given the RASA stories.md line, returns the dstc2-formatted json (dict) for this line + Args: + domain_knowledge: the domain knowledge relevant to the processed stories config (from which line is taken) + line: the story system step representing line from stories.md + + Returns: + the dstc2-formatted passed turn + """ # system actions are started in dataset with - system_action_name = line.strip('-').strip() curr_action_text = cls._system_action2text(domain_knowledge, system_action_name) @@ -436,7 +511,17 @@ def parse_system_turn(cls, domain_knowledge, line): return system_action @classmethod - def augment_user_turn(cls, intent2slots2text, line, slot_name2text2value): + def augment_user_turn(cls, intent2slots2text, line: str, slot_name2text2value) -> List[Dict[str: Any]]: + """ + given the turn information generate all the possible stories representing it + Args: + intent2slots2text: the intents and slots to natural language utterances mapping known to the system + line: the line representing used utterance in stories.md format + slot_name2text2value: the slot names to values mapping known o the system + + Returns: + the batch of all the possible dstc2 representations of the passed intent + """ # user actions are started in dataset with * user_action, slots_dstc2formatted = cls._parse_user_intent(line) slots_actual_values = 
cls._clarify_slots_values(slot_name2text2value, slots_dstc2formatted) @@ -457,6 +542,16 @@ def augment_user_turn(cls, intent2slots2text, line, slot_name2text2value): def _choose_slots_for_whom_exists_text(intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], slots_actual_values: SLOT2VALUE_PAIRS_TUPLE, user_action: str) -> Tuple[List, SLOT2VALUE_PAIRS_TUPLE, str]: + """ + + Args: + intent2slots2text: the mapping of intents and slots to natural language utterances representing them + slots_actual_values: the slot values information to look utterance for + user_action: the intent to look utterance for + + Returns: + the slots ommitted to find an NLU candidate, the slots represented in the candidate, the intent name used + """ possible_keys = [k for k in intent2slots2text.keys() if user_action in k] possible_keys = possible_keys + [user_action] possible_keys = sorted(possible_keys, key=lambda action_s: action_s.count('+')) @@ -497,6 +592,15 @@ def _clarify_slots_values(slot_name2text2value: Dict[str, Dict[str, Any]], @staticmethod def _parse_user_intent(line: str, ignore_slots=False) -> Tuple[str, List[List]]: + """ + Given the intent line in RASA stories.md format, return the name of the intent and slots described with this line + Args: + line: the line to parse + ignore_slots: whether to ignore slots information + + Returns: + the pair of the intent name and slots ([[slot name, slot value],.. ]) info + """ intent = line.strip('*').strip() if '{' not in intent: intent = intent + "{}" # the prototypical intent is "intent_name{slot1: value1, slotN: valueN}" @@ -511,12 +615,31 @@ def _parse_user_intent(line: str, ignore_slots=False) -> Tuple[str, List[List]]: def _user_action2text(intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], user_action: str, slots_li: Optional[SLOT2VALUE_PAIRS_TUPLE] = None) -> List[str]: + """ + given the user intent, return the text representing this intent with passed slots + Args: + intent2slots2text: the mapping of intents and slots to natural language utterances + user_action: the name of intent to generate text for + slots_li: the slot values to provide + + Returns: + the text of utterance relevant to the passed intent and slots + """ if slots_li is None: slots_li = tuple() return intent2slots2text[user_action][slots_li] @staticmethod def _system_action2text(domain_knowledge: DomainKnowledge, system_action: str) -> str: + """ + given the system action name return the relevant template text + Args: + domain_knowledge: the domain knowledge relevant to the currently processed config + system_action: the name of the action to get intent for + + Returns: + template relevant to the passed action + """ possible_system_responses = domain_knowledge.response_templates.get(system_action, [{"text": system_action}]) From af0d8e8c03beb566bf472efe49478f91abedf1de Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 30 Nov 2020 14:16:55 +0300 Subject: [PATCH 028/151] fix: add newline to the end of the file --- deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py index a4c404a41b..9fd0113114 100644 --- a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py +++ b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py @@ -148,4 +148,4 @@ def known_actions(self) -> List: Returns: the list of actions known to the NLG module """ - return list(self.action_tuples2ids.keys()) \ No newline 
at end of file + return list(self.action_tuples2ids.keys()) From 8e2954dec238d25041505c52d0b7b5170ac2e9ff Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 30 Nov 2020 14:20:41 +0300 Subject: [PATCH 029/151] added docstrings and type hints --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index e981a8f9ba..67dd64ee62 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -294,7 +294,15 @@ def process_system_utter(line: str) -> List[List[Dict[str, Any]]]: # deal with consecutive system actions by inserting the last user replics in between curr_story_utters.append(cls.get_last_users_turn(curr_story_utters)) - def parse_form_name(story_line): + def parse_form_name(story_line: str) -> str: + """ + if the line (in stories.md utterance format) contains a form name, return it + Args: + story_line: line to extract form name from + + Returns: + the extracted form name or None if no form name found + """ form_name = None if story_line.startswith("form"): form_di = json.loads(story_line[len("form"):]) From a4bdb7335f7e99bb868fa5f22a6c953d24c0f5d6 Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 30 Nov 2020 14:22:59 +0300 Subject: [PATCH 030/151] added docstrings and type hints --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 67dd64ee62..35ddf2db8d 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -489,7 +489,15 @@ def get_augmented_ask_slot_utter(cls, form_name: str, known_responses: List[str] return ask_slot_act_name @classmethod - def get_last_users_turn(cls, curr_story_utters): + def get_last_users_turn(cls, curr_story_utters: List[Dict]) -> Dict: + """ + Given the dstc2 story, return the last user utterance from it + Args: + curr_story_utters: the dstc2-formatted stoyr + + Returns: + the last user utterance from the passed story + """ *_, last_user_utter = filter(lambda x: x["speaker"] == cls._USER_SPEAKER_ID, curr_story_utters) return last_user_utter From 5fabd3171a846a59ef5a29e0f55280752ec31883 Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 30 Nov 2020 14:26:47 +0300 Subject: [PATCH 031/151] added docstrings and type hints --- .../models/go_bot/tracker/dialogue_state_tracker.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 33d0a8b56a..9eabb71383 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -84,6 +84,17 @@ def from_gobot_params(parent_tracker: FeaturizedTracker, @staticmethod def extract_reqiured_acquired_slots_ids_mapping(act2act_id, slot_names, nlg_manager, parent_tracker): + """ + get the required and acquired slots information for each known action in the -Hot Encoding form + Args: + act2act_id: the mapping of actions onto their ids + slot_names: the names of slots known to the tracker + nlg_manager: the NLG manager used in system + parent_tracker: the tracker to take required and acquired slots information from + + 
Returns: + the dicts providing np.array masks of required and acquired slots for each known action + """ action_id2aqd_slots_ids = dict() # aqd stands for acquired action_id2req_slots_ids = dict() for act in nlg_manager.known_actions(): From df5b8d88c063154b0fd64471aab0f7dfaae35633 Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 30 Nov 2020 14:29:18 +0300 Subject: [PATCH 032/151] added docstrings and type hints --- deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 9eabb71383..61280d17fb 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -83,7 +83,10 @@ def from_gobot_params(parent_tracker: FeaturizedTracker, return dialogue_state_tracker @staticmethod - def extract_reqiured_acquired_slots_ids_mapping(act2act_id, slot_names, nlg_manager, parent_tracker): + def extract_reqiured_acquired_slots_ids_mapping(act2act_id: Dict, + slot_names: List, + nlg_manager: NLGManagerInterface, + parent_tracker: FeaturizedTracker) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]: """ get the required and acquired slots information for each known action in the -Hot Encoding form Args: From 7a00be1b7bd5b3bb49371804dce15d3818ebad03 Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 30 Nov 2020 14:31:44 +0300 Subject: [PATCH 033/151] added docstrings and type hints --- deeppavlov/models/go_bot/tracker/featurized_tracker.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index 01c4c5ff03..552420cc05 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -171,7 +171,15 @@ def _load_actions2slots_formfilling_info_from(self, # todo migrate to rasa2.0 # stories_yml_path = expand_path(stories_yml_path) # stories_yml_di = read_yaml(stories_yml_path) - def read_md_story(story_path): + def read_md_story(story_path: Union[Path, str]) -> Dict[str, List[Dict]]: + """ + given the path to stories.md naively read steps from it. 
ToDo use MDYAML reader + Args: + story_path: the path to stories.md + + Returns: + the dict containing info on all the stories used + """ story_f = open(story_path, 'r') stories_li = [] curr_story = None From 59603424e000547e1779dd16db855f618dbd972c Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 30 Nov 2020 14:33:15 +0300 Subject: [PATCH 034/151] added docstrings and type hints --- deeppavlov/models/go_bot/tracker/featurized_tracker.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index 552420cc05..c40462ede5 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -227,6 +227,15 @@ def read_md_story(story_path: Union[Path, str]) -> Dict[str, List[Dict]]: prev_forms = [] def get_slots(system_utter, form_name): + """ + Given the utterance story line, extract slots information from it + Args: + system_utter: the utterance story line + form_name: the form we are filling + + Returns: + the slots extracted from the line + """ slots = [] if system_utter.startswith(f"utter_ask_{form_name}_"): slots.append(system_utter[len(f"utter_ask_{form_name}_"):]) From 13e6d7b5366d4f294b8fb43e23c8157c08f2c76e Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 30 Nov 2020 14:33:42 +0300 Subject: [PATCH 035/151] added docstrings and type hints --- deeppavlov/models/go_bot/tracker/featurized_tracker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index c40462ede5..e0d13c43f5 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -226,7 +226,7 @@ def read_md_story(story_path: Union[Path, str]) -> Dict[str, List[Dict]]: action2forms[curr_action] = prev_forms prev_forms = [] - def get_slots(system_utter, form_name): + def get_slots(system_utter: str, form_name: str) -> List[str]: """ Given the utterance story line, extract slots information from it Args: From 954f14016426e2fe744fe6690de349e94ea41007 Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 30 Nov 2020 14:34:53 +0300 Subject: [PATCH 036/151] added docstrings and type hints --- deeppavlov/models/go_bot/tracker/featurized_tracker.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index e0d13c43f5..c35d89ef7a 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -258,7 +258,15 @@ def get_slots(system_utter: str, form_name: str) -> List[str]: for act, forms in action2forms.items()} return actions2required_slots, actions2acquired_slots - def _get_form_acquired_slots(self, form): + def _get_form_acquired_slots(self, form: Dict) -> List[str]: + """ + given the form, return the slots that are acquired with this form + Args: + form: form to extract acquired slots from + + Returns: + the slots acquired from the passed form + """ acquired_slots = [slot_name for slot_name, slot_info_li in form.items() if slot_info_li and slot_info_li[0].get("type", '') == "from_entity"] From 44cf9ae49d24ebd6bf4dacd375271e665dea7d5c Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 30 Nov 2020 03:59:26 -0800 Subject: [PATCH 037/151] fix typehint typo --- 
 deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py
index 35ddf2db8d..fb5e28ec83 100644
--- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py
+++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py
@@ -527,7 +527,7 @@ def parse_system_turn(cls, domain_knowledge: DomainKnowledge, line: str) -> Dict
         return system_action
 
     @classmethod
-    def augment_user_turn(cls, intent2slots2text, line: str, slot_name2text2value) -> List[Dict[str: Any]]:
+    def augment_user_turn(cls, intent2slots2text, line: str, slot_name2text2value) -> List[Dict[str, Any]]:
         """
         given the turn information generate all the possible stories representing it
         Args:

From e625c090aadf432d6038b84025b4cb87dae48050 Mon Sep 17 00:00:00 2001
From: oserikov
Date: Thu, 10 Dec 2020 06:38:54 +0300
Subject: [PATCH 038/151] fix: google style docstring

---
 deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py
index fb5e28ec83..d1ebd5007c 100644
--- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py
+++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py
@@ -50,8 +50,10 @@ def __init__(self, domain_knowledge_di: Dict):
     def from_yaml(cls, domain_yml_fpath: Union[str, Path] = "domain.yml"):
         """
         Parses domain.yml domain config file into the DomainKnowledge object
-        :param domain_yml_fpath: path to the domain config file, defaults to domain.yml
-        :return: the loaded DomainKnowledge obect
+        Args:
+            domain_yml_fpath: path to the domain config file, defaults to domain.yml
+        Returns:
+            the loaded DomainKnowledge obect
         """
         return cls(read_yaml(domain_yml_fpath))

From 3d43f9eb4de40b631f8143542495915b98fe4156 Mon Sep 17 00:00:00 2001
From: oserikov
Date: Thu, 10 Dec 2020 06:39:50 +0300
Subject: [PATCH 039/151] remove no-longer-needed comment

---
 deeppavlov/models/go_bot/policy/policy_network.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deeppavlov/models/go_bot/policy/policy_network.py b/deeppavlov/models/go_bot/policy/policy_network.py
index 0cfcf56a74..1e3483203d 100644
--- a/deeppavlov/models/go_bot/policy/policy_network.py
+++ b/deeppavlov/models/go_bot/policy/policy_network.py
@@ -217,7 +217,7 @@ def digitize_features(self,
                           tracker_knowledge: DSTKnowledge) -> DigitizedPolicyFeatures:
         attn_key = self.calc_attn_key(nlu_response, tracker_knowledge)
         concat_feats = self.stack_features(nlu_response, tracker_knowledge)
-        action_mask = tracker_knowledge.action_mask  # self.calc_action_mask(tracker_knowledge)
+        action_mask = tracker_knowledge.action_mask
 
         return DigitizedPolicyFeatures(attn_key, concat_feats, action_mask)

From 6355b561dbffe0e881843b34055baa40a4fa80c0 Mon Sep 17 00:00:00 2001
From: oserikov
Date: Thu, 10 Dec 2020 06:42:12 +0300
Subject: [PATCH 040/151] fix: add typehint for returned objects

---
 deeppavlov/models/go_bot/nlg/nlg_manager.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deeppavlov/models/go_bot/nlg/nlg_manager.py b/deeppavlov/models/go_bot/nlg/nlg_manager.py
index 74f393aa17..dbceb122f3 100644
--- a/deeppavlov/models/go_bot/nlg/nlg_manager.py
+++ b/deeppavlov/models/go_bot/nlg/nlg_manager.py
@@ -107,7 +107,7 @@ def num_of_known_actions(self) -> int:
         """
         return len(self.templates)
 
-    def known_actions(self) -> List:
+    def known_actions(self) -> List[str]:
         """
         Returns:
             the list of actions known to the NLG module

From 445d53dfe88efed5cc06bf0d906fd04adb2f856e Mon Sep 17 00:00:00 2001
From: oserikov
Date: Thu, 10 Dec 2020 06:46:50 +0300
Subject: [PATCH 041/151] fix: remove redundant type checks, add typehint

---
 deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py
index 9fd0113114..655712ab21 100644
--- a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py
+++ b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py
@@ -1,7 +1,7 @@
 import json
 from itertools import combinations
 from pathlib import Path
-from typing import Union, Dict, List
+from typing import Union, Dict, List, Tuple
 
 from deeppavlov.core.commands.utils import expand_path
 from deeppavlov.core.common.errors import ConfigError
@@ -96,7 +96,7 @@ def _load_actions2slots_mapping(actions2slots_json_path) -> Dict[str, str]:
                   f"initialized actions2slots mapping with an empty one: {str(actions2slots)}")
         return actions2slots
 
-    def get_action_id(self, action_text: str) -> int:
+    def get_action_id(self, action_text: Union[str, Tuple[str, ...]]) -> int:
         """
         Looks up for an ID corresponding to the passed action text.
 
@@ -107,7 +107,7 @@ def get_action_id(self, action_text: str) -> int:
         """
         if isinstance(action_text, str):
             actions_tuple = tuple(action_text.split('+'))
-        elif isinstance(action_text, tuple):
+        else:
             actions_tuple = action_text
         return self.action_tuples2ids[actions_tuple]  # todo unhandled exception when not found

From bf886c5ab0cf0913da7b36d46fa1432841694e18 Mon Sep 17 00:00:00 2001
From: oserikov
Date: Thu, 10 Dec 2020 06:47:21 +0300
Subject: [PATCH 042/151] fix: remove unused import

---
 deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py
index 61280d17fb..9a0cb32c49 100644
--- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py
+++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py
@@ -14,7 +14,7 @@
 
 from logging import getLogger
 from pathlib import Path
-from typing import List, Iterator, Union, Optional, Dict, Tuple, Any
+from typing import List, Union, Optional, Dict, Tuple, Any
 
 import numpy as np
 
From 5f10d5d0cfa2202fdc0a280d9e44ca549370e221 Mon Sep 17 00:00:00 2001
From: oserikov
Date: Thu, 10 Dec 2020 06:49:04 +0300
Subject: [PATCH 043/151] fix: remove commented code

---
 deeppavlov/models/go_bot/tracker/featurized_tracker.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py
index c35d89ef7a..ec1314036b 100644
--- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py
+++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py
@@ -42,7 +42,6 @@ def __init__(self,
         self.stories_path = stories_yml_path
         self.action_names2required_slots, self.action_names2acquired_slots =\
             self._load_actions2slots_formfilling_info_from(domain_yml_path, stories_yml_path)
-        # self._load_actions2slots_formfilling_info_from_json(self.actions_required_acquired_slots_path)
         self.history = []
         self.current_features = None
 
@@ -169,8 +168,6 @@
         form_names = list(forms.keys())
 
         # todo migrate to rasa2.0
-        # stories_yml_path 
= expand_path(stories_yml_path) - # stories_yml_di = read_yaml(stories_yml_path) def read_md_story(story_path: Union[Path, str]) -> Dict[str, List[Dict]]: """ given the path to stories.md naively read steps from it. ToDo use MDYAML reader From a5729d37bb09f106b6c690ae9a4a38ec856f22c4 Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 10 Dec 2020 06:49:41 +0300 Subject: [PATCH 044/151] remove unused error object --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index d1ebd5007c..29f65ff039 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -271,7 +271,7 @@ def process_user_utter(line: str) -> List[List[Dict[str, Any]]]: for user_utter in possible_user_utters: utters_to_append_batch.append([user_utter]) - except KeyError as e: + except KeyError: log.debug(f"INSIDE MLU_MD_DialogsDatasetReader._read_story(): " f"Skipping story w. line {line} because of no NLU candidates found") nonlocal_curr_story_bad = True From 89a4f4189e1f89770b007fa809d434d5d9583a6c Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 10 Dec 2020 18:05:14 +0300 Subject: [PATCH 045/151] remove redundant comment --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 29f65ff039..29a1b3f699 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -325,10 +325,6 @@ def parse_form_name(story_line: str) -> str: new_curr_story_utters.extend(possible_extension) new_curr_story_utters_batch.append(new_curr_story_utters) utters_to_append_batch = new_curr_story_utters_batch - - # utters_to_append_batch = [[processed_line for processed_line in processed_line_batch] - # for aug_line in augmented_utters - # for processed_line_batch in process_story_line(aug_line)] else: utters_to_append_batch = [[system_action]] return utters_to_append_batch From 60d32183c3b0f7a208dbebac08a85061e915885e Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 18 Dec 2020 05:42:45 +0300 Subject: [PATCH 046/151] wip: rulebased gobot system --- .../models/go_bot/dto/shared_gobot_params.py | 16 +++- deeppavlov/models/go_bot/go_bot.py | 19 +++-- .../models/go_bot/policy/policy_network.py | 40 +++++++++- .../go_bot/tracker/dialogue_state_tracker.py | 79 +++++++++++++++++++ 4 files changed, 144 insertions(+), 10 deletions(-) diff --git a/deeppavlov/models/go_bot/dto/shared_gobot_params.py b/deeppavlov/models/go_bot/dto/shared_gobot_params.py index 0472c37333..9cac390437 100644 --- a/deeppavlov/models/go_bot/dto/shared_gobot_params.py +++ b/deeppavlov/models/go_bot/dto/shared_gobot_params.py @@ -1,4 +1,4 @@ -from deeppavlov.models.go_bot.nlu.nlu_manager import NLUManagerInterface +from deeppavlov.models.go_bot.nlu.nlu_manager import NLUManagerInterface, NLUManager from deeppavlov.models.go_bot.nlg.nlg_manager import NLGManagerInterface from deeppavlov.models.go_bot.tracker.featurized_tracker import FeaturizedTracker @@ -22,3 +22,17 @@ def from_configured(nlg_manager: NLGManagerInterface, nlu_manager: NLUManagerInt return SharedGoBotParams(nlg_manager.num_of_known_actions(), nlu_manager.num_of_known_intents(), tracker.num_features) + +class MemorizingGoBotParams(SharedGoBotParams): + 
intent_ids2intents: dict + intents2intent_ids: dict + + @staticmethod + def from_configured(nlg_manager: NLGManagerInterface, nlu_manager: NLUManager, tracker: FeaturizedTracker): + """builds the params object given some GO-bot units that are already configured""" + res = SharedGoBotParams(nlg_manager.num_of_known_actions(), + nlu_manager.num_of_known_intents(), + tracker.num_features) + res.intent_ids2intents = dict(enumerate(nlu_manager.intents)) + res.intents2intent_ids = {v:k for k, v in res.intent_ids2intents.items()} + return res \ No newline at end of file diff --git a/deeppavlov/models/go_bot/go_bot.py b/deeppavlov/models/go_bot/go_bot.py index ce47cf2577..097737734c 100644 --- a/deeppavlov/models/go_bot/go_bot.py +++ b/deeppavlov/models/go_bot/go_bot.py @@ -25,13 +25,14 @@ from deeppavlov.models.go_bot.nlu.tokens_vectorizer import TokensVectorizer from deeppavlov.models.go_bot.dto.dataset_features import UtteranceDataEntry, DialogueDataEntry, \ BatchDialoguesDataset, UtteranceFeatures, UtteranceTarget, BatchDialoguesFeatures -from deeppavlov.models.go_bot.dto.shared_gobot_params import SharedGoBotParams +from deeppavlov.models.go_bot.dto.shared_gobot_params import SharedGoBotParams, MemorizingGoBotParams from deeppavlov.models.go_bot.nlg.nlg_manager import NLGManagerInterface from deeppavlov.models.go_bot.nlu.nlu_manager import NLUManager from deeppavlov.models.go_bot.policy.policy_network import PolicyNetwork, PolicyNetworkParams from deeppavlov.models.go_bot.policy.dto.policy_prediction import PolicyPrediction from deeppavlov.models.go_bot.tracker.featurized_tracker import FeaturizedTracker -from deeppavlov.models.go_bot.tracker.dialogue_state_tracker import DialogueStateTracker, MultipleUserStateTrackersPool +from deeppavlov.models.go_bot.tracker.dialogue_state_tracker import DialogueStateTracker, MultipleUserStateTrackersPool, \ + MemorizingDialogueStateTracker from pathlib import Path log = getLogger(__name__) @@ -138,19 +139,21 @@ def __init__(self, self.data_handler = TokensVectorizer(debug, word_vocab, bow_embedder, embedder) # todo make mor abstract - self.dialogue_state_tracker = DialogueStateTracker.from_gobot_params(tracker, self.nlg_manager, - policy_network_params, database) + self.dialogue_state_tracker = MemorizingDialogueStateTracker.from_gobot_params(tracker, + self.nlu_manager, + self.nlg_manager, + policy_network_params, database) # todo make mor abstract self.multiple_user_state_tracker = MultipleUserStateTrackersPool(base_tracker=self.dialogue_state_tracker) tokens_dims = self.data_handler.get_dims() - features_params = SharedGoBotParams.from_configured(self.nlg_manager, self.nlu_manager, - self.dialogue_state_tracker) + features_params = MemorizingGoBotParams.from_configured(self.nlg_manager, self.nlu_manager, + self.dialogue_state_tracker) policy_save_path = Path(save_path, self.POLICY_DIR_NAME) policy_load_path = Path(load_path, self.POLICY_DIR_NAME) - self.policy = PolicyNetwork(policy_network_params, tokens_dims, features_params, - policy_load_path, policy_save_path, **kwargs) + self.policy = MemorizingPolicy(policy_network_params, tokens_dims, features_params, + policy_load_path, policy_save_path, **kwargs) self.dialogues_cached_features = dict() diff --git a/deeppavlov/models/go_bot/policy/policy_network.py b/deeppavlov/models/go_bot/policy/policy_network.py index 1e3483203d..9f6dc86d6a 100644 --- a/deeppavlov/models/go_bot/policy/policy_network.py +++ b/deeppavlov/models/go_bot/policy/policy_network.py @@ -18,7 +18,7 @@ from 
deeppavlov.models.go_bot.dto.dataset_features import BatchDialoguesFeatures, BatchDialoguesTargets # todo -from deeppavlov.models.go_bot.dto.shared_gobot_params import SharedGoBotParams +from deeppavlov.models.go_bot.dto.shared_gobot_params import SharedGoBotParams, MemorizingGoBotParams from deeppavlov.models.go_bot.policy.dto.attn_params import GobotAttnParams from deeppavlov.models.go_bot.policy.dto.digitized_policy_features import DigitizedPolicyFeatures from deeppavlov.models.go_bot.policy.dto.policy_network_params import PolicyNetworkParams @@ -453,3 +453,41 @@ def _save_nn_params(self) -> None: if self.debug: log.debug(f"AFTER {self.__class__.__name__} _save_nn_params()") + +class MemorizingPolicy(PolicyNetwork): + def __init__(self, network_params_passed: PolicyNetworkParams, + tokens_dims: TokensVectorRepresentationParams, + features_params: MemorizingGoBotParams, + load_path, + save_path, + debug=False, + **kwargs): + super().__init__(network_params_passed, tokens_dims, features_params, load_path, save_path, debug, **kwargs) + self.intent_ids2intents = features_params.intent_ids2intents + self.intents2intent_ids = features_params.intents2intent_ids + + def digitize_features(self, + nlu_response: NLUResponse, + tracker_knowledge: DSTKnowledge) -> DigitizedPolicyFeatures: + intent_name = self.intent_ids2intents[nlu_response.intents] + # compute the actuap prediction + concat_feats = intent_name # todo warning!!! do not merge until rewritten !!! + + return DigitizedPolicyFeatures(None, concat_feats, None) + + def __call__(self, batch_dialogues_features: BatchDialoguesFeatures, + states_c: np.ndarray, states_h: np.ndarray, prob: bool = False, + *args, **kwargs) -> PolicyPrediction: + + states_c = [[states_c]] # list of list aka batch of dialogues + states_h = [[states_h]] # list of list aka batch of dialogues + + probs = [np.zeros_like(self.action_size)] * len(batch_dialogues_features) + prediction = [] + for feature in batch_dialogues_features.b_featuress.todo: + # take intent_name + # given the tracker knowledge + + policy_prediction = PolicyPrediction(probs, prediction, states_c, states_h) + + return policy_prediction diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 9a0cb32c49..26bf2fb124 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -221,6 +221,85 @@ def fill_current_state_with_db_results(self) -> dict: return slots +class MemorizingDialogueStateTracker(DialogueStateTracker): + def get_current_knowledge(self) -> DSTKnowledge: + res = super().get_current_knowledge() + res.stories = self.stories + res.stories_ptrs = self.stories_ptrs + return res + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.act2act_id: dict = {} + self.act_id2act: dict = {} + self.stories = self._load_stories(self.stories_path) + self.stories_ptrs = [-1]*len(self.stories) + + + @staticmethod + def from_gobot_params(parent_tracker: FeaturizedTracker, + nlg_manager: NLGManagerInterface, + policy_network_params: PolicyNetworkParams, + database: Component): + slot_names = parent_tracker.slot_names + + # region set formfilling info + act2act_id = {a_text: nlg_manager.get_action_id(a_text) for a_text in nlg_manager.known_actions()} + action_id2aqd_slots_ids, action_id2req_slots_ids = DialogueStateTracker.extract_reqiured_acquired_slots_ids_mapping( + act2act_id, slot_names, nlg_manager, parent_tracker) 
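# A minimal sketch of how the per-action required-slot vectors assembled here can be used
# to gate form-filling actions. The three slots and the mask values below are assumptions
# for illustration only, not taken from the patched configs:
import numpy as np

required_slots = np.array([1., 0., 1.], dtype=np.float32)  # the action needs slots 0 and 2
filled_slots = np.array([1., 1., 0.], dtype=np.float32)    # binary "slot is filled" state
action_is_available = bool(np.all(required_slots * filled_slots == required_slots))
# -> False: slot 2 is still empty, so this action would stay masked out for now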
+ + # todo why so ugly and duplicated in multiple users tracker + dialogue_state_tracker = MemorizingDialogueStateTracker(slot_names, nlg_manager.num_of_known_actions(), + nlg_manager.get_api_call_action_id(), + policy_network_params.hidden_size, + database, + parent_tracker.domain_yml_path, + parent_tracker.stories_path) + + dialogue_state_tracker.ffill_act_ids2req_slots_ids = action_id2req_slots_ids + dialogue_state_tracker.ffill_act_ids2aqd_slots_ids = action_id2aqd_slots_ids + dialogue_state_tracker.act2act_id = act2act_id + dialogue_state_tracker.act_id2act = {v:k for k, v in act2act_id.items()} + + # endregion set formfilling info + return dialogue_state_tracker + + def _load_stories(self, stories_path: Union[Path, str]): + story_lines = [] + with open(stories_path) as stories_f: + + for line in stories_f: + line = line.strip() + if not line: + continue + if line.startswith("##"): + story_lines.append([]) + else: + story_lines[-1].append(line) + stories = [] + for story in story_lines: + story_adj = [] + for turn_ix, turn in enumerate(story): + if turn_ix % 2 == 0: + continue # we iterate over system turns + else: + story_adj.append({ + "utter_needed": story[turn_ix-1].strip(" *"), # todo smwhr exists a special method for this + "action_name": story[turn_ix].strip(" -") # todo smwhr exists a special method for this + }) + stories.append(story_adj) + return stories + + def update_previous_action(self, prev_act_id: int) -> None: + super().update_previous_action(prev_act_id) + act_name = self.act_id2act[prev_act_id] + for ix, (story_ptr, story) in enumerate(zip(self.stories_ptrs, self.stories)): + if story[story_ptr+1]["action_name"] == act_name: + self.stories_ptrs[ix] += 1 + + + + class MultipleUserStateTrackersPool(object): def __init__(self, base_tracker: DialogueStateTracker): self._ids_to_trackers = {} From f9aa2b056fc8f0371e5abdd1c570c43edf79b90a Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 18 Dec 2020 18:56:54 -0800 Subject: [PATCH 047/151] wip rulebased gobot --- .../dataset_readers/md_yaml_dialogs_reader.py | 1 + deeppavlov/models/go_bot/go_bot.py | 6 ++---- deeppavlov/models/go_bot/nlu/nlu_manager.py | 13 ++++++++++-- .../models/go_bot/policy/policy_network.py | 20 ++++++++++++++++--- .../go_bot/tracker/dialogue_state_tracker.py | 13 +++++++----- 5 files changed, 39 insertions(+), 14 deletions(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 29a1b3f699..75d44489e6 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -262,6 +262,7 @@ def process_user_utter(line: str) -> List[List[Dict[str, Any]]]: nonlocal intent2slots2text, slot_name2text2value, curr_story_utters_batch, nonlocal_curr_story_bad try: possible_user_utters = cls.augment_user_turn(intent2slots2text, line, slot_name2text2value) + possible_user_utters = [possible_user_utters[0]] # dialogs MUST start with system replics for curr_story_utters in curr_story_utters_batch: if not curr_story_utters: diff --git a/deeppavlov/models/go_bot/go_bot.py b/deeppavlov/models/go_bot/go_bot.py index 097737734c..924cca7224 100644 --- a/deeppavlov/models/go_bot/go_bot.py +++ b/deeppavlov/models/go_bot/go_bot.py @@ -28,7 +28,7 @@ from deeppavlov.models.go_bot.dto.shared_gobot_params import SharedGoBotParams, MemorizingGoBotParams from deeppavlov.models.go_bot.nlg.nlg_manager import NLGManagerInterface from deeppavlov.models.go_bot.nlu.nlu_manager import NLUManager -from 
deeppavlov.models.go_bot.policy.policy_network import PolicyNetwork, PolicyNetworkParams +from deeppavlov.models.go_bot.policy.policy_network import PolicyNetwork, PolicyNetworkParams, MemorizingPolicy from deeppavlov.models.go_bot.policy.dto.policy_prediction import PolicyPrediction from deeppavlov.models.go_bot.tracker.featurized_tracker import FeaturizedTracker from deeppavlov.models.go_bot.tracker.dialogue_state_tracker import DialogueStateTracker, MultipleUserStateTrackersPool, \ @@ -140,7 +140,6 @@ def __init__(self, # todo make mor abstract self.dialogue_state_tracker = MemorizingDialogueStateTracker.from_gobot_params(tracker, - self.nlu_manager, self.nlg_manager, policy_network_params, database) # todo make mor abstract @@ -278,8 +277,7 @@ def extract_features_from_utterance_text(self, text, tracker, keep_tracker_state tracker: the tracker that tracks the dialogue from which the text is taken keep_tracker_state: if True, the tracker state will not be updated during the prediction. Used to keep tracker's state intact when predicting the action - to perform right after the api call action is predicted and performed. - + to perform right after the api call action is predicted and performed.[jx Returns: the utterance features object containing the numpy-vectorized features extracted from the utterance """ diff --git a/deeppavlov/models/go_bot/nlu/nlu_manager.py b/deeppavlov/models/go_bot/nlu/nlu_manager.py index e18d74b48f..9062ecc4e8 100644 --- a/deeppavlov/models/go_bot/nlu/nlu_manager.py +++ b/deeppavlov/models/go_bot/nlu/nlu_manager.py @@ -2,6 +2,8 @@ from typing import List from deeppavlov import Chainer +from deeppavlov.core.data.simple_vocab import SimpleVocabulary +from deeppavlov.models.bert.bert_classifier import BertClassifierModel from deeppavlov.models.go_bot.nlu.dto.nlu_response import NLUResponse from deeppavlov.models.go_bot.nlu.nlu_manager_interface import NLUManagerInterface @@ -31,7 +33,13 @@ def __init__(self, tokenizer, slot_filler, intent_classifier, debug=False): self.intent_classifier = intent_classifier self.intents = [] if isinstance(self.intent_classifier, Chainer): - self.intents = self.intent_classifier.get_main_component().classes + component = self.intent_classifier.get_main_component() + if isinstance(component, BertClassifierModel): + intent2labeltools = [el[-1] for el in self.intent_classifier.pipe if isinstance(el[-1], SimpleVocabulary)] + if intent2labeltools: + self.intents = intent2labeltools[-1]._i2t + else: + self.intents = component.classes if self.debug: log.debug(f"AFTER {self.__class__.__name__} init(): " @@ -63,7 +71,8 @@ def nlu(self, text: str) -> NLUResponse: def _extract_intents_from_tokenized_text_entry(self, tokens: List[str]): # todo meaningful type hints, relies on unannotated intent classifier - intent_features = self.intent_classifier([' '.join(tokens)])[1][0] + classifier_output = self.intent_classifier([' '.join(tokens)]) + intent_features = classifier_output[1][0] return intent_features def _extract_slots_from_tokenized_text_entry(self, tokens: List[str]): diff --git a/deeppavlov/models/go_bot/policy/policy_network.py b/deeppavlov/models/go_bot/policy/policy_network.py index 9f6dc86d6a..4276e179db 100644 --- a/deeppavlov/models/go_bot/policy/policy_network.py +++ b/deeppavlov/models/go_bot/policy/policy_network.py @@ -469,10 +469,21 @@ def __init__(self, network_params_passed: PolicyNetworkParams, def digitize_features(self, nlu_response: NLUResponse, tracker_knowledge: DSTKnowledge) -> DigitizedPolicyFeatures: - intent_name = 
self.intent_ids2intents[nlu_response.intents] + intent_name = self.intent_ids2intents.get(np.argmax(nlu_response.intents)) # compute the actuap prediction concat_feats = intent_name # todo warning!!! do not merge until rewritten !!! - + possible_actions = [] + for story_ix, (story_ptr, story) in enumerate(zip(tracker_knowledge.stories_ptrs, tracker_knowledge.stories)): + next_action_ptr = story_ptr + 1 + if next_action_ptr < len(story) and story[next_action_ptr]["utter_needed"] == intent_name: + possible_actions.append((story[next_action_ptr]["action_name"], story[next_action_ptr]["action_ix"])) + elif any(ptr > -1 for ptr in tracker_knowledge.stories_ptrs): + tracker_knowledge.stories_ptrs[story_ix] = len(story) # mark this story as no longer accessible + if len(possible_actions) > 2: + log.debug("STORIES: multiple proceedings available, picked the first one") + (action_name, action_ix) = possible_actions[0] if possible_actions else (None, None) + + concat_feats = action_ix return DigitizedPolicyFeatures(None, concat_feats, None) def __call__(self, batch_dialogues_features: BatchDialoguesFeatures, @@ -484,9 +495,12 @@ def __call__(self, batch_dialogues_features: BatchDialoguesFeatures, probs = [np.zeros_like(self.action_size)] * len(batch_dialogues_features) prediction = [] - for feature in batch_dialogues_features.b_featuress.todo: + for feature_ix, feature in enumerate(batch_dialogues_features.b_featuress): # take intent_name # given the tracker knowledge + prediction.extend(feature) + if feature is not None: + probs[feature_ix][feature] = 1. policy_prediction = PolicyPrediction(probs, prediction, states_c, states_h) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 26bf2fb124..ac3c41c582 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -260,7 +260,9 @@ def from_gobot_params(parent_tracker: FeaturizedTracker, dialogue_state_tracker.ffill_act_ids2aqd_slots_ids = action_id2aqd_slots_ids dialogue_state_tracker.act2act_id = act2act_id dialogue_state_tracker.act_id2act = {v:k for k, v in act2act_id.items()} - + for story in dialogue_state_tracker.stories: + for el in story: + el["action_ix"] = dialogue_state_tracker.act2act_id[(el["action_name"],)] # endregion set formfilling info return dialogue_state_tracker @@ -285,7 +287,7 @@ def _load_stories(self, stories_path: Union[Path, str]): else: story_adj.append({ "utter_needed": story[turn_ix-1].strip(" *"), # todo smwhr exists a special method for this - "action_name": story[turn_ix].strip(" -") # todo smwhr exists a special method for this + "action_name": story[turn_ix].strip(" -"), # todo smwhr exists a special method for this }) stories.append(story_adj) return stories @@ -320,7 +322,7 @@ def get_user_tracker(self, user_id: int) -> DialogueStateTracker: def new_tracker(self): # todo deprecated and never used? - tracker = DialogueStateTracker(self.base_tracker.slot_names, self.base_tracker.n_actions, + tracker = MemorizingDialogueStateTracker(self.base_tracker.slot_names, self.base_tracker.n_actions, self.base_tracker.api_call_id, self.base_tracker.hidden_size, self.base_tracker.database) return tracker @@ -334,7 +336,7 @@ def get_or_init_tracker(self, user_id: int): def init_new_tracker(self, user_id: int, tracker_entity: DialogueStateTracker) -> None: # TODO: implement a better way to init a tracker # todo deprecated. 
The whole class should follow AbstractFactory or Pool pattern? - tracker = DialogueStateTracker( + tracker = tracker_entity.__class__( tracker_entity.slot_names, tracker_entity.n_actions, tracker_entity.api_call_id, @@ -345,7 +347,8 @@ def init_new_tracker(self, user_id: int, tracker_entity: DialogueStateTracker) - ) tracker.ffill_act_ids2req_slots_ids = tracker_entity.ffill_act_ids2req_slots_ids tracker.ffill_act_ids2aqd_slots_ids = tracker_entity.ffill_act_ids2aqd_slots_ids - + tracker.act2act_id = tracker_entity.act2act_id + tracker.act_id2act = tracker_entity.act_id2act self._ids_to_trackers[user_id] = tracker def reset(self, user_id: int = None) -> None: From a9ea7042cf80a7c631328c82e13c5330316c407b Mon Sep 17 00:00:00 2001 From: oserikov Date: Sun, 20 Dec 2020 07:07:17 -0800 Subject: [PATCH 048/151] wip rulebased gobot --- .../dataset_readers/md_yaml_dialogs_reader.py | 15 ++++++++++++--- .../models/go_bot/nlg/mock_json_nlg_manager.py | 2 +- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 75d44489e6..84f9dfff5b 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -19,6 +19,8 @@ import tempfile from collections import defaultdict from logging import getLogger +import random + from overrides import overrides from pathlib import Path from typing import Dict, List, Tuple, Union, Any, Optional @@ -90,7 +92,7 @@ def _data_fname(cls, datatype: str) -> str: @classmethod @overrides - def read(cls, data_path: str, dialogs: bool = False, ignore_slots: bool = False) -> Dict[str, List]: + def read(cls, data_path: str, dialogs: bool = False, ignore_slots: bool = False, augment_strategy: str = None) -> Dict[str, List]: """ Parameters: data_path: path to read dataset from @@ -105,6 +107,11 @@ def read(cls, data_path: str, dialogs: bool = False, ignore_slots: bool = False) ``'test'`` field with dialogs from ``'stories-tst.md'``. Each field is a list of tuples ``(x_i, y_i)``. 
""" + if augment_strategy is None: + augment_strategy = "max" + + assert augment_strategy in {"min", "max"} + domain_fname = cls.DOMAIN_FNAME nlu_fname = cls.NLU_FNAME stories_fnames = tuple(cls._data_fname(dt) for dt in cls.VALID_DATATYPES) @@ -127,7 +134,7 @@ def read(cls, data_path: str, dialogs: bool = False, ignore_slots: bool = False) data = {short2long_subsample_name[subsample_name_short]: cls._read_story(Path(data_path, cls._data_fname(subsample_name_short)), dialogs, domain_knowledge, intent2slots2text, slot_name2text2value, - ignore_slots=ignore_slots) + ignore_slots=ignore_slots, augment_strategy=augment_strategy) for subsample_name_short in cls.VALID_DATATYPES} return data @@ -212,6 +219,7 @@ def _read_story(cls, domain_knowledge: DomainKnowledge, intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], slot_name2text2value: Dict[str, Dict[str, str]], + augment_strategy: str, ignore_slots: bool = False) \ -> Union[List[List[Tuple[Dict[str, bool], Dict[str, Any]]]], List[Tuple[Dict[str, bool], Dict[str, Any]]]]: """ @@ -262,7 +270,8 @@ def process_user_utter(line: str) -> List[List[Dict[str, Any]]]: nonlocal intent2slots2text, slot_name2text2value, curr_story_utters_batch, nonlocal_curr_story_bad try: possible_user_utters = cls.augment_user_turn(intent2slots2text, line, slot_name2text2value) - possible_user_utters = [possible_user_utters[0]] + if augment_strategy == "min": + possible_user_utters = random.choices(possible_user_utters) # dialogs MUST start with system replics for curr_story_utters in curr_story_utters_batch: if not curr_story_utters: diff --git a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py index 655712ab21..0c5a7ca99c 100644 --- a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py +++ b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py @@ -74,7 +74,7 @@ def get_api_call_action_id(self) -> int: def _extract_actions_combinations(self, dataset_path: Union[str, Path]): dataset_path = expand_path(dataset_path) - dataset = self._dataset_reader.read(data_path=dataset_path, dialogs=True, ignore_slots=True) + dataset = self._dataset_reader.read(data_path=dataset_path, dialogs=True, ignore_slots=True, augment_strategy="min") actions_combinations = set() for dataset_split in dataset.values(): for dialogue in dataset_split: From b3bf2fa20199ea891d53e7113698a1614e888f44 Mon Sep 17 00:00:00 2001 From: oserikov Date: Sun, 20 Dec 2020 11:34:13 -0800 Subject: [PATCH 049/151] wip rulebased gobot --- .../models/go_bot/policy/policy_network.py | 17 +++++++++++++---- .../go_bot/tracker/dialogue_state_tracker.py | 14 +++++++++++--- 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/deeppavlov/models/go_bot/policy/policy_network.py b/deeppavlov/models/go_bot/policy/policy_network.py index 4276e179db..b24995badb 100644 --- a/deeppavlov/models/go_bot/policy/policy_network.py +++ b/deeppavlov/models/go_bot/policy/policy_network.py @@ -469,7 +469,7 @@ def __init__(self, network_params_passed: PolicyNetworkParams, def digitize_features(self, nlu_response: NLUResponse, tracker_knowledge: DSTKnowledge) -> DigitizedPolicyFeatures: - intent_name = self.intent_ids2intents.get(np.argmax(nlu_response.intents)) + intent_name = "start"# self.intent_ids2intents.get(np.argmax(nlu_response.intents)) # compute the actuap prediction concat_feats = intent_name # todo warning!!! do not merge until rewritten !!! 
possible_actions = [] @@ -493,15 +493,24 @@ def __call__(self, batch_dialogues_features: BatchDialoguesFeatures, states_c = [[states_c]] # list of list aka batch of dialogues states_h = [[states_h]] # list of list aka batch of dialogues - probs = [np.zeros_like(self.action_size)] * len(batch_dialogues_features) + probs = [np.zeros((self.action_size, 1))] * len(batch_dialogues_features) prediction = [] for feature_ix, feature in enumerate(batch_dialogues_features.b_featuress): # take intent_name # given the tracker knowledge prediction.extend(feature) - if feature is not None: - probs[feature_ix][feature] = 1. + if feature is not None and feature: + feature_ = feature[0] + probs[feature_ix][feature_] = 1. policy_prediction = PolicyPrediction(probs, prediction, states_c, states_h) return policy_prediction + + def train_on_batch(self, + batch_dialogues_features: BatchDialoguesFeatures, + batch_dialogues_targets: BatchDialoguesTargets) -> dict: + log.debug("not trainable policy chosen") + return {'loss': 0., + 'learning_rate': self.get_learning_rate(), + 'momentum': self.get_momentum()} \ No newline at end of file diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index ac3c41c582..66092d0c21 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -233,6 +233,7 @@ def __init__(self, *args, **kwargs): self.act2act_id: dict = {} self.act_id2act: dict = {} self.stories = self._load_stories(self.stories_path) + self._setup_action_ixes(self.stories) self.stories_ptrs = [-1]*len(self.stories) @@ -260,12 +261,17 @@ def from_gobot_params(parent_tracker: FeaturizedTracker, dialogue_state_tracker.ffill_act_ids2aqd_slots_ids = action_id2aqd_slots_ids dialogue_state_tracker.act2act_id = act2act_id dialogue_state_tracker.act_id2act = {v:k for k, v in act2act_id.items()} - for story in dialogue_state_tracker.stories: - for el in story: - el["action_ix"] = dialogue_state_tracker.act2act_id[(el["action_name"],)] + dialogue_state_tracker._setup_action_ixes(dialogue_state_tracker.stories) # endregion set formfilling info return dialogue_state_tracker + def _setup_action_ixes(self, stories_lidi): + for story in stories_lidi: + for el in story: + act_name_k = (el["action_name"],) + if act_name_k in self.act2act_id: + el["action_ix"] = self.act2act_id[act_name_k] + def _load_stories(self, stories_path: Union[Path, str]): story_lines = [] with open(stories_path) as stories_f: @@ -349,6 +355,8 @@ def init_new_tracker(self, user_id: int, tracker_entity: DialogueStateTracker) - tracker.ffill_act_ids2aqd_slots_ids = tracker_entity.ffill_act_ids2aqd_slots_ids tracker.act2act_id = tracker_entity.act2act_id tracker.act_id2act = tracker_entity.act_id2act + if isinstance(tracker, MemorizingDialogueStateTracker): + tracker._setup_action_ixes(tracker.stories) self._ids_to_trackers[user_id] = tracker def reset(self, user_id: int = None) -> None: From 467ed7f1e72818da1e60721da756d1c087c800a5 Mon Sep 17 00:00:00 2001 From: oserikov Date: Sun, 20 Dec 2020 12:47:37 -0800 Subject: [PATCH 050/151] wip rulebased gobot --- deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 66092d0c21..90ef87596c 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ 
b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -300,7 +300,7 @@ def _load_stories(self, stories_path: Union[Path, str]): def update_previous_action(self, prev_act_id: int) -> None: super().update_previous_action(prev_act_id) - act_name = self.act_id2act[prev_act_id] + act_name = self.act_id2act[prev_act_id][0] for ix, (story_ptr, story) in enumerate(zip(self.stories_ptrs, self.stories)): if story[story_ptr+1]["action_name"] == act_name: self.stories_ptrs[ix] += 1 From cc3d14773ea61cc0fde47404ee19201cc434a140 Mon Sep 17 00:00:00 2001 From: oserikov Date: Sun, 20 Dec 2020 13:07:45 -0800 Subject: [PATCH 051/151] wip rulebased gobot --- deeppavlov/models/go_bot/policy/policy_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/policy/policy_network.py b/deeppavlov/models/go_bot/policy/policy_network.py index b24995badb..749ba57153 100644 --- a/deeppavlov/models/go_bot/policy/policy_network.py +++ b/deeppavlov/models/go_bot/policy/policy_network.py @@ -469,7 +469,7 @@ def __init__(self, network_params_passed: PolicyNetworkParams, def digitize_features(self, nlu_response: NLUResponse, tracker_knowledge: DSTKnowledge) -> DigitizedPolicyFeatures: - intent_name = "start"# self.intent_ids2intents.get(np.argmax(nlu_response.intents)) + intent_name = self.intent_ids2intents.get(np.argmax(nlu_response.intents)) # compute the actuap prediction concat_feats = intent_name # todo warning!!! do not merge until rewritten !!! possible_actions = [] From 719b6d3f8b2eb5dc6b98c8a7aa392ced054e1f3f Mon Sep 17 00:00:00 2001 From: oserikov Date: Sun, 20 Dec 2020 13:23:11 -0800 Subject: [PATCH 052/151] wip rulebased gobot --- deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 90ef87596c..44f1988a5a 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -302,7 +302,8 @@ def update_previous_action(self, prev_act_id: int) -> None: super().update_previous_action(prev_act_id) act_name = self.act_id2act[prev_act_id][0] for ix, (story_ptr, story) in enumerate(zip(self.stories_ptrs, self.stories)): - if story[story_ptr+1]["action_name"] == act_name: + next_action_ix = story_ptr + 1 + if next_action_ix Date: Thu, 24 Dec 2020 12:39:26 +0300 Subject: [PATCH 053/151] revert: rulebased gobot system --- deeppavlov/models/go_bot/go_bot.py | 16 ++++++++-------- .../go_bot/tracker/dialogue_state_tracker.py | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/deeppavlov/models/go_bot/go_bot.py b/deeppavlov/models/go_bot/go_bot.py index 924cca7224..cb97523c0b 100644 --- a/deeppavlov/models/go_bot/go_bot.py +++ b/deeppavlov/models/go_bot/go_bot.py @@ -139,20 +139,19 @@ def __init__(self, self.data_handler = TokensVectorizer(debug, word_vocab, bow_embedder, embedder) # todo make mor abstract - self.dialogue_state_tracker = MemorizingDialogueStateTracker.from_gobot_params(tracker, - self.nlg_manager, - policy_network_params, database) + self.dialogue_state_tracker = DialogueStateTracker.from_gobot_params(tracker, self.nlg_manager, + policy_network_params, database) # todo make mor abstract self.multiple_user_state_tracker = MultipleUserStateTrackersPool(base_tracker=self.dialogue_state_tracker) tokens_dims = self.data_handler.get_dims() - features_params = 
MemorizingGoBotParams.from_configured(self.nlg_manager, self.nlu_manager, - self.dialogue_state_tracker) + features_params = SharedGoBotParams.from_configured(self.nlg_manager, self.nlu_manager, + self.dialogue_state_tracker) policy_save_path = Path(save_path, self.POLICY_DIR_NAME) policy_load_path = Path(load_path, self.POLICY_DIR_NAME) - self.policy = MemorizingPolicy(policy_network_params, tokens_dims, features_params, - policy_load_path, policy_save_path, **kwargs) + self.policy = PolicyNetwork(policy_network_params, tokens_dims, features_params, + policy_load_path, policy_save_path, **kwargs) self.dialogues_cached_features = dict() @@ -277,7 +276,8 @@ def extract_features_from_utterance_text(self, text, tracker, keep_tracker_state tracker: the tracker that tracks the dialogue from which the text is taken keep_tracker_state: if True, the tracker state will not be updated during the prediction. Used to keep tracker's state intact when predicting the action - to perform right after the api call action is predicted and performed.[jx + to perform right after the api call action is predicted and performed. + Returns: the utterance features object containing the numpy-vectorized features extracted from the utterance """ diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 44f1988a5a..8bf19ade40 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -329,7 +329,7 @@ def get_user_tracker(self, user_id: int) -> DialogueStateTracker: def new_tracker(self): # todo deprecated and never used? - tracker = MemorizingDialogueStateTracker(self.base_tracker.slot_names, self.base_tracker.n_actions, + tracker = DialogueStateTracker(self.base_tracker.slot_names, self.base_tracker.n_actions, self.base_tracker.api_call_id, self.base_tracker.hidden_size, self.base_tracker.database) return tracker From e2d2a597d8f65fd4eb342a4f5194799ff4517527 Mon Sep 17 00:00:00 2001 From: Oleg Serikov Date: Thu, 24 Dec 2020 23:57:32 +0300 Subject: [PATCH 054/151] fix dropout err --- deeppavlov/models/go_bot/policy/policy_network.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deeppavlov/models/go_bot/policy/policy_network.py b/deeppavlov/models/go_bot/policy/policy_network.py index 749ba57153..c92d1bb7fa 100644 --- a/deeppavlov/models/go_bot/policy/policy_network.py +++ b/deeppavlov/models/go_bot/policy/policy_network.py @@ -384,7 +384,7 @@ def train_on_batch(self, batch_dialogues_targets: BatchDialoguesTargets) -> dict: feed_dict = { - self._dropout_keep_prob: 1., + self._dropout_keep_prob: 1. 
- self.dropout_rate, self._utterance_mask: batch_dialogues_features.b_padded_dialogue_length_mask, self._features: batch_dialogues_features.b_featuress, self._action: batch_dialogues_targets.b_action_ids, @@ -513,4 +513,4 @@ def train_on_batch(self, log.debug("not trainable policy chosen") return {'loss': 0., 'learning_rate': self.get_learning_rate(), - 'momentum': self.get_momentum()} \ No newline at end of file + 'momentum': self.get_momentum()} From 740a89784f1c6c878de397f01574b98e1bc8c93f Mon Sep 17 00:00:00 2001 From: Oleg Serikov Date: Sat, 26 Dec 2020 05:31:06 +0300 Subject: [PATCH 055/151] Update dialogue_state_tracker.py --- deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 8bf19ade40..6631f22bf7 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -354,8 +354,9 @@ def init_new_tracker(self, user_id: int, tracker_entity: DialogueStateTracker) - ) tracker.ffill_act_ids2req_slots_ids = tracker_entity.ffill_act_ids2req_slots_ids tracker.ffill_act_ids2aqd_slots_ids = tracker_entity.ffill_act_ids2aqd_slots_ids - tracker.act2act_id = tracker_entity.act2act_id - tracker.act_id2act = tracker_entity.act_id2act + # wip: memorizing tracker and policy + # tracker.act2act_id = tracker_entity.act2act_id + # tracker.act_id2act = tracker_entity.act_id2act if isinstance(tracker, MemorizingDialogueStateTracker): tracker._setup_action_ixes(tracker.stories) self._ids_to_trackers[user_id] = tracker From 93ec879c4028976d9bb1c9ae68dfe4c1fa6c86c3 Mon Sep 17 00:00:00 2001 From: Oleg Serikov Date: Sun, 27 Dec 2020 12:57:29 +0300 Subject: [PATCH 056/151] Update md_yaml_dialogs_reader.py --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 84f9dfff5b..6e3b95f9ea 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -400,7 +400,17 @@ def process_story_line(line: str) -> List[List[Dict[str, Any]]]: # noinspection PyProtectedMember gobot_formatted_stories = DSTC2DatasetReader._read_from_file(tmp_f.name, dialogs=dialogs) os.remove(tmp_f.name) - + if dialogs: + for story in gobot_formatted_stories: + for turn_ix, turn in enumerate(story): + if turn[0] == {'text': '', 'intents': [], 'episode_done': True}: + turn = ({'text': 'start', 'intents': ["start"], 'episode_done': True}, turn[1]) + story[turn_ix] = turn + else: + for turn_ix, turn in enumerate(gobot_formatted_stories): + if turn[0] == {'text': '', 'intents': [], 'episode_done': True}: + turn = ({'text': 'start', 'intents': ["start"], 'episode_done': True}, turn[1]) + gobot_formatted_stories[turn_ix] = turn log.debug(f"AFTER MLU_MD_DialogsDatasetReader._read_story(): " f"story_fpath={story_fpath}, " f"dialogs={dialogs}, " From 8d6ad7f74d23ff2b40a02ba6068b454be8603116 Mon Sep 17 00:00:00 2001 From: Oleg Serikov Date: Sun, 27 Dec 2020 13:06:23 +0300 Subject: [PATCH 057/151] Update md_yaml_dialogs_reader.py --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py 
b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 6e3b95f9ea..e08fd49e6e 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -404,12 +404,12 @@ def process_story_line(line: str) -> List[List[Dict[str, Any]]]: for story in gobot_formatted_stories: for turn_ix, turn in enumerate(story): if turn[0] == {'text': '', 'intents': [], 'episode_done': True}: - turn = ({'text': 'start', 'intents': ["start"], 'episode_done': True}, turn[1]) + turn = ({'text': 'start', 'intents': [{'act': 'start', 'slots': []}], 'episode_done': True}, turn[1]) story[turn_ix] = turn else: for turn_ix, turn in enumerate(gobot_formatted_stories): if turn[0] == {'text': '', 'intents': [], 'episode_done': True}: - turn = ({'text': 'start', 'intents': ["start"], 'episode_done': True}, turn[1]) + turn = ({'text': 'start', 'intents': [{'act': 'start', 'slots': []}], 'episode_done': True}, turn[1]) gobot_formatted_stories[turn_ix] = turn log.debug(f"AFTER MLU_MD_DialogsDatasetReader._read_story(): " f"story_fpath={story_fpath}, " From cfcd55cad704ef734d0a1abf0518dcd2a69d9508 Mon Sep 17 00:00:00 2001 From: oserikov Date: Sun, 27 Dec 2020 15:30:32 +0300 Subject: [PATCH 058/151] reintroduced rulebased gobot system --- deeppavlov/models/go_bot/go_bot.py | 15 ++++++++++----- .../go_bot/tracker/dialogue_state_tracker.py | 16 +++++++++------- .../models/go_bot/tracker/featurized_tracker.py | 3 +++ 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/deeppavlov/models/go_bot/go_bot.py b/deeppavlov/models/go_bot/go_bot.py index cb97523c0b..3c07fd6139 100644 --- a/deeppavlov/models/go_bot/go_bot.py +++ b/deeppavlov/models/go_bot/go_bot.py @@ -13,7 +13,7 @@ # limitations under the License. from logging import getLogger -from typing import Dict, Any, List, Optional, Union, Tuple +from typing import Dict, Any, List, Optional, Union, Tuple, Type import numpy as np @@ -139,19 +139,24 @@ def __init__(self, self.data_handler = TokensVectorizer(debug, word_vocab, bow_embedder, embedder) # todo make mor abstract - self.dialogue_state_tracker = DialogueStateTracker.from_gobot_params(tracker, self.nlg_manager, + tracker_class: Type = type(tracker) + if tracker_class == MemorizingDialogueStateTracker: + features_params_class: Type = MemorizingGoBotParams + policy_class: Type = MemorizingPolicy + + self.dialogue_state_tracker = tracker_class.from_gobot_params(tracker, self.nlg_manager, policy_network_params, database) # todo make mor abstract self.multiple_user_state_tracker = MultipleUserStateTrackersPool(base_tracker=self.dialogue_state_tracker) tokens_dims = self.data_handler.get_dims() - features_params = SharedGoBotParams.from_configured(self.nlg_manager, self.nlu_manager, + features_params = features_params_class.from_configured(self.nlg_manager, self.nlu_manager, self.dialogue_state_tracker) policy_save_path = Path(save_path, self.POLICY_DIR_NAME) policy_load_path = Path(load_path, self.POLICY_DIR_NAME) - self.policy = PolicyNetwork(policy_network_params, tokens_dims, features_params, - policy_load_path, policy_save_path, **kwargs) + self.policy = policy_class(policy_network_params, tokens_dims, features_params, + policy_load_path, policy_save_path, **kwargs) self.dialogues_cached_features = dict() diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 6631f22bf7..0243e3d4eb 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ 
b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -328,10 +328,11 @@ def get_user_tracker(self, user_id: int) -> DialogueStateTracker: return tracker def new_tracker(self): - # todo deprecated and never used? - tracker = DialogueStateTracker(self.base_tracker.slot_names, self.base_tracker.n_actions, - self.base_tracker.api_call_id, self.base_tracker.hidden_size, - self.base_tracker.database) + # todo deprecated and never used? (response: nope, but should be removed in favor of init_new_tracker) + tracker = self.base_tracker.__class__( + self.base_tracker.slot_names, self.base_tracker.n_actions, + self.base_tracker.api_call_id, self.base_tracker.hidden_size, + self.base_tracker.database) return tracker def get_or_init_tracker(self, user_id: int): @@ -354,9 +355,10 @@ def init_new_tracker(self, user_id: int, tracker_entity: DialogueStateTracker) - ) tracker.ffill_act_ids2req_slots_ids = tracker_entity.ffill_act_ids2req_slots_ids tracker.ffill_act_ids2aqd_slots_ids = tracker_entity.ffill_act_ids2aqd_slots_ids - # wip: memorizing tracker and policy - # tracker.act2act_id = tracker_entity.act2act_id - # tracker.act_id2act = tracker_entity.act_id2act + if type(tracker_entity) == MemorizingDialogueStateTracker: + # wip: memorizing tracker and policy + tracker.act2act_id = tracker_entity.act2act_id + tracker.act_id2act = tracker_entity.act_id2act if isinstance(tracker, MemorizingDialogueStateTracker): tracker._setup_action_ixes(tracker.stories) self._ids_to_trackers[user_id] = tracker diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index ec1314036b..efd219b78f 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -36,6 +36,7 @@ def __init__(self, # actions_required_acquired_slots_path: Optional[Union[str, Path]]=None, domain_yml_path: Optional[Union[str, Path]]=None, stories_yml_path: Optional[Union[str, Path]]=None, + mode: str = "NN", **kwargs) -> None: self.slot_names = list(slot_names) self.domain_yml_path = domain_yml_path @@ -44,6 +45,8 @@ def __init__(self, self._load_actions2slots_formfilling_info_from(domain_yml_path, stories_yml_path) self.history = [] self.current_features = None + assert mode in {"NN", "MEM"} + self.mode = mode @property def state_size(self) -> int: From 804736dcffb6c77d6e3b1d7bba9b509f12d37d0a Mon Sep 17 00:00:00 2001 From: Oleg Serikov Date: Mon, 28 Dec 2020 02:24:03 +0300 Subject: [PATCH 059/151] Update featurized_tracker.py --- deeppavlov/models/go_bot/tracker/featurized_tracker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index efd219b78f..3dfbcf31d6 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -36,7 +36,7 @@ def __init__(self, # actions_required_acquired_slots_path: Optional[Union[str, Path]]=None, domain_yml_path: Optional[Union[str, Path]]=None, stories_yml_path: Optional[Union[str, Path]]=None, - mode: str = "NN", + tracker_mode: str = "NN", **kwargs) -> None: self.slot_names = list(slot_names) self.domain_yml_path = domain_yml_path @@ -45,8 +45,8 @@ def __init__(self, self._load_actions2slots_formfilling_info_from(domain_yml_path, stories_yml_path) self.history = [] self.current_features = None - assert mode in {"NN", "MEM"} - self.mode = mode + assert tracker_mode in 
{"NN", "MEM"} + self.mode = tracker_mode @property def state_size(self) -> int: From 70bfa0b62f1da3bdacfbef16e2409d579d33bbe3 Mon Sep 17 00:00:00 2001 From: Oleg Serikov Date: Mon, 28 Dec 2020 02:33:20 +0300 Subject: [PATCH 060/151] Update go_bot.py --- deeppavlov/models/go_bot/go_bot.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/go_bot.py b/deeppavlov/models/go_bot/go_bot.py index 3c07fd6139..f2bf2e1ae9 100644 --- a/deeppavlov/models/go_bot/go_bot.py +++ b/deeppavlov/models/go_bot/go_bot.py @@ -140,9 +140,14 @@ def __init__(self, # todo make mor abstract tracker_class: Type = type(tracker) - if tracker_class == MemorizingDialogueStateTracker: + if tracker.mode == "MEM": + tracker_class = MemorizingDialogueStateTracker features_params_class: Type = MemorizingGoBotParams policy_class: Type = MemorizingPolicy + elif tracker.mode == "NN": + tracker_class = DialogueStateTracker + features_params_class: Type = SharedGoBotParams + policy_class: Type = PolicyNetwork self.dialogue_state_tracker = tracker_class.from_gobot_params(tracker, self.nlg_manager, policy_network_params, database) From 9eebf0f2400c6659748be3b07fa0e45411380767 Mon Sep 17 00:00:00 2001 From: Oleg Serikov Date: Wed, 13 Jan 2021 13:54:24 +0300 Subject: [PATCH 061/151] Update dialogue_state_tracker.py --- deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 0243e3d4eb..0047e0d6d1 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -299,7 +299,7 @@ def _load_stories(self, stories_path: Union[Path, str]): return stories def update_previous_action(self, prev_act_id: int) -> None: - super().update_previous_action(prev_act_id) + self.update_previous_action(prev_act_id) act_name = self.act_id2act[prev_act_id][0] for ix, (story_ptr, story) in enumerate(zip(self.stories_ptrs, self.stories)): next_action_ix = story_ptr + 1 From d84e8820a937290005cb344bbc1d66cb2b4b4ab9 Mon Sep 17 00:00:00 2001 From: Oleg Serikov Date: Wed, 13 Jan 2021 14:03:48 +0300 Subject: [PATCH 062/151] Update dialogue_state_tracker.py --- deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py index 0047e0d6d1..0243e3d4eb 100644 --- a/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py +++ b/deeppavlov/models/go_bot/tracker/dialogue_state_tracker.py @@ -299,7 +299,7 @@ def _load_stories(self, stories_path: Union[Path, str]): return stories def update_previous_action(self, prev_act_id: int) -> None: - self.update_previous_action(prev_act_id) + super().update_previous_action(prev_act_id) act_name = self.act_id2act[prev_act_id][0] for ix, (story_ptr, story) in enumerate(zip(self.stories_ptrs, self.stories)): next_action_ix = story_ptr + 1 From 7151697f322e28e5b51e43bbb873a3969194746d Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 7 May 2021 13:42:04 +0300 Subject: [PATCH 063/151] wip move data generation from intent catcher model to the separate iterator --- deeppavlov/core/common/registry.json | 1 + .../intent_catcher_iterator.py | 116 ++++++++++++++++++ .../dataset_readers/intent_catcher_reader.py | 8 +- 
.../models/intent_catcher/intent_catcher.py | 31 ++--- 4 files changed, 132 insertions(+), 24 deletions(-) create mode 100644 deeppavlov/dataset_iterators/intent_catcher_iterator.py diff --git a/deeppavlov/core/common/registry.json b/deeppavlov/core/common/registry.json index d4241687d9..5f1572a7b5 100644 --- a/deeppavlov/core/common/registry.json +++ b/deeppavlov/core/common/registry.json @@ -73,6 +73,7 @@ "hybrid_ner_model": "deeppavlov.models.ner.NER_model:HybridNerModel", "imdb_reader": "deeppavlov.dataset_readers.imdb_reader:ImdbReader", "input_splitter": "deeppavlov.models.multitask_bert.multitask_bert:InputSplitter", + "intent_catcher_iterator": "deeppavlov.dataset_iterators.intent_catcher_iterator:IntentCatcherIterator", "insurance_reader": "deeppavlov.dataset_readers.insurance_reader:InsuranceReader", "jieba_tokenizer": "deeppavlov.models.tokenizers.jieba_tokenizer:JiebaTokenizer", "joint_tagger_parser": "deeppavlov.models.syntax_parser.joint:JointTaggerParser", diff --git a/deeppavlov/dataset_iterators/intent_catcher_iterator.py b/deeppavlov/dataset_iterators/intent_catcher_iterator.py new file mode 100644 index 0000000000..02d05c6a2c --- /dev/null +++ b/deeppavlov/dataset_iterators/intent_catcher_iterator.py @@ -0,0 +1,116 @@ +# Copyright 2017 Neural Networks and Deep Learning lab, MIPT +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +from logging import getLogger +from typing import Tuple, List, Dict, Any, Iterator + +from xeger import Xeger + +from deeppavlov.core.common.registry import register +from deeppavlov.core.data.data_learning_iterator import DataLearningIterator + +log = getLogger(__name__) + + +@register('intent_catcher_iterator') +class IntentCatcherIterator(DataLearningIterator): + """ + Iterates over data for Intent Catcher training. + A subclass of :class:`~deeppavlov.core.data.data_learning_iterator.DataLearningIterator`. + + Args: + seed: random seed for data shuffling + shuffle: whether to shuffle data during batching + limit: Maximum number of phrases, that are generated from input regexps. 
+ + """ + + def __init__(self, + data: Dict[str, List[Tuple[Any, Any]]], + seed: int = None, + shuffle: bool = True, + limit: int = 10) -> None: + self.limit = limit + super().__init__(data, seed, shuffle) + + def gen_batches(self, + batch_size: int, + data_type: str = 'train', + shuffle: bool = None) -> Iterator[Tuple]: + """Generate batches of inputs and expected output to train + Intents Catcher + + Args: + batch_size: number of samples in batch + data_type: can be either 'train', 'test', or 'valid' + shuffle: whether to shuffle dataset before batching + + Returns: + regexps used in the passed data_type, list of sentences generated + from the original regexps, list of generated senteces' labels + """ + + if shuffle is None: + shuffle = self.shuffle + + ic_file_content = self.data[data_type] + sentences, labels = [], [] + for label, samples in ic_file_content: + for phrase in samples: + sentences.append(phrase) + labels.append(label) + + assert len(sentences) == len(labels), \ + "Number of labels is not equal to the number of sentences" + + try: + regexps = [re.compile(s) for s in sentences] + except Exception as e: + log.error(f"Some sentences are not a consitent regular expressions") + raise e + + proto_entries_indices = list(range(len(sentences))) + if shuffle: + self.random.shuffle(proto_entries_indices) + + if batch_size < 0: + batch_size = len(proto_entries_indices) + + xeger = Xeger(self.limit) + + regexps, generated_sentences, generated_labels = [], [], [] + generated_cnt = 0 + for proto_entry_ix in proto_entries_indices: + sent, lab = sentences[proto_entry_ix], labels[proto_entry_ix] + regex_ = re.compile(sent) + + gx = {xeger.xeger(sent) for _ in range(self.limit)} + generated_sentences.extend(gx) + generated_labels.extend([lab for _ in range(len(gx))]) + regexps.extend([regex_ for _ in range(len(gx))]) + + if len(generated_sentences) == batch_size: + # tuple(zip) below does [r1, r2, ..], [s1, s2, ..] -> ((r1, s1), (r2, s2), ..) + yield tuple(zip(regexps, generated_sentences)), generated_labels + generated_cnt += len(generated_sentences) + regexps, generated_sentences, generated_labels = [], [], [] + + if generated_sentences: + yield tuple(zip(regexps, generated_sentences)), generated_labels + generated_cnt += len(generated_sentences) + regexps, generated_sentences, generated_labels = [], [], [] + + log.info(f"Original number of samples: {len(sentences)}" + f", generated samples: {generated_cnt}") \ No newline at end of file diff --git a/deeppavlov/dataset_readers/intent_catcher_reader.py b/deeppavlov/dataset_readers/intent_catcher_reader.py index d916273db9..a67ef15a9a 100644 --- a/deeppavlov/dataset_readers/intent_catcher_reader.py +++ b/deeppavlov/dataset_readers/intent_catcher_reader.py @@ -9,7 +9,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
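# A small sketch of the regexp-expansion step used by IntentCatcherIterator.gen_batches
# above: Xeger(limit) samples strings that match a pattern. The pattern and limit below
# are toy values, not taken from any bundled dataset:
import re
from xeger import Xeger

limit = 5
xeger = Xeger(limit)
pattern = "(hi|hello)( there)?"
generated = {xeger.xeger(pattern) for _ in range(limit)}       # e.g. {"hi", "hello there"}
paired = list(zip([re.compile(pattern)] * len(generated), generated))
# every generated sentence stays paired with its source regexp, matching what the
# iterator yields to the model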
-from json import load +from deeppavlov.core.common.file import read_json from logging import getLogger from pathlib import Path from typing import Dict, List, Tuple @@ -45,10 +45,8 @@ def read(self, data_path: str, *args, **kwargs) -> Dict[str, List[Tuple[str, str file = Path(data_path).joinpath(file_name) if file.exists(): - with open(file) as fp: - file = load(fp) - for label in file: - data[data_type].extend([(phrase, label) for phrase in file[label]]) + ic_file_content = read_json(file) + data[data_type] = ic_file_content else: log.warning("Cannot find {} file".format(file)) diff --git a/deeppavlov/models/intent_catcher/intent_catcher.py b/deeppavlov/models/intent_catcher/intent_catcher.py index 87d6d4162a..118f1610bf 100644 --- a/deeppavlov/models/intent_catcher/intent_catcher.py +++ b/deeppavlov/models/intent_catcher/intent_catcher.py @@ -23,7 +23,6 @@ import tensorflow as tf import tensorflow_hub as tfhub from overrides import overrides -from xeger import Xeger from deeppavlov.core.common.registry import register from deeppavlov.core.models.nn_model import NNModel @@ -71,7 +70,7 @@ def __init__(self, save_path: Union[str, Path], load_path: Union[str, Path], } if embeddings not in urls: raise Exception(f"Provided embeddings type `{embeddings}` is not available. Available embeddings are: use, use_large.") - self.limit = limit + embedder = tfhub.Module(urls[embeddings]) self.sentences = tf.placeholder(dtype=tf.string) self.embedded = embedder(self.sentences) @@ -151,29 +150,23 @@ def train_on_batch(self, x: list, y: list) -> List[float]: Train classifier on batch of data. Args: - x: List of input sentences + x: List of tuples: y: List of input encoded labels Returns: List[float]: list of losses. """ assert len(x) == len(y), "Number of labels is not equal to the number of sentences" - try: - regexps = {(re.compile(s), l) for s, l in zip(x, y)} - except Exception as e: - log.error(f"Some sentences are not a consitent regular expressions") - raise e - xeger = Xeger(self.limit) - self.regexps = self.regexps.union(regexps) - generated_x = [] - generated_y = [] - for s, l in zip(x, y): # generate samples and add regexp - gx = {xeger.xeger(s) for _ in range(self.limit)} - generated_x.extend(gx) - generated_y.extend([l for i in range(len(gx))]) - log.info(f"Original number of samples: {len(y)}, generated samples: {len(generated_y)}") - embedded_x = self.session.run(self.embedded, feed_dict={self.sentences:generated_x}) # actual trainig - loss = self.classifier.train_on_batch(embedded_x, generated_y) + + # zip below does [(r1, s1), (r2, s2), ..] -> [r1, r2, ..], [s1, s2, ..] 
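# Toy batch in the format the reworked train_on_batch receives: each x entry pairs a
# generated sentence with the compiled regexp it was generated from (the patterns and
# label ids below are invented):
import re

x = [(re.compile("(hi|hello)"), "hello"), (re.compile("good ?bye"), "goodbye")]
y = [0, 1]
regexps, sentences = zip(*x)                 # -> compiled patterns, plain sentences
known_regexps = set().union(set(regexps))    # regexps accumulate across batches this way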
+ passed_regexps, passed_sents = zip(*x) + self.regexps = self.regexps.union(set(passed_regexps)) + + # region actual trainig + embedded_sents = self.session.run(self.embedded, + feed_dict={self.sentences:passed_sents}) + loss = self.classifier.train_on_batch(embedded_sents, y) + # endregion actual trainig return loss def process_event(self, event_name, data): From 5b77127e80c6ed03f8ec69b324fb06b53a33e77d Mon Sep 17 00:00:00 2001 From: oserikov Date: Sat, 8 May 2021 00:12:48 +0300 Subject: [PATCH 064/151] wip unify md_yaml_reader and intent_catcher_reader --- .../md_yaml_dialogs_iterator.py | 638 ++++++++++++++++++ deeppavlov/dataset_readers/dto/__init__.py | 0 .../dataset_readers/dto/rasa/__init__.py | 0 .../dto/rasa/domain_knowledge.py | 28 + .../dataset_readers/md_yaml_dialogs_reader.py | 592 +--------------- .../go_bot/tracker/featurized_tracker.py | 4 +- deeppavlov/models/slotfill/slotfill_raw.py | 3 +- 7 files changed, 687 insertions(+), 578 deletions(-) create mode 100644 deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py create mode 100644 deeppavlov/dataset_readers/dto/__init__.py create mode 100644 deeppavlov/dataset_readers/dto/rasa/__init__.py create mode 100644 deeppavlov/dataset_readers/dto/rasa/domain_knowledge.py diff --git a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py new file mode 100644 index 0000000000..12d67f8a81 --- /dev/null +++ b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py @@ -0,0 +1,638 @@ +# Copyright 2017 Neural Networks and Deep Learning lab, MIPT +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import os +import re +import tempfile +from collections import defaultdict +from logging import getLogger +from typing import Tuple, List, Dict, Any, Iterator, Union, Optional + +from deeppavlov.core.common.registry import register +from deeppavlov.core.data.data_learning_iterator import DataLearningIterator +from deeppavlov.dataset_readers.dstc2_reader import DSTC2DatasetReader +from deeppavlov.dataset_readers.dto.rasa.domain_knowledge import DomainKnowledge + +log = getLogger(__name__) + +SLOT2VALUE_PAIRS_TUPLE = Tuple[Tuple[str, Any], ...] 
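To make the nested structures below easier to follow, here is a small hypothetical example of what the _read_nlu() method below produces for a single RASA NLU line such as `- i want a [moderately](price:moderate) priced place` listed under `## intent:inform_pricerange`; the intent and slot names are illustrative only.

# SLOT2VALUE_PAIRS_TUPLE: a sorted tuple of (slot_name, slot_value) pairs used as a dict key
slots_key = (("price", "moderate"),)

# intent2slots2text: intent name -> slots key -> NLU examples with the markup stripped
intent2slots2text = {
    "inform_pricerange": {
        slots_key: [{"text": "i want a moderately priced place",
                     "slots_di": [("price", "moderate")],
                     "slots": slots_key}]
    }
}

# slot_name2text2value: slot name -> surface form found in the text -> canonical value(s)
slot_name2text2value = {"price": {"moderately": ["moderate"]}}
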
+ + + +""" + +""" + + + + +@register('md_yaml_dialogs_iterator') +class MD_YAML_DialogsDatasetIterator(DataLearningIterator): + def __init__(self, + data: Dict[str, List[Tuple[Any, Any]]], + seed: int = None, + shuffle: bool = True) -> None: + super().__init__(data, seed, shuffle) + + def gen_batches(self, + batch_size: int, + data_type: str = 'train', + shuffle: bool = None, + ignore_slots: bool = False, + dialogs: bool = False) -> Iterator[Tuple]: + + data = self.data[data_type] + story_lines = data["story_lines"] + domain = data["domain"] + nlu_lines = data["nlu_lines"] + + intent2slots2text, slot_name2text2value = self._read_nlu( + domain, + ignore_slots, + nlu_lines) + s = self._read_story(story_lines, + dialogs, domain, + intent2slots2text, slot_name2text2value, + ignore_slots=ignore_slots) + pass + + @classmethod + def _read_nlu(cls, domain_knowledge, ignore_slots, nlu_lines): + slots_markup_pattern = r"\[" + \ + r"(?P.*?)" + \ + r"\]" + \ + r"\(" + \ + r"(?P.*?)" + \ + r"\)" + + intent2slots2text = defaultdict(lambda: defaultdict(list)) + slot_name2text2value = defaultdict(lambda: defaultdict(list)) + curr_intent_name = None + for line in nlu_lines: + if line.startswith("##"): + # lines starting with ## are starting section describing new intent type + curr_intent_name = line.strip("##").strip().split("intent:", 1)[-1] + if line.strip().startswith('-'): + # lines starting with - are listing the examples of intent texts of the current intent type + intent_text_w_markup = line.strip().strip('-').strip() + line_slots_found = re.finditer(slots_markup_pattern, intent_text_w_markup) + if ignore_slots: + line_slots_found = [] + + curr_char_ix = 0 + intent_text_without_markup = '' + cleaned_text_slots = [] # intent text can contain slots highlighted + for line_slot in line_slots_found: + line_slot_l_span, line_slot_r_span = line_slot.span() + # intent w.o. markup for "some [entity](entity_example) text" is "some entity text" + # so we should remove brackets and the parentheses content + intent_text_without_markup += intent_text_w_markup[curr_char_ix:line_slot_l_span] + + slot_value_text = str(line_slot["slot_value"]) + slot_name = line_slot["slot_name"] + slot_value = slot_value_text + if ':' in slot_name: + # e.g. [moderately](price:moderate) + slot_name, slot_value = slot_name.split(':', 1) + + assert slot_name in domain_knowledge.known_slots, f"{slot_name}" + \ + " was not listed as slot " + \ + "in domain knowledge config" + + slot_value_new_l_span = len(intent_text_without_markup) # l span in cleaned text + slot_value_new_r_span = slot_value_new_l_span + len(slot_value_text) # r span in cleaned text + # intent w.o. 
markup for "some [entity](entity_example) text" is "some entity text" + # so we should remove brackets and the parentheses content + intent_text_without_markup += slot_value_text + + cleaned_text_slots.append((slot_name, slot_value)) + + slot_name2text2value[slot_name][slot_value_text].append(slot_value) + + curr_char_ix = line_slot_r_span + intent_text_without_markup += intent_text_w_markup[curr_char_ix: len(intent_text_w_markup)] + + slots_key = tuple(sorted((slot[0], slot[1]) for slot in cleaned_text_slots)) + intent2slots2text[curr_intent_name][slots_key].append( + {"text": intent_text_without_markup, + "slots_di": cleaned_text_slots, + "slots": slots_key}) + # defaultdict behavior is no more needed + intent2slots2text = {k: dict(v) for k, v in intent2slots2text.items()} + slot_name2text2value = dict(slot_name2text2value) + return intent2slots2text, slot_name2text2value + + @classmethod + def _read_story(cls, + story_lines: List, + dialogs: bool, + domain_knowledge: DomainKnowledge, + intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], + slot_name2text2value: Dict[str, Dict[str, str]], + ignore_slots: bool = False) \ + -> Union[List[List[Tuple[Dict[str, bool], Dict[str, Any]]]], List[ + Tuple[Dict[str, bool], Dict[str, Any]]]]: + """ + Reads stories from the specified path converting them to go-bot format on the fly. + + Args: + story_fpath: path to the file containing the stories dataset + dialogs: flag which indicates whether to output list of turns or + list of dialogs + domain_knowledge: the domain knowledge, usually inferred from domain.yml + intent2slots2text: the mapping allowing given the intent class and + slotfilling values of utterance, restore utterance text. + slot_name2text2value: the mapping of possible slot values spellings to the values themselves. + Returns: + stories read as if it was done with DSTC2DatasetReader._read_from_file() + """ + log.debug(f"BEFORE MLU_MD_DialogsDatasetReader._read_story(): " + f"dialogs={dialogs}, " + f"domain_knowledge={domain_knowledge}, " + f"intent2slots2text={intent2slots2text}, " + f"slot_name2text2value={slot_name2text2value}") + + default_system_start = { + "speaker": cls._SYSTEM_SPEAKER_ID, + "text": "start", + "dialog_acts": [{"act": "start", "slots": []}]} + default_system_goodbye = { + "text": "goodbye :(", + "dialog_acts": [{"act": "utter_goodbye", "slots": []}], + "speaker": cls._SYSTEM_SPEAKER_ID} # TODO infer from dataset + + stories_parsed = {} + + curr_story_title = None + curr_story_utters_batch = [] + nonlocal_curr_story_bad = False # can be modified as a nonlocal variable + + def process_user_utter(line: str) -> List[List[Dict[str, Any]]]: + """ + given the stories.md user line, returns the batch of all the dstc2 ways to represent it + Args: + line: the system line to generate dstc2 versions for + + Returns: + all the possible dstc2 versions of the passed story line + """ + nonlocal intent2slots2text, slot_name2text2value, curr_story_utters_batch, nonlocal_curr_story_bad + try: + possible_user_utters = cls._augment_user_turn(intent2slots2text, + line, + slot_name2text2value) + # dialogs MUST start with system replics + for curr_story_utters in curr_story_utters_batch: + if not curr_story_utters: + curr_story_utters.append(default_system_start) + + utters_to_append_batch = [] + for user_utter in possible_user_utters: + utters_to_append_batch.append([user_utter]) + + except KeyError: + log.debug(f"INSIDE MLU_MD_DialogsDatasetReader._read_story(): " + f"Skipping story w. 
line {line} because of no NLU candidates found") + nonlocal_curr_story_bad = True + utters_to_append_batch = [] + return utters_to_append_batch + + def process_system_utter(line: str) -> List[List[Dict[str, Any]]]: + """ + given the stories.md system line, returns the batch of all the dstc2 ways to represent it + Args: + line: the system line to generate dstc2 versions for + + Returns: + all the possible dstc2 versions of the passed story line + """ + nonlocal intent2slots2text, domain_knowledge, curr_story_utters_batch, nonlocal_curr_story_bad + system_action = cls._parse_system_turn(domain_knowledge, line) + system_action_name = system_action.get("dialog_acts")[0].get("act") + + for curr_story_utters in curr_story_utters_batch: + if cls._last_turn_is_systems_turn(curr_story_utters): + # deal with consecutive system actions by inserting the last user replics in between + curr_story_utters.append( + cls._get_last_users_turn(curr_story_utters)) + + def parse_form_name(story_line: str) -> str: + """ + if the line (in stories.md utterance format) contains a form name, return it + Args: + story_line: line to extract form name from + + Returns: + the extracted form name or None if no form name found + """ + form_name = None + if story_line.startswith("form"): + form_di = json.loads(story_line[len("form"):]) + form_name = form_di["name"] + return form_name + + if system_action_name.startswith("form"): + form_name = parse_form_name(system_action_name) + augmented_utters = cls._augment_form(form_name, domain_knowledge, + intent2slots2text) + + utters_to_append_batch = [[]] + for user_utter in augmented_utters: + new_curr_story_utters_batch = [] + for curr_story_utters in utters_to_append_batch: + possible_extensions = process_story_line(user_utter) + for possible_extension in possible_extensions: + new_curr_story_utters = curr_story_utters.copy() + new_curr_story_utters.extend(possible_extension) + new_curr_story_utters_batch.append( + new_curr_story_utters) + utters_to_append_batch = new_curr_story_utters_batch + else: + utters_to_append_batch = [[system_action]] + return utters_to_append_batch + + def process_story_line(line: str) -> List[List[Dict[str, Any]]]: + """ + given the stories.md line, returns the batch of all the dstc2 ways to represent it + Args: + line: the line to generate dstc2 versions + + Returns: + all the possible dstc2 versions of the passed story line + """ + if line.startswith('*'): + utters_to_extend_with_batch = process_user_utter(line) + elif line.startswith('-'): + utters_to_extend_with_batch = process_system_utter(line) + else: + # todo raise an exception + utters_to_extend_with_batch = [] + return utters_to_extend_with_batch + + for line in story_lines: + line = line.strip() + if not line: + continue + if line.startswith('#'): + # #... 
marks the beginning of new story + if curr_story_utters_batch and curr_story_utters_batch[0] and \ + curr_story_utters_batch[0][-1][ + "speaker"] == cls._USER_SPEAKER_ID: + for curr_story_utters in curr_story_utters_batch: + curr_story_utters.append( + default_system_goodbye) # dialogs MUST end with system replics + + if not nonlocal_curr_story_bad: + for curr_story_utters_ix, curr_story_utters in enumerate( + curr_story_utters_batch): + stories_parsed[ + curr_story_title + f"_{curr_story_utters_ix}"] = curr_story_utters + + curr_story_title = line.strip('#') + curr_story_utters_batch = [[]] + nonlocal_curr_story_bad = False + else: + new_curr_story_utters_batch = [] + possible_extensions = process_story_line(line) + for curr_story_utters in curr_story_utters_batch: + for user_utter in possible_extensions: + new_curr_story_utters = curr_story_utters.copy() + new_curr_story_utters.extend(user_utter) + new_curr_story_utters_batch.append( + new_curr_story_utters) + curr_story_utters_batch = new_curr_story_utters_batch + # curr_story_utters.extend(process_story_line(line)) + + if not nonlocal_curr_story_bad: + for curr_story_utters_ix, curr_story_utters in enumerate( + curr_story_utters_batch): + stories_parsed[ + curr_story_title + f"_{curr_story_utters_ix}"] = curr_story_utters + + tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w', + encoding="utf-8") + for story_id, story in stories_parsed.items(): + for replics in story: + print(json.dumps(replics), file=tmp_f) + print(file=tmp_f) + tmp_f.close() + # noinspection PyProtectedMember + gobot_formatted_stories = DSTC2DatasetReader._read_from_file(tmp_f.name, + dialogs=dialogs) + os.remove(tmp_f.name) + + log.debug(f"AFTER MLU_MD_DialogsDatasetReader._read_story(): " + f"dialogs={dialogs}, " + f"domain_knowledge={domain_knowledge}, " + f"intent2slots2text={intent2slots2text}, " + f"slot_name2text2value={slot_name2text2value}") + + return gobot_formatted_stories + + @classmethod + def _augment_form(cls, form_name: str, domain_knowledge: DomainKnowledge, + intent2slots2text: Dict) -> List[str]: + """ + Replaced the form mention in stories.md with the actual turns relevant to the form + Args: + form_name: the name of form to generate turns for + domain_knowledge: the domain knowledge (see domain.yml in RASA) relevant to the processed config + intent2slots2text: the mapping of intents and particular slots onto text + + Returns: + the story turns relevant to the passed form + """ + form = domain_knowledge.forms[form_name] # todo handle keyerr + augmended_story = [] + for slot_name, slot_info_li in form.items(): + if slot_info_li and slot_info_li[0].get("type", + '') == "from_entity": + # we only handle from_entity slots + known_responses = list(domain_knowledge.response_templates) + known_intents = list(intent2slots2text.keys()) + augmended_story.extend( + cls._augment_slot(known_responses, known_intents, slot_name, + form_name)) + return augmended_story + + @classmethod + def _augment_slot(cls, known_responses: List[str], known_intents: List[str], + slot_name: str, form_name: str) \ + -> List[str]: + """ + Given the slot name, generates a sequence of system turn asking for a slot and user' turn providing this slot + + Args: + known_responses: responses known to the system from domain.yml + known_intents: intents known to the system from domain.yml + slot_name: the name of the slot to augment for + form_name: the name of the form for which the turn is augmented + + Returns: + the list of stories.md alike turns + """ + ask_slot_act_name = 
cls._get_augmented_ask_slot_utter(form_name, + known_responses, + slot_name) + inform_slot_user_utter = cls._get_augmented_ask_intent_utter( + known_intents, slot_name) + + return [f"- {ask_slot_act_name}", f"* {inform_slot_user_utter}"] + + @classmethod + def _get_augmented_ask_intent_utter(cls, known_intents: List[str], + slot_name: str) -> Optional[str]: + """ + if the system knows the inform_{slot} intent, return this intent name, otherwise return None + Args: + known_intents: intents known to the system + slot_name: the slot to look inform intent for + + Returns: + the slot informing intent or None + """ + inform_slot_user_utter_hypothesis = f"inform_{slot_name}" + if inform_slot_user_utter_hypothesis in known_intents: + inform_slot_user_utter = inform_slot_user_utter_hypothesis + else: + # todo raise an exception + inform_slot_user_utter = None + pass + return inform_slot_user_utter + + @classmethod + def _get_augmented_ask_slot_utter(cls, form_name: str, + known_responses: List[str], + slot_name: str): + """ + if the system knows the ask_{slot} action, return this action name, otherwise return None + Args: + form_name: the name of the currently processed form + known_responses: actions known to the system + slot_name: the slot to look asking action for + + Returns: + the slot asking action or None + """ + ask_slot_act_name_hypothesis1 = f"utter_ask_{form_name}_{slot_name}" + ask_slot_act_name_hypothesis2 = f"utter_ask_{slot_name}" + if ask_slot_act_name_hypothesis1 in known_responses: + ask_slot_act_name = ask_slot_act_name_hypothesis1 + elif ask_slot_act_name_hypothesis2 in known_responses: + ask_slot_act_name = ask_slot_act_name_hypothesis2 + else: + # todo raise an exception + ask_slot_act_name = None + pass + return ask_slot_act_name + + @classmethod + def _get_last_users_turn(cls, curr_story_utters: List[Dict]) -> Dict: + """ + Given the dstc2 story, return the last user utterance from it + Args: + curr_story_utters: the dstc2-formatted stoyr + + Returns: + the last user utterance from the passed story + """ + *_, last_user_utter = filter( + lambda x: x["speaker"] == cls._USER_SPEAKER_ID, curr_story_utters) + return last_user_utter + + @classmethod + def _last_turn_is_systems_turn(cls, curr_story_utters): + return curr_story_utters and curr_story_utters[-1][ + "speaker"] == cls._SYSTEM_SPEAKER_ID + + @classmethod + def _parse_system_turn(cls, domain_knowledge: DomainKnowledge, + line: str) -> Dict: + """ + Given the RASA stories.md line, returns the dstc2-formatted json (dict) for this line + Args: + domain_knowledge: the domain knowledge relevant to the processed stories config (from which line is taken) + line: the story system step representing line from stories.md + + Returns: + the dstc2-formatted passed turn + """ + # system actions are started in dataset with - + system_action_name = line.strip('-').strip() + curr_action_text = cls._system_action2text(domain_knowledge, + system_action_name) + system_action = {"speaker": cls._SYSTEM_SPEAKER_ID, + "text": curr_action_text, + "dialog_acts": [ + {"act": system_action_name, "slots": []}]} + if system_action_name.startswith("action"): + system_action["db_result"] = {} + return system_action + + @classmethod + def _augment_user_turn(cls, intent2slots2text, line: str, + slot_name2text2value) -> List[Dict[str, Any]]: + """ + given the turn information generate all the possible stories representing it + Args: + intent2slots2text: the intents and slots to natural language utterances mapping known to the system + line: the line 
representing used utterance in stories.md format + slot_name2text2value: the slot names to values mapping known o the system + + Returns: + the batch of all the possible dstc2 representations of the passed intent + """ + # user actions are started in dataset with * + user_action, slots_dstc2formatted = cls._parse_user_intent(line) + slots_actual_values = cls._clarify_slots_values(slot_name2text2value, + slots_dstc2formatted) + slots_to_exclude, slots_used_values, action_for_text = cls._choose_slots_for_whom_exists_text( + intent2slots2text, slots_actual_values, + user_action) + possible_user_response_infos = cls._user_action2text(intent2slots2text, + action_for_text, + slots_used_values) + possible_user_utters = [] + for user_response_info in possible_user_response_infos: + user_utter = {"speaker": cls._USER_SPEAKER_ID, + "text": user_response_info["text"], + "dialog_acts": [{"act": user_action, + "slots": user_response_info[ + "slots"]}], + "slots to exclude": slots_to_exclude} + possible_user_utters.append(user_utter) + return possible_user_utters + + @staticmethod + def _choose_slots_for_whom_exists_text( + intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], + slots_actual_values: SLOT2VALUE_PAIRS_TUPLE, + user_action: str) -> Tuple[List, SLOT2VALUE_PAIRS_TUPLE, str]: + """ + + Args: + intent2slots2text: the mapping of intents and slots to natural language utterances representing them + slots_actual_values: the slot values information to look utterance for + user_action: the intent to look utterance for + + Returns: + the slots ommitted to find an NLU candidate, the slots represented in the candidate, the intent name used + """ + possible_keys = [k for k in intent2slots2text.keys() if + user_action in k] + possible_keys = possible_keys + [user_action] + possible_keys = sorted(possible_keys, + key=lambda action_s: action_s.count('+')) + for possible_action_key in possible_keys: + if intent2slots2text[possible_action_key].get(slots_actual_values): + slots_used_values = slots_actual_values + slots_to_exclude = [] + return slots_to_exclude, slots_used_values, possible_action_key + else: + slots_lazy_key = set(e[0] for e in slots_actual_values) + slots_lazy_key -= {"intent"} + fake_keys = [] + for known_key in intent2slots2text[possible_action_key].keys(): + if slots_lazy_key.issubset(set(e[0] for e in known_key)): + fake_keys.append(known_key) + break + + if fake_keys: + slots_used_values = sorted(fake_keys, key=lambda elem: ( + len(set(slots_actual_values) ^ set(elem)), + len([e for e in elem + if e[0] not in slots_lazy_key])) + )[0] + + slots_to_exclude = [e[0] for e in slots_used_values if + e[0] not in slots_lazy_key] + return slots_to_exclude, slots_used_values, possible_action_key + + raise KeyError("no possible NLU candidates found") + + @staticmethod + def _clarify_slots_values(slot_name2text2value: Dict[str, Dict[str, Any]], + slots_dstc2formatted: List[ + List]) -> SLOT2VALUE_PAIRS_TUPLE: + slots_key = [] + for slot_name, slot_value in slots_dstc2formatted: + slot_actual_value = slot_name2text2value.get(slot_name, {}).get( + slot_value, slot_value) + slots_key.append((slot_name, slot_actual_value)) + slots_key = tuple(sorted(slots_key)) + return slots_key + + @staticmethod + def _parse_user_intent(line: str, ignore_slots=False) -> Tuple[ + str, List[List]]: + """ + Given the intent line in RASA stories.md format, return the name of the intent and slots described with this line + Args: + line: the line to parse + ignore_slots: whether to ignore slots information + + 
Returns: + the pair of the intent name and slots ([[slot name, slot value],.. ]) info + """ + intent = line.strip('*').strip() + if '{' not in intent: + intent = intent + "{}" # the prototypical intent is "intent_name{slot1: value1, slotN: valueN}" + user_action, slots_info = intent.split('{', 1) + slots_info = json.loads('{' + slots_info) + slots_dstc2formatted = [[slot_name, slot_value] for + slot_name, slot_value in slots_info.items()] + if ignore_slots: + slots_dstc2formatted = dict() + return user_action, slots_dstc2formatted + + @staticmethod + def _user_action2text( + intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], + user_action: str, + slots_li: Optional[SLOT2VALUE_PAIRS_TUPLE] = None) -> List[str]: + """ + given the user intent, return the text representing this intent with passed slots + Args: + intent2slots2text: the mapping of intents and slots to natural language utterances + user_action: the name of intent to generate text for + slots_li: the slot values to provide + + Returns: + the text of utterance relevant to the passed intent and slots + """ + if slots_li is None: + slots_li = tuple() + return intent2slots2text[user_action][slots_li] + + @staticmethod + def _system_action2text(domain_knowledge: DomainKnowledge, + system_action: str) -> str: + """ + given the system action name return the relevant template text + Args: + domain_knowledge: the domain knowledge relevant to the currently processed config + system_action: the name of the action to get intent for + + Returns: + template relevant to the passed action + """ + possible_system_responses = domain_knowledge.response_templates.get( + system_action, + [{"text": system_action}]) + + response_text = possible_system_responses[0]["text"] + response_text = re.sub(r"(\w+)\=\{(.*?)\}", r"#\2", + response_text) # TODO: straightforward regex string + + return response_text diff --git a/deeppavlov/dataset_readers/dto/__init__.py b/deeppavlov/dataset_readers/dto/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/deeppavlov/dataset_readers/dto/rasa/__init__.py b/deeppavlov/dataset_readers/dto/rasa/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/deeppavlov/dataset_readers/dto/rasa/domain_knowledge.py b/deeppavlov/dataset_readers/dto/rasa/domain_knowledge.py new file mode 100644 index 0000000000..38ca9fd76f --- /dev/null +++ b/deeppavlov/dataset_readers/dto/rasa/domain_knowledge.py @@ -0,0 +1,28 @@ +from pathlib import Path +from typing import Dict, List, Union + +from deeppavlov.core.common.file import read_yaml + + +class DomainKnowledge: + """the DTO-like class to store the domain knowledge from the domain yaml config.""" + + def __init__(self, domain_knowledge_di: Dict): + self.known_entities: List = domain_knowledge_di.get("entities", []) + self.known_intents: List = domain_knowledge_di.get("intents", []) + self.known_actions: List = domain_knowledge_di.get("actions", []) + self.known_slots: Dict = domain_knowledge_di.get("slots", {}) + self.response_templates: Dict = domain_knowledge_di.get("responses", {}) + self.session_config: Dict = domain_knowledge_di.get("session_config", {}) + self.forms: Dict = domain_knowledge_di.get("forms", {}) + + @classmethod + def from_yaml(cls, domain_yml_fpath: Union[str, Path] = "domain.yml"): + """ + Parses domain.yml domain config file into the DomainKnowledge object + Args: + domain_yml_fpath: path to the domain config file, defaults to domain.yml + Returns: + the loaded DomainKnowledge obect + """ + return 
cls(read_yaml(domain_yml_fpath)) \ No newline at end of file diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 29a1b3f699..da9d77a24c 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -13,52 +13,18 @@ # limitations under the License. -import json -import os -import re -import tempfile -from collections import defaultdict from logging import getLogger from overrides import overrides from pathlib import Path -from typing import Dict, List, Tuple, Union, Any, Optional +from typing import Dict, Tuple, Any -from deeppavlov.core.common.file import read_yaml from deeppavlov.core.common.registry import register from deeppavlov.core.data.dataset_reader import DatasetReader -from deeppavlov.dataset_readers.dstc2_reader import DSTC2DatasetReader - - -SLOT2VALUE_PAIRS_TUPLE = Tuple[Tuple[str, Any], ...] +from deeppavlov.dataset_readers.dto.rasa.domain_knowledge import DomainKnowledge log = getLogger(__name__) -class DomainKnowledge: - """the DTO-like class to store the domain knowledge from the domain yaml config.""" - - def __init__(self, domain_knowledge_di: Dict): - self.known_entities: List = domain_knowledge_di.get("entities", []) - self.known_intents: List = domain_knowledge_di.get("intents", []) - self.known_actions: List = domain_knowledge_di.get("actions", []) - self.known_slots: Dict = domain_knowledge_di.get("slots", {}) - self.response_templates: Dict = domain_knowledge_di.get("responses", {}) - self.session_config: Dict = domain_knowledge_di.get("session_config", {}) - self.forms: Dict = domain_knowledge_di.get("forms", {}) - - @classmethod - def from_yaml(cls, domain_yml_fpath: Union[str, Path] = "domain.yml"): - """ - Parses domain.yml domain config file into the DomainKnowledge object - Args: - domain_yml_fpath: path to the domain config file, defaults to domain.yml - Returns: - the loaded DomainKnowledge obect - """ - return cls(read_yaml(domain_yml_fpath)) - - - @register('md_yaml_dialogs_reader') class MD_YAML_DialogsDatasetReader(DatasetReader): """ @@ -90,13 +56,10 @@ def _data_fname(cls, datatype: str) -> str: @classmethod @overrides - def read(cls, data_path: str, dialogs: bool = False, ignore_slots: bool = False) -> Dict[str, List]: + def read(cls, data_path: str) -> Dict[str, Dict]: """ Parameters: data_path: path to read dataset from - dialogs: flag which indicates whether to output list of turns or - list of dialogs - ignore_slots: whether to ignore slots information provided in stories.md or not Returns: dictionary that contains @@ -117,547 +80,24 @@ def read(cls, data_path: str, dialogs: bool = False, ignore_slots: bool = False) domain_path = Path(data_path, domain_fname) domain_knowledge = DomainKnowledge.from_yaml(domain_path) - intent2slots2text, slot_name2text2value = cls._read_intent2text_mapping(Path(data_path, nlu_fname), - domain_knowledge, ignore_slots) + nlu_fpath = Path(data_path, nlu_fname) + with open(nlu_fpath) as f: + nlu_lines = f.read().splitlines() short2long_subsample_name = {"trn": "train", "val": "valid", "tst": "test"} - data = {short2long_subsample_name[subsample_name_short]: - cls._read_story(Path(data_path, cls._data_fname(subsample_name_short)), - dialogs, domain_knowledge, intent2slots2text, slot_name2text2value, - ignore_slots=ignore_slots) - for subsample_name_short in cls.VALID_DATATYPES} - - return data - - @classmethod - def _read_intent2text_mapping(cls, nlu_fpath: Path, domain_knowledge: 
DomainKnowledge, ignore_slots: bool = False) \ - -> Tuple[Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], - Dict[str, Dict[str, str]]]: - - slots_markup_pattern = r"\[" + \ - r"(?P.*?)" + \ - r"\]" + \ - r"\(" + \ - r"(?P.*?)" + \ - r"\)" - - intent2slots2text = defaultdict(lambda: defaultdict(list)) - slot_name2text2value = defaultdict(lambda: defaultdict(list)) - - curr_intent_name = None - - with open(nlu_fpath) as nlu_f: - for line in nlu_f: - if line.startswith("##"): - # lines starting with ## are starting section describing new intent type - curr_intent_name = line.strip("##").strip().split("intent:", 1)[-1] - - if line.strip().startswith('-'): - # lines starting with - are listing the examples of intent texts of the current intent type - intent_text_w_markup = line.strip().strip('-').strip() - line_slots_found = re.finditer(slots_markup_pattern, intent_text_w_markup) - if ignore_slots: - line_slots_found = [] - - curr_char_ix = 0 - intent_text_without_markup = '' - cleaned_text_slots = [] # intent text can contain slots highlighted - for line_slot in line_slots_found: - line_slot_l_span, line_slot_r_span = line_slot.span() - # intent w.o. markup for "some [entity](entity_example) text" is "some entity text" - # so we should remove brackets and the parentheses content - intent_text_without_markup += intent_text_w_markup[curr_char_ix:line_slot_l_span] - - slot_value_text = str(line_slot["slot_value"]) - slot_name = line_slot["slot_name"] - slot_value = slot_value_text - if ':' in slot_name: - slot_name, slot_value = slot_name.split(':', 1) # e.g. [moderately](price:moderate) - - assert slot_name in domain_knowledge.known_slots, f"{slot_name} from {nlu_fpath}" + \ - " was not listed as slot " + \ - "in domain knowledge config" - - slot_value_new_l_span = len(intent_text_without_markup) # l span in cleaned text - slot_value_new_r_span = slot_value_new_l_span + len(slot_value_text) # r span in cleaned text - # intent w.o. markup for "some [entity](entity_example) text" is "some entity text" - # so we should remove brackets and the parentheses content - intent_text_without_markup += slot_value_text - - cleaned_text_slots.append((slot_name, slot_value)) - - slot_name2text2value[slot_name][slot_value_text].append(slot_value) - - curr_char_ix = line_slot_r_span - intent_text_without_markup += intent_text_w_markup[curr_char_ix: len(intent_text_w_markup)] - - slots_key = tuple(sorted((slot[0], slot[1]) for slot in cleaned_text_slots)) - intent2slots2text[curr_intent_name][slots_key].append({"text": intent_text_without_markup, - "slots_di": cleaned_text_slots, - "slots": slots_key}) - - # defaultdict behavior is no more needed - intent2slots2text = {k: dict(v) for k, v in intent2slots2text.items()} - slot_name2text2value = dict(slot_name2text2value) - - return intent2slots2text, slot_name2text2value - - @classmethod - def _read_story(cls, - story_fpath: Path, - dialogs: bool, - domain_knowledge: DomainKnowledge, - intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], - slot_name2text2value: Dict[str, Dict[str, str]], - ignore_slots: bool = False) \ - -> Union[List[List[Tuple[Dict[str, bool], Dict[str, Any]]]], List[Tuple[Dict[str, bool], Dict[str, Any]]]]: - """ - Reads stories from the specified path converting them to go-bot format on the fly. 
- - Args: - story_fpath: path to the file containing the stories dataset - dialogs: flag which indicates whether to output list of turns or - list of dialogs - domain_knowledge: the domain knowledge, usually inferred from domain.yml - intent2slots2text: the mapping allowing given the intent class and - slotfilling values of utterance, restore utterance text. - slot_name2text2value: the mapping of possible slot values spellings to the values themselves. - Returns: - stories read as if it was done with DSTC2DatasetReader._read_from_file() - """ - log.debug(f"BEFORE MLU_MD_DialogsDatasetReader._read_story(): " - f"story_fpath={story_fpath}, " - f"dialogs={dialogs}, " - f"domain_knowledge={domain_knowledge}, " - f"intent2slots2text={intent2slots2text}, " - f"slot_name2text2value={slot_name2text2value}") - - default_system_start = { - "speaker": cls._SYSTEM_SPEAKER_ID, - "text": "start", - "dialog_acts": [{"act": "start", "slots": []}]} - default_system_goodbye = { - "text": "goodbye :(", - "dialog_acts": [{"act": "utter_goodbye", "slots": []}], - "speaker": cls._SYSTEM_SPEAKER_ID} # TODO infer from dataset - - stories_parsed = {} - - curr_story_title = None - curr_story_utters_batch = [] - nonlocal_curr_story_bad = False # can be modified as a nonlocal variable - - def process_user_utter(line: str) -> List[List[Dict[str, Any]]]: - """ - given the stories.md user line, returns the batch of all the dstc2 ways to represent it - Args: - line: the system line to generate dstc2 versions for - - Returns: - all the possible dstc2 versions of the passed story line - """ - nonlocal intent2slots2text, slot_name2text2value, curr_story_utters_batch, nonlocal_curr_story_bad - try: - possible_user_utters = cls.augment_user_turn(intent2slots2text, line, slot_name2text2value) - # dialogs MUST start with system replics - for curr_story_utters in curr_story_utters_batch: - if not curr_story_utters: - curr_story_utters.append(default_system_start) - - utters_to_append_batch = [] - for user_utter in possible_user_utters: - utters_to_append_batch.append([user_utter]) - - except KeyError: - log.debug(f"INSIDE MLU_MD_DialogsDatasetReader._read_story(): " - f"Skipping story w. 
line {line} because of no NLU candidates found") - nonlocal_curr_story_bad = True - utters_to_append_batch = [] - return utters_to_append_batch - - def process_system_utter(line: str) -> List[List[Dict[str, Any]]]: - """ - given the stories.md system line, returns the batch of all the dstc2 ways to represent it - Args: - line: the system line to generate dstc2 versions for - - Returns: - all the possible dstc2 versions of the passed story line - """ - nonlocal intent2slots2text, domain_knowledge, curr_story_utters_batch, nonlocal_curr_story_bad - system_action = cls.parse_system_turn(domain_knowledge, line) - system_action_name = system_action.get("dialog_acts")[0].get("act") - - for curr_story_utters in curr_story_utters_batch: - if cls.last_turn_is_systems_turn(curr_story_utters): - # deal with consecutive system actions by inserting the last user replics in between - curr_story_utters.append(cls.get_last_users_turn(curr_story_utters)) - - def parse_form_name(story_line: str) -> str: - """ - if the line (in stories.md utterance format) contains a form name, return it - Args: - story_line: line to extract form name from - - Returns: - the extracted form name or None if no form name found - """ - form_name = None - if story_line.startswith("form"): - form_di = json.loads(story_line[len("form"):]) - form_name = form_di["name"] - return form_name - - if system_action_name.startswith("form"): - form_name = parse_form_name(system_action_name) - augmented_utters = cls.augment_form(form_name, domain_knowledge, intent2slots2text) - - utters_to_append_batch = [[]] - for user_utter in augmented_utters: - new_curr_story_utters_batch = [] - for curr_story_utters in utters_to_append_batch: - possible_extensions = process_story_line(user_utter) - for possible_extension in possible_extensions: - new_curr_story_utters = curr_story_utters.copy() - new_curr_story_utters.extend(possible_extension) - new_curr_story_utters_batch.append(new_curr_story_utters) - utters_to_append_batch = new_curr_story_utters_batch - else: - utters_to_append_batch = [[system_action]] - return utters_to_append_batch - - def process_story_line(line: str) -> List[List[Dict[str, Any]]]: - """ - given the stories.md line, returns the batch of all the dstc2 ways to represent it - Args: - line: the line to generate dstc2 versions - - Returns: - all the possible dstc2 versions of the passed story line - """ - if line.startswith('*'): - utters_to_extend_with_batch = process_user_utter(line) - elif line.startswith('-'): - utters_to_extend_with_batch = process_system_utter(line) - else: - # todo raise an exception - utters_to_extend_with_batch = [] - return utters_to_extend_with_batch - - story_file = open(story_fpath) - for line in story_file: - line = line.strip() - if not line: - continue - if line.startswith('#'): - # #... 
marks the beginning of new story - if curr_story_utters_batch and curr_story_utters_batch[0] and curr_story_utters_batch[0][-1]["speaker"] == cls._USER_SPEAKER_ID: - for curr_story_utters in curr_story_utters_batch: - curr_story_utters.append(default_system_goodbye) # dialogs MUST end with system replics - - if not nonlocal_curr_story_bad: - for curr_story_utters_ix, curr_story_utters in enumerate(curr_story_utters_batch): - stories_parsed[curr_story_title+f"_{curr_story_utters_ix}"] = curr_story_utters - - curr_story_title = line.strip('#') - curr_story_utters_batch = [[]] - nonlocal_curr_story_bad = False - else: - new_curr_story_utters_batch = [] - possible_extensions = process_story_line(line) - for curr_story_utters in curr_story_utters_batch: - for user_utter in possible_extensions: - new_curr_story_utters = curr_story_utters.copy() - new_curr_story_utters.extend(user_utter) - new_curr_story_utters_batch.append(new_curr_story_utters) - curr_story_utters_batch = new_curr_story_utters_batch - # curr_story_utters.extend(process_story_line(line)) - story_file.close() - - if not nonlocal_curr_story_bad: - for curr_story_utters_ix, curr_story_utters in enumerate(curr_story_utters_batch): - stories_parsed[curr_story_title + f"_{curr_story_utters_ix}"] = curr_story_utters - - tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w', encoding="utf-8") - for story_id, story in stories_parsed.items(): - for replics in story: - print(json.dumps(replics), file=tmp_f) - print(file=tmp_f) - tmp_f.close() - # noinspection PyProtectedMember - gobot_formatted_stories = DSTC2DatasetReader._read_from_file(tmp_f.name, dialogs=dialogs) - os.remove(tmp_f.name) - - log.debug(f"AFTER MLU_MD_DialogsDatasetReader._read_story(): " - f"story_fpath={story_fpath}, " - f"dialogs={dialogs}, " - f"domain_knowledge={domain_knowledge}, " - f"intent2slots2text={intent2slots2text}, " - f"slot_name2text2value={slot_name2text2value}") - - return gobot_formatted_stories - - @classmethod - def augment_form(cls, form_name: str, domain_knowledge: DomainKnowledge, intent2slots2text: Dict) -> List[str]: - """ - Replaced the form mention in stories.md with the actual turns relevant to the form - Args: - form_name: the name of form to generate turns for - domain_knowledge: the domain knowledge (see domain.yml in RASA) relevant to the processed config - intent2slots2text: the mapping of intents and particular slots onto text - - Returns: - the story turns relevant to the passed form - """ - form = domain_knowledge.forms[form_name] # todo handle keyerr - augmended_story = [] - for slot_name, slot_info_li in form.items(): - if slot_info_li and slot_info_li[0].get("type", '') == "from_entity": - # we only handle from_entity slots - known_responses = list(domain_knowledge.response_templates) - known_intents = list(intent2slots2text.keys()) - augmended_story.extend(cls.augment_slot(known_responses, known_intents, slot_name, form_name)) - return augmended_story - - @classmethod - def augment_slot(cls, known_responses: List[str], known_intents: List[str], slot_name: str, form_name: str) \ - -> List[str]: - """ - Given the slot name, generates a sequence of system turn asking for a slot and user' turn providing this slot - - Args: - known_responses: responses known to the system from domain.yml - known_intents: intents known to the system from domain.yml - slot_name: the name of the slot to augment for - form_name: the name of the form for which the turn is augmented - - Returns: - the list of stories.md alike turns - """ - 
ask_slot_act_name = cls.get_augmented_ask_slot_utter(form_name, known_responses, slot_name) - inform_slot_user_utter = cls.get_augmented_ask_intent_utter(known_intents, slot_name) - - return [f"- {ask_slot_act_name}", f"* {inform_slot_user_utter}"] - - @classmethod - def get_augmented_ask_intent_utter(cls, known_intents: List[str], slot_name: str) -> Optional[str]: - """ - if the system knows the inform_{slot} intent, return this intent name, otherwise return None - Args: - known_intents: intents known to the system - slot_name: the slot to look inform intent for - - Returns: - the slot informing intent or None - """ - inform_slot_user_utter_hypothesis = f"inform_{slot_name}" - if inform_slot_user_utter_hypothesis in known_intents: - inform_slot_user_utter = inform_slot_user_utter_hypothesis - else: - # todo raise an exception - inform_slot_user_utter = None - pass - return inform_slot_user_utter - - @classmethod - def get_augmented_ask_slot_utter(cls, form_name: str, known_responses: List[str], slot_name: str): - """ - if the system knows the ask_{slot} action, return this action name, otherwise return None - Args: - form_name: the name of the currently processed form - known_responses: actions known to the system - slot_name: the slot to look asking action for - - Returns: - the slot asking action or None - """ - ask_slot_act_name_hypothesis1 = f"utter_ask_{form_name}_{slot_name}" - ask_slot_act_name_hypothesis2 = f"utter_ask_{slot_name}" - if ask_slot_act_name_hypothesis1 in known_responses: - ask_slot_act_name = ask_slot_act_name_hypothesis1 - elif ask_slot_act_name_hypothesis2 in known_responses: - ask_slot_act_name = ask_slot_act_name_hypothesis2 - else: - # todo raise an exception - ask_slot_act_name = None - pass - return ask_slot_act_name - - @classmethod - def get_last_users_turn(cls, curr_story_utters: List[Dict]) -> Dict: - """ - Given the dstc2 story, return the last user utterance from it - Args: - curr_story_utters: the dstc2-formatted stoyr - - Returns: - the last user utterance from the passed story - """ - *_, last_user_utter = filter(lambda x: x["speaker"] == cls._USER_SPEAKER_ID, curr_story_utters) - return last_user_utter - - @classmethod - def last_turn_is_systems_turn(cls, curr_story_utters): - return curr_story_utters and curr_story_utters[-1]["speaker"] == cls._SYSTEM_SPEAKER_ID - - @classmethod - def parse_system_turn(cls, domain_knowledge: DomainKnowledge, line: str) -> Dict: - """ - Given the RASA stories.md line, returns the dstc2-formatted json (dict) for this line - Args: - domain_knowledge: the domain knowledge relevant to the processed stories config (from which line is taken) - line: the story system step representing line from stories.md - - Returns: - the dstc2-formatted passed turn - """ - # system actions are started in dataset with - - system_action_name = line.strip('-').strip() - curr_action_text = cls._system_action2text(domain_knowledge, system_action_name) - system_action = {"speaker": cls._SYSTEM_SPEAKER_ID, - "text": curr_action_text, - "dialog_acts": [{"act": system_action_name, "slots": []}]} - if system_action_name.startswith("action"): - system_action["db_result"] = {} - return system_action - - @classmethod - def augment_user_turn(cls, intent2slots2text, line: str, slot_name2text2value) -> List[Dict[str, Any]]: - """ - given the turn information generate all the possible stories representing it - Args: - intent2slots2text: the intents and slots to natural language utterances mapping known to the system - line: the line representing used 
utterance in stories.md format - slot_name2text2value: the slot names to values mapping known o the system - - Returns: - the batch of all the possible dstc2 representations of the passed intent - """ - # user actions are started in dataset with * - user_action, slots_dstc2formatted = cls._parse_user_intent(line) - slots_actual_values = cls._clarify_slots_values(slot_name2text2value, slots_dstc2formatted) - slots_to_exclude, slots_used_values, action_for_text = cls._choose_slots_for_whom_exists_text( - intent2slots2text, slots_actual_values, - user_action) - possible_user_response_infos = cls._user_action2text(intent2slots2text, action_for_text, slots_used_values) - possible_user_utters = [] - for user_response_info in possible_user_response_infos: - user_utter = {"speaker": cls._USER_SPEAKER_ID, - "text": user_response_info["text"], - "dialog_acts": [{"act": user_action, "slots": user_response_info["slots"]}], - "slots to exclude": slots_to_exclude} - possible_user_utters.append(user_utter) - return possible_user_utters - - @staticmethod - def _choose_slots_for_whom_exists_text(intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], - slots_actual_values: SLOT2VALUE_PAIRS_TUPLE, - user_action: str) -> Tuple[List, SLOT2VALUE_PAIRS_TUPLE, str]: - """ - - Args: - intent2slots2text: the mapping of intents and slots to natural language utterances representing them - slots_actual_values: the slot values information to look utterance for - user_action: the intent to look utterance for - - Returns: - the slots ommitted to find an NLU candidate, the slots represented in the candidate, the intent name used - """ - possible_keys = [k for k in intent2slots2text.keys() if user_action in k] - possible_keys = possible_keys + [user_action] - possible_keys = sorted(possible_keys, key=lambda action_s: action_s.count('+')) - for possible_action_key in possible_keys: - if intent2slots2text[possible_action_key].get(slots_actual_values): - slots_used_values = slots_actual_values - slots_to_exclude = [] - return slots_to_exclude, slots_used_values, possible_action_key - else: - slots_lazy_key = set(e[0] for e in slots_actual_values) - slots_lazy_key -= {"intent"} - fake_keys = [] - for known_key in intent2slots2text[possible_action_key].keys(): - if slots_lazy_key.issubset(set(e[0] for e in known_key)): - fake_keys.append(known_key) - break - - if fake_keys: - slots_used_values = sorted(fake_keys, key=lambda elem: (len(set(slots_actual_values) ^ set(elem)), - len([e for e in elem - if e[0] not in slots_lazy_key])) - )[0] - - slots_to_exclude = [e[0] for e in slots_used_values if e[0] not in slots_lazy_key] - return slots_to_exclude, slots_used_values, possible_action_key + data = dict() + for subsample_name_short in cls.VALID_DATATYPES: + story_fpath = Path(data_path, cls._data_fname(subsample_name_short)) + with open(story_fpath) as f: + story_lines = f.read().splitlines() - raise KeyError("no possible NLU candidates found") - - @staticmethod - def _clarify_slots_values(slot_name2text2value: Dict[str, Dict[str, Any]], - slots_dstc2formatted: List[List]) -> SLOT2VALUE_PAIRS_TUPLE: - slots_key = [] - for slot_name, slot_value in slots_dstc2formatted: - slot_actual_value = slot_name2text2value.get(slot_name, {}).get(slot_value, slot_value) - slots_key.append((slot_name, slot_actual_value)) - slots_key = tuple(sorted(slots_key)) - return slots_key - - @staticmethod - def _parse_user_intent(line: str, ignore_slots=False) -> Tuple[str, List[List]]: - """ - Given the intent line in RASA stories.md format, 
return the name of the intent and slots described with this line - Args: - line: the line to parse - ignore_slots: whether to ignore slots information - - Returns: - the pair of the intent name and slots ([[slot name, slot value],.. ]) info - """ - intent = line.strip('*').strip() - if '{' not in intent: - intent = intent + "{}" # the prototypical intent is "intent_name{slot1: value1, slotN: valueN}" - user_action, slots_info = intent.split('{', 1) - slots_info = json.loads('{' + slots_info) - slots_dstc2formatted = [[slot_name, slot_value] for slot_name, slot_value in slots_info.items()] - if ignore_slots: - slots_dstc2formatted = dict() - return user_action, slots_dstc2formatted - - @staticmethod - def _user_action2text(intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], - user_action: str, - slots_li: Optional[SLOT2VALUE_PAIRS_TUPLE] = None) -> List[str]: - """ - given the user intent, return the text representing this intent with passed slots - Args: - intent2slots2text: the mapping of intents and slots to natural language utterances - user_action: the name of intent to generate text for - slots_li: the slot values to provide - - Returns: - the text of utterance relevant to the passed intent and slots - """ - if slots_li is None: - slots_li = tuple() - return intent2slots2text[user_action][slots_li] - - @staticmethod - def _system_action2text(domain_knowledge: DomainKnowledge, system_action: str) -> str: - """ - given the system action name return the relevant template text - Args: - domain_knowledge: the domain knowledge relevant to the currently processed config - system_action: the name of the action to get intent for - - Returns: - template relevant to the passed action - """ - possible_system_responses = domain_knowledge.response_templates.get(system_action, - [{"text": system_action}]) + data[short2long_subsample_name[subsample_name_short]] = { + "story_lines": story_lines, + "domain": domain_knowledge, + "nlu_lines": nlu_lines} - response_text = possible_system_responses[0]["text"] - response_text = re.sub(r"(\w+)\=\{(.*?)\}", r"#\2", response_text) # TODO: straightforward regex string - return response_text + return data \ No newline at end of file diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index ec1314036b..0ecab6a094 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -7,7 +7,9 @@ from deeppavlov.core.commands.utils import expand_path from deeppavlov.core.common.file import read_yaml from deeppavlov.core.common.registry import register -from deeppavlov.dataset_readers.md_yaml_dialogs_reader import DomainKnowledge, MD_YAML_DialogsDatasetReader +from deeppavlov.dataset_readers.md_yaml_dialogs_reader import \ + MD_YAML_DialogsDatasetReader +from deeppavlov.dataset_readers.dto.rasa.domain_knowledge import DomainKnowledge from deeppavlov.models.go_bot.nlu.dto.nlu_response import NLUResponse from deeppavlov.models.go_bot.tracker.dto.tracker_knowledge_interface import TrackerKnowledgeInterface from deeppavlov.models.go_bot.tracker.tracker_interface import TrackerInterface diff --git a/deeppavlov/models/slotfill/slotfill_raw.py b/deeppavlov/models/slotfill/slotfill_raw.py index 39c7dff097..0592bb9144 100644 --- a/deeppavlov/models/slotfill/slotfill_raw.py +++ b/deeppavlov/models/slotfill/slotfill_raw.py @@ -25,7 +25,8 @@ from deeppavlov.core.common.registry import register from deeppavlov.core.models.component 
import Component from deeppavlov.core.models.serializable import Serializable -from deeppavlov.dataset_readers.md_yaml_dialogs_reader import MD_YAML_DialogsDatasetReader, DomainKnowledge +from deeppavlov.dataset_readers.md_yaml_dialogs_reader import MD_YAML_DialogsDatasetReader +from deeppavlov.dataset_readers.dto.rasa.domain_knowledge import DomainKnowledge log = getLogger(__name__) From 29197ce217fee95f202ee4f15c6a5c5dd1da5b0b Mon Sep 17 00:00:00 2001 From: oserikov Date: Sat, 8 May 2021 00:46:58 +0300 Subject: [PATCH 065/151] wip unify md_yaml_reader and intent_catcher_reader --- .../dataset_readers/intent_catcher_reader.py | 60 +++++++++++++++++-- deeppavlov/models/go_bot/nlu/nlu_manager.py | 14 ++++- 2 files changed, 67 insertions(+), 7 deletions(-) diff --git a/deeppavlov/dataset_readers/intent_catcher_reader.py b/deeppavlov/dataset_readers/intent_catcher_reader.py index a67ef15a9a..bc8a9d0729 100644 --- a/deeppavlov/dataset_readers/intent_catcher_reader.py +++ b/deeppavlov/dataset_readers/intent_catcher_reader.py @@ -10,6 +10,8 @@ # limitations under the License. from deeppavlov.core.common.file import read_json +from deeppavlov.core.common.file import read_yaml +from collections import defaultdict from logging import getLogger from pathlib import Path from typing import Dict, List, Tuple @@ -22,12 +24,33 @@ @register('intent_catcher_reader') class IntentCatcherReader(DatasetReader): - """Reader for Intent Catcher dataset in json format""" + """Reader for Intent Catcher dataset in json or YAML (RASA v2) format""" - def read(self, data_path: str, *args, **kwargs) -> Dict[str, List[Tuple[str, str]]]: + def parse_rasa_example(self, example: str, regex: bool = False): + search_entities_re = re.compile( + "\[[ a-zA-Z0-9]+\]\([ a-zA-Z0-9]+\)") + example = example[2:] + if not regex: + search_entities = search_entities_re.search(example) + while search_entities is not None: + start, end = search_entities.span() + example = example[:start] + re.sub("\]\([ a-zA-Z0-9]+\)", "", example[start:end])[ + 1:] + example[end:] + search_entities = search_entities_re.search(example) + example = re.sub("\?", "\?", example) + return example + + def read(self, data_path: str, format: str = 'json', *args, **kwargs) -> Dict[str, List[Tuple[str, str]]]: data_types = ["train", "valid", "test"] - train_file = kwargs.get('train', 'train.json') + if format == 'yaml': + fmt = 'yml' + elif format == 'json': + fmt = 'json' + else: + raise Exception("Wrong file format. 
") + + train_file = kwargs.get('train', f'train.{fmt}') if not Path(data_path, train_file).exists(): raise Exception( @@ -39,13 +62,40 @@ def read(self, data_path: str, *args, **kwargs) -> Dict[str, List[Tuple[str, str "test": []} for data_type in data_types: - file_name = kwargs.get(data_type, '{}.{}'.format(data_type, "json")) + file_name = kwargs.get(data_type, '{}.{}'.format(data_type, fmt)) if file_name is None: continue file = Path(data_path).joinpath(file_name) if file.exists(): - ic_file_content = read_json(file) + if format == 'json': + ic_file_content = read_json(file) + elif format == 'yaml': + domain_file = Path(data_path, "domain.yml") + if domain_file.exists(): + domain = read_yaml(domain_file)['intents'] + else: + raise Exception("domain.yml in data path {} does not exist!".format(data_path)) + + ic_file_content = read_yaml(file) + file_data = defaultdict(list) + for part in ic_file_content['nlu']: + if part.get('intent', '') in domain: + intent = part['intent'] + regex = False + elif part.get('regex', '') in domain: + intent = part['regex'] + regex = True + else: + continue + file_data[intent].extend([ + self.parse_rasa_example(example, regex) for example in part.get('examples', '').split("\n") + ]) + if file['version'] == 'dp_2.0': + file_data[intent].extend([self.parse_rasa_example(example, True) for example in part.get('regex_examples', '').split("\n")]) + ic_file_content = file_data + + # noinspection PyUnboundLocalVariable data[data_type] = ic_file_content else: log.warning("Cannot find {} file".format(file)) diff --git a/deeppavlov/models/go_bot/nlu/nlu_manager.py b/deeppavlov/models/go_bot/nlu/nlu_manager.py index e18d74b48f..af5f6c5fc0 100644 --- a/deeppavlov/models/go_bot/nlu/nlu_manager.py +++ b/deeppavlov/models/go_bot/nlu/nlu_manager.py @@ -2,6 +2,9 @@ from typing import List from deeppavlov import Chainer +from deeppavlov.core.data.simple_vocab import SimpleVocabulary +from deeppavlov.models.bert.bert_classifier import BertClassifierModel +from deeppavlov.models.intent_catcher.intent_catcher import IntentCatcher from deeppavlov.models.go_bot.nlu.dto.nlu_response import NLUResponse from deeppavlov.models.go_bot.nlu.nlu_manager_interface import NLUManagerInterface @@ -31,7 +34,13 @@ def __init__(self, tokenizer, slot_filler, intent_classifier, debug=False): self.intent_classifier = intent_classifier self.intents = [] if isinstance(self.intent_classifier, Chainer): - self.intents = self.intent_classifier.get_main_component().classes + component = self.intent_classifier.get_main_component() + if isinstance(component, BertClassifierModel) or isinstance(component, IntentCatcher): + intent2labeltools = [el[-1] for el in self.intent_classifier.pipe if isinstance(el[-1], SimpleVocabulary)] + if intent2labeltools: + self.intents = intent2labeltools[-1]._i2t + else: + self.intents = component.classes if self.debug: log.debug(f"AFTER {self.__class__.__name__} init(): " @@ -63,7 +72,8 @@ def nlu(self, text: str) -> NLUResponse: def _extract_intents_from_tokenized_text_entry(self, tokens: List[str]): # todo meaningful type hints, relies on unannotated intent classifier - intent_features = self.intent_classifier([' '.join(tokens)])[1][0] + classifier_output = self.intent_classifier([' '.join(tokens)]) + intent_features = classifier_output[1][0] return intent_features def _extract_slots_from_tokenized_text_entry(self, tokens: List[str]): From 9cb19d048dbcf7bb5c0bcc672016d3bf2a089f22 Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 10 May 2021 14:10:28 +0300 Subject: [PATCH 
066/151] wip unify md_yaml_reader and intent_catcher_reader

---
 deeppavlov/dataset_readers/intent_catcher_reader.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/deeppavlov/dataset_readers/intent_catcher_reader.py b/deeppavlov/dataset_readers/intent_catcher_reader.py
index bc8a9d0729..8b7b5c7faa 100644
--- a/deeppavlov/dataset_readers/intent_catcher_reader.py
+++ b/deeppavlov/dataset_readers/intent_catcher_reader.py
@@ -8,6 +8,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import re

 from deeppavlov.core.common.file import read_json
 from deeppavlov.core.common.file import read_yaml

From c307262d3879d875965bb6726240711a507df678 Mon Sep 17 00:00:00 2001
From: oserikov
Date: Mon, 10 May 2021 15:22:14 +0300
Subject: [PATCH 067/151] wip unify md_yaml_reader and intent_catcher_reader

---
 deeppavlov/dataset_readers/dto/rasa/nlu.py    | 138 ++++++++++++++++++
 .../dataset_readers/dto/rasa/stories.py       |  44 ++++++
 .../dataset_readers/md_yaml_dialogs_reader.py |  12 +-
 3 files changed, 189 insertions(+), 5 deletions(-)
 create mode 100644 deeppavlov/dataset_readers/dto/rasa/nlu.py
 create mode 100644 deeppavlov/dataset_readers/dto/rasa/stories.py

diff --git a/deeppavlov/dataset_readers/dto/rasa/nlu.py b/deeppavlov/dataset_readers/dto/rasa/nlu.py
new file mode 100644
index 0000000000..d727e42dfb
--- /dev/null
+++ b/deeppavlov/dataset_readers/dto/rasa/nlu.py
@@ -0,0 +1,138 @@
+import re
+from collections import defaultdict
+from typing import List, Tuple, Dict
+
+slots_markup_pattern = r"\[" + \
+                       r"(?P<slot_value>.*?)" + \
+                       r"\]" + \
+                       r"\(" + \
+                       r"(?P<slot_name>.*?)" + \
+                       r"\)"
+
+
+class IntentLine:
+    def __init__(self, text, cleaned_text_slots: List[Tuple] = None):
+        if cleaned_text_slots is None:
+            cleaned_text_slots = list()
+        self.text = text
+        self.slots_key = tuple(sorted((slot[0], slot[1])
+                                      for slot in cleaned_text_slots))
+        self.slots_di = cleaned_text_slots
+        self.slot_name2text2value = None
+
+    @classmethod
+    def from_line(cls, line, ignore_slots=False):
+        intent_text_w_markup = line.strip().strip('-').strip()
+        line_slots_found = re.finditer(slots_markup_pattern,
+                                       intent_text_w_markup)
+        if ignore_slots:
+            line_slots_found = []
+
+        curr_char_ix = 0
+        intent_text_without_markup = ''
+        cleaned_text_slots = []  # intent text can contain slots highlighted
+
+        slot_name2text2value = defaultdict(lambda: defaultdict(list))
+
+        for line_slot in line_slots_found:
+            line_slot_l_span, line_slot_r_span = line_slot.span()
+            # intent w.o. markup for "some [entity](entity_example) text" is "some entity text"
+            # so we should remove brackets and the parentheses content
+            intent_text_without_markup += intent_text_w_markup[
+                                          curr_char_ix:line_slot_l_span]
+
+            slot_value_text = str(line_slot["slot_value"])
+            slot_name = line_slot["slot_name"]
+            slot_value = slot_value_text
+            if ':' in slot_name:
+                # e.g. [moderately](price:moderate)
+                slot_name, slot_value = slot_name.split(':', 1)
+
+            slot_value_new_l_span = len(
+                intent_text_without_markup)  # l span in cleaned text
+            slot_value_new_r_span = slot_value_new_l_span + len(
+                slot_value_text)  # r span in cleaned text
+            # intent w.o. 
markup for "some [entity](entity_example) text" is "some entity text" + # so we should remove brackets and the parentheses content + intent_text_without_markup += slot_value_text + + cleaned_text_slots.append((slot_name, slot_value)) + + slot_name2text2value[slot_name][slot_value_text].append(slot_value) + + curr_char_ix = line_slot_r_span + intent_text_without_markup += intent_text_w_markup[ + curr_char_ix: len(intent_text_w_markup)] + + intent_l = cls(intent_text_without_markup, cleaned_text_slots) + intent_l.slot_name2text2value = slot_name2text2value + + return intent_l + + +class IntentDesc: + def __init__(self, title): + self.title = title + self.lines = list() + + def add_line(self, intent_line:IntentLine): + self.lines.append(intent_line) + + +class Intents: + def __init__(self): + self.intents: List[IntentDesc] = list() + self.lines = None + self._slot_name2text2value = None + self._intent2slot2text = None + + @property + def slot_name2text2value(self) -> Dict: + if self._slot_name2text2value is not None: + return self._slot_name2text2value + sn2t2v = dict() + for intent in self.intents: + for intent_l in intent.lines: + for slot_name, slot_text2value in intent_l.slot_name2text2value.keys(): + if slot_name not in sn2t2v.keys(): + sn2t2v[slot_name] = dict() + for slot_text, slot_values_li in slot_text2value.items(): + if slot_text not in sn2t2v[slot_name].keys() + sn2t2v[slot_name][slot_text] = list() + sn2t2v[slot_name][slot_text].extend(slot_values_li) + self._slot_name2text2value = sn2t2v + return sn2t2v + + @property + def intent2slot2text(self) -> Dict: + if self._intent2slot2text is not None: + return self._intent2slot2text + + intent2slots2text = dict() + for intent in self.intents: + slots2text = dict() + intent_title = intent.title + for intent_l in intent.lines: + slots2text[intent_l.slots_key] = {"text": intent_l.text, + "slots_di": intent_l.slots_di, + "slots": intent_l.slots_key} + intent2slots2text[intent_title] = slots2text + self._intent2slot2text = intent2slots2text + return intent2slots2text + + + @classmethod + def from_nlu_md(cls, lines): + intents = cls() + ignore_slots = False + for line in lines: + if line.startswith("##"): + # lines starting with ## are starting section describing new intent type + curr_intent_name = line.strip("##").strip().split("intent:", 1)[-1] + curr_intent = IntentDesc(curr_intent_name) + intents.intents.append(curr_intent) + if line.strip().startswith('-'): + # lines starting with - are listing the examples of intent texts of the current intent type + intent_l = IntentLine.from_line(line, ignore_slots) + # noinspection PyUnboundLocalVariable + curr_intent.add_line(intent_l) \ No newline at end of file diff --git a/deeppavlov/dataset_readers/dto/rasa/stories.py b/deeppavlov/dataset_readers/dto/rasa/stories.py new file mode 100644 index 0000000000..d46144f34b --- /dev/null +++ b/deeppavlov/dataset_readers/dto/rasa/stories.py @@ -0,0 +1,44 @@ +from typing import List + + +class Turn: + def __init__(self, turn_description: str, whose_turn: str): + self.turn_description = turn_description + self.whose_turn = whose_turn + + +class Story: + def __init__(self, title, turns: List[Turn] = None): + + self.title = title + if turns is None: + turns = list() + self.turns = turns.copy() + + +class Stories: + def __init__(self): + self.stories: List[Story] = list() + self.lines = None + + @classmethod + def from_stories_lines_md(cls, lines: List[str], fmt="md"): + if fmt != "md": + raise Exception(f"Support of fmt {fmt} is not implemented") + + 
stories = cls() + stories.lines = lines.copy() + for line in lines: + if line.startswith('#'): + # #... marks the beginning of new story + curr_story_title = line.strip('#') + curr_story = Story(curr_story_title) + stories.stories.append(curr_story) + if line.startswith('*'): + line_content = line.lstrip('*').strip() + # noinspection PyUnboundLocalVariable + curr_story.turns.append(Turn(line_content, "usr")) + elif line.startswith('-'): + line_content = line.strip('-').strip() + # noinspection PyUnboundLocalVariable + curr_story.turns.append(Turn(line_content, "sys")) \ No newline at end of file diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index da9d77a24c..73751596eb 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -11,16 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - - from logging import getLogger from overrides import overrides from pathlib import Path -from typing import Dict, Tuple, Any +from typing import Dict from deeppavlov.core.common.registry import register from deeppavlov.core.data.dataset_reader import DatasetReader from deeppavlov.dataset_readers.dto.rasa.domain_knowledge import DomainKnowledge +from deeppavlov.dataset_readers.dto.rasa.nlu import Intents +from deeppavlov.dataset_readers.dto.rasa.stories import Stories log = getLogger(__name__) @@ -83,6 +83,7 @@ def read(cls, data_path: str) -> Dict[str, Dict]: nlu_fpath = Path(data_path, nlu_fname) with open(nlu_fpath) as f: nlu_lines = f.read().splitlines() + intents = Intents.from_nlu_md(nlu_lines) short2long_subsample_name = {"trn": "train", "val": "valid", @@ -93,11 +94,12 @@ def read(cls, data_path: str) -> Dict[str, Dict]: story_fpath = Path(data_path, cls._data_fname(subsample_name_short)) with open(story_fpath) as f: story_lines = f.read().splitlines() + stories = Stories.from_stories_lines_md(story_lines) data[short2long_subsample_name[subsample_name_short]] = { - "story_lines": story_lines, + "story_lines": stories, "domain": domain_knowledge, - "nlu_lines": nlu_lines} + "nlu_lines": intents} return data \ No newline at end of file From 27fade113daefeb1946b4ff09a11e45b898c3397 Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 10 May 2021 15:31:52 +0300 Subject: [PATCH 068/151] wip unify md_yaml_reader and intent_catcher_reader --- .../md_yaml_dialogs_iterator.py | 108 +++++------------- 1 file changed, 27 insertions(+), 81 deletions(-) diff --git a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py index 12d67f8a81..6bbb936cbd 100644 --- a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py +++ b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py @@ -23,6 +23,8 @@ from deeppavlov.core.data.data_learning_iterator import DataLearningIterator from deeppavlov.dataset_readers.dstc2_reader import DSTC2DatasetReader from deeppavlov.dataset_readers.dto.rasa.domain_knowledge import DomainKnowledge +from deeppavlov.dataset_readers.dto.rasa.nlu import Intents +from deeppavlov.dataset_readers.dto.rasa.stories import Stories log = getLogger(__name__) @@ -53,93 +55,22 @@ def gen_batches(self, dialogs: bool = False) -> Iterator[Tuple]: data = self.data[data_type] - story_lines = data["story_lines"] + stories: Stories = data["story_lines"] domain = data["domain"] 
- nlu_lines = data["nlu_lines"] - - intent2slots2text, slot_name2text2value = self._read_nlu( - domain, - ignore_slots, - nlu_lines) - s = self._read_story(story_lines, - dialogs, domain, - intent2slots2text, slot_name2text2value, - ignore_slots=ignore_slots) - pass + intents: Intents = data["nlu_lines"] - @classmethod - def _read_nlu(cls, domain_knowledge, ignore_slots, nlu_lines): - slots_markup_pattern = r"\[" + \ - r"(?P.*?)" + \ - r"\]" + \ - r"\(" + \ - r"(?P.*?)" + \ - r"\)" - - intent2slots2text = defaultdict(lambda: defaultdict(list)) - slot_name2text2value = defaultdict(lambda: defaultdict(list)) - curr_intent_name = None - for line in nlu_lines: - if line.startswith("##"): - # lines starting with ## are starting section describing new intent type - curr_intent_name = line.strip("##").strip().split("intent:", 1)[-1] - if line.strip().startswith('-'): - # lines starting with - are listing the examples of intent texts of the current intent type - intent_text_w_markup = line.strip().strip('-').strip() - line_slots_found = re.finditer(slots_markup_pattern, intent_text_w_markup) - if ignore_slots: - line_slots_found = [] - - curr_char_ix = 0 - intent_text_without_markup = '' - cleaned_text_slots = [] # intent text can contain slots highlighted - for line_slot in line_slots_found: - line_slot_l_span, line_slot_r_span = line_slot.span() - # intent w.o. markup for "some [entity](entity_example) text" is "some entity text" - # so we should remove brackets and the parentheses content - intent_text_without_markup += intent_text_w_markup[curr_char_ix:line_slot_l_span] - - slot_value_text = str(line_slot["slot_value"]) - slot_name = line_slot["slot_name"] - slot_value = slot_value_text - if ':' in slot_name: - # e.g. [moderately](price:moderate) - slot_name, slot_value = slot_name.split(':', 1) - - assert slot_name in domain_knowledge.known_slots, f"{slot_name}" + \ - " was not listed as slot " + \ - "in domain knowledge config" - - slot_value_new_l_span = len(intent_text_without_markup) # l span in cleaned text - slot_value_new_r_span = slot_value_new_l_span + len(slot_value_text) # r span in cleaned text - # intent w.o. 
markup for "some [entity](entity_example) text" is "some entity text" - # so we should remove brackets and the parentheses content - intent_text_without_markup += slot_value_text - - cleaned_text_slots.append((slot_name, slot_value)) - - slot_name2text2value[slot_name][slot_value_text].append(slot_value) - - curr_char_ix = line_slot_r_span - intent_text_without_markup += intent_text_w_markup[curr_char_ix: len(intent_text_w_markup)] - - slots_key = tuple(sorted((slot[0], slot[1]) for slot in cleaned_text_slots)) - intent2slots2text[curr_intent_name][slots_key].append( - {"text": intent_text_without_markup, - "slots_di": cleaned_text_slots, - "slots": slots_key}) - # defaultdict behavior is no more needed - intent2slots2text = {k: dict(v) for k, v in intent2slots2text.items()} - slot_name2text2value = dict(slot_name2text2value) - return intent2slots2text, slot_name2text2value + s = self._read_story(stories, + dialogs, domain, + intents, + ignore_slots=ignore_slots) + pass @classmethod def _read_story(cls, - story_lines: List, + stories: Stories, dialogs: bool, domain_knowledge: DomainKnowledge, - intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], - slot_name2text2value: Dict[str, Dict[str, str]], + intents: Intents, ignore_slots: bool = False) \ -> Union[List[List[Tuple[Dict[str, bool], Dict[str, Any]]]], List[ Tuple[Dict[str, bool], Dict[str, Any]]]]: @@ -157,6 +88,21 @@ def _read_story(cls, Returns: stories read as if it was done with DSTC2DatasetReader._read_from_file() """ + + intent2slots2text = intents.intent2slots2text + if ignore_slots: + intent2slots2text_c = dict() + for intent, slots2text in intent2slots2text.items(): + new_slots2text = {tuple(): list()} + for _, texts in slots2text.items(): + new_slots2text[tuple()].extend(texts) + intent2slots2text_c[intent] = new_slots2text + intent2slots2text = intent2slots2text_c + + slot_name2text2value = intents.slot_name2text2value + if ignore_slots: + slot_name2text2value = dict() + log.debug(f"BEFORE MLU_MD_DialogsDatasetReader._read_story(): " f"dialogs={dialogs}, " f"domain_knowledge={domain_knowledge}, " @@ -280,7 +226,7 @@ def process_story_line(line: str) -> List[List[Dict[str, Any]]]: utters_to_extend_with_batch = [] return utters_to_extend_with_batch - for line in story_lines: + for line in stories.lines: line = line.strip() if not line: continue From 61c293817d9b755ab3547c6f2ea438a05aaa173a Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 10 May 2021 16:40:39 +0300 Subject: [PATCH 069/151] wip unify md_yaml_reader and intent_catcher_reader --- .../intent_catcher_iterator.py | 11 ++-- deeppavlov/dataset_readers/dto/rasa/nlu.py | 55 ++++++++++++++++--- .../dataset_readers/intent_catcher_reader.py | 39 +++---------- .../dataset_readers/md_yaml_dialogs_reader.py | 4 +- 4 files changed, 62 insertions(+), 47 deletions(-) diff --git a/deeppavlov/dataset_iterators/intent_catcher_iterator.py b/deeppavlov/dataset_iterators/intent_catcher_iterator.py index 02d05c6a2c..71abe9be24 100644 --- a/deeppavlov/dataset_iterators/intent_catcher_iterator.py +++ b/deeppavlov/dataset_iterators/intent_catcher_iterator.py @@ -20,6 +20,7 @@ from deeppavlov.core.common.registry import register from deeppavlov.core.data.data_learning_iterator import DataLearningIterator +from deeppavlov.dataset_readers.dto.rasa.nlu import Intents, IntentDesc log = getLogger(__name__) @@ -65,12 +66,12 @@ def gen_batches(self, if shuffle is None: shuffle = self.shuffle - ic_file_content = self.data[data_type] + ic_file_content: Intents = 
self.data[data_type]["nlu_lines"]
         sentences, labels = [], []
-        for label, samples in ic_file_content:
-            for phrase in samples:
-                sentences.append(phrase)
-                labels.append(label)
+        for intent in ic_file_content.intents:
+            for intent_line in intent.lines:
+                sentences.append(intent_line.text)
+                labels.append(intent.title)

         assert len(sentences) == len(labels), \
             "Number of labels is not equal to the number of sentences"
diff --git a/deeppavlov/dataset_readers/dto/rasa/nlu.py b/deeppavlov/dataset_readers/dto/rasa/nlu.py
index d727e42dfb..574f649fbb 100644
--- a/deeppavlov/dataset_readers/dto/rasa/nlu.py
+++ b/deeppavlov/dataset_readers/dto/rasa/nlu.py
@@ -2,7 +2,9 @@
 from collections import defaultdict
 from typing import List, Tuple, Dict

-slots_markup_pattern = r"\[" + \
+from deeppavlov.core.common.file import read_yaml
+
+SLOTS_MARKUP_PATTERN = r"\[" + \
                        r"(?P<slot_value>.*?)" + \
                        r"\]" + \
                        r"\(" + \
@@ -22,8 +24,12 @@ def __init__(self, text, cleaned_text_slots: List[Tuple] = None):

     @classmethod
     def from_line(cls, line, ignore_slots=False):
-        intent_text_w_markup = line.strip().strip('-').strip()
-        line_slots_found = re.finditer(slots_markup_pattern,
+        line = line.strip()
+        if line.startswith('-'):
+            intent_text_w_markup = line.lstrip('-').strip()
+        else:
+            intent_text_w_markup = line
+        line_slots_found = re.finditer(SLOTS_MARKUP_PATTERN,
                                        intent_text_w_markup)
         if ignore_slots:
             line_slots_found = []
@@ -73,7 +79,7 @@ def from_line(cls, line, ignore_slots=False):
 class IntentDesc:
     def __init__(self, title):
         self.title = title
-        self.lines = list()
+        self.lines: List[IntentLine] = list()

     def add_line(self, intent_line:IntentLine):
         self.lines.append(intent_line)
@@ -97,14 +103,14 @@ def slot_name2text2value(self) -> Dict:
                     if slot_name not in sn2t2v.keys():
                         sn2t2v[slot_name] = dict()
                     for slot_text, slot_values_li in slot_text2value.items():
-                        if slot_text not in sn2t2v[slot_name].keys()
+                        if slot_text not in sn2t2v[slot_name].keys():
                             sn2t2v[slot_name][slot_text] = list()
                         sn2t2v[slot_name][slot_text].extend(slot_values_li)
         self._slot_name2text2value = sn2t2v
         return sn2t2v

     @property
-    def intent2slot2text(self) -> Dict:
+    def intent2slots2text(self) -> Dict:
         if self._intent2slot2text is not None:
             return self._intent2slot2text

@@ -135,4 +141,39 @@ def from_nlu_md(cls, lines):
             # lines starting with - are listing the examples of intent texts of the current intent type
             intent_l = IntentLine.from_line(line, ignore_slots)
             # noinspection PyUnboundLocalVariable
-            curr_intent.add_line(intent_l)
\ No newline at end of file
+            curr_intent.add_line(intent_l)
+        return intents
+
+    @classmethod
+    def from_file(cls, fpath):
+        format = str(fpath).split('.')[-1]
+        if format in ("yml", "yaml"):
+            ic_file_content = read_yaml(fpath)
+            intents = cls()
+            for part in ic_file_content['nlu']:
+                if "intent" in part:
+                    intent_title = part['intent']
+                    curr_intent = IntentDesc(intent_title)
+                    for example in part.get('examples', '').split("\n"):
+                        example = example.strip().lstrip("*-_").strip()
+                        intent_line = IntentLine.from_line(example)
+                        curr_intent.add_line(intent_line)
+                elif 'regex' in part:
+                    intent_title = part['regex']
+                    curr_intent = IntentDesc(intent_title)
+                    for example in part.get('examples', '').split("\n"):
+                        intent_line = IntentLine(example[2:])
+                        curr_intent.add_line(intent_line)
+                else:
+                    continue
+
+                if ic_file_content['version'] == 'dp_2.0':
+                    for example in part.get('regex_examples', '').split("\n"):
+                        intent_line = IntentLine(example[2:])
+                        curr_intent.add_line(intent_line)
+                intents.intents.append(curr_intent)
+        elif 
format in ("md", "markdown"): + with open(fpath, encoding="utf-8") as f: + nlu_lines = f.readlines() + intents = cls.from_nlu_md(nlu_lines) + return intents diff --git a/deeppavlov/dataset_readers/intent_catcher_reader.py b/deeppavlov/dataset_readers/intent_catcher_reader.py index 8b7b5c7faa..d52d1e16df 100644 --- a/deeppavlov/dataset_readers/intent_catcher_reader.py +++ b/deeppavlov/dataset_readers/intent_catcher_reader.py @@ -19,6 +19,7 @@ from deeppavlov.core.common.registry import register from deeppavlov.core.data.dataset_reader import DatasetReader +from deeppavlov.dataset_readers.dto.rasa.nlu import IntentLine log = getLogger(__file__) @@ -28,17 +29,9 @@ class IntentCatcherReader(DatasetReader): """Reader for Intent Catcher dataset in json or YAML (RASA v2) format""" def parse_rasa_example(self, example: str, regex: bool = False): - search_entities_re = re.compile( - "\[[ a-zA-Z0-9]+\]\([ a-zA-Z0-9]+\)") example = example[2:] if not regex: - search_entities = search_entities_re.search(example) - while search_entities is not None: - start, end = search_entities.span() - example = example[:start] + re.sub("\]\([ a-zA-Z0-9]+\)", "", example[start:end])[ - 1:] + example[end:] - search_entities = search_entities_re.search(example) - example = re.sub("\?", "\?", example) + example = IntentLine.from_line(example).text return example def read(self, data_path: str, format: str = 'json', *args, **kwargs) -> Dict[str, List[Tuple[str, str]]]: @@ -69,32 +62,14 @@ def read(self, data_path: str, format: str = 'json', *args, **kwargs) -> Dict[st file = Path(data_path).joinpath(file_name) if file.exists(): + ic_file_content = None if format == 'json': ic_file_content = read_json(file) + raise Exception("json is not supported anymore." + " Use RASA reader and YAML instead") + elif format == 'yaml': - domain_file = Path(data_path, "domain.yml") - if domain_file.exists(): - domain = read_yaml(domain_file)['intents'] - else: - raise Exception("domain.yml in data path {} does not exist!".format(data_path)) - - ic_file_content = read_yaml(file) - file_data = defaultdict(list) - for part in ic_file_content['nlu']: - if part.get('intent', '') in domain: - intent = part['intent'] - regex = False - elif part.get('regex', '') in domain: - intent = part['regex'] - regex = True - else: - continue - file_data[intent].extend([ - self.parse_rasa_example(example, regex) for example in part.get('examples', '').split("\n") - ]) - if file['version'] == 'dp_2.0': - file_data[intent].extend([self.parse_rasa_example(example, True) for example in part.get('regex_examples', '').split("\n")]) - ic_file_content = file_data + raise Exception("Use RASA reader instead") # noinspection PyUnboundLocalVariable data[data_type] = ic_file_content diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 73751596eb..0ec3505136 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -81,9 +81,7 @@ def read(cls, data_path: str) -> Dict[str, Dict]: domain_path = Path(data_path, domain_fname) domain_knowledge = DomainKnowledge.from_yaml(domain_path) nlu_fpath = Path(data_path, nlu_fname) - with open(nlu_fpath) as f: - nlu_lines = f.read().splitlines() - intents = Intents.from_nlu_md(nlu_lines) + intents = Intents.from_file(nlu_fpath) short2long_subsample_name = {"trn": "train", "val": "valid", From 6f3564478c9ac81b01d1a438b598720dda657d0b Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 13 May 2021 
11:03:24 +0300 Subject: [PATCH 070/151] wip unify md_yaml_reader and intent_catcher_reader --- .../intent_catcher_iterator.py | 11 ++++++++-- .../dataset_readers/md_yaml_dialogs_reader.py | 11 ++++++---- .../models/intent_catcher/intent_catcher.py | 21 ++++++++++++------- 3 files changed, 30 insertions(+), 13 deletions(-) diff --git a/deeppavlov/dataset_iterators/intent_catcher_iterator.py b/deeppavlov/dataset_iterators/intent_catcher_iterator.py index 71abe9be24..9cbc5a2160 100644 --- a/deeppavlov/dataset_iterators/intent_catcher_iterator.py +++ b/deeppavlov/dataset_iterators/intent_catcher_iterator.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import itertools import re from logging import getLogger from typing import Tuple, List, Dict, Any, Iterator @@ -114,4 +114,11 @@ def gen_batches(self, regexps, generated_sentences, generated_labels = [], [], [] log.info(f"Original number of samples: {len(sentences)}" - f", generated samples: {generated_cnt}") \ No newline at end of file + f", generated samples: {generated_cnt}") + + def get_instances(self, data_type: str = 'train') -> Tuple[tuple, tuple]: + res = tuple(map(lambda it: tuple(itertools.chain(*it)), + zip(*self.gen_batches(batch_size=-1, + data_type=data_type, + shuffle=False)))) + return res \ No newline at end of file diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 0ec3505136..c79d3b9a2d 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -25,6 +25,10 @@ log = getLogger(__name__) +class RASADict(dict): + def __add__(self, oth): + return RASADict() + @register('md_yaml_dialogs_reader') class MD_YAML_DialogsDatasetReader(DatasetReader): """ @@ -56,7 +60,7 @@ def _data_fname(cls, datatype: str) -> str: @classmethod @overrides - def read(cls, data_path: str) -> Dict[str, Dict]: + def read(cls, data_path: str, fmt = "md") -> Dict[str, Dict]: """ Parameters: data_path: path to read dataset from @@ -69,7 +73,7 @@ def read(cls, data_path: str) -> Dict[str, Dict]: Each field is a list of tuples ``(x_i, y_i)``. """ domain_fname = cls.DOMAIN_FNAME - nlu_fname = cls.NLU_FNAME + nlu_fname = cls.NLU_FNAME if fmt in ("md", "markdown") else cls.NLU_FNAME.replace('.md', f'.{fmt}') stories_fnames = tuple(cls._data_fname(dt) for dt in cls.VALID_DATATYPES) required_fnames = stories_fnames + (nlu_fname, domain_fname) for required_fname in required_fnames: @@ -98,6 +102,5 @@ def read(cls, data_path: str) -> Dict[str, Dict]: "story_lines": stories, "domain": domain_knowledge, "nlu_lines": intents} - - + data = RASADict(data) return data \ No newline at end of file diff --git a/deeppavlov/models/intent_catcher/intent_catcher.py b/deeppavlov/models/intent_catcher/intent_catcher.py index 118f1610bf..f49e440ced 100644 --- a/deeppavlov/models/intent_catcher/intent_catcher.py +++ b/deeppavlov/models/intent_catcher/intent_catcher.py @@ -160,7 +160,7 @@ def train_on_batch(self, x: list, y: list) -> List[float]: # zip below does [(r1, s1), (r2, s2), ..] -> [r1, r2, ..], [s1, s2, ..] 
passed_regexps, passed_sents = zip(*x) - self.regexps = self.regexps.union(set(passed_regexps)) + self.regexps = self.regexps.union(set(zip(passed_regexps, y))) # region actual trainig embedded_sents = self.session.run(self.embedded, @@ -172,26 +172,31 @@ def train_on_batch(self, x: list, y: list) -> List[float]: def process_event(self, event_name, data): pass - def __call__(self, x: List[str]) -> List[int]: + def __call__(self, x: Union[List[str], List[tuple]]) -> List[int]: """ Predict probabilities. Args: - x: list of input sentences. + x: list of input sentences or List of tuples: Returns: list of probabilities. """ + if x and isinstance(x[0], tuple): + x = [sent for _re, sent in x] return self._predict_proba(x) - def _predict_label(self, sentences: List[str]) -> List[int]: + def _predict_label(self, sentences: Union[List[str], List[tuple]]) -> List[int]: """ Predict labels. Args: - x: list of input sentences. + sentences: list of input sentences or List of tuples: Returns: list of labels. """ + if sentences and isinstance(x[0], tuple): + sentences = [sent for _re, sent in sentences] + labels = [None for i in range(len(sentences))] indx = [] for i, s in enumerate(sentences): @@ -207,15 +212,17 @@ def _predict_label(self, sentences: List[str]) -> List[int]: labels[indx[i]] = l return labels - def _predict_proba(self, x: List[str]) -> List[float]: + def _predict_proba(self, x: Union[List[str], List[tuple]]) -> List[float]: """ Predict probabilities. Used in __call__. Args: - x: list of input sentences. + x: list of input sentences or List of tuples: Returns: list of probabilities """ + if x and isinstance(x[0], tuple): + x = [sent for _re, sent in x] x_embedded = self.session.run(self.embedded, feed_dict={self.sentences:x}) probs = self.classifier.predict_proba(x_embedded) _, num_labels = probs.shape From e59d534abce7b4e114e8847724789c765c1db06d Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 14 May 2021 14:27:35 +0300 Subject: [PATCH 071/151] wip codegen for openapi integration --- .../md_yaml_dialogs_iterator.py | 617 ++---------------- deeppavlov/dataset_readers/dto/rasa/nlu.py | 3 +- 2 files changed, 71 insertions(+), 549 deletions(-) diff --git a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py index 6bbb936cbd..9bd808bbd3 100644 --- a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py +++ b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py @@ -6,579 +6,100 @@ # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software +# Unless required by applicable law or agreed to in writing, softwaredata # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import json -import os -import re -import tempfile -from collections import defaultdict from logging import getLogger -from typing import Tuple, List, Dict, Any, Iterator, Union, Optional +from overrides import overrides +from pathlib import Path +from typing import Dict from deeppavlov.core.common.registry import register -from deeppavlov.core.data.data_learning_iterator import DataLearningIterator -from deeppavlov.dataset_readers.dstc2_reader import DSTC2DatasetReader +from deeppavlov.core.data.dataset_reader import DatasetReader from deeppavlov.dataset_readers.dto.rasa.domain_knowledge import DomainKnowledge from deeppavlov.dataset_readers.dto.rasa.nlu import Intents from deeppavlov.dataset_readers.dto.rasa.stories import Stories log = getLogger(__name__) -SLOT2VALUE_PAIRS_TUPLE = Tuple[Tuple[str, Any], ...] +class RASADict(dict): + def __add__(self, oth): + return RASADict() +@register('md_yaml_dialogs_reader') +class MD_YAML_DialogsDatasetReader(DatasetReader): + """ + Reads dialogs from dataset composed of ``stories.md``, ``nlu.md``, ``domain.yml`` . -""" + ``stories.md`` is to provide the dialogues dataset for model to train on. The dialogues + are represented as user messages labels and system response messages labels: (not texts, just action labels). + This is so to distinguish the NLU-NLG tasks from the actual dialogues storytelling experience: one + should be able to describe just the scripts of dialogues to the system. -""" + ``nlu.md`` is contrariwise to provide the NLU training set irrespective of the dialogues scripts. + ``domain.yml`` is to desribe the task-specific domain and serves two purposes: + provide the NLG templates and provide some specific configuration of the NLU + """ + _USER_SPEAKER_ID = 1 + _SYSTEM_SPEAKER_ID = 2 + VALID_DATATYPES = ('trn', 'val', 'tst') -@register('md_yaml_dialogs_iterator') -class MD_YAML_DialogsDatasetIterator(DataLearningIterator): - def __init__(self, - data: Dict[str, List[Tuple[Any, Any]]], - seed: int = None, - shuffle: bool = True) -> None: - super().__init__(data, seed, shuffle) - - def gen_batches(self, - batch_size: int, - data_type: str = 'train', - shuffle: bool = None, - ignore_slots: bool = False, - dialogs: bool = False) -> Iterator[Tuple]: - - data = self.data[data_type] - stories: Stories = data["story_lines"] - domain = data["domain"] - intents: Intents = data["nlu_lines"] - - s = self._read_story(stories, - dialogs, domain, - intents, - ignore_slots=ignore_slots) - pass + NLU_FNAME = "nlu.md" + DOMAIN_FNAME = "domain.yml" @classmethod - def _read_story(cls, - stories: Stories, - dialogs: bool, - domain_knowledge: DomainKnowledge, - intents: Intents, - ignore_slots: bool = False) \ - -> Union[List[List[Tuple[Dict[str, bool], Dict[str, Any]]]], List[ - Tuple[Dict[str, bool], Dict[str, Any]]]]: - """ - Reads stories from the specified path converting them to go-bot format on the fly. - - Args: - story_fpath: path to the file containing the stories dataset - dialogs: flag which indicates whether to output list of turns or - list of dialogs - domain_knowledge: the domain knowledge, usually inferred from domain.yml - intent2slots2text: the mapping allowing given the intent class and - slotfilling values of utterance, restore utterance text. - slot_name2text2value: the mapping of possible slot values spellings to the values themselves. 
- Returns: - stories read as if it was done with DSTC2DatasetReader._read_from_file() - """ - - intent2slots2text = intents.intent2slots2text - if ignore_slots: - intent2slots2text_c = dict() - for intent, slots2text in intent2slots2text.items(): - new_slots2text = {tuple(): list()} - for _, texts in slots2text.items(): - new_slots2text[tuple()].extend(texts) - intent2slots2text_c[intent] = new_slots2text - intent2slots2text = intent2slots2text_c - - slot_name2text2value = intents.slot_name2text2value - if ignore_slots: - slot_name2text2value = dict() - - log.debug(f"BEFORE MLU_MD_DialogsDatasetReader._read_story(): " - f"dialogs={dialogs}, " - f"domain_knowledge={domain_knowledge}, " - f"intent2slots2text={intent2slots2text}, " - f"slot_name2text2value={slot_name2text2value}") - - default_system_start = { - "speaker": cls._SYSTEM_SPEAKER_ID, - "text": "start", - "dialog_acts": [{"act": "start", "slots": []}]} - default_system_goodbye = { - "text": "goodbye :(", - "dialog_acts": [{"act": "utter_goodbye", "slots": []}], - "speaker": cls._SYSTEM_SPEAKER_ID} # TODO infer from dataset - - stories_parsed = {} - - curr_story_title = None - curr_story_utters_batch = [] - nonlocal_curr_story_bad = False # can be modified as a nonlocal variable - - def process_user_utter(line: str) -> List[List[Dict[str, Any]]]: - """ - given the stories.md user line, returns the batch of all the dstc2 ways to represent it - Args: - line: the system line to generate dstc2 versions for - - Returns: - all the possible dstc2 versions of the passed story line - """ - nonlocal intent2slots2text, slot_name2text2value, curr_story_utters_batch, nonlocal_curr_story_bad - try: - possible_user_utters = cls._augment_user_turn(intent2slots2text, - line, - slot_name2text2value) - # dialogs MUST start with system replics - for curr_story_utters in curr_story_utters_batch: - if not curr_story_utters: - curr_story_utters.append(default_system_start) - - utters_to_append_batch = [] - for user_utter in possible_user_utters: - utters_to_append_batch.append([user_utter]) - - except KeyError: - log.debug(f"INSIDE MLU_MD_DialogsDatasetReader._read_story(): " - f"Skipping story w. 
line {line} because of no NLU candidates found") - nonlocal_curr_story_bad = True - utters_to_append_batch = [] - return utters_to_append_batch - - def process_system_utter(line: str) -> List[List[Dict[str, Any]]]: - """ - given the stories.md system line, returns the batch of all the dstc2 ways to represent it - Args: - line: the system line to generate dstc2 versions for - - Returns: - all the possible dstc2 versions of the passed story line - """ - nonlocal intent2slots2text, domain_knowledge, curr_story_utters_batch, nonlocal_curr_story_bad - system_action = cls._parse_system_turn(domain_knowledge, line) - system_action_name = system_action.get("dialog_acts")[0].get("act") - - for curr_story_utters in curr_story_utters_batch: - if cls._last_turn_is_systems_turn(curr_story_utters): - # deal with consecutive system actions by inserting the last user replics in between - curr_story_utters.append( - cls._get_last_users_turn(curr_story_utters)) - - def parse_form_name(story_line: str) -> str: - """ - if the line (in stories.md utterance format) contains a form name, return it - Args: - story_line: line to extract form name from - - Returns: - the extracted form name or None if no form name found - """ - form_name = None - if story_line.startswith("form"): - form_di = json.loads(story_line[len("form"):]) - form_name = form_di["name"] - return form_name - - if system_action_name.startswith("form"): - form_name = parse_form_name(system_action_name) - augmented_utters = cls._augment_form(form_name, domain_knowledge, - intent2slots2text) - - utters_to_append_batch = [[]] - for user_utter in augmented_utters: - new_curr_story_utters_batch = [] - for curr_story_utters in utters_to_append_batch: - possible_extensions = process_story_line(user_utter) - for possible_extension in possible_extensions: - new_curr_story_utters = curr_story_utters.copy() - new_curr_story_utters.extend(possible_extension) - new_curr_story_utters_batch.append( - new_curr_story_utters) - utters_to_append_batch = new_curr_story_utters_batch - else: - utters_to_append_batch = [[system_action]] - return utters_to_append_batch - - def process_story_line(line: str) -> List[List[Dict[str, Any]]]: - """ - given the stories.md line, returns the batch of all the dstc2 ways to represent it - Args: - line: the line to generate dstc2 versions - - Returns: - all the possible dstc2 versions of the passed story line - """ - if line.startswith('*'): - utters_to_extend_with_batch = process_user_utter(line) - elif line.startswith('-'): - utters_to_extend_with_batch = process_system_utter(line) - else: - # todo raise an exception - utters_to_extend_with_batch = [] - return utters_to_extend_with_batch - - for line in stories.lines: - line = line.strip() - if not line: - continue - if line.startswith('#'): - # #... 
marks the beginning of new story - if curr_story_utters_batch and curr_story_utters_batch[0] and \ - curr_story_utters_batch[0][-1][ - "speaker"] == cls._USER_SPEAKER_ID: - for curr_story_utters in curr_story_utters_batch: - curr_story_utters.append( - default_system_goodbye) # dialogs MUST end with system replics - - if not nonlocal_curr_story_bad: - for curr_story_utters_ix, curr_story_utters in enumerate( - curr_story_utters_batch): - stories_parsed[ - curr_story_title + f"_{curr_story_utters_ix}"] = curr_story_utters - - curr_story_title = line.strip('#') - curr_story_utters_batch = [[]] - nonlocal_curr_story_bad = False - else: - new_curr_story_utters_batch = [] - possible_extensions = process_story_line(line) - for curr_story_utters in curr_story_utters_batch: - for user_utter in possible_extensions: - new_curr_story_utters = curr_story_utters.copy() - new_curr_story_utters.extend(user_utter) - new_curr_story_utters_batch.append( - new_curr_story_utters) - curr_story_utters_batch = new_curr_story_utters_batch - # curr_story_utters.extend(process_story_line(line)) - - if not nonlocal_curr_story_bad: - for curr_story_utters_ix, curr_story_utters in enumerate( - curr_story_utters_batch): - stories_parsed[ - curr_story_title + f"_{curr_story_utters_ix}"] = curr_story_utters - - tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w', - encoding="utf-8") - for story_id, story in stories_parsed.items(): - for replics in story: - print(json.dumps(replics), file=tmp_f) - print(file=tmp_f) - tmp_f.close() - # noinspection PyProtectedMember - gobot_formatted_stories = DSTC2DatasetReader._read_from_file(tmp_f.name, - dialogs=dialogs) - os.remove(tmp_f.name) - - log.debug(f"AFTER MLU_MD_DialogsDatasetReader._read_story(): " - f"dialogs={dialogs}, " - f"domain_knowledge={domain_knowledge}, " - f"intent2slots2text={intent2slots2text}, " - f"slot_name2text2value={slot_name2text2value}") - - return gobot_formatted_stories + def _data_fname(cls, datatype: str) -> str: + assert datatype in cls.VALID_DATATYPES, f"wrong datatype name: {datatype}" + return f"stories-{datatype}.md" @classmethod - def _augment_form(cls, form_name: str, domain_knowledge: DomainKnowledge, - intent2slots2text: Dict) -> List[str]: + @overrides + def read(cls, data_path: str, fmt = "md") -> Dict[str, Dict]: """ - Replaced the form mention in stories.md with the actual turns relevant to the form - Args: - form_name: the name of form to generate turns for - domain_knowledge: the domain knowledge (see domain.yml in RASA) relevant to the processed config - intent2slots2text: the mapping of intents and particular slots onto text + Parameters: + data_path: path to read dataset from Returns: - the story turns relevant to the passed form - """ - form = domain_knowledge.forms[form_name] # todo handle keyerr - augmended_story = [] - for slot_name, slot_info_li in form.items(): - if slot_info_li and slot_info_li[0].get("type", - '') == "from_entity": - # we only handle from_entity slots - known_responses = list(domain_knowledge.response_templates) - known_intents = list(intent2slots2text.keys()) - augmended_story.extend( - cls._augment_slot(known_responses, known_intents, slot_name, - form_name)) - return augmended_story - - @classmethod - def _augment_slot(cls, known_responses: List[str], known_intents: List[str], - slot_name: str, form_name: str) \ - -> List[str]: - """ - Given the slot name, generates a sequence of system turn asking for a slot and user' turn providing this slot - - Args: - known_responses: responses known to the 
system from domain.yml - known_intents: intents known to the system from domain.yml - slot_name: the name of the slot to augment for - form_name: the name of the form for which the turn is augmented - - Returns: - the list of stories.md alike turns - """ - ask_slot_act_name = cls._get_augmented_ask_slot_utter(form_name, - known_responses, - slot_name) - inform_slot_user_utter = cls._get_augmented_ask_intent_utter( - known_intents, slot_name) - - return [f"- {ask_slot_act_name}", f"* {inform_slot_user_utter}"] - - @classmethod - def _get_augmented_ask_intent_utter(cls, known_intents: List[str], - slot_name: str) -> Optional[str]: - """ - if the system knows the inform_{slot} intent, return this intent name, otherwise return None - Args: - known_intents: intents known to the system - slot_name: the slot to look inform intent for - - Returns: - the slot informing intent or None - """ - inform_slot_user_utter_hypothesis = f"inform_{slot_name}" - if inform_slot_user_utter_hypothesis in known_intents: - inform_slot_user_utter = inform_slot_user_utter_hypothesis - else: - # todo raise an exception - inform_slot_user_utter = None - pass - return inform_slot_user_utter - - @classmethod - def _get_augmented_ask_slot_utter(cls, form_name: str, - known_responses: List[str], - slot_name: str): - """ - if the system knows the ask_{slot} action, return this action name, otherwise return None - Args: - form_name: the name of the currently processed form - known_responses: actions known to the system - slot_name: the slot to look asking action for - - Returns: - the slot asking action or None - """ - ask_slot_act_name_hypothesis1 = f"utter_ask_{form_name}_{slot_name}" - ask_slot_act_name_hypothesis2 = f"utter_ask_{slot_name}" - if ask_slot_act_name_hypothesis1 in known_responses: - ask_slot_act_name = ask_slot_act_name_hypothesis1 - elif ask_slot_act_name_hypothesis2 in known_responses: - ask_slot_act_name = ask_slot_act_name_hypothesis2 - else: - # todo raise an exception - ask_slot_act_name = None - pass - return ask_slot_act_name - - @classmethod - def _get_last_users_turn(cls, curr_story_utters: List[Dict]) -> Dict: - """ - Given the dstc2 story, return the last user utterance from it - Args: - curr_story_utters: the dstc2-formatted stoyr - - Returns: - the last user utterance from the passed story - """ - *_, last_user_utter = filter( - lambda x: x["speaker"] == cls._USER_SPEAKER_ID, curr_story_utters) - return last_user_utter - - @classmethod - def _last_turn_is_systems_turn(cls, curr_story_utters): - return curr_story_utters and curr_story_utters[-1][ - "speaker"] == cls._SYSTEM_SPEAKER_ID - - @classmethod - def _parse_system_turn(cls, domain_knowledge: DomainKnowledge, - line: str) -> Dict: - """ - Given the RASA stories.md line, returns the dstc2-formatted json (dict) for this line - Args: - domain_knowledge: the domain knowledge relevant to the processed stories config (from which line is taken) - line: the story system step representing line from stories.md - - Returns: - the dstc2-formatted passed turn - """ - # system actions are started in dataset with - - system_action_name = line.strip('-').strip() - curr_action_text = cls._system_action2text(domain_knowledge, - system_action_name) - system_action = {"speaker": cls._SYSTEM_SPEAKER_ID, - "text": curr_action_text, - "dialog_acts": [ - {"act": system_action_name, "slots": []}]} - if system_action_name.startswith("action"): - system_action["db_result"] = {} - return system_action - - @classmethod - def _augment_user_turn(cls, intent2slots2text, 
line: str, - slot_name2text2value) -> List[Dict[str, Any]]: - """ - given the turn information generate all the possible stories representing it - Args: - intent2slots2text: the intents and slots to natural language utterances mapping known to the system - line: the line representing used utterance in stories.md format - slot_name2text2value: the slot names to values mapping known o the system - - Returns: - the batch of all the possible dstc2 representations of the passed intent - """ - # user actions are started in dataset with * - user_action, slots_dstc2formatted = cls._parse_user_intent(line) - slots_actual_values = cls._clarify_slots_values(slot_name2text2value, - slots_dstc2formatted) - slots_to_exclude, slots_used_values, action_for_text = cls._choose_slots_for_whom_exists_text( - intent2slots2text, slots_actual_values, - user_action) - possible_user_response_infos = cls._user_action2text(intent2slots2text, - action_for_text, - slots_used_values) - possible_user_utters = [] - for user_response_info in possible_user_response_infos: - user_utter = {"speaker": cls._USER_SPEAKER_ID, - "text": user_response_info["text"], - "dialog_acts": [{"act": user_action, - "slots": user_response_info[ - "slots"]}], - "slots to exclude": slots_to_exclude} - possible_user_utters.append(user_utter) - return possible_user_utters - - @staticmethod - def _choose_slots_for_whom_exists_text( - intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], - slots_actual_values: SLOT2VALUE_PAIRS_TUPLE, - user_action: str) -> Tuple[List, SLOT2VALUE_PAIRS_TUPLE, str]: - """ - - Args: - intent2slots2text: the mapping of intents and slots to natural language utterances representing them - slots_actual_values: the slot values information to look utterance for - user_action: the intent to look utterance for - - Returns: - the slots ommitted to find an NLU candidate, the slots represented in the candidate, the intent name used - """ - possible_keys = [k for k in intent2slots2text.keys() if - user_action in k] - possible_keys = possible_keys + [user_action] - possible_keys = sorted(possible_keys, - key=lambda action_s: action_s.count('+')) - for possible_action_key in possible_keys: - if intent2slots2text[possible_action_key].get(slots_actual_values): - slots_used_values = slots_actual_values - slots_to_exclude = [] - return slots_to_exclude, slots_used_values, possible_action_key - else: - slots_lazy_key = set(e[0] for e in slots_actual_values) - slots_lazy_key -= {"intent"} - fake_keys = [] - for known_key in intent2slots2text[possible_action_key].keys(): - if slots_lazy_key.issubset(set(e[0] for e in known_key)): - fake_keys.append(known_key) - break - - if fake_keys: - slots_used_values = sorted(fake_keys, key=lambda elem: ( - len(set(slots_actual_values) ^ set(elem)), - len([e for e in elem - if e[0] not in slots_lazy_key])) - )[0] - - slots_to_exclude = [e[0] for e in slots_used_values if - e[0] not in slots_lazy_key] - return slots_to_exclude, slots_used_values, possible_action_key - - raise KeyError("no possible NLU candidates found") - - @staticmethod - def _clarify_slots_values(slot_name2text2value: Dict[str, Dict[str, Any]], - slots_dstc2formatted: List[ - List]) -> SLOT2VALUE_PAIRS_TUPLE: - slots_key = [] - for slot_name, slot_value in slots_dstc2formatted: - slot_actual_value = slot_name2text2value.get(slot_name, {}).get( - slot_value, slot_value) - slots_key.append((slot_name, slot_actual_value)) - slots_key = tuple(sorted(slots_key)) - return slots_key - - @staticmethod - def 
_parse_user_intent(line: str, ignore_slots=False) -> Tuple[ - str, List[List]]: - """ - Given the intent line in RASA stories.md format, return the name of the intent and slots described with this line - Args: - line: the line to parse - ignore_slots: whether to ignore slots information - - Returns: - the pair of the intent name and slots ([[slot name, slot value],.. ]) info - """ - intent = line.strip('*').strip() - if '{' not in intent: - intent = intent + "{}" # the prototypical intent is "intent_name{slot1: value1, slotN: valueN}" - user_action, slots_info = intent.split('{', 1) - slots_info = json.loads('{' + slots_info) - slots_dstc2formatted = [[slot_name, slot_value] for - slot_name, slot_value in slots_info.items()] - if ignore_slots: - slots_dstc2formatted = dict() - return user_action, slots_dstc2formatted - - @staticmethod - def _user_action2text( - intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], - user_action: str, - slots_li: Optional[SLOT2VALUE_PAIRS_TUPLE] = None) -> List[str]: - """ - given the user intent, return the text representing this intent with passed slots - Args: - intent2slots2text: the mapping of intents and slots to natural language utterances - user_action: the name of intent to generate text for - slots_li: the slot values to provide - - Returns: - the text of utterance relevant to the passed intent and slots - """ - if slots_li is None: - slots_li = tuple() - return intent2slots2text[user_action][slots_li] - - @staticmethod - def _system_action2text(domain_knowledge: DomainKnowledge, - system_action: str) -> str: - """ - given the system action name return the relevant template text - Args: - domain_knowledge: the domain knowledge relevant to the currently processed config - system_action: the name of the action to get intent for - - Returns: - template relevant to the passed action - """ - possible_system_responses = domain_knowledge.response_templates.get( - system_action, - [{"text": system_action}]) - - response_text = possible_system_responses[0]["text"] - response_text = re.sub(r"(\w+)\=\{(.*?)\}", r"#\2", - response_text) # TODO: straightforward regex string - - return response_text + dictionary tha(t contains + ``'train'`` field with dialogs from ``'stories-trn.md'``, + ``'valid'`` field with dialogs from ``'stories-val.md'`` and + ``'test'`` field with dialogs from ``'stories-tst.md'``. + Each field is a list of tuples ``(x_i, y_i)``. 
+ """ + domain_fname = cls.DOMAIN_FNAME + nlu_fname = cls.NLU_FNAME if fmt in ("md", "markdown") else cls.NLU_FNAME.replace('.md', f'.{fmt}') + stories_fnames = tuple(cls._data_fname(dt) for dt in cls.VALID_DATATYPES) + required_fnames = stories_fnames + (nlu_fname, domain_fname) + for required_fname in required_fnames: + required_path = Path(data_path, required_fname) + if not required_path.exists(): + log.error(f"INSIDE MLU_MD_DialogsDatasetReader.read(): " + f"{required_fname} not found with path {required_path}") + + domain_path = Path(data_path, domain_fname) + domain_knowledge = DomainKnowledge.from_yaml(domain_path) + nlu_fpath = Path(data_path, nlu_fname) + intents = Intents.from_file(nlu_fpath) + + short2long_subsample_name = {"trn": "train", + "val": "valid", + "tst": "test"} + + data = RASADict() + for subsample_name_short in cls.VALID_DATATYPES: + story_fpath = Path(data_path, cls._data_fname(subsample_name_short)) + with open(story_fpath) as f: + story_lines = f.read().splitlines() + stories = Stories.from_stories_lines_md(story_lines) + dat = RASADict({"story_lines": stories, + "domain": domain_knowledge, + "nlu_lines": intents}) + data[short2long_subsample_name[subsample_name_short]] = dat + data = RASADict(data) + return data \ No newline at end of file diff --git a/deeppavlov/dataset_readers/dto/rasa/nlu.py b/deeppavlov/dataset_readers/dto/rasa/nlu.py index 574f649fbb..759992a9e7 100644 --- a/deeppavlov/dataset_readers/dto/rasa/nlu.py +++ b/deeppavlov/dataset_readers/dto/rasa/nlu.py @@ -149,6 +149,7 @@ def from_file(cls, fpath): format = str(fpath).split('.')[-1] if format in ("yml", "yaml"): ic_file_content = read_yaml(fpath) + dp_version_present = r'# dp_version: "2.0"' in open(fpath).read() intents = cls() for part in ic_file_content['nlu']: if "intent" in part: @@ -167,7 +168,7 @@ def from_file(cls, fpath): else: continue - if ic_file_content['version'] == 'dp_2.0': + if dp_version_present: for example in part.get('regex_examples', '').split("\n"): intent_line = IntentLine(example[2:]) curr_intent.add_line(intent_line) From a551f4e6bdef9672e0b9c85a5d31c6b2f436bcc1 Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 14 May 2021 15:06:56 +0300 Subject: [PATCH 072/151] wip codegen for openapi integration --- deeppavlov/core/common/registry.json | 3 +- .../classifiers/memorizing_classifier.py | 102 ++++++++++++++++++ 2 files changed, 104 insertions(+), 1 deletion(-) create mode 100644 deeppavlov/models/classifiers/memorizing_classifier.py diff --git a/deeppavlov/core/common/registry.json b/deeppavlov/core/common/registry.json index 30f493b085..64391388ff 100644 --- a/deeppavlov/core/common/registry.json +++ b/deeppavlov/core/common/registry.json @@ -207,5 +207,6 @@ "wiki_sqlite_vocab": "deeppavlov.vocabs.wiki_sqlite:WikiSQLiteVocab", "wikitionary_100K_vocab": "deeppavlov.vocabs.typos:Wiki100KDictionary", "intent_catcher_reader": "deeppavlov.dataset_readers.intent_catcher_reader:IntentCatcherReader", - "intent_catcher": "deeppavlov.models.intent_catcher.intent_catcher:IntentCatcher" + "intent_catcher": "deeppavlov.models.intent_catcher.intent_catcher:IntentCatcher", + "mem_classification_model": "deeppavlov.models.classifiers.memorizing_classifier.MemClassificationModel" } diff --git a/deeppavlov/models/classifiers/memorizing_classifier.py b/deeppavlov/models/classifiers/memorizing_classifier.py new file mode 100644 index 0000000000..8126603d45 --- /dev/null +++ b/deeppavlov/models/classifiers/memorizing_classifier.py @@ -0,0 +1,102 @@ +# Copyright 2017 Neural Networks and 
Deep Learning lab, MIPT +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from pathlib import Path + +from overrides import overrides +from typing import List, Union, Optional + +import numpy as np +import torch +import torch.nn as nn + +from deeppavlov.core.common.file import save_json, read_json +from deeppavlov.core.common.errors import ConfigError +from deeppavlov.core.models.torch_model import TorchModel +from deeppavlov.core.common.registry import register +from .torch_nets import ShallowAndWideCnn +from ...core.models.nn_model import NNModel + +log = logging.getLogger(__name__) + + +@register('mem_classification_model') +class MemClassificationModel(NNModel): + + def __init__(self, n_classes: int, save_path: Optional[Union[str, Path]], + return_probas: bool = True, *args, **kwargs): + super().__init__(save_path, *args, **kwargs) + if n_classes == 0: + raise ConfigError("Please, provide the number of classes setting") + + self.opt = { + "return_probas": return_probas, + } + self.save_path = save_path + self.text2label = dict() + self.classes = list() + + + def __call__(self, texts: List[str], *args) -> Union[List[List[float]], List[int]]: + """Infer on the given data. + + Args: + texts: list of text samples + + Returns: + for each sentence: + vector of probabilities to belong with each class + or list of classes sentence belongs with + """ + outputs = np.zeros(len(texts), self.n_classes) + for text_ix, text in texts: + label = self.text2labels.get(text) + if label is not None: + outputs[text_ix][self.label2ix(label)] = 1. + if self.opt["return_probas"]: + return outputs.tolist() + else: + return np.argmax(outputs, axis=-1).tolist() + + def label2ix(self, label:str): + if label not in self.classes: + return -1 + return self.classes.index(label) + + def train_on_batch(self, texts: List[str], + labels: list) -> Union[float, List[float]]: + """Train the model on the given batch. 
+ + Args: + texts: texts + labels: list of classes + + Returns: + metrics values on the given batch + """ + self.text2label.update(dict(zip(texts, labels))) + self.classes = list(sorted(set(self.classes + labels))) + return 0 + + @overrides + def save(self, *args, **kwargs): + save_json({"classes": self.classes, "text2label": self.text2label}, + self.save_path) + + @overrides + def load(self, *args, **kwargs): + loaded = read_json(self.save_path) + self.classes = loaded["classes"] + self.text2label = loaded["text2label"] From 1ef1f9cf1f05c38ba140b4f8963f985e8faa47e5 Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 14 May 2021 15:18:23 +0300 Subject: [PATCH 073/151] wip agent intents and slotfilling --- deeppavlov/core/common/registry.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/core/common/registry.json b/deeppavlov/core/common/registry.json index 64391388ff..f83e535d9d 100644 --- a/deeppavlov/core/common/registry.json +++ b/deeppavlov/core/common/registry.json @@ -208,5 +208,5 @@ "wikitionary_100K_vocab": "deeppavlov.vocabs.typos:Wiki100KDictionary", "intent_catcher_reader": "deeppavlov.dataset_readers.intent_catcher_reader:IntentCatcherReader", "intent_catcher": "deeppavlov.models.intent_catcher.intent_catcher:IntentCatcher", - "mem_classification_model": "deeppavlov.models.classifiers.memorizing_classifier.MemClassificationModel" + "mem_classification_model": "deeppavlov.models.classifiers.memorizing_classifier:MemClassificationModel" } From f4b4a3bedac8021fad2709cd46d2bd8afd367a09 Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 14 May 2021 15:34:44 +0300 Subject: [PATCH 074/151] wip agent intents and slotfilling --- .../dataset_readers/md_yaml_dialogs_reader.py | 26 ++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 29a1b3f699..e08fd49e6e 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -19,6 +19,8 @@ import tempfile from collections import defaultdict from logging import getLogger +import random + from overrides import overrides from pathlib import Path from typing import Dict, List, Tuple, Union, Any, Optional @@ -90,7 +92,7 @@ def _data_fname(cls, datatype: str) -> str: @classmethod @overrides - def read(cls, data_path: str, dialogs: bool = False, ignore_slots: bool = False) -> Dict[str, List]: + def read(cls, data_path: str, dialogs: bool = False, ignore_slots: bool = False, augment_strategy: str = None) -> Dict[str, List]: """ Parameters: data_path: path to read dataset from @@ -105,6 +107,11 @@ def read(cls, data_path: str, dialogs: bool = False, ignore_slots: bool = False) ``'test'`` field with dialogs from ``'stories-tst.md'``. Each field is a list of tuples ``(x_i, y_i)``. 
""" + if augment_strategy is None: + augment_strategy = "max" + + assert augment_strategy in {"min", "max"} + domain_fname = cls.DOMAIN_FNAME nlu_fname = cls.NLU_FNAME stories_fnames = tuple(cls._data_fname(dt) for dt in cls.VALID_DATATYPES) @@ -127,7 +134,7 @@ def read(cls, data_path: str, dialogs: bool = False, ignore_slots: bool = False) data = {short2long_subsample_name[subsample_name_short]: cls._read_story(Path(data_path, cls._data_fname(subsample_name_short)), dialogs, domain_knowledge, intent2slots2text, slot_name2text2value, - ignore_slots=ignore_slots) + ignore_slots=ignore_slots, augment_strategy=augment_strategy) for subsample_name_short in cls.VALID_DATATYPES} return data @@ -212,6 +219,7 @@ def _read_story(cls, domain_knowledge: DomainKnowledge, intent2slots2text: Dict[str, Dict[SLOT2VALUE_PAIRS_TUPLE, List]], slot_name2text2value: Dict[str, Dict[str, str]], + augment_strategy: str, ignore_slots: bool = False) \ -> Union[List[List[Tuple[Dict[str, bool], Dict[str, Any]]]], List[Tuple[Dict[str, bool], Dict[str, Any]]]]: """ @@ -262,6 +270,8 @@ def process_user_utter(line: str) -> List[List[Dict[str, Any]]]: nonlocal intent2slots2text, slot_name2text2value, curr_story_utters_batch, nonlocal_curr_story_bad try: possible_user_utters = cls.augment_user_turn(intent2slots2text, line, slot_name2text2value) + if augment_strategy == "min": + possible_user_utters = random.choices(possible_user_utters) # dialogs MUST start with system replics for curr_story_utters in curr_story_utters_batch: if not curr_story_utters: @@ -390,7 +400,17 @@ def process_story_line(line: str) -> List[List[Dict[str, Any]]]: # noinspection PyProtectedMember gobot_formatted_stories = DSTC2DatasetReader._read_from_file(tmp_f.name, dialogs=dialogs) os.remove(tmp_f.name) - + if dialogs: + for story in gobot_formatted_stories: + for turn_ix, turn in enumerate(story): + if turn[0] == {'text': '', 'intents': [], 'episode_done': True}: + turn = ({'text': 'start', 'intents': [{'act': 'start', 'slots': []}], 'episode_done': True}, turn[1]) + story[turn_ix] = turn + else: + for turn_ix, turn in enumerate(gobot_formatted_stories): + if turn[0] == {'text': '', 'intents': [], 'episode_done': True}: + turn = ({'text': 'start', 'intents': [{'act': 'start', 'slots': []}], 'episode_done': True}, turn[1]) + gobot_formatted_stories[turn_ix] = turn log.debug(f"AFTER MLU_MD_DialogsDatasetReader._read_story(): " f"story_fpath={story_fpath}, " f"dialogs={dialogs}, " From 7f93f01e019c537ba4580371cdb3c34f5257358b Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 14 May 2021 19:10:59 +0300 Subject: [PATCH 075/151] wip agent intents and slotfilling --- .../classifiers/memorizing_classifier.py | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/deeppavlov/models/classifiers/memorizing_classifier.py b/deeppavlov/models/classifiers/memorizing_classifier.py index 8126603d45..46b43e4840 100644 --- a/deeppavlov/models/classifiers/memorizing_classifier.py +++ b/deeppavlov/models/classifiers/memorizing_classifier.py @@ -14,19 +14,14 @@ import logging from pathlib import Path - -from overrides import overrides from typing import List, Union, Optional import numpy as np -import torch -import torch.nn as nn +from overrides import overrides -from deeppavlov.core.common.file import save_json, read_json from deeppavlov.core.common.errors import ConfigError -from deeppavlov.core.models.torch_model import TorchModel +from deeppavlov.core.common.file import save_json, read_json from deeppavlov.core.common.registry 
import register -from .torch_nets import ShallowAndWideCnn from ...core.models.nn_model import NNModel log = logging.getLogger(__name__) @@ -40,16 +35,17 @@ def __init__(self, n_classes: int, save_path: Optional[Union[str, Path]], super().__init__(save_path, *args, **kwargs) if n_classes == 0: raise ConfigError("Please, provide the number of classes setting") - + self.n_classes = n_classes self.opt = { "return_probas": return_probas, } self.save_path = save_path self.text2label = dict() self.classes = list() + self.is_trained = False - - def __call__(self, texts: List[str], *args) -> Union[List[List[float]], List[int]]: + def __call__(self: "MemClassificationModel", texts: List[str], *args) -> Union[ + List[List[float]], List[int]]: """Infer on the given data. Args: @@ -60,9 +56,9 @@ def __call__(self, texts: List[str], *args) -> Union[List[List[float]], List[int vector of probabilities to belong with each class or list of classes sentence belongs with """ - outputs = np.zeros(len(texts), self.n_classes) + outputs = np.zeros((len(texts), self.n_classes)) for text_ix, text in texts: - label = self.text2labels.get(text) + label = self.text2label.get(text) if label is not None: outputs[text_ix][self.label2ix(label)] = 1. if self.opt["return_probas"]: @@ -70,7 +66,7 @@ def __call__(self, texts: List[str], *args) -> Union[List[List[float]], List[int else: return np.argmax(outputs, axis=-1).tolist() - def label2ix(self, label:str): + def label2ix(self, label: str): if label not in self.classes: return -1 return self.classes.index(label) @@ -88,7 +84,10 @@ def train_on_batch(self, texts: List[str], """ self.text2label.update(dict(zip(texts, labels))) self.classes = list(sorted(set(self.classes + labels))) - return 0 + + pseudo_loss = 0 if self.is_trained else 1 + self.is_trained = True + return pseudo_loss @overrides def save(self, *args, **kwargs): From bb82364b08cb3319ec9222c766a59de2bf24972b Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 14 May 2021 19:26:05 +0300 Subject: [PATCH 076/151] wip agent intents and slotfilling --- deeppavlov/models/classifiers/memorizing_classifier.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/models/classifiers/memorizing_classifier.py b/deeppavlov/models/classifiers/memorizing_classifier.py index 46b43e4840..2988143a67 100644 --- a/deeppavlov/models/classifiers/memorizing_classifier.py +++ b/deeppavlov/models/classifiers/memorizing_classifier.py @@ -57,7 +57,7 @@ def __call__(self: "MemClassificationModel", texts: List[str], *args) -> Union[ or list of classes sentence belongs with """ outputs = np.zeros((len(texts), self.n_classes)) - for text_ix, text in texts: + for text_ix, text in enumerate(texts): label = self.text2label.get(text) if label is not None: outputs[text_ix][self.label2ix(label)] = 1. 
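
Note on the component built up by the patches above: MemClassificationModel (registered as
"mem_classification_model") is a pure lookup-table classifier. train_on_batch only memorizes the
exact text-to-label pairs plus the sorted label inventory, and __call__ returns a one-hot row over
the known classes for texts seen during training and an all-zero row otherwise. Below is a minimal
sketch of that behaviour, assuming the DeepPavlov base classes accept the constructor arguments
the way the patch uses them; the save path and the example utterances and labels are illustrative
only and do not appear in the patches.

    # Sketch: exercising the memorizing classifier directly, outside a pipeline config.
    # "memo_cls.json" and the sample utterances/labels are made-up examples.
    from deeppavlov.models.classifiers.memorizing_classifier import MemClassificationModel

    model = MemClassificationModel(n_classes=2, save_path="memo_cls.json", return_probas=True)

    # "Training" just records the text -> label mapping and the sorted set of labels.
    model.train_on_batch(["hi there", "bye now"], ["greet", "goodbye"])

    # A seen utterance yields a one-hot row over the sorted classes ["goodbye", "greet"];
    # an unseen utterance yields an all-zero row.
    print(model(["hi there"]))      # expected: [[0.0, 1.0]]
    print(model(["how are you"]))   # expected: [[0.0, 0.0]]

Because inference is an exact-string lookup, the model only recognizes utterances it has literally
memorized (and with return_probas=False an unseen text would argmax to index 0, i.e. be reported as
the first known class), which is presumably why it serves here as a lightweight stand-in intent
classifier while the agent's intents and slot filling are being wired up.
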
From 6816bd9a37eee4ffd855e49dd724bddc4d80469f Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 14 May 2021 19:57:08 +0300 Subject: [PATCH 077/151] wip agent intents and slotfilling --- .../classifiers/memorizing_classifier.py | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/deeppavlov/models/classifiers/memorizing_classifier.py b/deeppavlov/models/classifiers/memorizing_classifier.py index 2988143a67..a4b0c00274 100644 --- a/deeppavlov/models/classifiers/memorizing_classifier.py +++ b/deeppavlov/models/classifiers/memorizing_classifier.py @@ -43,6 +43,7 @@ def __init__(self, n_classes: int, save_path: Optional[Union[str, Path]], self.text2label = dict() self.classes = list() self.is_trained = False + self.load() def __call__(self: "MemClassificationModel", texts: List[str], *args) -> Union[ List[List[float]], List[int]]: @@ -82,20 +83,36 @@ def train_on_batch(self, texts: List[str], Returns: metrics values on the given batch """ + if isinstance(labels, np.ndarray): + labels = labels.tolist() + if labels and isinstance(labels[0], np.ndarray): + labels_ = [] + for lab in labels: + label_ixes = np.where(lab)[0].tolist() + if len(label_ixes) != 1: + log.warning("smth wrong with ohe") + label_ix = label_ixes[0] + labels_.append(label_ix) + labels = labels_ self.text2label.update(dict(zip(texts, labels))) self.classes = list(sorted(set(self.classes + labels))) - + print(self.text2label) + print(self.classes) pseudo_loss = 0 if self.is_trained else 1 self.is_trained = True + self.save() return pseudo_loss @overrides def save(self, *args, **kwargs): - save_json({"classes": self.classes, "text2label": self.text2label}, + print("saving") + save_json({"classes": self.classes, + "text2label": self.text2label}, self.save_path) @overrides def load(self, *args, **kwargs): + print("loading") loaded = read_json(self.save_path) self.classes = loaded["classes"] self.text2label = loaded["text2label"] From a17933b5953752434493d6e2a8cfbc9690f0893a Mon Sep 17 00:00:00 2001 From: oserikov Date: Wed, 2 Jun 2021 17:32:41 +0300 Subject: [PATCH 078/151] wip agent intents and slotfilling --- .../models/classifiers/memorizing_classifier.py | 9 ++++++--- deeppavlov/models/go_bot/nlu/nlu_manager.py | 17 +++++++++++++++-- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/deeppavlov/models/classifiers/memorizing_classifier.py b/deeppavlov/models/classifiers/memorizing_classifier.py index a4b0c00274..8e1b1e9c12 100644 --- a/deeppavlov/models/classifiers/memorizing_classifier.py +++ b/deeppavlov/models/classifiers/memorizing_classifier.py @@ -113,6 +113,9 @@ def save(self, *args, **kwargs): @overrides def load(self, *args, **kwargs): print("loading") - loaded = read_json(self.save_path) - self.classes = loaded["classes"] - self.text2label = loaded["text2label"] + try: + loaded = read_json(self.save_path) + self.classes = loaded["classes"] + self.text2label = loaded["text2label"] + except: + log.info("nothing to load") \ No newline at end of file diff --git a/deeppavlov/models/go_bot/nlu/nlu_manager.py b/deeppavlov/models/go_bot/nlu/nlu_manager.py index e18d74b48f..74692c84f2 100644 --- a/deeppavlov/models/go_bot/nlu/nlu_manager.py +++ b/deeppavlov/models/go_bot/nlu/nlu_manager.py @@ -2,6 +2,8 @@ from typing import List from deeppavlov import Chainer +from deeppavlov.core.data.simple_vocab import SimpleVocabulary +from deeppavlov.models.bert.bert_classifier import BertClassifierModel from deeppavlov.models.go_bot.nlu.dto.nlu_response import NLUResponse from 
deeppavlov.models.go_bot.nlu.nlu_manager_interface import NLUManagerInterface @@ -31,7 +33,13 @@ def __init__(self, tokenizer, slot_filler, intent_classifier, debug=False): self.intent_classifier = intent_classifier self.intents = [] if isinstance(self.intent_classifier, Chainer): - self.intents = self.intent_classifier.get_main_component().classes + component = self.intent_classifier.get_main_component() + if isinstance(component, BertClassifierModel): + intent2labeltools = [el[-1] for el in self.intent_classifier.pipe if isinstance(el[-1], SimpleVocabulary)] + if intent2labeltools: + self.intents = intent2labeltools[-1]._i2t + else: + self.intents = component.classes if self.debug: log.debug(f"AFTER {self.__class__.__name__} init(): " @@ -57,7 +65,7 @@ def nlu(self, text: str) -> NLUResponse: intents = [] if callable(self.intent_classifier): - intents = self._extract_intents_from_tokenized_text_entry(tokens) + intents = self._extract_intents_from_text_entry(text) return NLUResponse(slots, intents, tokens) @@ -66,6 +74,11 @@ def _extract_intents_from_tokenized_text_entry(self, tokens: List[str]): intent_features = self.intent_classifier([' '.join(tokens)])[1][0] return intent_features + def _extract_intents_from_text_entry(self, text: str): + # todo meaningful type hints, relies on unannotated intent classifier + intent_features = self.intent_classifier([text])[1][0] + return intent_features + def _extract_slots_from_tokenized_text_entry(self, tokens: List[str]): # todo meaningful type hints, relies on unannotated slot filler return self.slot_filler([tokens])[0] From b4d293f8e9da3cdff6eec35881b7326c52eb0410 Mon Sep 17 00:00:00 2001 From: oserikov Date: Wed, 2 Jun 2021 20:52:37 +0300 Subject: [PATCH 079/151] wip unify md_yaml_reader and intent_catcher_reader --- deeppavlov/dataset_readers/md_yaml_dialogs_reader.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index c79d3b9a2d..79369300a1 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -98,9 +98,9 @@ def read(cls, data_path: str, fmt = "md") -> Dict[str, Dict]: story_lines = f.read().splitlines() stories = Stories.from_stories_lines_md(story_lines) - data[short2long_subsample_name[subsample_name_short]] = { + data[short2long_subsample_name[subsample_name_short]] = RASADict({ "story_lines": stories, "domain": domain_knowledge, - "nlu_lines": intents} + "nlu_lines": intents}) data = RASADict(data) return data \ No newline at end of file From 2eb540d8d5904da5b6ecaee38eed499a79712a55 Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 3 Jun 2021 00:13:06 +0300 Subject: [PATCH 080/151] wip unify md_yaml_reader and intent_catcher_reader --- .../md_yaml_dialogs_iterator.py | 465 +++++++++++++++--- deeppavlov/dataset_readers/dstc2_reader.py | 14 + .../dataset_readers/dto/rasa/stories.py | 12 +- 3 files changed, 426 insertions(+), 65 deletions(-) diff --git a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py index 9bd808bbd3..76f0fe46f9 100644 --- a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py +++ b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py @@ -11,16 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import itertools +import json +import os +import re +import tempfile from logging import getLogger -from overrides import overrides -from pathlib import Path -from typing import Dict +from typing import Dict, List, Tuple, Any, Iterator from deeppavlov.core.common.registry import register -from deeppavlov.core.data.dataset_reader import DatasetReader +from deeppavlov.core.data.data_learning_iterator import DataLearningIterator +from deeppavlov.dataset_readers.dstc2_reader import DSTC2DatasetReader from deeppavlov.dataset_readers.dto.rasa.domain_knowledge import DomainKnowledge +from deeppavlov.dataset_readers.dto.rasa.stories import Story, Turn, Stories from deeppavlov.dataset_readers.dto.rasa.nlu import Intents -from deeppavlov.dataset_readers.dto.rasa.stories import Stories log = getLogger(__name__) @@ -29,77 +33,412 @@ class RASADict(dict): def __add__(self, oth): return RASADict() + @register('md_yaml_dialogs_reader') -class MD_YAML_DialogsDatasetReader(DatasetReader): +class MD_YAML_DialogsDatasetIterator(DataLearningIterator): + """ + """ - Reads dialogs from dataset composed of ``stories.md``, ``nlu.md``, ``domain.yml`` . - ``stories.md`` is to provide the dialogues dataset for model to train on. The dialogues - are represented as user messages labels and system response messages labels: (not texts, just action labels). - This is so to distinguish the NLU-NLG tasks from the actual dialogues storytelling experience: one - should be able to describe just the scripts of dialogues to the system. + def __init__(self, + data: Dict[str, List[Tuple[Any, Any]]], + seed: int = None, + shuffle: bool = True, + limit: int = 10) -> None: + self.limit = limit + super().__init__(data, seed, shuffle) - ``nlu.md`` is contrariwise to provide the NLU training set irrespective of the dialogues scripts. 
+ def gen_batches(self, + batch_size: int, + data_type: str = 'train', + shuffle: bool = None) -> Iterator[Tuple]: + if shuffle is None: + shuffle = self.shuffle - ``domain.yml`` is to desribe the task-specific domain and serves two purposes: - provide the NLG templates and provide some specific configuration of the NLU - """ + data = self.data[data_type] + domain_knowledge = self.data[data_type]["domain"] + intents = self.data[data_type]["nlu"] + stories = self.data[data_type]["stories"] + + dialogs = False + ignore_slots = False + story_iterator = StoriesGenerator(stories, + intents, + domain_knowledge, + ignore_slots, + batch_size) + + for batch in story_iterator.generate(): + stories_parsed = batch + + # tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w', + # encoding="utf-8") + # for story_id, story in stories_parsed.items(): + # for replics in story: + # print(json.dumps(replics), file=tmp_f) + # print(file=tmp_f) + # tmp_f.close() + # noinspection PyProtectedMember + gobot_formatted_stories = DSTC2DatasetReader._read_from_batch(batch, + dialogs=dialogs) + # os.remove(tmp_f.name) + yield gobot_formatted_stories + + + # def read_story(self, stories: Stories, dialogs, + # domain_knowledge: DomainKnowledge, nlu_knowledge: Intents, + # ignore_slots): + # log.debug(f"BEFORE MLU_MD_DialogsDatasetReader._read_story(): " + # f"story_fpath={story_fpath}, " + # f"dialogs={dialogs}, " + # f"domain_knowledge={domain_knowledge}, " + # f"intent2slots2text={intent2slots2text}, " + # f"slot_name2text2value={slot_name2text2value}") + # + # + # + # + # log.debug(f"AFTER MLU_MD_DialogsDatasetReader._read_story(): " + # f"story_fpath={story_fpath}, " + # f"dialogs={dialogs}, " + # f"domain_knowledge={domain_knowledge}, " + # f"intent2slots2text={intent2slots2text}, " + # f"slot_name2text2value={slot_name2text2value}") + # + # return gobot_formatted_stories + + + + + # if len(generated_sentences) == batch_size: + # # tuple(zip) below does [r1, r2, ..], [s1, s2, ..] -> ((r1, s1), (r2, s2), ..) 
+ # yield tuple(zip(regexps, generated_sentences)), generated_labels + # generated_cnt += len(generated_sentences) + # regexps, generated_sentences, generated_labels = [], [], [] + # + # if generated_sentences: + # yield tuple(zip(regexps, generated_sentences)), generated_labels + # generated_cnt += len(generated_sentences) + # regexps, generated_sentences, generated_labels = [], [], [] + # + # log.info(f"Original number of samples: {len(sentences)}" + # f", generated samples: {generated_cnt}") + def get_instances(self, data_type: str = 'train') -> Tuple[ + tuple, tuple]: + res = tuple(map(lambda it: tuple(itertools.chain(*it)), + zip(*self.gen_batches(batch_size=-1, + data_type=data_type, + shuffle=False)))) + return res + + + +class TurnIterator: _USER_SPEAKER_ID = 1 _SYSTEM_SPEAKER_ID = 2 - VALID_DATATYPES = ('trn', 'val', 'tst') + def __init__(self, turn: Turn, nlu: Intents, domain_knowledge: DomainKnowledge, ignore_slots: bool = False): + self.turn = turn + self.intents: Intents = nlu + self.domain_knowledge = domain_knowledge + self.ignore_slots = ignore_slots - NLU_FNAME = "nlu.md" - DOMAIN_FNAME = "domain.yml" + def _clarify_slots_values(self, slots_dstc2formatted): + slots_key = [] + for slot_name, slot_value in slots_dstc2formatted: + slot_actual_value = self.intents.slot_name2text2value.get(slot_name, {}).get( + slot_value, slot_value) + slots_key.append((slot_name, slot_actual_value)) + slots_key = tuple(sorted(slots_key)) + return slots_key - @classmethod - def _data_fname(cls, datatype: str) -> str: - assert datatype in cls.VALID_DATATYPES, f"wrong datatype name: {datatype}" - return f"stories-{datatype}.md" + def parse_user_intent(self): + """ + Given the intent line in RASA stories.md format, return the name of the intent and slots described with this line + Args: + line: the line to parse + Returns: + the pair of the intent name and slots ([[slot name, slot value],.. 
]) info + """ + intent = self.turn.turn_description.strip('*').strip() + if '{' not in intent: + intent = intent + "{}" # the prototypical intent is "intent_name{slot1: value1, slotN: valueN}" + user_action, slots_info = intent.split('{', 1) + slots_info = json.loads('{' + slots_info) + slots_dstc2formatted = [[slot_name, slot_value] for + slot_name, slot_value in slots_info.items()] + if self.ignore_slots: + slots_dstc2formatted = dict() + return user_action, slots_dstc2formatted - @classmethod - @overrides - def read(cls, data_path: str, fmt = "md") -> Dict[str, Dict]: + def choose_slots_for_whom_exists_text(self, slots_actual_values, user_action): """ - Parameters: - data_path: path to read dataset from + Args: + slots_actual_values: the slot values information to look utterance for + user_action: the intent to look utterance for + Returns: + the slots ommitted to find an NLU candidate, the slots represented in the candidate, the intent name used + """ + possible_keys = [k for k in self.intents.intent2slots2text.keys() if + user_action in k] + possible_keys = possible_keys + [user_action] + possible_keys = sorted(possible_keys, + key=lambda action_s: action_s.count('+')) + for possible_action_key in possible_keys: + if self.intents.intent2slots2text[possible_action_key].get(slots_actual_values): + slots_used_values = slots_actual_values + slots_to_exclude = [] + return slots_to_exclude, slots_used_values, possible_action_key + else: + slots_lazy_key = set(e[0] for e in slots_actual_values) + slots_lazy_key -= {"intent"} + fake_keys = [] + for known_key in self.intents.intent2slots2text[possible_action_key].keys(): + if slots_lazy_key.issubset(set(e[0] for e in known_key)): + fake_keys.append(known_key) + break + + if fake_keys: + slots_used_values = sorted(fake_keys, key=lambda elem: ( + len(set(slots_actual_values) ^ set(elem)), + len([e for e in elem + if e[0] not in slots_lazy_key])) + )[0] + + slots_to_exclude = [e[0] for e in slots_used_values if + e[0] not in slots_lazy_key] + return slots_to_exclude, slots_used_values, possible_action_key + raise KeyError("no possible NLU candidates found") + + def user_action2text(self, user_action: str, slots_li = None) : + """ + given the user intent, return the text representing this intent with passed slots + Args: + user_action: the name of intent to generate text for + slots_li: the slot values to provide Returns: - dictionary tha(t contains - ``'train'`` field with dialogs from ``'stories-trn.md'``, - ``'valid'`` field with dialogs from ``'stories-val.md'`` and - ``'test'`` field with dialogs from ``'stories-tst.md'``. - Each field is a list of tuples ``(x_i, y_i)``. 
+ the text of utterance relevant to the passed intent and slots + """ + if slots_li is None: + slots_li = tuple() + return self.intents.intent2slots2text[user_action][slots_li] + + def process_user_turn(self): + user_action, slots_dstc2formatted = self.parse_user_intent() + slots_actual_values = self._clarify_slots_values(slots_dstc2formatted) + slots_to_exclude, slots_used_values, action_for_text = self.choose_slots_for_whom_exists_text(slots_actual_values, user_action) + possible_user_response_infos = self.user_action2text(action_for_text, + slots_used_values) + # possible_user_utters = [] + for user_response_info in possible_user_response_infos: + user_utter = {"speaker": self._USER_SPEAKER_ID, + "text": user_response_info["text"], + "dialog_acts": [{"act": user_action, + "slots": user_response_info[ + "slots"]}], + "slots to exclude": slots_to_exclude} + yield user_utter + + def system_action2text(self, system_action): + """ + given the system action name return the relevant template text + Args: + domain_knowledge: the domain knowledge relevant to the currently processed config + system_action: the name of the action to get intent for + Returns: + template relevant to the passed action + """ + possible_system_responses = self.domain_knowledge.response_templates.get( + system_action, + [{"text": system_action}]) + + response_text = possible_system_responses[0]["text"] + response_text = re.sub(r"(\w+)\=\{(.*?)\}", r"#\2", + response_text) # TODO: straightforward regex string + + return response_text + + def parse_system_turn(self): """ - domain_fname = cls.DOMAIN_FNAME - nlu_fname = cls.NLU_FNAME if fmt in ("md", "markdown") else cls.NLU_FNAME.replace('.md', f'.{fmt}') - stories_fnames = tuple(cls._data_fname(dt) for dt in cls.VALID_DATATYPES) - required_fnames = stories_fnames + (nlu_fname, domain_fname) - for required_fname in required_fnames: - required_path = Path(data_path, required_fname) - if not required_path.exists(): - log.error(f"INSIDE MLU_MD_DialogsDatasetReader.read(): " - f"{required_fname} not found with path {required_path}") - - domain_path = Path(data_path, domain_fname) - domain_knowledge = DomainKnowledge.from_yaml(domain_path) - nlu_fpath = Path(data_path, nlu_fname) - intents = Intents.from_file(nlu_fpath) - - short2long_subsample_name = {"trn": "train", - "val": "valid", - "tst": "test"} - - data = RASADict() - for subsample_name_short in cls.VALID_DATATYPES: - story_fpath = Path(data_path, cls._data_fname(subsample_name_short)) - with open(story_fpath) as f: - story_lines = f.read().splitlines() - stories = Stories.from_stories_lines_md(story_lines) - dat = RASADict({"story_lines": stories, - "domain": domain_knowledge, - "nlu_lines": intents}) - data[short2long_subsample_name[subsample_name_short]] = dat - data = RASADict(data) - return data \ No newline at end of file + Given the RASA stories.md line, returns the dstc2-formatted json (dict) for this line + Args: + domain_knowledge: the domain knowledge relevant to the processed stories config (from which line is taken) + line: the story system step representing line from stories.md + Returns: + the dstc2-formatted passed turn + """ + # system actions are started in dataset with - + system_action_name = self.turn.turn_description.strip('-').strip() + curr_action_text = self.system_action2text(system_action_name) + system_action = {"speaker": self._SYSTEM_SPEAKER_ID, + "text": curr_action_text, + "dialog_acts": [ + {"act": system_action_name, "slots": []}]} + if system_action_name.startswith("action"): + 
system_action["db_result"] = {} + return system_action + + def process_system_utter(self): + """ + Yields: all the possible dstc2 versions of the passed story line + TODO: SUPPORT FORMS + """ + # nonlocal intent2slots2text, domain_knowledge, curr_story_utters_batch, nonlocal_curr_story_bad + system_action = self.parse_system_turn() + # system_action_name = system_action.get("dialog_acts")[0].get("act") + # + # for curr_story_utters in curr_story_utters_batch: + # if cls.last_turn_is_systems_turn(curr_story_utters): + # # deal with consecutive system actions by inserting the last user replics in between + # curr_story_utters.append( + # cls.get_last_users_turn(curr_story_utters)) + # + # def parse_form_name(story_line: str) -> str: + # """ + # if the line (in stories.md utterance format) contains a form name, return it + # Args: + # story_line: line to extract form name from + # Returns: + # the extracted form name or None if no form name found + # """ + # form_name = None + # if story_line.startswith("form"): + # form_di = json.loads(story_line[len("form"):]) + # form_name = form_di["name"] + # return form_name + # + # if system_action_name.startswith("form"): + # form_name = parse_form_name(system_action_name) + # augmented_utters = cls.augment_form(form_name, domain_knowledge, + # intent2slots2text) + # + # utters_to_append_batch = [[]] + # for user_utter in augmented_utters: + # new_curr_story_utters_batch = [] + # for curr_story_utters in utters_to_append_batch: + # possible_extensions = process_story_line(user_utter) + # for possible_extension in possible_extensions: + # new_curr_story_utters = curr_story_utters.copy() + # new_curr_story_utters.extend(possible_extension) + # new_curr_story_utters_batch.append( + # new_curr_story_utters) + # utters_to_append_batch = new_curr_story_utters_batch + # else: + # utters_to_append_batch = [[system_action]] + + yield system_action + + + def __call__(self): + if self.turn.is_user_turn(): + for possible_turn in self.process_user_turn(): + yield possible_turn + elif self.turn.is_system_turn(): + for possible_turn in self.process_system_utter(): + yield possible_turn + + +def iterProduct(ic): + # https://stackoverflow.com/a/12094245 + if not ic: + yield [] + return + + for i in ic[0](): + for js in iterProduct(ic[1:]): + yield [i] + js + +class StoryGenerator: + def __init__(self, story: Story, nlu: Intents, domain_knowledge: DomainKnowledge, ignore_slots=False): + self.story: Story = story + self.turn_iterators = [] + for turn in story: + turn_iterator = TurnIterator(turn, nlu, domain_knowledge, ignore_slots) + self.turn_iterators.append(turn_iterator) + self.turn_ix = -1 + self.version_ix = -1 + + def gen_story_sample(self): + for i in iterProduct(self.turn_iterators): + yield i + +class StoriesGenerator: + def __init__(self, stories: Stories, intents: Intents, domain_knowledge: DomainKnowledge, ignore_slots: False, batch_size = 1): + self.stories = stories + self.intents = intents + self.domain_knowledge = domain_knowledge + self.ignore_slots = ignore_slots + self.batch_size = batch_size + + def generate(self): + batch = dict() + for story in self.stories.stories: + story_generator = StoryGenerator(story,self.intents, + self.domain_knowledge,self.ignore_slots) + for story_data in story_generator.gen_story_sample(): + batch[story.title] = story_data + if len(batch) == self.batch_size: + yield batch + batch = dict() + yield batch + +# _USER_SPEAKER_ID = 1 +# _SYSTEM_SPEAKER_ID = 2 +# +# VALID_DATATYPES = ('trn', 'val', 'tst') +# +# NLU_FNAME = 
"nlu.md" +# DOMAIN_FNAME = "domain.yml" +# +# @classmethod +# def _data_fname(cls, datatype: str) -> str: +# assert datatype in cls.VALID_DATATYPES, f"wrong datatype name: {datatype}" +# return f"stories-{datatype}.md" +# +# @classmethod +# @overrides +# def read(cls, data_path: str, fmt = "md") -> Dict[str, Dict]: +# """ +# Parameters: +# data_path: path to read dataset from +# +# Returns: +# dictionary tha(t contains +# ``'train'`` field with dialogs from ``'stories-trn.md'``, +# ``'valid'`` field with dialogs from ``'stories-val.md'`` and +# ``'test'`` field with dialogs from ``'stories-tst.md'``. +# Each field is a list of tuples ``(x_i, y_i)``. +# """ +# domain_fname = cls.DOMAIN_FNAME +# nlu_fname = cls.NLU_FNAME if fmt in ("md", "markdown") else cls.NLU_FNAME.replace('.md', f'.{fmt}') +# stories_fnames = tuple(cls._data_fname(dt) for dt in cls.VALID_DATATYPES) +# required_fnames = stories_fnames + (nlu_fname, domain_fname) +# for required_fname in required_fnames: +# required_path = Path(data_path, required_fname) +# if not required_path.exists(): +# log.error(f"INSIDE MLU_MD_DialogsDatasetReader.read(): " +# f"{required_fname} not found with path {required_path}") +# +# domain_path = Path(data_path, domain_fname) +# domain_knowledge = DomainKnowledge.from_yaml(domain_path) +# nlu_fpath = Path(data_path, nlu_fname) +# intents = Intents.from_file(nlu_fpath) +# +# short2long_subsample_name = {"trn": "train", +# "val": "valid", +# "tst": "test"} +# +# data = RASADict() +# for subsample_name_short in cls.VALID_DATATYPES: +# story_fpath = Path(data_path, cls._data_fname(subsample_name_short)) +# with open(story_fpath) as f: +# story_lines = f.read().splitlines() +# stories = Stories.from_stories_lines_md(story_lines) +# dat = RASADict({"story_lines": stories, +# "domain": domain_knowledge, +# "nlu_lines": intents}) +# data[short2long_subsample_name[subsample_name_short]] = dat +# data = RASADict(data) +# return data diff --git a/deeppavlov/dataset_readers/dstc2_reader.py b/deeppavlov/dataset_readers/dstc2_reader.py index 55127f297a..63821a5472 100644 --- a/deeppavlov/dataset_readers/dstc2_reader.py +++ b/deeppavlov/dataset_readers/dstc2_reader.py @@ -120,6 +120,20 @@ def _read_from_file(cls, file_path, dialogs=False): return [data[idx['start']:idx['end']] for idx in dialog_indices] return data + @classmethod + def _read_from_batch(cls, batch, dialogs=False): + """Returns data from single batch""" + log.info(f"[loading dialogs from batch of len {len(batch)}]") + + utterances, responses, dialog_indices = \ + cls._get_turns(batch, with_indices=True) + + data = list(map(cls._format_turn, zip(utterances, responses))) + + if dialogs: + return [data[idx['start']:idx['end']] for idx in dialog_indices] + return data + @staticmethod def _format_turn(turn): turn_x, turn_y = turn diff --git a/deeppavlov/dataset_readers/dto/rasa/stories.py b/deeppavlov/dataset_readers/dto/rasa/stories.py index d46144f34b..abe7a73243 100644 --- a/deeppavlov/dataset_readers/dto/rasa/stories.py +++ b/deeppavlov/dataset_readers/dto/rasa/stories.py @@ -1,11 +1,18 @@ from typing import List +USER = "usr" +SYSTEM = "sys" class Turn: def __init__(self, turn_description: str, whose_turn: str): self.turn_description = turn_description self.whose_turn = whose_turn + def is_user_turn(self): + return self.whose_turn == USER + + def is_system_turn(self): + return self.whose_turn == SYSTEM class Story: def __init__(self, title, turns: List[Turn] = None): @@ -27,6 +34,7 @@ def from_stories_lines_md(cls, lines: List[str], fmt="md"): 
raise Exception(f"Support of fmt {fmt} is not implemented") stories = cls() + lines = [line.strip() for line in lines if line.strip()] stories.lines = lines.copy() for line in lines: if line.startswith('#'): @@ -37,8 +45,8 @@ def from_stories_lines_md(cls, lines: List[str], fmt="md"): if line.startswith('*'): line_content = line.lstrip('*').strip() # noinspection PyUnboundLocalVariable - curr_story.turns.append(Turn(line_content, "usr")) + curr_story.turns.append(Turn(line_content, USER)) elif line.startswith('-'): line_content = line.strip('-').strip() # noinspection PyUnboundLocalVariable - curr_story.turns.append(Turn(line_content, "sys")) \ No newline at end of file + curr_story.turns.append(Turn(line_content, SYSTEM)) \ No newline at end of file From 90fa5609cc201401c8e296ea52e09c609b23bf4a Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 4 Jun 2021 00:37:13 +0300 Subject: [PATCH 081/151] wip unify md_yaml_reader and intent_catcher_reader --- deeppavlov/core/common/registry.json | 3 +- .../md_yaml_dialogs_iterator.py | 92 ++++++++++--------- deeppavlov/dataset_readers/dto/rasa/nlu.py | 6 +- .../dataset_readers/dto/rasa/stories.py | 3 +- 4 files changed, 58 insertions(+), 46 deletions(-) diff --git a/deeppavlov/core/common/registry.json b/deeppavlov/core/common/registry.json index 5f1572a7b5..3012a62b24 100644 --- a/deeppavlov/core/common/registry.json +++ b/deeppavlov/core/common/registry.json @@ -219,5 +219,6 @@ "wiki_sqlite_vocab": "deeppavlov.vocabs.wiki_sqlite:WikiSQLiteVocab", "wikitionary_100K_vocab": "deeppavlov.vocabs.typos:Wiki100KDictionary", "intent_catcher_reader": "deeppavlov.dataset_readers.intent_catcher_reader:IntentCatcherReader", - "intent_catcher": "deeppavlov.models.intent_catcher.intent_catcher:IntentCatcher" + "intent_catcher": "deeppavlov.models.intent_catcher.intent_catcher:IntentCatcher", + "md_yaml_dialogs_iterator": "deeppavlov.dataset_iterators.md_yaml_dialogs_iterator:MD_YAML_DialogsDatasetIterator" } \ No newline at end of file diff --git a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py index 76f0fe46f9..2ba0620133 100644 --- a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py +++ b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py @@ -34,7 +34,7 @@ def __add__(self, oth): return RASADict() -@register('md_yaml_dialogs_reader') +@register('md_yaml_dialogs_iterator') class MD_YAML_DialogsDatasetIterator(DataLearningIterator): """ @@ -57,8 +57,8 @@ def gen_batches(self, data = self.data[data_type] domain_knowledge = self.data[data_type]["domain"] - intents = self.data[data_type]["nlu"] - stories = self.data[data_type]["stories"] + intents = self.data[data_type]["nlu_lines"] + stories = self.data[data_type]["story_lines"] dialogs = False ignore_slots = False @@ -79,12 +79,12 @@ def gen_batches(self, # print(file=tmp_f) # tmp_f.close() # noinspection PyProtectedMember - gobot_formatted_stories = DSTC2DatasetReader._read_from_batch(batch, - dialogs=dialogs) + gobot_formatted_stories = DSTC2DatasetReader._read_from_batch( + list(itertools.chain(*[v + [{}] for v in batch.values()])), + dialogs=dialogs) # os.remove(tmp_f.name) yield gobot_formatted_stories - # def read_story(self, stories: Stories, dialogs, # domain_knowledge: DomainKnowledge, nlu_knowledge: Intents, # ignore_slots): @@ -107,22 +107,19 @@ def gen_batches(self, # # return gobot_formatted_stories - - - # if len(generated_sentences) == batch_size: - # # tuple(zip) below does [r1, r2, ..], [s1, s2, ..] 
-> ((r1, s1), (r2, s2), ..) - # yield tuple(zip(regexps, generated_sentences)), generated_labels - # generated_cnt += len(generated_sentences) - # regexps, generated_sentences, generated_labels = [], [], [] - # - # if generated_sentences: - # yield tuple(zip(regexps, generated_sentences)), generated_labels - # generated_cnt += len(generated_sentences) - # regexps, generated_sentences, generated_labels = [], [], [] - # - # log.info(f"Original number of samples: {len(sentences)}" - # f", generated samples: {generated_cnt}") + # # tuple(zip) below does [r1, r2, ..], [s1, s2, ..] -> ((r1, s1), (r2, s2), ..) + # yield tuple(zip(regexps, generated_sentences)), generated_labels + # generated_cnt += len(generated_sentences) + # regexps, generated_sentences, generated_labels = [], [], [] + # + # if generated_sentences: + # yield tuple(zip(regexps, generated_sentences)), generated_labels + # generated_cnt += len(generated_sentences) + # regexps, generated_sentences, generated_labels = [], [], [] + # + # log.info(f"Original number of samples: {len(sentences)}" + # f", generated samples: {generated_cnt}") def get_instances(self, data_type: str = 'train') -> Tuple[ tuple, tuple]: @@ -133,12 +130,12 @@ def get_instances(self, data_type: str = 'train') -> Tuple[ return res - class TurnIterator: _USER_SPEAKER_ID = 1 _SYSTEM_SPEAKER_ID = 2 - def __init__(self, turn: Turn, nlu: Intents, domain_knowledge: DomainKnowledge, ignore_slots: bool = False): + def __init__(self, turn: Turn, nlu: Intents, + domain_knowledge: DomainKnowledge, ignore_slots: bool = False): self.turn = turn self.intents: Intents = nlu self.domain_knowledge = domain_knowledge @@ -147,7 +144,8 @@ def __init__(self, turn: Turn, nlu: Intents, domain_knowledge: DomainKnowledge, def _clarify_slots_values(self, slots_dstc2formatted): slots_key = [] for slot_name, slot_value in slots_dstc2formatted: - slot_actual_value = self.intents.slot_name2text2value.get(slot_name, {}).get( + slot_actual_value = self.intents.slot_name2text2value.get(slot_name, + {}).get( slot_value, slot_value) slots_key.append((slot_name, slot_actual_value)) slots_key = tuple(sorted(slots_key)) @@ -172,7 +170,8 @@ def parse_user_intent(self): slots_dstc2formatted = dict() return user_action, slots_dstc2formatted - def choose_slots_for_whom_exists_text(self, slots_actual_values, user_action): + def choose_slots_for_whom_exists_text(self, slots_actual_values, + user_action): """ Args: slots_actual_values: the slot values information to look utterance for @@ -186,7 +185,8 @@ def choose_slots_for_whom_exists_text(self, slots_actual_values, user_action): possible_keys = sorted(possible_keys, key=lambda action_s: action_s.count('+')) for possible_action_key in possible_keys: - if self.intents.intent2slots2text[possible_action_key].get(slots_actual_values): + if self.intents.intent2slots2text[possible_action_key].get( + slots_actual_values): slots_used_values = slots_actual_values slots_to_exclude = [] return slots_to_exclude, slots_used_values, possible_action_key @@ -194,16 +194,17 @@ def choose_slots_for_whom_exists_text(self, slots_actual_values, user_action): slots_lazy_key = set(e[0] for e in slots_actual_values) slots_lazy_key -= {"intent"} fake_keys = [] - for known_key in self.intents.intent2slots2text[possible_action_key].keys(): + for known_key in self.intents.intent2slots2text[ + possible_action_key].keys(): if slots_lazy_key.issubset(set(e[0] for e in known_key)): fake_keys.append(known_key) break if fake_keys: slots_used_values = sorted(fake_keys, key=lambda elem: ( 
- len(set(slots_actual_values) ^ set(elem)), - len([e for e in elem - if e[0] not in slots_lazy_key])) + len(set(slots_actual_values) ^ set(elem)), + len([e for e in elem + if e[0] not in slots_lazy_key])) )[0] slots_to_exclude = [e[0] for e in slots_used_values if @@ -212,7 +213,7 @@ def choose_slots_for_whom_exists_text(self, slots_actual_values, user_action): raise KeyError("no possible NLU candidates found") - def user_action2text(self, user_action: str, slots_li = None) : + def user_action2text(self, user_action: str, slots_li=None): """ given the user intent, return the text representing this intent with passed slots Args: @@ -223,16 +224,19 @@ def user_action2text(self, user_action: str, slots_li = None) : """ if slots_li is None: slots_li = tuple() - return self.intents.intent2slots2text[user_action][slots_li] + res = self.intents.intent2slots2text[user_action][slots_li] + return res def process_user_turn(self): user_action, slots_dstc2formatted = self.parse_user_intent() slots_actual_values = self._clarify_slots_values(slots_dstc2formatted) - slots_to_exclude, slots_used_values, action_for_text = self.choose_slots_for_whom_exists_text(slots_actual_values, user_action) + slots_to_exclude, slots_used_values, action_for_text = self.choose_slots_for_whom_exists_text( + slots_actual_values, user_action) possible_user_response_infos = self.user_action2text(action_for_text, - slots_used_values) + slots_used_values) # possible_user_utters = [] for user_response_info in possible_user_response_infos: + print(user_response_info) user_utter = {"speaker": self._USER_SPEAKER_ID, "text": user_response_info["text"], "dialog_acts": [{"act": user_action, @@ -351,11 +355,13 @@ def iterProduct(ic): yield [i] + js class StoryGenerator: - def __init__(self, story: Story, nlu: Intents, domain_knowledge: DomainKnowledge, ignore_slots=False): + def __init__(self, story: Story, nlu: Intents, + domain_knowledge: DomainKnowledge, ignore_slots=False): self.story: Story = story self.turn_iterators = [] - for turn in story: - turn_iterator = TurnIterator(turn, nlu, domain_knowledge, ignore_slots) + for turn in story.turns: + turn_iterator = TurnIterator(turn, nlu, domain_knowledge, + ignore_slots) self.turn_iterators.append(turn_iterator) self.turn_ix = -1 self.version_ix = -1 @@ -364,19 +370,23 @@ def gen_story_sample(self): for i in iterProduct(self.turn_iterators): yield i + class StoriesGenerator: - def __init__(self, stories: Stories, intents: Intents, domain_knowledge: DomainKnowledge, ignore_slots: False, batch_size = 1): + def __init__(self, stories: Stories, intents: Intents, + domain_knowledge: DomainKnowledge, ignore_slots: False, + batch_size=1): self.stories = stories self.intents = intents self.domain_knowledge = domain_knowledge self.ignore_slots = ignore_slots - self.batch_size = batch_size + self.batch_size = batch_size def generate(self): batch = dict() for story in self.stories.stories: - story_generator = StoryGenerator(story,self.intents, - self.domain_knowledge,self.ignore_slots) + story_generator = StoryGenerator(story, self.intents, + self.domain_knowledge, + self.ignore_slots) for story_data in story_generator.gen_story_sample(): batch[story.title] = story_data if len(batch) == self.batch_size: diff --git a/deeppavlov/dataset_readers/dto/rasa/nlu.py b/deeppavlov/dataset_readers/dto/rasa/nlu.py index 759992a9e7..3e3d4912bc 100644 --- a/deeppavlov/dataset_readers/dto/rasa/nlu.py +++ b/deeppavlov/dataset_readers/dto/rasa/nlu.py @@ -116,12 +116,12 @@ def intent2slots2text(self) -> Dict: 
intent2slots2text = dict() for intent in self.intents: - slots2text = dict() + slots2text = defaultdict(list) intent_title = intent.title for intent_l in intent.lines: - slots2text[intent_l.slots_key] = {"text": intent_l.text, + slots2text[intent_l.slots_key].append({"text": intent_l.text, "slots_di": intent_l.slots_di, - "slots": intent_l.slots_key} + "slots": intent_l.slots_key}) intent2slots2text[intent_title] = slots2text self._intent2slot2text = intent2slots2text return intent2slots2text diff --git a/deeppavlov/dataset_readers/dto/rasa/stories.py b/deeppavlov/dataset_readers/dto/rasa/stories.py index abe7a73243..876f0c1a6d 100644 --- a/deeppavlov/dataset_readers/dto/rasa/stories.py +++ b/deeppavlov/dataset_readers/dto/rasa/stories.py @@ -49,4 +49,5 @@ def from_stories_lines_md(cls, lines: List[str], fmt="md"): elif line.startswith('-'): line_content = line.strip('-').strip() # noinspection PyUnboundLocalVariable - curr_story.turns.append(Turn(line_content, SYSTEM)) \ No newline at end of file + curr_story.turns.append(Turn(line_content, SYSTEM)) + return stories From 8c63c4e6aa8022a07686ebaf5741d8795a3a19b6 Mon Sep 17 00:00:00 2001 From: oserikov Date: Sat, 5 Jun 2021 20:43:43 +0300 Subject: [PATCH 082/151] wip unify md_yaml_reader and intent_catcher_reader --- .../md_yaml_dialogs_iterator.py | 36 +++++++++++++++---- .../go_bot/nlg/mock_json_nlg_manager.py | 14 +++++--- 2 files changed, 39 insertions(+), 11 deletions(-) diff --git a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py index 2ba0620133..cf7c3650d4 100644 --- a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py +++ b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py @@ -62,6 +62,7 @@ def gen_batches(self, dialogs = False ignore_slots = False + # print(stories) story_iterator = StoriesGenerator(stories, intents, domain_knowledge, @@ -79,11 +80,23 @@ def gen_batches(self, # print(file=tmp_f) # tmp_f.close() # noinspection PyProtectedMember + # print(batch) gobot_formatted_stories = DSTC2DatasetReader._read_from_batch( list(itertools.chain(*[v + [{}] for v in batch.values()])), dialogs=dialogs) # os.remove(tmp_f.name) - yield gobot_formatted_stories + ds = [] + prev_resp_act = None + for x, y in gobot_formatted_stories: + if x.get('episode_done'): + del x['episode_done'] + prev_resp_act = None + ds.append(([], [])) + x['prev_resp_act'] = prev_resp_act + prev_resp_act = y['act'] + ds[-1][0].append(x) + ds[-1][1].append(y) + yield zip(*ds) # def read_story(self, stories: Stories, dialogs, # domain_knowledge: DomainKnowledge, nlu_knowledge: Intents, @@ -123,10 +136,19 @@ def gen_batches(self, def get_instances(self, data_type: str = 'train') -> Tuple[ tuple, tuple]: - res = tuple(map(lambda it: tuple(itertools.chain(*it)), - zip(*self.gen_batches(batch_size=-1, - data_type=data_type, - shuffle=False)))) + concat = lambda it: tuple(itertools.chain(*it)) + tmp = self.gen_batches(batch_size=-1, + data_type=data_type, + shuffle=False) + # print("a") + res = tuple(e for el in tmp + for e in el) + # print("b") + # print(a) + # print("c") + # res = tuple(map(concat,zip(*tmp))) + + # print(res) return res @@ -225,6 +247,8 @@ def user_action2text(self, user_action: str, slots_li=None): if slots_li is None: slots_li = tuple() res = self.intents.intent2slots2text[user_action][slots_li] + # print(res) + # print(self.intents.intent2slots2text) return res def process_user_turn(self): @@ -334,7 +358,6 @@ def process_system_utter(self): yield system_action - def 
__call__(self): if self.turn.is_user_turn(): for possible_turn in self.process_user_turn(): @@ -354,6 +377,7 @@ def iterProduct(ic): for js in iterProduct(ic[1:]): yield [i] + js + class StoryGenerator: def __init__(self, story: Story, nlu: Intents, domain_knowledge: DomainKnowledge, ignore_slots=False): diff --git a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py index 655712ab21..917b8be824 100644 --- a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py +++ b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py @@ -74,13 +74,17 @@ def get_api_call_action_id(self) -> int: def _extract_actions_combinations(self, dataset_path: Union[str, Path]): dataset_path = expand_path(dataset_path) - dataset = self._dataset_reader.read(data_path=dataset_path, dialogs=True, ignore_slots=True) + try: + dataset = self._dataset_reader.read(data_path=dataset_path) + except: + dataset = self._dataset_reader.read(data_path=dataset_path, fmt="yml") actions_combinations = set() for dataset_split in dataset.values(): - for dialogue in dataset_split: - for user_input, system_response in dialogue: - actions_tuple = tuple(system_response["act"].split('+')) - actions_combinations.add(actions_tuple) + actions_combinations.update(dataset_split["domain"].known_actions) + # for dialogue in dataset_split: + # for user_input, system_response in dialogue: + # actions_tuple = tuple(system_response["act"].split('+')) + # actions_combinations.add(actions_tuple) return actions_combinations @staticmethod From 42425b3f44d0c696733a4e8754c68f59efc6c384 Mon Sep 17 00:00:00 2001 From: oserikov Date: Sat, 5 Jun 2021 22:49:24 +0300 Subject: [PATCH 083/151] wip unify md_yaml_reader and intent_catcher_reader --- deeppavlov/models/go_bot/nlu/nlu_manager.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/deeppavlov/models/go_bot/nlu/nlu_manager.py b/deeppavlov/models/go_bot/nlu/nlu_manager.py index af5f6c5fc0..ee33efbb80 100644 --- a/deeppavlov/models/go_bot/nlu/nlu_manager.py +++ b/deeppavlov/models/go_bot/nlu/nlu_manager.py @@ -66,14 +66,18 @@ def nlu(self, text: str) -> NLUResponse: intents = [] if callable(self.intent_classifier): - intents = self._extract_intents_from_tokenized_text_entry(tokens) + intents = self._extract_intents_from_text_entry(text) return NLUResponse(slots, intents, tokens) def _extract_intents_from_tokenized_text_entry(self, tokens: List[str]): # todo meaningful type hints, relies on unannotated intent classifier - classifier_output = self.intent_classifier([' '.join(tokens)]) - intent_features = classifier_output[1][0] + intent_features = self.intent_classifier([' '.join(tokens)])[1][0] + return intent_features + + def _extract_intents_from_text_entry(self, text: str): + # todo meaningful type hints, relies on unannotated intent classifier + intent_features = self.intent_classifier([text])[1][0] return intent_features def _extract_slots_from_tokenized_text_entry(self, tokens: List[str]): From 9c7a782cd6d14d896f7c15fb275fd0897d619625 Mon Sep 17 00:00:00 2001 From: oserikov Date: Sun, 6 Jun 2021 12:45:16 +0300 Subject: [PATCH 084/151] wip unify md_yaml_reader and intent_catcher_reader --- .../md_yaml_dialogs_ner_iterator.py | 116 ++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 deeppavlov/dataset_iterators/md_yaml_dialogs_ner_iterator.py diff --git a/deeppavlov/dataset_iterators/md_yaml_dialogs_ner_iterator.py b/deeppavlov/dataset_iterators/md_yaml_dialogs_ner_iterator.py new file mode 100644 index 
0000000000..b6a1210b0f --- /dev/null +++ b/deeppavlov/dataset_iterators/md_yaml_dialogs_ner_iterator.py @@ -0,0 +1,116 @@ +# Copyright 2017 Neural Networks and Deep Learning lab, MIPT +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, softwaredata +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import itertools +import json +import os +import re +import tempfile +from logging import getLogger +from typing import Dict, List, Tuple, Any, Iterator + +from deeppavlov.core.common.registry import register +from deeppavlov.core.data.data_learning_iterator import DataLearningIterator +from deeppavlov.dataset_iterators.md_yaml_dialogs_iterator import \ + MD_YAML_DialogsDatasetIterator +from deeppavlov.dataset_readers.dstc2_reader import DSTC2DatasetReader +from deeppavlov.dataset_readers.dto.rasa.domain_knowledge import DomainKnowledge +from deeppavlov.dataset_readers.dto.rasa.stories import Story, Turn, Stories +from deeppavlov.dataset_readers.dto.rasa.nlu import Intents + +log = getLogger(__name__) + + +class RASADict(dict): + def __add__(self, oth): + return RASADict() + + + + +from typing import Dict, List, Tuple, Any, Iterator + + +@register('md_yaml_dialogs_ner_iterator') +class MD_YAML_DialogsDatasetNERIterator(MD_YAML_DialogsDatasetIterator): + + def __init__(self, + data: Dict[str, List[Tuple[Any, Any]]], + seed: int = None, + shuffle: bool = True, + limit: int = 10) -> None: + super().__init__(data, seed, shuffle, limit) + + def gen_batches(self, + batch_size: int, + data_type: str = 'train', + shuffle: bool = None) -> Iterator[Tuple]: + + for batch in super().gen_batches(batch_size, + data_type, + shuffle): + processed_data = list() + processed_texts = dict() + + for xs, ys in zip(*batch): + + for x, y in zip(xs, ys): + text = x['text'] + if not text.strip(): + continue + intents = [] + if 'intents' in x: + intents = x['intents'] + elif 'slots' in x: + intents = [x] + # aggregate slots from different intents + slots = list() + for intent in intents: + current_slots = intent.get('slots', []) + for slot_type, slot_val in current_slots: + # if not self._slot_vals or ( + # slot_type in self._slot_vals): + slots.append((slot_type, slot_val,)) + # remove duplicate pairs (text, slots) + if (text in processed_texts) and ( + slots in processed_texts[text]): + continue + processed_texts[text] = processed_texts.get(text, []) + [ + slots] + processed_data.append(self._add_bio_markup(text, slots)) + yield processed_data + + def _add_bio_markup(self, + utterance: str, + slots: List[Tuple[str, str]]) -> Tuple[List, List]: + tokens = utterance.split() + n_toks = len(tokens) + tags = ['O' for _ in range(n_toks)] + for n in range(n_toks): + for slot_type, slot_val in slots: + for entity in [slot_val]: + slot_tokens = entity.split() + slot_len = len(slot_tokens) + if n + slot_len <= n_toks and \ + self._is_equal_sequences(tokens[n: n + slot_len], + slot_tokens): + tags[n] = 'B-' + slot_type + for k in range(1, slot_len): + tags[n + k] = 'I-' + slot_type + break + return tokens, tags + + @staticmethod + def _is_equal_sequences(seq1, seq2): + equality_list = [tok1 == 
tok2 for tok1, tok2 in zip(seq1, seq2)] + return all(equality_list) From 9ac8af881e7e1fdd38f5558f7ab7e780fc597bef Mon Sep 17 00:00:00 2001 From: oserikov Date: Sun, 6 Jun 2021 12:46:59 +0300 Subject: [PATCH 085/151] wip unify md_yaml_reader and intent_catcher_reader --- deeppavlov/core/common/registry.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/deeppavlov/core/common/registry.json b/deeppavlov/core/common/registry.json index 8a8265abc0..89863f2000 100644 --- a/deeppavlov/core/common/registry.json +++ b/deeppavlov/core/common/registry.json @@ -210,5 +210,6 @@ "intent_catcher_reader": "deeppavlov.dataset_readers.intent_catcher_reader:IntentCatcherReader", "intent_catcher": "deeppavlov.models.intent_catcher.intent_catcher:IntentCatcher", "mem_classification_model": "deeppavlov.models.classifiers.memorizing_classifier:MemClassificationModel", - "md_yaml_dialogs_iterator": "deeppavlov.dataset_iterators.md_yaml_dialogs_iterator:MD_YAML_DialogsDatasetIterator" + "md_yaml_dialogs_iterator": "deeppavlov.dataset_iterators.md_yaml_dialogs_iterator:MD_YAML_DialogsDatasetIterator", + "md_yaml_dialogs_ner_iterator": "deeppavlov.dataset_iterators.md_yaml_dialogs_ner_iterator.MD_YAML_DialogsDatasetNERIterator" } \ No newline at end of file From bb329a394a9c0a262ecac9999f2fe910d22c4b21 Mon Sep 17 00:00:00 2001 From: oserikov Date: Sun, 6 Jun 2021 14:53:23 +0300 Subject: [PATCH 086/151] wip unify md_yaml_reader and intent_catcher_reader --- deeppavlov/dataset_readers/dto/rasa/nlu.py | 2 +- deeppavlov/models/slotfill/slotfill_raw.py | 27 ++++++++++++++++++++-- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/deeppavlov/dataset_readers/dto/rasa/nlu.py b/deeppavlov/dataset_readers/dto/rasa/nlu.py index 3e3d4912bc..ad43c4ca8c 100644 --- a/deeppavlov/dataset_readers/dto/rasa/nlu.py +++ b/deeppavlov/dataset_readers/dto/rasa/nlu.py @@ -99,7 +99,7 @@ def slot_name2text2value(self) -> Dict: sn2t2v = dict() for intent in self.intents: for intent_l in intent.lines: - for slot_name, slot_text2value in intent_l.slot_name2text2value.keys(): + for slot_name, slot_text2value in intent_l.slot_name2text2value.items(): if slot_name not in sn2t2v.keys(): sn2t2v[slot_name] = dict() for slot_text, slot_values_li in slot_text2value.items(): diff --git a/deeppavlov/models/slotfill/slotfill_raw.py b/deeppavlov/models/slotfill/slotfill_raw.py index 0592bb9144..c20d678479 100644 --- a/deeppavlov/models/slotfill/slotfill_raw.py +++ b/deeppavlov/models/slotfill/slotfill_raw.py @@ -176,7 +176,30 @@ def load(self, *args, **kwargs): """reads the slotfilling info from RASA-styled dataset""" domain_path = Path(self.load_path, MD_YAML_DialogsDatasetReader.DOMAIN_FNAME) nlu_path = Path(self.load_path, MD_YAML_DialogsDatasetReader.NLU_FNAME) - domain_knowledge = DomainKnowledge(read_yaml(domain_path)) + # domain_knowledge = DomainKnowledge(read_yaml(domain_path)) # todo: rewrite MD_YAML_DialogsDatasetReader so that public methods are enough - _, slot_name2text2value = MD_YAML_DialogsDatasetReader._read_intent2text_mapping(nlu_path, domain_knowledge) + data = MD_YAML_DialogsDatasetReader.read(self.load_path) + nlu_lines_trn = dict() + nlu_lines_tst = dict() + nlu_lines_val = dict() + if "train" in data: + nlu_lines_trn = data["train"]["nlu_lines"].slot_name2text2value + if "test" in data: + nlu_lines_tst = data["test"]["nlu_lines"].slot_name2text2value + if "valid" in data: + nlu_lines_val = data["valid"]["nlu_lines"].slot_name2text2value + slot_names = list(nlu_lines_trn.keys()) + \ + 
list(nlu_lines_tst.keys()) + \ + list(nlu_lines_val.keys()) + slot_name2text2value = dict() + for sname in slot_names: + stext2value = dict() + for sample in [nlu_lines_trn, + nlu_lines_tst, + nlu_lines_val]: + for stext,ssamples in sample.get(sname,{}).items(): + if stext not in stext2value: + stext2value[stext] = list() + stext2value[stext].extend(ssamples) + slot_name2text2value[sname] = stext2value self._slot_vals = slot_name2text2value From 2d5ea4e213865681854779de04863b852862a296 Mon Sep 17 00:00:00 2001 From: oserikov Date: Sun, 6 Jun 2021 15:20:20 +0300 Subject: [PATCH 087/151] wip unify md_yaml_reader and intent_catcher_reader --- deeppavlov/core/common/registry.json | 3 +- .../md_yaml_dialogs_ner_iterator.py | 44 +++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/deeppavlov/core/common/registry.json b/deeppavlov/core/common/registry.json index 89863f2000..c54e36d79c 100644 --- a/deeppavlov/core/common/registry.json +++ b/deeppavlov/core/common/registry.json @@ -211,5 +211,6 @@ "intent_catcher": "deeppavlov.models.intent_catcher.intent_catcher:IntentCatcher", "mem_classification_model": "deeppavlov.models.classifiers.memorizing_classifier:MemClassificationModel", "md_yaml_dialogs_iterator": "deeppavlov.dataset_iterators.md_yaml_dialogs_iterator:MD_YAML_DialogsDatasetIterator", - "md_yaml_dialogs_ner_iterator": "deeppavlov.dataset_iterators.md_yaml_dialogs_ner_iterator.MD_YAML_DialogsDatasetNERIterator" + "md_yaml_dialogs_ner_iterator": "deeppavlov.dataset_iterators.md_yaml_dialogs_ner_iterator:MD_YAML_DialogsDatasetNERIterator", + "md_yaml_dialogs_intents_iterator": "deeppavlov.dataset_iterators.md_yaml_dialogs_ner_iterator:MD_YAML_DialogsDatasetIntentsIterator" } \ No newline at end of file diff --git a/deeppavlov/dataset_iterators/md_yaml_dialogs_ner_iterator.py b/deeppavlov/dataset_iterators/md_yaml_dialogs_ner_iterator.py index b6a1210b0f..9352615c2b 100644 --- a/deeppavlov/dataset_iterators/md_yaml_dialogs_ner_iterator.py +++ b/deeppavlov/dataset_iterators/md_yaml_dialogs_ner_iterator.py @@ -114,3 +114,47 @@ def _add_bio_markup(self, def _is_equal_sequences(seq1, seq2): equality_list = [tok1 == tok2 for tok1, tok2 in zip(seq1, seq2)] return all(equality_list) + + +@register("md_yaml_dialogs_intents_iterator") +class MD_YAML_DialogsDatasetIntentsIterator(MD_YAML_DialogsDatasetIterator): + + def __init__(self, + data: Dict[str, List[Tuple[Any, Any]]], + seed: int = None, + shuffle: bool = True, + limit: int = 10) -> None: + super().__init__(data, seed, shuffle, limit) + + def gen_batches(self, + batch_size: int, + data_type: str = 'train', + shuffle: bool = None) -> Iterator[Tuple]: + + for batch in super().gen_batches(batch_size, + data_type, + shuffle): + processed_data = list() + for users, syss in zip(*batch): + for user, sys in zip(users, syss): + reply = user + curr_intents = [] + # print(turn) + if reply['intents']: + for intent in reply['intents']: + for slot in intent['slots']: + if slot[0] == 'slot': + curr_intents.append( + intent['act'] + '_' + slot[1]) + else: + curr_intents.append( + intent['act'] + '_' + slot[0]) + if len(intent['slots']) == 0: + curr_intents.append(intent['act']) + else: + if reply['text']: + curr_intents.append('unknown') + else: + continue + processed_data.append((reply['text'], curr_intents)) + yield processed_data \ No newline at end of file From 4f9cb4e2d1570b99ce536debd80ff3cfcbdcb1f7 Mon Sep 17 00:00:00 2001 From: oserikov Date: Sun, 6 Jun 2021 17:37:22 +0300 Subject: [PATCH 088/151] wip unify md_yaml_reader 
and intent_catcher_reader --- deeppavlov/models/classifiers/memorizing_classifier.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/deeppavlov/models/classifiers/memorizing_classifier.py b/deeppavlov/models/classifiers/memorizing_classifier.py index 8e1b1e9c12..ce0b977a63 100644 --- a/deeppavlov/models/classifiers/memorizing_classifier.py +++ b/deeppavlov/models/classifiers/memorizing_classifier.py @@ -96,8 +96,8 @@ def train_on_batch(self, texts: List[str], labels = labels_ self.text2label.update(dict(zip(texts, labels))) self.classes = list(sorted(set(self.classes + labels))) - print(self.text2label) - print(self.classes) + # print(self.text2label) + # print(self.classes) pseudo_loss = 0 if self.is_trained else 1 self.is_trained = True self.save() @@ -105,14 +105,14 @@ def train_on_batch(self, texts: List[str], @overrides def save(self, *args, **kwargs): - print("saving") + # print("saving") save_json({"classes": self.classes, "text2label": self.text2label}, self.save_path) @overrides def load(self, *args, **kwargs): - print("loading") + # print("loading") try: loaded = read_json(self.save_path) self.classes = loaded["classes"] From 035ea7301e20a787a3d1e139824691b59d5c0227 Mon Sep 17 00:00:00 2001 From: oserikov Date: Sun, 6 Jun 2021 17:41:15 +0300 Subject: [PATCH 089/151] wip unify md_yaml_reader and intent_catcher_reader --- deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py index cf7c3650d4..213c58d385 100644 --- a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py +++ b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py @@ -260,7 +260,7 @@ def process_user_turn(self): slots_used_values) # possible_user_utters = [] for user_response_info in possible_user_response_infos: - print(user_response_info) + # print(user_response_info) user_utter = {"speaker": self._USER_SPEAKER_ID, "text": user_response_info["text"], "dialog_acts": [{"act": user_action, From 727887f098f2f73778572c721be45c3e8dbd9611 Mon Sep 17 00:00:00 2001 From: oserikov Date: Sun, 6 Jun 2021 17:46:19 +0300 Subject: [PATCH 090/151] wip unify md_yaml_reader and intent_catcher_reader --- deeppavlov/models/go_bot/tracker/featurized_tracker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/tracker/featurized_tracker.py b/deeppavlov/models/go_bot/tracker/featurized_tracker.py index 0ecab6a094..db9e42c942 100644 --- a/deeppavlov/models/go_bot/tracker/featurized_tracker.py +++ b/deeppavlov/models/go_bot/tracker/featurized_tracker.py @@ -218,7 +218,7 @@ def read_md_story(story_path: Union[Path, str]) -> Dict[str, List[Dict]]: curr_action = step["action"] if curr_action.startswith("form"): curr_action = json.loads(curr_action[len("form"):])["name"] - print(curr_action) + # print(curr_action) if curr_action in form_names: prev_forms.append(curr_action) if curr_action in potential_api_or_db_actions: From ab23b1822b917bf3d93496ed57c6876e5549e8bc Mon Sep 17 00:00:00 2001 From: oserikov Date: Sun, 6 Jun 2021 19:31:18 +0300 Subject: [PATCH 091/151] wip unify md_yaml_reader and intent_catcher_reader --- deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py | 4 +++- .../dataset_iterators/md_yaml_dialogs_ner_iterator.py | 8 +++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py 
b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py index 213c58d385..ffaddbcc4d 100644 --- a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py +++ b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py @@ -25,6 +25,7 @@ from deeppavlov.dataset_readers.dto.rasa.domain_knowledge import DomainKnowledge from deeppavlov.dataset_readers.dto.rasa.stories import Story, Turn, Stories from deeppavlov.dataset_readers.dto.rasa.nlu import Intents +import random log = getLogger(__name__) @@ -258,8 +259,9 @@ def process_user_turn(self): slots_actual_values, user_action) possible_user_response_infos = self.user_action2text(action_for_text, slots_used_values) + random.shuffle(possible_user_response_infos) # possible_user_utters = [] - for user_response_info in possible_user_response_infos: + for user_response_info in possible_user_response_infos[:2]: # print(user_response_info) user_utter = {"speaker": self._USER_SPEAKER_ID, "text": user_response_info["text"], diff --git a/deeppavlov/dataset_iterators/md_yaml_dialogs_ner_iterator.py b/deeppavlov/dataset_iterators/md_yaml_dialogs_ner_iterator.py index 9352615c2b..07aa08c1f6 100644 --- a/deeppavlov/dataset_iterators/md_yaml_dialogs_ner_iterator.py +++ b/deeppavlov/dataset_iterators/md_yaml_dialogs_ner_iterator.py @@ -134,7 +134,7 @@ def gen_batches(self, for batch in super().gen_batches(batch_size, data_type, shuffle): - processed_data = list() + texts, intents = list(), list() for users, syss in zip(*batch): for user, sys in zip(users, syss): reply = user @@ -156,5 +156,7 @@ def gen_batches(self, curr_intents.append('unknown') else: continue - processed_data.append((reply['text'], curr_intents)) - yield processed_data \ No newline at end of file + texts.append(reply["text"]) + intents.append(curr_intents) + # processed_data.append((reply['text'], curr_intents)) + yield texts, intents \ No newline at end of file From 38a553d421cdc57f5875cabfd1eb6ec813f1428f Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 7 Jun 2021 00:36:35 +0300 Subject: [PATCH 092/151] wip unify md_yaml_reader and intent_catcher_reader --- deeppavlov/dataset_readers/dstc2_reader.py | 2 +- deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/deeppavlov/dataset_readers/dstc2_reader.py b/deeppavlov/dataset_readers/dstc2_reader.py index 63821a5472..199b5cb20d 100644 --- a/deeppavlov/dataset_readers/dstc2_reader.py +++ b/deeppavlov/dataset_readers/dstc2_reader.py @@ -123,7 +123,7 @@ def _read_from_file(cls, file_path, dialogs=False): @classmethod def _read_from_batch(cls, batch, dialogs=False): """Returns data from single batch""" - log.info(f"[loading dialogs from batch of len {len(batch)}]") + log.debug(f"[loading dialogs from batch of len {len(batch)}]") utterances, responses, dialog_indices = \ cls._get_turns(batch, with_indices=True) diff --git a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py index 917b8be824..4121fa020f 100644 --- a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py +++ b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py @@ -81,6 +81,7 @@ def _extract_actions_combinations(self, dataset_path: Union[str, Path]): actions_combinations = set() for dataset_split in dataset.values(): actions_combinations.update(dataset_split["domain"].known_actions) + actions_combinations = {(ac,) for ac in actions_combinations} # for dialogue in dataset_split: # for user_input, system_response in dialogue: # actions_tuple = 
tuple(system_response["act"].split('+')) From e8999eaf9640fd3558022ca2ad34aaa4fdc67b71 Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 7 Jun 2021 11:01:44 +0300 Subject: [PATCH 093/151] wip unify md_yaml_reader and intent_catcher_reader --- deeppavlov/models/go_bot/dto/dataset_features.py | 6 ++++++ deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py | 1 + 2 files changed, 7 insertions(+) diff --git a/deeppavlov/models/go_bot/dto/dataset_features.py b/deeppavlov/models/go_bot/dto/dataset_features.py index bb5a673684..6af845770f 100644 --- a/deeppavlov/models/go_bot/dto/dataset_features.py +++ b/deeppavlov/models/go_bot/dto/dataset_features.py @@ -35,6 +35,7 @@ def __init__(self, tokens_vectorized = nlu_response.tokens_vectorized # todo proper oop self.tokens_embeddings_padded = tokens_vectorized.tokens_embeddings_padded self.features = features.concat_feats + self._nlu_response = nlu_response class UtteranceTarget: @@ -84,12 +85,14 @@ def __init__(self): self.attn_keys = [] self.tokens_embeddings_paddeds = [] self.featuress = [] + self._nlu_response = [] def append(self, utterance_features: UtteranceFeatures): self.action_masks.append(utterance_features.action_mask) self.attn_keys.append(utterance_features.attn_key) self.tokens_embeddings_paddeds.append(utterance_features.tokens_embeddings_padded) self.featuress.append(utterance_features.features) + self._nlu_response.append(utterance_features._nlu_response) def __len__(self): return len(self.featuress) @@ -156,6 +159,7 @@ def __init__(self, dialogue_features: DialogueFeatures, sequence_length): dialogue_features.tokens_embeddings_paddeds[0])] * padding_length self.featuress = dialogue_features.featuress + [np.zeros_like(dialogue_features.featuress[0])] * padding_length + self._nlu_response = dialogue_features._nlu_response class PaddedDialogueTargets(DialogueTargets): @@ -203,6 +207,7 @@ def __init__(self, max_dialogue_length): self.b_tokens_embeddings_paddeds = [] self.b_featuress = [] self.b_padded_dialogue_length_mask = [] + self._nlu_responses = [] self.max_dialogue_length = max_dialogue_length def append(self, padded_dialogue_features: PaddedDialogueFeatures): @@ -211,6 +216,7 @@ def append(self, padded_dialogue_features: PaddedDialogueFeatures): self.b_tokens_embeddings_paddeds.append(padded_dialogue_features.tokens_embeddings_paddeds) self.b_featuress.append(padded_dialogue_features.featuress) self.b_padded_dialogue_length_mask.append(padded_dialogue_features.padded_dialogue_length_mask) + self._nlu_responses.append(padded_dialogue_features._nlu_response) def __len__(self): return len(self.b_featuress) diff --git a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py index 4121fa020f..1af98d7b78 100644 --- a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py +++ b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py @@ -139,6 +139,7 @@ def decode_response(self, response = JSONNLGResponse(slots_values, actions_tuple) verbose_response = VerboseJSONNLGResponse.from_json_nlg_response(response) verbose_response.policy_prediction = policy_prediction + verbose_response._nlu_responses = utterance_batch_features._nlu_responses return verbose_response def num_of_known_actions(self) -> int: From 98fcedec5ff1ad1a0cf5f8b066c9abfca123235f Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 7 Jun 2021 11:07:59 +0300 Subject: [PATCH 094/151] wip unify md_yaml_reader and intent_catcher_reader --- deeppavlov/models/go_bot/nlu/dto/nlu_response.py | 1 + 
deeppavlov/models/go_bot/nlu/nlu_manager.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/nlu/dto/nlu_response.py b/deeppavlov/models/go_bot/nlu/dto/nlu_response.py index 7570aef386..cf9ec1924e 100644 --- a/deeppavlov/models/go_bot/nlu/dto/nlu_response.py +++ b/deeppavlov/models/go_bot/nlu/dto/nlu_response.py @@ -10,6 +10,7 @@ class NLUResponse(NLUResponseInterface): """ def __init__(self, slots, intents, tokens): self.slots: Union[List[Tuple[str, Any]], Dict[str, Any]] = slots + self._intent_names = None self.intents = intents self.tokens = tokens self.tokens_vectorized: Optional[TextVectorizationResponse] = None diff --git a/deeppavlov/models/go_bot/nlu/nlu_manager.py b/deeppavlov/models/go_bot/nlu/nlu_manager.py index 647715f4aa..8ca99e9f93 100644 --- a/deeppavlov/models/go_bot/nlu/nlu_manager.py +++ b/deeppavlov/models/go_bot/nlu/nlu_manager.py @@ -68,7 +68,9 @@ def nlu(self, text: str) -> NLUResponse: if callable(self.intent_classifier): intents = self._extract_intents_from_text_entry(text) - return NLUResponse(slots, intents, tokens) + resp = NLUResponse(slots, intents, tokens) + resp._intents_names = self.intents + return resp def _extract_intents_from_tokenized_text_entry(self, tokens: List[str]): # todo meaningful type hints, relies on unannotated intent classifier From e321ae697b20fcf3b252ee0e93473318f9f4b6bc Mon Sep 17 00:00:00 2001 From: oserikov Date: Mon, 7 Jun 2021 11:23:23 +0300 Subject: [PATCH 095/151] wip unify md_yaml_reader and intent_catcher_reader --- deeppavlov/models/go_bot/nlu/nlu_manager.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/nlu/nlu_manager.py b/deeppavlov/models/go_bot/nlu/nlu_manager.py index 8ca99e9f93..08a6c83927 100644 --- a/deeppavlov/models/go_bot/nlu/nlu_manager.py +++ b/deeppavlov/models/go_bot/nlu/nlu_manager.py @@ -4,6 +4,8 @@ from deeppavlov import Chainer from deeppavlov.core.data.simple_vocab import SimpleVocabulary from deeppavlov.models.bert.bert_classifier import BertClassifierModel +from deeppavlov.models.classifiers.memorizing_classifier import \ + MemClassificationModel from deeppavlov.models.intent_catcher.intent_catcher import IntentCatcher from deeppavlov.models.go_bot.nlu.dto.nlu_response import NLUResponse from deeppavlov.models.go_bot.nlu.nlu_manager_interface import NLUManagerInterface @@ -35,7 +37,9 @@ def __init__(self, tokenizer, slot_filler, intent_classifier, debug=False): self.intents = [] if isinstance(self.intent_classifier, Chainer): component = self.intent_classifier.get_main_component() - if isinstance(component, BertClassifierModel) or isinstance(component, IntentCatcher): + if isinstance(component, BertClassifierModel) \ + or isinstance(component, IntentCatcher)\ + or isinstance(component, MemClassificationModel): intent2labeltools = [el[-1] for el in self.intent_classifier.pipe if isinstance(el[-1], SimpleVocabulary)] if intent2labeltools: self.intents = intent2labeltools[-1]._i2t From b2243610f747865f44fa3911964cc59dfd394396 Mon Sep 17 00:00:00 2001 From: oserikov Date: Tue, 8 Jun 2021 09:54:15 +0300 Subject: [PATCH 096/151] wip intents from outside --- deeppavlov/models/go_bot/go_bot.py | 11 +++--- deeppavlov/models/go_bot/nlu/nlu_manager.py | 41 ++++++++++++++++++--- 2 files changed, 42 insertions(+), 10 deletions(-) diff --git a/deeppavlov/models/go_bot/go_bot.py b/deeppavlov/models/go_bot/go_bot.py index f2bf2e1ae9..5dba8caaac 100644 --- a/deeppavlov/models/go_bot/go_bot.py +++ 
b/deeppavlov/models/go_bot/go_bot.py @@ -37,6 +37,7 @@ log = getLogger(__name__) +UtteranceT = Union[dict, str] # todo logging @register("go_bot") @@ -273,7 +274,7 @@ def prepare_utterance_training_data(self, utterance_data_entry = UtteranceDataEntry.from_features_and_target(utterance_features, utterance_target) return utterance_data_entry - def extract_features_from_utterance_text(self, text, tracker, keep_tracker_state=False) -> UtteranceFeatures: + def extract_features_from_utterance_text(self, text: UtteranceT, tracker, keep_tracker_state=False) -> UtteranceFeatures: """ Extract ML features for the input text and the respective tracker. Features are aggregated from the @@ -325,7 +326,7 @@ def extract_features_from_utterance_text(self, text, tracker, keep_tracker_state return UtteranceFeatures(nlu_response, tracker_knowledge, digitized_policy_features) - def _infer(self, user_utterance_text: str, user_tracker: DialogueStateTracker, + def _infer(self, user_utterance_text: UtteranceT, user_tracker: DialogueStateTracker, keep_tracker_state=False) -> Tuple[BatchDialoguesFeatures, PolicyPrediction]: """ Predict the action to perform in response to given text. @@ -363,7 +364,7 @@ def _infer(self, user_utterance_text: str, user_tracker: DialogueStateTracker, return utterance_batch_features, policy_prediction - def __call__(self, batch: Union[List[List[dict]], List[str]], + def __call__(self, batch: Union[List[List[UtteranceT]], List[UtteranceT]], user_ids: Optional[List] = None) -> Union[List[NLGResponseInterface], List[List[NLGResponseInterface]]]: if isinstance(batch[0], list): @@ -372,7 +373,7 @@ def __call__(self, batch: Union[List[List[dict]], List[str]], # todo unify tracking: no need to distinguish tracking strategies on dialogues and realtime res = [] for dialogue in batch: - dialogue: List[dict] + dialogue: List[UtteranceT] res.append(self._calc_inferences_for_dialogue(dialogue)) else: # batch is a list of utterances possibly came from different users: real-time inference @@ -380,7 +381,7 @@ def __call__(self, batch: Union[List[List[dict]], List[str]], if not user_ids: user_ids = [self.DEFAULT_USER_ID] * len(batch) for user_id, user_text in zip(user_ids, batch): - user_text: str + user_text: UtteranceT res.append(self._realtime_infer(user_id, user_text)) return res diff --git a/deeppavlov/models/go_bot/nlu/nlu_manager.py b/deeppavlov/models/go_bot/nlu/nlu_manager.py index 08a6c83927..64ffffa7ef 100644 --- a/deeppavlov/models/go_bot/nlu/nlu_manager.py +++ b/deeppavlov/models/go_bot/nlu/nlu_manager.py @@ -1,5 +1,5 @@ from logging import getLogger -from typing import List +from typing import List, Union from deeppavlov import Chainer from deeppavlov.core.data.simple_vocab import SimpleVocabulary @@ -51,7 +51,7 @@ def __init__(self, tokenizer, slot_filler, intent_classifier, debug=False): f"tokenizer={tokenizer}, slot_filler={slot_filler}, " f"intent_classifier={intent_classifier}, debug={debug}") - def nlu(self, text: str) -> NLUResponse: + def nlu(self, text: Union[str, dict]) -> NLUResponse: """ Extracts slot values and intents from text. 
@@ -62,15 +62,32 @@ def nlu(self, text: str) -> NLUResponse: an object storing the extracted slos and intents info """ # todo meaningful type hints - tokens = self._tokenize_single_text_entry(text) + text_is_dict = isinstance(text, dict) + if text_is_dict: + _text = text.get("text") + _intents = text.get("intents") + _slots = text.get("slots") + else: + _text = text + + tokens = self._tokenize_single_text_entry(_text) slots = None if callable(self.slot_filler): - slots = self._extract_slots_from_tokenized_text_entry(tokens) + if text_is_dict: + slots = _slots + else: + slots = self._extract_slots_from_tokenized_text_entry(tokens) intents = [] if callable(self.intent_classifier): - intents = self._extract_intents_from_text_entry(text) + if text_is_dict: + if isinstance(intents, list): + intents = self._intents_to_ohe(_intents) + else: + intents = self._intent_name_to_ohe(_intents) + else: + intents = self._extract_intents_from_text_entry(text) resp = NLUResponse(slots, intents, tokens) resp._intents_names = self.intents @@ -87,6 +104,20 @@ def _extract_intents_from_text_entry(self, text: str): intent_features = self.intent_classifier([text])[1][0] return intent_features + def _intent_name_to_ohe(self, intent_name): + intents_ohe = [0.] * len(self.intents) + if intent_name in self.intents: + intent_ix = self.intents.index(intent_name) + intents_ohe[intent_ix] = 1. + return intents_ohe + + def _intents_to_ohe(self, intent_names): + ohes = map(self._intent_name_to_ohe, intent_names) + intents_ohe = [0.] * len(self.intents) + for ohe_ix, ohe_ in ohes: + intents_ohe[ohe_ix] = float(any(ohe_)) + return intents_ohe + def _extract_slots_from_tokenized_text_entry(self, tokens: List[str]): # todo meaningful type hints, relies on unannotated slot filler return self.slot_filler([tokens])[0] From 8411a3bb6e2f71965c076b6fb2dff2ee6e69de28 Mon Sep 17 00:00:00 2001 From: oserikov Date: Tue, 8 Jun 2021 11:10:13 +0300 Subject: [PATCH 097/151] wip slots from outside --- deeppavlov/models/go_bot/nlu/nlu_manager.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/deeppavlov/models/go_bot/nlu/nlu_manager.py b/deeppavlov/models/go_bot/nlu/nlu_manager.py index 64ffffa7ef..e9d17fbd3d 100644 --- a/deeppavlov/models/go_bot/nlu/nlu_manager.py +++ b/deeppavlov/models/go_bot/nlu/nlu_manager.py @@ -65,8 +65,8 @@ def nlu(self, text: Union[str, dict]) -> NLUResponse: text_is_dict = isinstance(text, dict) if text_is_dict: _text = text.get("text") - _intents = text.get("intents") - _slots = text.get("slots") + _intents = text.get("intents", []) + _slots = text.get("slots", {}) else: _text = text @@ -82,13 +82,12 @@ def nlu(self, text: Union[str, dict]) -> NLUResponse: intents = [] if callable(self.intent_classifier): if text_is_dict: - if isinstance(intents, list): + if isinstance(_intents, list): intents = self._intents_to_ohe(_intents) else: intents = self._intent_name_to_ohe(_intents) else: intents = self._extract_intents_from_text_entry(text) - resp = NLUResponse(slots, intents, tokens) resp._intents_names = self.intents return resp @@ -114,7 +113,7 @@ def _intent_name_to_ohe(self, intent_name): def _intents_to_ohe(self, intent_names): ohes = map(self._intent_name_to_ohe, intent_names) intents_ohe = [0.] 
* len(self.intents) - for ohe_ix, ohe_ in ohes: + for ohe_ix, ohe_ in enumerate(zip(*ohes)): intents_ohe[ohe_ix] = float(any(ohe_)) return intents_ohe From 527478db194297e6a427c4ce3e1f4618d44e7cce Mon Sep 17 00:00:00 2001 From: oserikov Date: Tue, 8 Jun 2021 12:59:52 +0300 Subject: [PATCH 098/151] wip slots from outside --- deeppavlov/core/common/registry.json | 3 +- deeppavlov/models/slotfill/slotfill_raw.py | 95 ++++++++++++++++++++++ 2 files changed, 97 insertions(+), 1 deletion(-) diff --git a/deeppavlov/core/common/registry.json b/deeppavlov/core/common/registry.json index c54e36d79c..48f320b977 100644 --- a/deeppavlov/core/common/registry.json +++ b/deeppavlov/core/common/registry.json @@ -212,5 +212,6 @@ "mem_classification_model": "deeppavlov.models.classifiers.memorizing_classifier:MemClassificationModel", "md_yaml_dialogs_iterator": "deeppavlov.dataset_iterators.md_yaml_dialogs_iterator:MD_YAML_DialogsDatasetIterator", "md_yaml_dialogs_ner_iterator": "deeppavlov.dataset_iterators.md_yaml_dialogs_ner_iterator:MD_YAML_DialogsDatasetNERIterator", - "md_yaml_dialogs_intents_iterator": "deeppavlov.dataset_iterators.md_yaml_dialogs_ner_iterator:MD_YAML_DialogsDatasetIntentsIterator" + "md_yaml_dialogs_intents_iterator": "deeppavlov.dataset_iterators.md_yaml_dialogs_ner_iterator:MD_YAML_DialogsDatasetIntentsIterator", + "slotfill_raw_memorizing": "deeppavlov.models.slotfill.slotfill_raw:RASA_MemorizingSlotFillingComponent" } \ No newline at end of file diff --git a/deeppavlov/models/slotfill/slotfill_raw.py b/deeppavlov/models/slotfill/slotfill_raw.py index c20d678479..71a5769103 100644 --- a/deeppavlov/models/slotfill/slotfill_raw.py +++ b/deeppavlov/models/slotfill/slotfill_raw.py @@ -203,3 +203,98 @@ def load(self, *args, **kwargs): stext2value[stext].extend(ssamples) slot_name2text2value[sname] = stext2value self._slot_vals = slot_name2text2value + + +@register('slotfill_raw_memorizing') +class RASA_MemorizingSlotFillingComponent(SlotFillingComponent): + """Slot filling using Fuzzy search""" + + def __init__(self, threshold: float = 0.7, return_all: bool = False, + **kwargs): + super().__init__(**kwargs) + self.threshold = threshold + self.return_all = return_all + # self._slot_vals is the dictionary of slot values + self._slot_vals = None + self.load() + + @overrides + def __call__(self, batch, *args, **kwargs): + slots = [{}] * len(batch) + + m = [i for i, v in enumerate(batch) if v] + if m: + batch = [batch[i] for i in m] + # tags_batch = self._ner_network.predict_for_token_batch(batch) + # batch example: [['is', 'there', 'anything', 'else']] + for i, text in zip(m, batch): + # tokens are['is', 'there', 'anything', 'else'] + slots_values_lists = self._predict_slots(text) + if self.return_all: + slots[i] = dict(slots_values_lists) + else: + slots[i] = {slot: val_list[0] for slot, val_list in + slots_values_lists.items()} + # slots[i] example {'food': 'steakhouse'} + # slots we want, example : [{'pricerange': 'moderate', 'area': 'south'}] + return slots + + def _predict_slots(self, text): + # For utterance extract named entities and perform normalization for slot filling + entities, slot_values = self._strict_finder(text) + # slot_values = defaultdict(list) + # for entity, slot in zip(entities, slots): + # slot_values[slot].append(entity) + return slot_values + + def load(self, *args, **kwargs): + """reads the slotfilling info from RASA-styled dataset""" + domain_path = Path(self.load_path, + MD_YAML_DialogsDatasetReader.DOMAIN_FNAME) + nlu_path = Path(self.load_path, 
MD_YAML_DialogsDatasetReader.NLU_FNAME) + # domain_knowledge = DomainKnowledge(read_yaml(domain_path)) + # todo: rewrite MD_YAML_DialogsDatasetReader so that public methods are enough + data = MD_YAML_DialogsDatasetReader.read(self.load_path) + nlu_lines_trn = dict() + nlu_lines_tst = dict() + nlu_lines_val = dict() + if "train" in data: + nlu_lines_trn = data["train"]["nlu_lines"].slot_name2text2value + if "test" in data: + nlu_lines_tst = data["test"]["nlu_lines"].slot_name2text2value + if "valid" in data: + nlu_lines_val = data["valid"]["nlu_lines"].slot_name2text2value + slot_names = list(nlu_lines_trn.keys()) + \ + list(nlu_lines_tst.keys()) + \ + list(nlu_lines_val.keys()) + slot_name2text2value = dict() + for sname in slot_names: + stext2value = dict() + for sample in [nlu_lines_trn, + nlu_lines_tst, + nlu_lines_val]: + for stext, ssamples in sample.get(sname, {}).items(): + if stext not in stext2value: + stext2value[stext] = list() + stext2value[stext].extend(ssamples) + slot_name2text2value[sname] = stext2value + slot_text2name2value = defaultdict(lambda: defaultdict(list)) + for sname, stext2svalue in slot_name2text2value.items(): + for stext, svalue in stext2svalue.items(): + slot_text2name2value[stext][sname].extend(svalue) + + self._slot_vals = slot_name2text2value + + def deserialize(self, data): + self._slot_vals = json.loads(data) + + def save(self): + with open(self.save_path, 'w', encoding='utf8') as f: + json.dump(self._slot_vals, f) + + def _strict_finder(self, text): + global input_entity + slots = self._slot_vals.get(text, {}) + entities = list(slots.keys()) + return entities, slots + From 1e93e0004a6b0a5de2b71b5e908d1f1bd0498a6a Mon Sep 17 00:00:00 2001 From: oserikov Date: Tue, 8 Jun 2021 13:30:04 +0300 Subject: [PATCH 099/151] wip slots from outside --- deeppavlov/models/slotfill/slotfill_raw.py | 52 +++++++++++++--------- 1 file changed, 30 insertions(+), 22 deletions(-) diff --git a/deeppavlov/models/slotfill/slotfill_raw.py b/deeppavlov/models/slotfill/slotfill_raw.py index 71a5769103..902d2a4da4 100644 --- a/deeppavlov/models/slotfill/slotfill_raw.py +++ b/deeppavlov/models/slotfill/slotfill_raw.py @@ -230,6 +230,7 @@ def __call__(self, batch, *args, **kwargs): for i, text in zip(m, batch): # tokens are['is', 'there', 'anything', 'else'] slots_values_lists = self._predict_slots(text) + print(slots_values_lists) if self.return_all: slots[i] = dict(slots_values_lists) else: @@ -258,32 +259,39 @@ def load(self, *args, **kwargs): nlu_lines_trn = dict() nlu_lines_tst = dict() nlu_lines_val = dict() + text2slots = defaultdict(lambda: defaultdict(list)) if "train" in data: - nlu_lines_trn = data["train"]["nlu_lines"].slot_name2text2value + nlu_lines_trn = data["train"]["nlu_lines"].intent2slots2text + for intent, slots2text in nlu_lines_trn.items(): + for slots_is in slots2text.values(): + for slots_i in slots_is: + text = slots_i.get("text", '') + slots_di = dict(slots_i.get("slots", [])) + for s, sv in slots_di.items(): + text2slots[text][s].append(sv) + if "test" in data: - nlu_lines_tst = data["test"]["nlu_lines"].slot_name2text2value + nlu_lines_tst = data["test"]["nlu_lines"].intent2slots2text + for intent, slots2text in nlu_lines_tst.items(): + for slots_is in slots2text.values(): + for slots_i in slots_is: + text = slots_i.get("text", '') + slots_di = dict(slots_i.get("slots", [])) + for s, sv in slots_di.items(): + text2slots[text][s].append(sv) + if "valid" in data: - nlu_lines_val = data["valid"]["nlu_lines"].slot_name2text2value - slot_names = 
list(nlu_lines_trn.keys()) + \ - list(nlu_lines_tst.keys()) + \ - list(nlu_lines_val.keys()) - slot_name2text2value = dict() - for sname in slot_names: - stext2value = dict() - for sample in [nlu_lines_trn, - nlu_lines_tst, - nlu_lines_val]: - for stext, ssamples in sample.get(sname, {}).items(): - if stext not in stext2value: - stext2value[stext] = list() - stext2value[stext].extend(ssamples) - slot_name2text2value[sname] = stext2value - slot_text2name2value = defaultdict(lambda: defaultdict(list)) - for sname, stext2svalue in slot_name2text2value.items(): - for stext, svalue in stext2svalue.items(): - slot_text2name2value[stext][sname].extend(svalue) + nlu_lines_val = data["valid"]["nlu_lines"].intent2slots2text + for intent, slots2text in nlu_lines_val.items(): + for slots_is in slots2text.values(): + for slots_i in slots_is: + text = slots_i.get("text", '') + slots_di = dict(slots_i.get("slots", [])) + for s, sv in slots_di.items(): + text2slots[text][s].append(sv) - self._slot_vals = slot_name2text2value + + self._slot_vals = text2slots def deserialize(self, data): self._slot_vals = json.loads(data) From 79a154bf69284efb97ed9a568f4fe792389b3925 Mon Sep 17 00:00:00 2001 From: oserikov Date: Tue, 8 Jun 2021 17:57:43 +0300 Subject: [PATCH 100/151] wip templated nlg from outside --- .../go_bot/nlg/mock_json_nlg_manager.py | 68 ++++++++++++++++--- 1 file changed, 58 insertions(+), 10 deletions(-) diff --git a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py index 1af98d7b78..3cf00d648d 100644 --- a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py +++ b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py @@ -1,4 +1,6 @@ import json +import re +from collections import defaultdict from itertools import combinations from pathlib import Path from typing import Union, Dict, List, Tuple @@ -7,6 +9,7 @@ from deeppavlov.core.common.errors import ConfigError from deeppavlov.core.common.registry import register, get_model from deeppavlov.dataset_readers.dstc2_reader import DSTC2DatasetReader +from deeppavlov.dataset_readers.dto.rasa.domain_knowledge import DomainKnowledge from deeppavlov.models.go_bot.dto.dataset_features import BatchDialoguesFeatures from deeppavlov.models.go_bot.nlg.dto.json_nlg_response import JSONNLGResponse, VerboseJSONNLGResponse from deeppavlov.models.go_bot.nlg.nlg_manager import log @@ -36,10 +39,11 @@ def __init__(self, self._dataset_reader = get_model(dataset_reader_class) individual_actions2slots = self._load_actions2slots_mapping(actions2slots_path) + split2domain_i = self._get_domain_info(data_path) possible_actions_combinations_tuples = sorted( set(actions_combination_tuple for actions_combination_tuple - in self._extract_actions_combinations(data_path)), + in self._extract_actions_combinations(split2domain_i)), key=lambda x: '+'.join(x)) self.action_tuples2ids = {action_tuple: action_tuple_idx @@ -60,6 +64,9 @@ def __init__(self, api_call_action_as_tuple = (api_call_action,) self._api_call_id = self.action_tuples2ids[api_call_action_as_tuple] + self.action2slots2text, self.action2slots2values2text =\ + self._extract_templates(split2domain_i) + if self.debug: log.debug(f"AFTER {self.__class__.__name__} init(): " f"actions2slots_path={actions2slots_path}, " @@ -72,22 +79,61 @@ def get_api_call_action_id(self) -> int: """ return self._api_call_id - def _extract_actions_combinations(self, dataset_path: Union[str, Path]): + def _get_domain_info(self, dataset_path: Union[str, Path]): dataset_path = 
expand_path(dataset_path) try: dataset = self._dataset_reader.read(data_path=dataset_path) except: - dataset = self._dataset_reader.read(data_path=dataset_path, fmt="yml") + dataset = self._dataset_reader.read(data_path=dataset_path, + fmt="yml") + split2domain = dict() + for dataset_split, dataset_split_info in dataset.items(): + domain_i: DomainKnowledge = dataset_split_info["domain"] + split2domain[dataset_split] = domain_i + return split2domain + + def _extract_actions_combinations(self, split2domain: Dict[str, DomainKnowledge]): actions_combinations = set() - for dataset_split in dataset.values(): - actions_combinations.update(dataset_split["domain"].known_actions) - actions_combinations = {(ac,) for ac in actions_combinations} - # for dialogue in dataset_split: - # for user_input, system_response in dialogue: - # actions_tuple = tuple(system_response["act"].split('+')) - # actions_combinations.add(actions_tuple) + for dataset_split, domain_i in split2domain.items(): + actions_combinations.update({(ac,) for ac in domain_i.known_actions}) return actions_combinations + def _extract_templates(self, split2domain: Dict[str, DomainKnowledge]): + slots_pattern = r'\[(?P\w+)\]\((?P\w+)\)' + action2slots2text = defaultdict(lambda: defaultdict(list)) + action2slots2values2text = defaultdict(lambda: defaultdict(list)) + for dataset_split, domain_i in split2domain.items(): + actions2texts = domain_i.response_templates + for action, texts in actions2texts.items(): + action_tuple = (action,) + for text in texts: + used_slots, slotvalue_tuples = set(), set() + for found in re.finditer(slots_pattern, text): + used_slots_di = found.groupdict() + used_slots.update(used_slots_di.keys()) + slotvalue_tuples.update({(slot_name, slot_value) + for slot_name, slot_value + in used_slots_di.items()}) + + used_slots = tuple(sorted(used_slots)) + slotvalue_tuples = tuple(sorted(slotvalue_tuples)) + action2slots2text[action_tuple][used_slots].append(text) + action2slots2values2text[action_tuple][slotvalue_tuples].append(text) + + return action2slots2text, action2slots2values2text + + def generate_template(self, response_info: VerboseJSONNLGResponse, mode="slots"): + if mode == "slots": + response_text = None + action_tuple = response_info.actions_tuple + slots = tuple(sorted(response_info.slot_values.keys())) + response_text = self.action2slots2text.get(action_tuple, {}).get(slots, None) + else: + action_tuple = response_info.actions_tuple + slotvalue_tuples = tuple(sorted(response_info.slot_values.items())) + response_text = self.action2slots2text.get(action_tuple, {}).get(slotvalue_tuples, None) + return response_text + @staticmethod def _load_actions2slots_mapping(actions2slots_json_path) -> Dict[str, str]: actions2slots_json_path = expand_path(actions2slots_json_path) @@ -140,6 +186,8 @@ def decode_response(self, verbose_response = VerboseJSONNLGResponse.from_json_nlg_response(response) verbose_response.policy_prediction = policy_prediction verbose_response._nlu_responses = utterance_batch_features._nlu_responses + response_text = self.generate_template(verbose_response) + verbose_response.text = response_text return verbose_response def num_of_known_actions(self) -> int: From 27f3fa4586790e7334a84a7a226eb6c0d1ce3cd9 Mon Sep 17 00:00:00 2001 From: oserikov Date: Tue, 8 Jun 2021 18:05:40 +0300 Subject: [PATCH 101/151] wip templated nlg from outside --- deeppavlov/models/slotfill/slotfill_raw.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deeppavlov/models/slotfill/slotfill_raw.py 
b/deeppavlov/models/slotfill/slotfill_raw.py index 902d2a4da4..8f1d4ab9b5 100644 --- a/deeppavlov/models/slotfill/slotfill_raw.py +++ b/deeppavlov/models/slotfill/slotfill_raw.py @@ -230,7 +230,7 @@ def __call__(self, batch, *args, **kwargs): for i, text in zip(m, batch): # tokens are['is', 'there', 'anything', 'else'] slots_values_lists = self._predict_slots(text) - print(slots_values_lists) + # print(slots_values_lists) if self.return_all: slots[i] = dict(slots_values_lists) else: From 50f46761ec1fecfbc89ac3dafc80cb6e2332500d Mon Sep 17 00:00:00 2001 From: oserikov Date: Tue, 8 Jun 2021 18:51:19 +0300 Subject: [PATCH 102/151] wip templated nlg from outside --- deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py index 3cf00d648d..c55552c0b8 100644 --- a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py +++ b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py @@ -108,6 +108,8 @@ def _extract_templates(self, split2domain: Dict[str, DomainKnowledge]): action_tuple = (action,) for text in texts: used_slots, slotvalue_tuples = set(), set() + if isinstance(text, dict): + text = text["text"] for found in re.finditer(slots_pattern, text): used_slots_di = found.groupdict() used_slots.update(used_slots_di.keys()) @@ -132,6 +134,8 @@ def generate_template(self, response_info: VerboseJSONNLGResponse, mode="slots") action_tuple = response_info.actions_tuple slotvalue_tuples = tuple(sorted(response_info.slot_values.items())) response_text = self.action2slots2text.get(action_tuple, {}).get(slotvalue_tuples, None) + if isinstance(response_text, list): + response_text = random.choice(response_text) return response_text @staticmethod From 2fddbe4ee5dc35c388b029b9973bef0c6785a883 Mon Sep 17 00:00:00 2001 From: oserikov Date: Tue, 8 Jun 2021 19:13:59 +0300 Subject: [PATCH 103/151] wip templated nlg from outside --- deeppavlov/models/go_bot/nlu/nlu_manager.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/deeppavlov/models/go_bot/nlu/nlu_manager.py b/deeppavlov/models/go_bot/nlu/nlu_manager.py index e9d17fbd3d..c16d95b558 100644 --- a/deeppavlov/models/go_bot/nlu/nlu_manager.py +++ b/deeppavlov/models/go_bot/nlu/nlu_manager.py @@ -77,7 +77,7 @@ def nlu(self, text: Union[str, dict]) -> NLUResponse: if text_is_dict: slots = _slots else: - slots = self._extract_slots_from_tokenized_text_entry(tokens) + slots = self._extract_slots_from_text_entry(tokens) intents = [] if callable(self.intent_classifier): @@ -121,6 +121,10 @@ def _extract_slots_from_tokenized_text_entry(self, tokens: List[str]): # todo meaningful type hints, relies on unannotated slot filler return self.slot_filler([tokens])[0] + def _extract_slots_from_text_entry(self, text: str): + # todo meaningful type hints, relies on unannotated slot filler + return self.slot_filler([text])[0] + def _tokenize_single_text_entry(self, text: str): # todo meaningful type hints, relies on unannotated tokenizer return self.tokenizer([text.lower().strip()])[0] From b216e3ae1a0d2541795ef4fde6d3ddbc613b5273 Mon Sep 17 00:00:00 2001 From: oserikov Date: Wed, 9 Jun 2021 10:04:58 +0300 Subject: [PATCH 104/151] wip templated nlg from outside --- deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py | 9 +++++---- deeppavlov/models/go_bot/nlu/nlu_manager.py | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py 
b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py index c55552c0b8..ddaf77fdd1 100644 --- a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py +++ b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py @@ -15,7 +15,7 @@ from deeppavlov.models.go_bot.nlg.nlg_manager import log from deeppavlov.models.go_bot.nlg.nlg_manager_interface import NLGManagerInterface from deeppavlov.models.go_bot.policy.dto.policy_prediction import PolicyPrediction - +import random @register("gobot_json_nlg_manager") class MockJSONNLGManager(NLGManagerInterface): @@ -99,7 +99,7 @@ def _extract_actions_combinations(self, split2domain: Dict[str, DomainKnowledge] return actions_combinations def _extract_templates(self, split2domain: Dict[str, DomainKnowledge]): - slots_pattern = r'\[(?P\w+)\]\((?P\w+)\)' + slots_pattern = r'\[(?P<value>\w+)\]\((?P<name>\w+)\)' action2slots2text = defaultdict(lambda: defaultdict(list)) action2slots2values2text = defaultdict(lambda: defaultdict(list)) for dataset_split, domain_i in split2domain.items(): actions2texts = domain_i.response_templates @@ -119,8 +119,9 @@ def _extract_templates(self, split2domain: Dict[str, DomainKnowledge]): used_slots = tuple(sorted(used_slots)) slotvalue_tuples = tuple(sorted(slotvalue_tuples)) - action2slots2text[action_tuple][used_slots].append(text) - action2slots2values2text[action_tuple][slotvalue_tuples].append(text) + templated_text = re.sub(slots_pattern, '##\g<name>', text) + action2slots2text[action_tuple][used_slots].append(templated_text) + action2slots2values2text[action_tuple][slotvalue_tuples].append(templated_text) return action2slots2text, action2slots2values2text diff --git a/deeppavlov/models/go_bot/nlu/nlu_manager.py index c16d95b558..3e9b1e945c 100644 --- a/deeppavlov/models/go_bot/nlu/nlu_manager.py +++ b/deeppavlov/models/go_bot/nlu/nlu_manager.py @@ -77,7 +77,7 @@ def nlu(self, text: Union[str, dict]) -> NLUResponse: if text_is_dict: slots = _slots else: - slots = self._extract_slots_from_text_entry(tokens) + slots = self._extract_slots_from_text_entry(text) intents = [] if callable(self.intent_classifier): From 0d4cf043a5f141b47aaab6762cd1d3ff2fe8035c Mon Sep 17 00:00:00 2001 From: oserikov Date: Wed, 9 Jun 2021 11:05:30 +0300 Subject: [PATCH 105/151] wip templated nlg from outside --- deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py index ddaf77fdd1..1a65714163 100644 --- a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py +++ b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py @@ -106,16 +106,19 @@ def _extract_templates(self, split2domain: Dict[str, DomainKnowledge]): actions2texts = domain_i.response_templates for action, texts in actions2texts.items(): action_tuple = (action,) + texts = [text for text in texts if text] for text in texts: used_slots, slotvalue_tuples = set(), set() if isinstance(text, dict): text = text["text"] + used_slots_di = dict() for found in re.finditer(slots_pattern, text): used_slots_di = found.groupdict() - used_slots.update(used_slots_di.keys()) - slotvalue_tuples.update({(slot_name, slot_value) - for slot_name, slot_value - in used_slots_di.items()}) + if not ("name" in used_slots_di.keys() and "value" in used_slots_di.keys()): + continue + used_slots.update(used_slots_di["name"]) + slotvalue_tuples.update({used_slots_di["name"]: + used_slots_di["value"]}) used_slots = tuple(sorted(used_slots))
slotvalue_tuples = tuple(sorted(slotvalue_tuples)) From 95573a60f6b002b1d97af15d5c5c583e90609ef6 Mon Sep 17 00:00:00 2001 From: oserikov Date: Fri, 11 Jun 2021 17:54:50 +0300 Subject: [PATCH 106/151] wip templated nlg from outside --- deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py index 1a65714163..071e44d7a8 100644 --- a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py +++ b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py @@ -140,6 +140,10 @@ def generate_template(self, response_info: VerboseJSONNLGResponse, mode="slots") response_text = self.action2slots2text.get(action_tuple, {}).get(slotvalue_tuples, None) if isinstance(response_text, list): response_text = random.choice(response_text) + for slot_name in response_info.slot_values: + response_text = response_text.replace(f"##{slot_name}", + response_info.slot_values[ + slot_name]) return response_text @staticmethod From 85f7297673a5a51b8104c6f2775b4d9bd226a339 Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 24 Jun 2021 00:20:38 +0300 Subject: [PATCH 107/151] wip templated nlg from outside --- .../dataset_readers/dto/rasa/stories.py | 37 +++++++++++++++++++ .../dataset_readers/md_yaml_dialogs_reader.py | 2 +- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/deeppavlov/dataset_readers/dto/rasa/stories.py b/deeppavlov/dataset_readers/dto/rasa/stories.py index 876f0c1a6d..9731d14a7c 100644 --- a/deeppavlov/dataset_readers/dto/rasa/stories.py +++ b/deeppavlov/dataset_readers/dto/rasa/stories.py @@ -1,4 +1,5 @@ from typing import List +from deeppavlov.core.common.file import read_yaml USER = "usr" SYSTEM = "sys" @@ -51,3 +52,39 @@ def from_stories_lines_md(cls, lines: List[str], fmt="md"): # noinspection PyUnboundLocalVariable curr_story.turns.append(Turn(line_content, SYSTEM)) return stories + + @classmethod + def from_stories_lines_yml(cls, lines: List[str], fmt="yml"): + lines_text = '\n'.join(lines) + stories_yml = read_yaml(lines_text) + stories_lines = [] + for story in stories_yml.get("stories", []): + story_title = story.get("story", 'todo') + stories_lines.append(f"# {story_title}") + for step in story.get("steps", []): + is_usr_step = "intent" in step.keys() + is_sys_step = "action" in step.keys() + if is_usr_step: + curr_story_line = step["intent"] + stories_lines.append(f"* {curr_story_line}") + if is_sys_step: + curr_story_line = step["action"] + stories_lines.append(f"- {curr_story_line}") + + return cls.from_stories_lines_md(stories_lines) + + @classmethod + def from_stories_lines(cls, lines: List[str]): + try: + lines_text = '\n'.join(lines) + read_yaml(lines_text) + is_yaml = True + is_md = False + except: + is_yaml = False + is_md = True + + if is_yaml: + return cls.from_stories_lines_yml(lines) + if is_md: + return cls.from_stories_lines_md(lines) \ No newline at end of file diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 79369300a1..4065a3c2a1 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -96,7 +96,7 @@ def read(cls, data_path: str, fmt = "md") -> Dict[str, Dict]: story_fpath = Path(data_path, cls._data_fname(subsample_name_short)) with open(story_fpath) as f: story_lines = f.read().splitlines() - stories = Stories.from_stories_lines_md(story_lines) + stories = 
Stories.from_stories_lines(story_lines) data[short2long_subsample_name[subsample_name_short]] = RASADict({ "story_lines": stories, From 9a674f0d1908818ee53559fd142a6900ec9c4915 Mon Sep 17 00:00:00 2001 From: oserikov Date: Thu, 24 Jun 2021 01:06:34 +0300 Subject: [PATCH 108/151] wip templated nlg from outside --- .../md_yaml_dialogs_iterator.py | 504 +++--------------- .../dataset_readers/dto/rasa/stories.py | 7 +- 2 files changed, 68 insertions(+), 443 deletions(-) diff --git a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py index ffaddbcc4d..1ee3cd304a 100644 --- a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py +++ b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py @@ -11,21 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import itertools -import json -import os -import re -import tempfile from logging import getLogger -from typing import Dict, List, Tuple, Any, Iterator +from overrides import overrides +from pathlib import Path +from typing import Dict from deeppavlov.core.common.registry import register -from deeppavlov.core.data.data_learning_iterator import DataLearningIterator -from deeppavlov.dataset_readers.dstc2_reader import DSTC2DatasetReader +from deeppavlov.core.data.dataset_reader import DatasetReader from deeppavlov.dataset_readers.dto.rasa.domain_knowledge import DomainKnowledge -from deeppavlov.dataset_readers.dto.rasa.stories import Story, Turn, Stories from deeppavlov.dataset_readers.dto.rasa.nlu import Intents -import random +from deeppavlov.dataset_readers.dto.rasa.stories import Stories log = getLogger(__name__) @@ -34,447 +29,78 @@ class RASADict(dict): def __add__(self, oth): return RASADict() - -@register('md_yaml_dialogs_iterator') -class MD_YAML_DialogsDatasetIterator(DataLearningIterator): - """ - +@register('md_yaml_dialogs_reader') +class MD_YAML_DialogsDatasetReader(DatasetReader): """ + Reads dialogs from dataset composed of ``stories.md``, ``nlu.md``, ``domain.yml`` . - def __init__(self, - data: Dict[str, List[Tuple[Any, Any]]], - seed: int = None, - shuffle: bool = True, - limit: int = 10) -> None: - self.limit = limit - super().__init__(data, seed, shuffle) - - def gen_batches(self, - batch_size: int, - data_type: str = 'train', - shuffle: bool = None) -> Iterator[Tuple]: - if shuffle is None: - shuffle = self.shuffle - - data = self.data[data_type] - domain_knowledge = self.data[data_type]["domain"] - intents = self.data[data_type]["nlu_lines"] - stories = self.data[data_type]["story_lines"] - - dialogs = False - ignore_slots = False - # print(stories) - story_iterator = StoriesGenerator(stories, - intents, - domain_knowledge, - ignore_slots, - batch_size) + ``stories.md`` is to provide the dialogues dataset for model to train on. The dialogues + are represented as user messages labels and system response messages labels: (not texts, just action labels). + This is so to distinguish the NLU-NLG tasks from the actual dialogues storytelling experience: one + should be able to describe just the scripts of dialogues to the system. 
- for batch in story_iterator.generate(): - stories_parsed = batch - - # tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w', - # encoding="utf-8") - # for story_id, story in stories_parsed.items(): - # for replics in story: - # print(json.dumps(replics), file=tmp_f) - # print(file=tmp_f) - # tmp_f.close() - # noinspection PyProtectedMember - # print(batch) - gobot_formatted_stories = DSTC2DatasetReader._read_from_batch( - list(itertools.chain(*[v + [{}] for v in batch.values()])), - dialogs=dialogs) - # os.remove(tmp_f.name) - ds = [] - prev_resp_act = None - for x, y in gobot_formatted_stories: - if x.get('episode_done'): - del x['episode_done'] - prev_resp_act = None - ds.append(([], [])) - x['prev_resp_act'] = prev_resp_act - prev_resp_act = y['act'] - ds[-1][0].append(x) - ds[-1][1].append(y) - yield zip(*ds) - - # def read_story(self, stories: Stories, dialogs, - # domain_knowledge: DomainKnowledge, nlu_knowledge: Intents, - # ignore_slots): - # log.debug(f"BEFORE MLU_MD_DialogsDatasetReader._read_story(): " - # f"story_fpath={story_fpath}, " - # f"dialogs={dialogs}, " - # f"domain_knowledge={domain_knowledge}, " - # f"intent2slots2text={intent2slots2text}, " - # f"slot_name2text2value={slot_name2text2value}") - # - # - # - # - # log.debug(f"AFTER MLU_MD_DialogsDatasetReader._read_story(): " - # f"story_fpath={story_fpath}, " - # f"dialogs={dialogs}, " - # f"domain_knowledge={domain_knowledge}, " - # f"intent2slots2text={intent2slots2text}, " - # f"slot_name2text2value={slot_name2text2value}") - # - # return gobot_formatted_stories - - # if len(generated_sentences) == batch_size: - # # tuple(zip) below does [r1, r2, ..], [s1, s2, ..] -> ((r1, s1), (r2, s2), ..) - # yield tuple(zip(regexps, generated_sentences)), generated_labels - # generated_cnt += len(generated_sentences) - # regexps, generated_sentences, generated_labels = [], [], [] - # - # if generated_sentences: - # yield tuple(zip(regexps, generated_sentences)), generated_labels - # generated_cnt += len(generated_sentences) - # regexps, generated_sentences, generated_labels = [], [], [] - # - # log.info(f"Original number of samples: {len(sentences)}" - # f", generated samples: {generated_cnt}") - - def get_instances(self, data_type: str = 'train') -> Tuple[ - tuple, tuple]: - concat = lambda it: tuple(itertools.chain(*it)) - tmp = self.gen_batches(batch_size=-1, - data_type=data_type, - shuffle=False) - # print("a") - res = tuple(e for el in tmp - for e in el) - # print("b") - # print(a) - # print("c") - # res = tuple(map(concat,zip(*tmp))) - - # print(res) - return res + ``nlu.md`` is contrariwise to provide the NLU training set irrespective of the dialogues scripts. 
+ ``domain.yml`` is to desribe the task-specific domain and serves two purposes: + provide the NLG templates and provide some specific configuration of the NLU + """ -class TurnIterator: _USER_SPEAKER_ID = 1 _SYSTEM_SPEAKER_ID = 2 - def __init__(self, turn: Turn, nlu: Intents, - domain_knowledge: DomainKnowledge, ignore_slots: bool = False): - self.turn = turn - self.intents: Intents = nlu - self.domain_knowledge = domain_knowledge - self.ignore_slots = ignore_slots + VALID_DATATYPES = ('trn', 'val', 'tst') - def _clarify_slots_values(self, slots_dstc2formatted): - slots_key = [] - for slot_name, slot_value in slots_dstc2formatted: - slot_actual_value = self.intents.slot_name2text2value.get(slot_name, - {}).get( - slot_value, slot_value) - slots_key.append((slot_name, slot_actual_value)) - slots_key = tuple(sorted(slots_key)) - return slots_key + NLU_FNAME = "nlu.md" + DOMAIN_FNAME = "domain.yml" - def parse_user_intent(self): - """ - Given the intent line in RASA stories.md format, return the name of the intent and slots described with this line - Args: - line: the line to parse - Returns: - the pair of the intent name and slots ([[slot name, slot value],.. ]) info - """ - intent = self.turn.turn_description.strip('*').strip() - if '{' not in intent: - intent = intent + "{}" # the prototypical intent is "intent_name{slot1: value1, slotN: valueN}" - user_action, slots_info = intent.split('{', 1) - slots_info = json.loads('{' + slots_info) - slots_dstc2formatted = [[slot_name, slot_value] for - slot_name, slot_value in slots_info.items()] - if self.ignore_slots: - slots_dstc2formatted = dict() - return user_action, slots_dstc2formatted + @classmethod + def _data_fname(cls, datatype: str, fmt: str="md") -> str: + assert datatype in cls.VALID_DATATYPES, f"wrong datatype name: {datatype}" + return f"stories-{datatype}.{fmt}" - def choose_slots_for_whom_exists_text(self, slots_actual_values, - user_action): + @classmethod + @overrides + def read(cls, data_path: str, fmt = "md") -> Dict[str, Dict]: """ - Args: - slots_actual_values: the slot values information to look utterance for - user_action: the intent to look utterance for - Returns: - the slots ommitted to find an NLU candidate, the slots represented in the candidate, the intent name used - """ - possible_keys = [k for k in self.intents.intent2slots2text.keys() if - user_action in k] - possible_keys = possible_keys + [user_action] - possible_keys = sorted(possible_keys, - key=lambda action_s: action_s.count('+')) - for possible_action_key in possible_keys: - if self.intents.intent2slots2text[possible_action_key].get( - slots_actual_values): - slots_used_values = slots_actual_values - slots_to_exclude = [] - return slots_to_exclude, slots_used_values, possible_action_key - else: - slots_lazy_key = set(e[0] for e in slots_actual_values) - slots_lazy_key -= {"intent"} - fake_keys = [] - for known_key in self.intents.intent2slots2text[ - possible_action_key].keys(): - if slots_lazy_key.issubset(set(e[0] for e in known_key)): - fake_keys.append(known_key) - break - - if fake_keys: - slots_used_values = sorted(fake_keys, key=lambda elem: ( - len(set(slots_actual_values) ^ set(elem)), - len([e for e in elem - if e[0] not in slots_lazy_key])) - )[0] - - slots_to_exclude = [e[0] for e in slots_used_values if - e[0] not in slots_lazy_key] - return slots_to_exclude, slots_used_values, possible_action_key + Parameters: + data_path: path to read dataset from - raise KeyError("no possible NLU candidates found") - - def user_action2text(self, 
user_action: str, slots_li=None): - """ - given the user intent, return the text representing this intent with passed slots - Args: - user_action: the name of intent to generate text for - slots_li: the slot values to provide Returns: - the text of utterance relevant to the passed intent and slots - """ - if slots_li is None: - slots_li = tuple() - res = self.intents.intent2slots2text[user_action][slots_li] - # print(res) - # print(self.intents.intent2slots2text) - return res - - def process_user_turn(self): - user_action, slots_dstc2formatted = self.parse_user_intent() - slots_actual_values = self._clarify_slots_values(slots_dstc2formatted) - slots_to_exclude, slots_used_values, action_for_text = self.choose_slots_for_whom_exists_text( - slots_actual_values, user_action) - possible_user_response_infos = self.user_action2text(action_for_text, - slots_used_values) - random.shuffle(possible_user_response_infos) - # possible_user_utters = [] - for user_response_info in possible_user_response_infos[:2]: - # print(user_response_info) - user_utter = {"speaker": self._USER_SPEAKER_ID, - "text": user_response_info["text"], - "dialog_acts": [{"act": user_action, - "slots": user_response_info[ - "slots"]}], - "slots to exclude": slots_to_exclude} - yield user_utter - - def system_action2text(self, system_action): - """ - given the system action name return the relevant template text - Args: - domain_knowledge: the domain knowledge relevant to the currently processed config - system_action: the name of the action to get intent for - Returns: - template relevant to the passed action - """ - possible_system_responses = self.domain_knowledge.response_templates.get( - system_action, - [{"text": system_action}]) - - response_text = possible_system_responses[0]["text"] - response_text = re.sub(r"(\w+)\=\{(.*?)\}", r"#\2", - response_text) # TODO: straightforward regex string - - return response_text - - def parse_system_turn(self): + dictionary that contains + ``'train'`` field with dialogs from ``'stories-trn.md'``, + ``'valid'`` field with dialogs from ``'stories-val.md'`` and + ``'test'`` field with dialogs from ``'stories-tst.md'``. + Each field is a list of tuples ``(x_i, y_i)``. 
""" - Given the RASA stories.md line, returns the dstc2-formatted json (dict) for this line - Args: - domain_knowledge: the domain knowledge relevant to the processed stories config (from which line is taken) - line: the story system step representing line from stories.md - Returns: - the dstc2-formatted passed turn - """ - # system actions are started in dataset with - - system_action_name = self.turn.turn_description.strip('-').strip() - curr_action_text = self.system_action2text(system_action_name) - system_action = {"speaker": self._SYSTEM_SPEAKER_ID, - "text": curr_action_text, - "dialog_acts": [ - {"act": system_action_name, "slots": []}]} - if system_action_name.startswith("action"): - system_action["db_result"] = {} - return system_action - - def process_system_utter(self): - """ - Yields: all the possible dstc2 versions of the passed story line - TODO: SUPPORT FORMS - """ - # nonlocal intent2slots2text, domain_knowledge, curr_story_utters_batch, nonlocal_curr_story_bad - system_action = self.parse_system_turn() - # system_action_name = system_action.get("dialog_acts")[0].get("act") - # - # for curr_story_utters in curr_story_utters_batch: - # if cls.last_turn_is_systems_turn(curr_story_utters): - # # deal with consecutive system actions by inserting the last user replics in between - # curr_story_utters.append( - # cls.get_last_users_turn(curr_story_utters)) - # - # def parse_form_name(story_line: str) -> str: - # """ - # if the line (in stories.md utterance format) contains a form name, return it - # Args: - # story_line: line to extract form name from - # Returns: - # the extracted form name or None if no form name found - # """ - # form_name = None - # if story_line.startswith("form"): - # form_di = json.loads(story_line[len("form"):]) - # form_name = form_di["name"] - # return form_name - # - # if system_action_name.startswith("form"): - # form_name = parse_form_name(system_action_name) - # augmented_utters = cls.augment_form(form_name, domain_knowledge, - # intent2slots2text) - # - # utters_to_append_batch = [[]] - # for user_utter in augmented_utters: - # new_curr_story_utters_batch = [] - # for curr_story_utters in utters_to_append_batch: - # possible_extensions = process_story_line(user_utter) - # for possible_extension in possible_extensions: - # new_curr_story_utters = curr_story_utters.copy() - # new_curr_story_utters.extend(possible_extension) - # new_curr_story_utters_batch.append( - # new_curr_story_utters) - # utters_to_append_batch = new_curr_story_utters_batch - # else: - # utters_to_append_batch = [[system_action]] - - yield system_action - - def __call__(self): - if self.turn.is_user_turn(): - for possible_turn in self.process_user_turn(): - yield possible_turn - elif self.turn.is_system_turn(): - for possible_turn in self.process_system_utter(): - yield possible_turn - - -def iterProduct(ic): - # https://stackoverflow.com/a/12094245 - if not ic: - yield [] - return - - for i in ic[0](): - for js in iterProduct(ic[1:]): - yield [i] + js - - -class StoryGenerator: - def __init__(self, story: Story, nlu: Intents, - domain_knowledge: DomainKnowledge, ignore_slots=False): - self.story: Story = story - self.turn_iterators = [] - for turn in story.turns: - turn_iterator = TurnIterator(turn, nlu, domain_knowledge, - ignore_slots) - self.turn_iterators.append(turn_iterator) - self.turn_ix = -1 - self.version_ix = -1 - - def gen_story_sample(self): - for i in iterProduct(self.turn_iterators): - yield i - - -class StoriesGenerator: - def __init__(self, stories: Stories, 
intents: Intents, - domain_knowledge: DomainKnowledge, ignore_slots: False, - batch_size=1): - self.stories = stories - self.intents = intents - self.domain_knowledge = domain_knowledge - self.ignore_slots = ignore_slots - self.batch_size = batch_size - - def generate(self): - batch = dict() - for story in self.stories.stories: - story_generator = StoryGenerator(story, self.intents, - self.domain_knowledge, - self.ignore_slots) - for story_data in story_generator.gen_story_sample(): - batch[story.title] = story_data - if len(batch) == self.batch_size: - yield batch - batch = dict() - yield batch - -# _USER_SPEAKER_ID = 1 -# _SYSTEM_SPEAKER_ID = 2 -# -# VALID_DATATYPES = ('trn', 'val', 'tst') -# -# NLU_FNAME = "nlu.md" -# DOMAIN_FNAME = "domain.yml" -# -# @classmethod -# def _data_fname(cls, datatype: str) -> str: -# assert datatype in cls.VALID_DATATYPES, f"wrong datatype name: {datatype}" -# return f"stories-{datatype}.md" -# -# @classmethod -# @overrides -# def read(cls, data_path: str, fmt = "md") -> Dict[str, Dict]: -# """ -# Parameters: -# data_path: path to read dataset from -# -# Returns: -# dictionary tha(t contains -# ``'train'`` field with dialogs from ``'stories-trn.md'``, -# ``'valid'`` field with dialogs from ``'stories-val.md'`` and -# ``'test'`` field with dialogs from ``'stories-tst.md'``. -# Each field is a list of tuples ``(x_i, y_i)``. -# """ -# domain_fname = cls.DOMAIN_FNAME -# nlu_fname = cls.NLU_FNAME if fmt in ("md", "markdown") else cls.NLU_FNAME.replace('.md', f'.{fmt}') -# stories_fnames = tuple(cls._data_fname(dt) for dt in cls.VALID_DATATYPES) -# required_fnames = stories_fnames + (nlu_fname, domain_fname) -# for required_fname in required_fnames: -# required_path = Path(data_path, required_fname) -# if not required_path.exists(): -# log.error(f"INSIDE MLU_MD_DialogsDatasetReader.read(): " -# f"{required_fname} not found with path {required_path}") -# -# domain_path = Path(data_path, domain_fname) -# domain_knowledge = DomainKnowledge.from_yaml(domain_path) -# nlu_fpath = Path(data_path, nlu_fname) -# intents = Intents.from_file(nlu_fpath) -# -# short2long_subsample_name = {"trn": "train", -# "val": "valid", -# "tst": "test"} -# -# data = RASADict() -# for subsample_name_short in cls.VALID_DATATYPES: -# story_fpath = Path(data_path, cls._data_fname(subsample_name_short)) -# with open(story_fpath) as f: -# story_lines = f.read().splitlines() -# stories = Stories.from_stories_lines_md(story_lines) -# dat = RASADict({"story_lines": stories, -# "domain": domain_knowledge, -# "nlu_lines": intents}) -# data[short2long_subsample_name[subsample_name_short]] = dat -# data = RASADict(data) -# return data + domain_fname = cls.DOMAIN_FNAME + nlu_fname = cls.NLU_FNAME if fmt in ("md", "markdown") else cls.NLU_FNAME.replace('.md', f'.{fmt}') + stories_fnames = tuple(cls._data_fname(dt, fmt) for dt in cls.VALID_DATATYPES) + required_fnames = stories_fnames + (nlu_fname, domain_fname) + for required_fname in required_fnames: + required_path = Path(data_path, required_fname) + if not required_path.exists(): + log.error(f"INSIDE MLU_MD_DialogsDatasetReader.read(): " + f"{required_fname} not found with path {required_path}") + + domain_path = Path(data_path, domain_fname) + domain_knowledge = DomainKnowledge.from_yaml(domain_path) + nlu_fpath = Path(data_path, nlu_fname) + intents = Intents.from_file(nlu_fpath) + + short2long_subsample_name = {"trn": "train", + "val": "valid", + "tst": "test"} + + data = dict() + for subsample_name_short in cls.VALID_DATATYPES: + story_fpath = 
Path(data_path, cls._data_fname(subsample_name_short, fmt)) + with open(story_fpath) as f: + story_lines = f.read().splitlines() + stories = Stories.from_stories_lines(story_lines) + + data[short2long_subsample_name[subsample_name_short]] = RASADict({ + "story_lines": stories, + "domain": domain_knowledge, + "nlu_lines": intents}) + data = RASADict(data) + return data \ No newline at end of file diff --git a/deeppavlov/dataset_readers/dto/rasa/stories.py b/deeppavlov/dataset_readers/dto/rasa/stories.py index 9731d14a7c..3fb572581e 100644 --- a/deeppavlov/dataset_readers/dto/rasa/stories.py +++ b/deeppavlov/dataset_readers/dto/rasa/stories.py @@ -1,5 +1,5 @@ from typing import List -from deeppavlov.core.common.file import read_yaml +from ruamel.yaml import YAML USER = "usr" SYSTEM = "sys" @@ -56,7 +56,7 @@ def from_stories_lines_md(cls, lines: List[str], fmt="md"): @classmethod def from_stories_lines_yml(cls, lines: List[str], fmt="yml"): lines_text = '\n'.join(lines) - stories_yml = read_yaml(lines_text) + stories_yml = YAML().load(lines_text) stories_lines = [] for story in stories_yml.get("stories", []): story_title = story.get("story", 'todo') @@ -70,14 +70,13 @@ def from_stories_lines_yml(cls, lines: List[str], fmt="yml"): if is_sys_step: curr_story_line = step["action"] stories_lines.append(f"- {curr_story_line}") - return cls.from_stories_lines_md(stories_lines) @classmethod def from_stories_lines(cls, lines: List[str]): try: lines_text = '\n'.join(lines) - read_yaml(lines_text) + YAML().load(lines_text) is_yaml = True is_md = False except: From dbcaf73acd8580e2bec337300ab0d29887d78c51 Mon Sep 17 00:00:00 2001 From: oserikov Date: Sat, 26 Jun 2021 09:43:07 +0300 Subject: [PATCH 109/151] wip templated nlg from outside --- .../md_yaml_dialogs_iterator.py | 504 +++++++++++++++--- .../dataset_readers/md_yaml_dialogs_reader.py | 8 +- 2 files changed, 443 insertions(+), 69 deletions(-) diff --git a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py index 1ee3cd304a..ffaddbcc4d 100644 --- a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py +++ b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py @@ -11,16 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
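The ``stories.py`` hunk above switches story-format detection to ``ruamel.yaml``: ``from_stories_lines`` simply tries to parse the joined lines as YAML and falls back to the Markdown parser when parsing fails. A minimal standalone sketch of that check, with an assumed helper name that is not part of the patch:

```python
from ruamel.yaml import YAML
from ruamel.yaml.error import YAMLError


def looks_like_yaml_stories(lines):
    """Return True if the joined story lines parse as YAML, False for Markdown stories."""
    try:
        YAML().load("\n".join(lines))
        return True
    except YAMLError:
        return False


# YAML-style stories parse cleanly, Markdown-style stories raise a YAMLError.
print(looks_like_yaml_stories(["stories:", "- story: greet", "  steps:", "  - intent: hello"]))  # True
print(looks_like_yaml_stories(["## greet", "* hello", "  - utter_hello"]))  # False
```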
+import itertools +import json +import os +import re +import tempfile from logging import getLogger -from overrides import overrides -from pathlib import Path -from typing import Dict +from typing import Dict, List, Tuple, Any, Iterator from deeppavlov.core.common.registry import register -from deeppavlov.core.data.dataset_reader import DatasetReader +from deeppavlov.core.data.data_learning_iterator import DataLearningIterator +from deeppavlov.dataset_readers.dstc2_reader import DSTC2DatasetReader from deeppavlov.dataset_readers.dto.rasa.domain_knowledge import DomainKnowledge +from deeppavlov.dataset_readers.dto.rasa.stories import Story, Turn, Stories from deeppavlov.dataset_readers.dto.rasa.nlu import Intents -from deeppavlov.dataset_readers.dto.rasa.stories import Stories +import random log = getLogger(__name__) @@ -29,78 +34,447 @@ class RASADict(dict): def __add__(self, oth): return RASADict() -@register('md_yaml_dialogs_reader') -class MD_YAML_DialogsDatasetReader(DatasetReader): + +@register('md_yaml_dialogs_iterator') +class MD_YAML_DialogsDatasetIterator(DataLearningIterator): """ - Reads dialogs from dataset composed of ``stories.md``, ``nlu.md``, ``domain.yml`` . - ``stories.md`` is to provide the dialogues dataset for model to train on. The dialogues - are represented as user messages labels and system response messages labels: (not texts, just action labels). - This is so to distinguish the NLU-NLG tasks from the actual dialogues storytelling experience: one - should be able to describe just the scripts of dialogues to the system. + """ - ``nlu.md`` is contrariwise to provide the NLU training set irrespective of the dialogues scripts. + def __init__(self, + data: Dict[str, List[Tuple[Any, Any]]], + seed: int = None, + shuffle: bool = True, + limit: int = 10) -> None: + self.limit = limit + super().__init__(data, seed, shuffle) + + def gen_batches(self, + batch_size: int, + data_type: str = 'train', + shuffle: bool = None) -> Iterator[Tuple]: + if shuffle is None: + shuffle = self.shuffle + + data = self.data[data_type] + domain_knowledge = self.data[data_type]["domain"] + intents = self.data[data_type]["nlu_lines"] + stories = self.data[data_type]["story_lines"] + + dialogs = False + ignore_slots = False + # print(stories) + story_iterator = StoriesGenerator(stories, + intents, + domain_knowledge, + ignore_slots, + batch_size) + + for batch in story_iterator.generate(): + stories_parsed = batch + + # tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w', + # encoding="utf-8") + # for story_id, story in stories_parsed.items(): + # for replics in story: + # print(json.dumps(replics), file=tmp_f) + # print(file=tmp_f) + # tmp_f.close() + # noinspection PyProtectedMember + # print(batch) + gobot_formatted_stories = DSTC2DatasetReader._read_from_batch( + list(itertools.chain(*[v + [{}] for v in batch.values()])), + dialogs=dialogs) + # os.remove(tmp_f.name) + ds = [] + prev_resp_act = None + for x, y in gobot_formatted_stories: + if x.get('episode_done'): + del x['episode_done'] + prev_resp_act = None + ds.append(([], [])) + x['prev_resp_act'] = prev_resp_act + prev_resp_act = y['act'] + ds[-1][0].append(x) + ds[-1][1].append(y) + yield zip(*ds) + + # def read_story(self, stories: Stories, dialogs, + # domain_knowledge: DomainKnowledge, nlu_knowledge: Intents, + # ignore_slots): + # log.debug(f"BEFORE MLU_MD_DialogsDatasetReader._read_story(): " + # f"story_fpath={story_fpath}, " + # f"dialogs={dialogs}, " + # f"domain_knowledge={domain_knowledge}, " + # 
f"intent2slots2text={intent2slots2text}, " + # f"slot_name2text2value={slot_name2text2value}") + # + # + # + # + # log.debug(f"AFTER MLU_MD_DialogsDatasetReader._read_story(): " + # f"story_fpath={story_fpath}, " + # f"dialogs={dialogs}, " + # f"domain_knowledge={domain_knowledge}, " + # f"intent2slots2text={intent2slots2text}, " + # f"slot_name2text2value={slot_name2text2value}") + # + # return gobot_formatted_stories + + # if len(generated_sentences) == batch_size: + # # tuple(zip) below does [r1, r2, ..], [s1, s2, ..] -> ((r1, s1), (r2, s2), ..) + # yield tuple(zip(regexps, generated_sentences)), generated_labels + # generated_cnt += len(generated_sentences) + # regexps, generated_sentences, generated_labels = [], [], [] + # + # if generated_sentences: + # yield tuple(zip(regexps, generated_sentences)), generated_labels + # generated_cnt += len(generated_sentences) + # regexps, generated_sentences, generated_labels = [], [], [] + # + # log.info(f"Original number of samples: {len(sentences)}" + # f", generated samples: {generated_cnt}") + + def get_instances(self, data_type: str = 'train') -> Tuple[ + tuple, tuple]: + concat = lambda it: tuple(itertools.chain(*it)) + tmp = self.gen_batches(batch_size=-1, + data_type=data_type, + shuffle=False) + # print("a") + res = tuple(e for el in tmp + for e in el) + # print("b") + # print(a) + # print("c") + # res = tuple(map(concat,zip(*tmp))) + + # print(res) + return res - ``domain.yml`` is to desribe the task-specific domain and serves two purposes: - provide the NLG templates and provide some specific configuration of the NLU - """ +class TurnIterator: _USER_SPEAKER_ID = 1 _SYSTEM_SPEAKER_ID = 2 - VALID_DATATYPES = ('trn', 'val', 'tst') + def __init__(self, turn: Turn, nlu: Intents, + domain_knowledge: DomainKnowledge, ignore_slots: bool = False): + self.turn = turn + self.intents: Intents = nlu + self.domain_knowledge = domain_knowledge + self.ignore_slots = ignore_slots - NLU_FNAME = "nlu.md" - DOMAIN_FNAME = "domain.yml" + def _clarify_slots_values(self, slots_dstc2formatted): + slots_key = [] + for slot_name, slot_value in slots_dstc2formatted: + slot_actual_value = self.intents.slot_name2text2value.get(slot_name, + {}).get( + slot_value, slot_value) + slots_key.append((slot_name, slot_actual_value)) + slots_key = tuple(sorted(slots_key)) + return slots_key - @classmethod - def _data_fname(cls, datatype: str, fmt: str="md") -> str: - assert datatype in cls.VALID_DATATYPES, f"wrong datatype name: {datatype}" - return f"stories-{datatype}.{fmt}" + def parse_user_intent(self): + """ + Given the intent line in RASA stories.md format, return the name of the intent and slots described with this line + Args: + line: the line to parse + Returns: + the pair of the intent name and slots ([[slot name, slot value],.. 
]) info + """ + intent = self.turn.turn_description.strip('*').strip() + if '{' not in intent: + intent = intent + "{}" # the prototypical intent is "intent_name{slot1: value1, slotN: valueN}" + user_action, slots_info = intent.split('{', 1) + slots_info = json.loads('{' + slots_info) + slots_dstc2formatted = [[slot_name, slot_value] for + slot_name, slot_value in slots_info.items()] + if self.ignore_slots: + slots_dstc2formatted = dict() + return user_action, slots_dstc2formatted - @classmethod - @overrides - def read(cls, data_path: str, fmt = "md") -> Dict[str, Dict]: + def choose_slots_for_whom_exists_text(self, slots_actual_values, + user_action): """ - Parameters: - data_path: path to read dataset from + Args: + slots_actual_values: the slot values information to look utterance for + user_action: the intent to look utterance for + Returns: + the slots ommitted to find an NLU candidate, the slots represented in the candidate, the intent name used + """ + possible_keys = [k for k in self.intents.intent2slots2text.keys() if + user_action in k] + possible_keys = possible_keys + [user_action] + possible_keys = sorted(possible_keys, + key=lambda action_s: action_s.count('+')) + for possible_action_key in possible_keys: + if self.intents.intent2slots2text[possible_action_key].get( + slots_actual_values): + slots_used_values = slots_actual_values + slots_to_exclude = [] + return slots_to_exclude, slots_used_values, possible_action_key + else: + slots_lazy_key = set(e[0] for e in slots_actual_values) + slots_lazy_key -= {"intent"} + fake_keys = [] + for known_key in self.intents.intent2slots2text[ + possible_action_key].keys(): + if slots_lazy_key.issubset(set(e[0] for e in known_key)): + fake_keys.append(known_key) + break + + if fake_keys: + slots_used_values = sorted(fake_keys, key=lambda elem: ( + len(set(slots_actual_values) ^ set(elem)), + len([e for e in elem + if e[0] not in slots_lazy_key])) + )[0] + + slots_to_exclude = [e[0] for e in slots_used_values if + e[0] not in slots_lazy_key] + return slots_to_exclude, slots_used_values, possible_action_key + raise KeyError("no possible NLU candidates found") + + def user_action2text(self, user_action: str, slots_li=None): + """ + given the user intent, return the text representing this intent with passed slots + Args: + user_action: the name of intent to generate text for + slots_li: the slot values to provide Returns: - dictionary that contains - ``'train'`` field with dialogs from ``'stories-trn.md'``, - ``'valid'`` field with dialogs from ``'stories-val.md'`` and - ``'test'`` field with dialogs from ``'stories-tst.md'``. - Each field is a list of tuples ``(x_i, y_i)``. 
+ the text of utterance relevant to the passed intent and slots + """ + if slots_li is None: + slots_li = tuple() + res = self.intents.intent2slots2text[user_action][slots_li] + # print(res) + # print(self.intents.intent2slots2text) + return res + + def process_user_turn(self): + user_action, slots_dstc2formatted = self.parse_user_intent() + slots_actual_values = self._clarify_slots_values(slots_dstc2formatted) + slots_to_exclude, slots_used_values, action_for_text = self.choose_slots_for_whom_exists_text( + slots_actual_values, user_action) + possible_user_response_infos = self.user_action2text(action_for_text, + slots_used_values) + random.shuffle(possible_user_response_infos) + # possible_user_utters = [] + for user_response_info in possible_user_response_infos[:2]: + # print(user_response_info) + user_utter = {"speaker": self._USER_SPEAKER_ID, + "text": user_response_info["text"], + "dialog_acts": [{"act": user_action, + "slots": user_response_info[ + "slots"]}], + "slots to exclude": slots_to_exclude} + yield user_utter + + def system_action2text(self, system_action): + """ + given the system action name return the relevant template text + Args: + domain_knowledge: the domain knowledge relevant to the currently processed config + system_action: the name of the action to get intent for + Returns: + template relevant to the passed action + """ + possible_system_responses = self.domain_knowledge.response_templates.get( + system_action, + [{"text": system_action}]) + + response_text = possible_system_responses[0]["text"] + response_text = re.sub(r"(\w+)\=\{(.*?)\}", r"#\2", + response_text) # TODO: straightforward regex string + + return response_text + + def parse_system_turn(self): """ - domain_fname = cls.DOMAIN_FNAME - nlu_fname = cls.NLU_FNAME if fmt in ("md", "markdown") else cls.NLU_FNAME.replace('.md', f'.{fmt}') - stories_fnames = tuple(cls._data_fname(dt, fmt) for dt in cls.VALID_DATATYPES) - required_fnames = stories_fnames + (nlu_fname, domain_fname) - for required_fname in required_fnames: - required_path = Path(data_path, required_fname) - if not required_path.exists(): - log.error(f"INSIDE MLU_MD_DialogsDatasetReader.read(): " - f"{required_fname} not found with path {required_path}") - - domain_path = Path(data_path, domain_fname) - domain_knowledge = DomainKnowledge.from_yaml(domain_path) - nlu_fpath = Path(data_path, nlu_fname) - intents = Intents.from_file(nlu_fpath) - - short2long_subsample_name = {"trn": "train", - "val": "valid", - "tst": "test"} - - data = dict() - for subsample_name_short in cls.VALID_DATATYPES: - story_fpath = Path(data_path, cls._data_fname(subsample_name_short, fmt)) - with open(story_fpath) as f: - story_lines = f.read().splitlines() - stories = Stories.from_stories_lines(story_lines) - - data[short2long_subsample_name[subsample_name_short]] = RASADict({ - "story_lines": stories, - "domain": domain_knowledge, - "nlu_lines": intents}) - data = RASADict(data) - return data \ No newline at end of file + Given the RASA stories.md line, returns the dstc2-formatted json (dict) for this line + Args: + domain_knowledge: the domain knowledge relevant to the processed stories config (from which line is taken) + line: the story system step representing line from stories.md + Returns: + the dstc2-formatted passed turn + """ + # system actions are started in dataset with - + system_action_name = self.turn.turn_description.strip('-').strip() + curr_action_text = self.system_action2text(system_action_name) + system_action = {"speaker": 
self._SYSTEM_SPEAKER_ID, + "text": curr_action_text, + "dialog_acts": [ + {"act": system_action_name, "slots": []}]} + if system_action_name.startswith("action"): + system_action["db_result"] = {} + return system_action + + def process_system_utter(self): + """ + Yields: all the possible dstc2 versions of the passed story line + TODO: SUPPORT FORMS + """ + # nonlocal intent2slots2text, domain_knowledge, curr_story_utters_batch, nonlocal_curr_story_bad + system_action = self.parse_system_turn() + # system_action_name = system_action.get("dialog_acts")[0].get("act") + # + # for curr_story_utters in curr_story_utters_batch: + # if cls.last_turn_is_systems_turn(curr_story_utters): + # # deal with consecutive system actions by inserting the last user replics in between + # curr_story_utters.append( + # cls.get_last_users_turn(curr_story_utters)) + # + # def parse_form_name(story_line: str) -> str: + # """ + # if the line (in stories.md utterance format) contains a form name, return it + # Args: + # story_line: line to extract form name from + # Returns: + # the extracted form name or None if no form name found + # """ + # form_name = None + # if story_line.startswith("form"): + # form_di = json.loads(story_line[len("form"):]) + # form_name = form_di["name"] + # return form_name + # + # if system_action_name.startswith("form"): + # form_name = parse_form_name(system_action_name) + # augmented_utters = cls.augment_form(form_name, domain_knowledge, + # intent2slots2text) + # + # utters_to_append_batch = [[]] + # for user_utter in augmented_utters: + # new_curr_story_utters_batch = [] + # for curr_story_utters in utters_to_append_batch: + # possible_extensions = process_story_line(user_utter) + # for possible_extension in possible_extensions: + # new_curr_story_utters = curr_story_utters.copy() + # new_curr_story_utters.extend(possible_extension) + # new_curr_story_utters_batch.append( + # new_curr_story_utters) + # utters_to_append_batch = new_curr_story_utters_batch + # else: + # utters_to_append_batch = [[system_action]] + + yield system_action + + def __call__(self): + if self.turn.is_user_turn(): + for possible_turn in self.process_user_turn(): + yield possible_turn + elif self.turn.is_system_turn(): + for possible_turn in self.process_system_utter(): + yield possible_turn + + +def iterProduct(ic): + # https://stackoverflow.com/a/12094245 + if not ic: + yield [] + return + + for i in ic[0](): + for js in iterProduct(ic[1:]): + yield [i] + js + + +class StoryGenerator: + def __init__(self, story: Story, nlu: Intents, + domain_knowledge: DomainKnowledge, ignore_slots=False): + self.story: Story = story + self.turn_iterators = [] + for turn in story.turns: + turn_iterator = TurnIterator(turn, nlu, domain_knowledge, + ignore_slots) + self.turn_iterators.append(turn_iterator) + self.turn_ix = -1 + self.version_ix = -1 + + def gen_story_sample(self): + for i in iterProduct(self.turn_iterators): + yield i + + +class StoriesGenerator: + def __init__(self, stories: Stories, intents: Intents, + domain_knowledge: DomainKnowledge, ignore_slots: False, + batch_size=1): + self.stories = stories + self.intents = intents + self.domain_knowledge = domain_knowledge + self.ignore_slots = ignore_slots + self.batch_size = batch_size + + def generate(self): + batch = dict() + for story in self.stories.stories: + story_generator = StoryGenerator(story, self.intents, + self.domain_knowledge, + self.ignore_slots) + for story_data in story_generator.gen_story_sample(): + batch[story.title] = story_data + if len(batch) 
== self.batch_size: + yield batch + batch = dict() + yield batch + +# _USER_SPEAKER_ID = 1 +# _SYSTEM_SPEAKER_ID = 2 +# +# VALID_DATATYPES = ('trn', 'val', 'tst') +# +# NLU_FNAME = "nlu.md" +# DOMAIN_FNAME = "domain.yml" +# +# @classmethod +# def _data_fname(cls, datatype: str) -> str: +# assert datatype in cls.VALID_DATATYPES, f"wrong datatype name: {datatype}" +# return f"stories-{datatype}.md" +# +# @classmethod +# @overrides +# def read(cls, data_path: str, fmt = "md") -> Dict[str, Dict]: +# """ +# Parameters: +# data_path: path to read dataset from +# +# Returns: +# dictionary tha(t contains +# ``'train'`` field with dialogs from ``'stories-trn.md'``, +# ``'valid'`` field with dialogs from ``'stories-val.md'`` and +# ``'test'`` field with dialogs from ``'stories-tst.md'``. +# Each field is a list of tuples ``(x_i, y_i)``. +# """ +# domain_fname = cls.DOMAIN_FNAME +# nlu_fname = cls.NLU_FNAME if fmt in ("md", "markdown") else cls.NLU_FNAME.replace('.md', f'.{fmt}') +# stories_fnames = tuple(cls._data_fname(dt) for dt in cls.VALID_DATATYPES) +# required_fnames = stories_fnames + (nlu_fname, domain_fname) +# for required_fname in required_fnames: +# required_path = Path(data_path, required_fname) +# if not required_path.exists(): +# log.error(f"INSIDE MLU_MD_DialogsDatasetReader.read(): " +# f"{required_fname} not found with path {required_path}") +# +# domain_path = Path(data_path, domain_fname) +# domain_knowledge = DomainKnowledge.from_yaml(domain_path) +# nlu_fpath = Path(data_path, nlu_fname) +# intents = Intents.from_file(nlu_fpath) +# +# short2long_subsample_name = {"trn": "train", +# "val": "valid", +# "tst": "test"} +# +# data = RASADict() +# for subsample_name_short in cls.VALID_DATATYPES: +# story_fpath = Path(data_path, cls._data_fname(subsample_name_short)) +# with open(story_fpath) as f: +# story_lines = f.read().splitlines() +# stories = Stories.from_stories_lines_md(story_lines) +# dat = RASADict({"story_lines": stories, +# "domain": domain_knowledge, +# "nlu_lines": intents}) +# data[short2long_subsample_name[subsample_name_short]] = dat +# data = RASADict(data) +# return data diff --git a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py index 4065a3c2a1..1ee3cd304a 100644 --- a/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py +++ b/deeppavlov/dataset_readers/md_yaml_dialogs_reader.py @@ -54,9 +54,9 @@ class MD_YAML_DialogsDatasetReader(DatasetReader): DOMAIN_FNAME = "domain.yml" @classmethod - def _data_fname(cls, datatype: str) -> str: + def _data_fname(cls, datatype: str, fmt: str="md") -> str: assert datatype in cls.VALID_DATATYPES, f"wrong datatype name: {datatype}" - return f"stories-{datatype}.md" + return f"stories-{datatype}.{fmt}" @classmethod @overrides @@ -74,7 +74,7 @@ def read(cls, data_path: str, fmt = "md") -> Dict[str, Dict]: """ domain_fname = cls.DOMAIN_FNAME nlu_fname = cls.NLU_FNAME if fmt in ("md", "markdown") else cls.NLU_FNAME.replace('.md', f'.{fmt}') - stories_fnames = tuple(cls._data_fname(dt) for dt in cls.VALID_DATATYPES) + stories_fnames = tuple(cls._data_fname(dt, fmt) for dt in cls.VALID_DATATYPES) required_fnames = stories_fnames + (nlu_fname, domain_fname) for required_fname in required_fnames: required_path = Path(data_path, required_fname) @@ -93,7 +93,7 @@ def read(cls, data_path: str, fmt = "md") -> Dict[str, Dict]: data = dict() for subsample_name_short in cls.VALID_DATATYPES: - story_fpath = Path(data_path, cls._data_fname(subsample_name_short)) + story_fpath = 
Path(data_path, cls._data_fname(subsample_name_short, fmt)) with open(story_fpath) as f: story_lines = f.read().splitlines() stories = Stories.from_stories_lines(story_lines) From 5dbdf6a9aaddfc816ab80cf5d9e129fe5ed4a6a3 Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Mon, 5 Jul 2021 18:34:50 +0200 Subject: [PATCH 110/151] Init Python files --- deeppavlov/models/go_bot/trippy.py | 0 deeppavlov/models/go_bot/trippy_bert_for_dst.py | 0 deeppavlov/models/go_bot/trippy_preporcessing.py | 0 3 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 deeppavlov/models/go_bot/trippy.py create mode 100644 deeppavlov/models/go_bot/trippy_bert_for_dst.py create mode 100644 deeppavlov/models/go_bot/trippy_preporcessing.py diff --git a/deeppavlov/models/go_bot/trippy.py b/deeppavlov/models/go_bot/trippy.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/deeppavlov/models/go_bot/trippy_bert_for_dst.py b/deeppavlov/models/go_bot/trippy_bert_for_dst.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/deeppavlov/models/go_bot/trippy_preporcessing.py b/deeppavlov/models/go_bot/trippy_preporcessing.py new file mode 100644 index 0000000000..e69de29bb2 From 03332879351686e3d4fe1f5cb988820aae814d05 Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Mon, 5 Jul 2021 18:40:21 +0200 Subject: [PATCH 111/151] Add TripPy logic --- .../configs/go_bot/trippy_dstc2_minimal.json | 77 ++ .../go_bot/trippy_md_yaml_minimal.json | 67 ++ deeppavlov/models/go_bot/trippy.py | 461 +++++++++ .../models/go_bot/trippy_bert_for_dst.py | 236 +++++ .../models/go_bot/trippy_preporcessing.py | 964 ++++++++++++++++++ 5 files changed, 1805 insertions(+) create mode 100644 deeppavlov/configs/go_bot/trippy_dstc2_minimal.json create mode 100644 deeppavlov/configs/go_bot/trippy_md_yaml_minimal.json diff --git a/deeppavlov/configs/go_bot/trippy_dstc2_minimal.json b/deeppavlov/configs/go_bot/trippy_dstc2_minimal.json new file mode 100644 index 0000000000..220794f43e --- /dev/null +++ b/deeppavlov/configs/go_bot/trippy_dstc2_minimal.json @@ -0,0 +1,77 @@ +{ + "dataset_reader": { + "class_name": "dstc2_reader", + "data_path": "{DATA_PATH}" + }, + "dataset_iterator": { + "class_name": "dialog_iterator" + }, + "chainer": { + "in": ["x"], + "in_y": ["y"], + "out": ["y_predicted"], + "pipe": [ + { + "class_name": "trippy", + "in": ["x"], + "in_y": ["y"], + "out": ["y_predicted"], + "load_path": "{MODEL_PATH}/model", + "save_path": "{MODEL_PATH}/model", + "pretrained_bert": "bert-base-uncased", + "max_seq_length": 180, + "debug": false, + "database": null, + "nlg_manager": { + "class_name": "gobot_nlg_manager", + "template_path": "{DATA_PATH}/dstc2-templates.txt", + "template_type": "DualTemplate", + "api_call_action": "api_call" + }, + "api_call_action": null, + "slot_names": ["pricerange", "this", "area", "food", "name"] + } + ] + }, + "train": { + "epochs": 200, + "batch_size": 4, + + "metrics": ["per_item_dialog_accuracy"], + "validation_patience": 10, + "val_every_n_batches": -1, + "val_every_n_epochs": -1, + + "log_every_n_batches": 15, + "log_on_k_batches": -1, + + "validate_first": false, + + "show_examples": false, + "evaluation_targets": [ + "valid", + "test" + ] + }, + "metadata": { + "variables": { + "ROOT_PATH": "~/.deeppavlov", + "CONFIGS_PATH": "{DEEPPAVLOV_PATH}/configs", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "DATA_PATH": "{DOWNLOADS_PATH}/dstc2_v3", + "MODELS_PATH": "{ROOT_PATH}/models", + 
"MODEL_PATH": "{MODELS_PATH}/gobot_dstc2_minimal" + }, + "download": [ + { + "url": "http://files.deeppavlov.ai/deeppavlov_data/gobot_dstc2_v9.tar.gz", + "subdir": "{MODELS_PATH}" + }, + { + "url": "http://files.deeppavlov.ai/datasets/dstc2_v3.tar.gz", + "subdir": "{DATA_PATH}" + } + ] + } + } + \ No newline at end of file diff --git a/deeppavlov/configs/go_bot/trippy_md_yaml_minimal.json b/deeppavlov/configs/go_bot/trippy_md_yaml_minimal.json new file mode 100644 index 0000000000..0de7093b92 --- /dev/null +++ b/deeppavlov/configs/go_bot/trippy_md_yaml_minimal.json @@ -0,0 +1,67 @@ +{ + "dataset_reader": { + "class_name": "md_yaml_dialogs_reader", + "data_path": "{DATA_PATH}" + }, + "dataset_iterator": { + "class_name": "dialog_iterator" + }, + "chainer": { + "in": ["x"], + "in_y": ["y"], + "out": ["y_predicted"], + "pipe": [ + { + "class_name": "trippy", + "in": ["x"], + "in_y": ["y"], + "out": ["y_predicted"], + "load_path": "{MODEL_PATH}/model", + "save_path": "{MODEL_PATH}/model", + "pretrained_bert": "bert-base-uncased", + "max_seq_length": 180, + "debug": false, + "database": null, + "nlg_manager": { + "class_name": "gobot_json_nlg_manager", + "data_path": "{DATA_PATH}", + "dataset_reader_class": "md_yaml_dialogs_reader", + "actions2slots_path": "{DATA_PATH}/dstc2-actions2slots.json", + "api_call_action": null + }, + "api_call_action": null + } + ] + }, + "train": { + "epochs": 200, + "batch_size": 4, + + "metrics": ["per_item_dialog_accuracy"], + "validation_patience": 10, + "val_every_n_batches": -1, + "val_every_n_epochs": -1, + + "log_every_n_batches": 15, + "log_on_k_batches": -1, + + "validate_first": false, + + "show_examples": false, + "evaluation_targets": [ + "valid", + "test" + ] + }, + "metadata": { + "variables": { + "ROOT_PATH": "~/.deeppavlov", + "CONFIGS_PATH": "{DEEPPAVLOV_PATH}/configs", + "DOWNLOADS_PATH": "{ROOT_PATH}/downloads", + "DATA_PATH": "{DOWNLOADS_PATH}/gobot_md_yaml_minimal", + "MODELS_PATH": "{ROOT_PATH}/models", + "MODEL_PATH": "{MODELS_PATH}/gobot_md_yaml_minimal" + } + } + } + \ No newline at end of file diff --git a/deeppavlov/models/go_bot/trippy.py b/deeppavlov/models/go_bot/trippy.py index e69de29bb2..e1e7180038 100644 --- a/deeppavlov/models/go_bot/trippy.py +++ b/deeppavlov/models/go_bot/trippy.py @@ -0,0 +1,461 @@ +# Copyright 2021 Neural Networks and Deep Learning lab, MIPT +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import re
+from logging import getLogger
+from typing import Dict, Any, List, Optional, Union, Tuple
+from pathlib import Path
+
+from numpy.lib.twodim_base import diag
+
+import torch
+from overrides import overrides
+from transformers.modeling_bert import BertConfig
+from transformers import BertTokenizerFast
+
+from deeppavlov.core.common.registry import register
+from deeppavlov.core.models.component import Component
+from deeppavlov.core.models.torch_model import TorchModel
+from deeppavlov.core.common.errors import ConfigError
+from deeppavlov.core.commands.utils import expand_path
+from deeppavlov.models.go_bot.nlg.nlg_manager import NLGManagerInterface
+from deeppavlov.models.go_bot.policy.dto.policy_prediction import PolicyPrediction
+from deeppavlov.models.go_bot.trippy_bert_for_dst import BertForDST
+from deeppavlov.models.go_bot.trippy_preporcessing import prepare_trippy_data, get_turn, batch_to_device
+
+
+# EXP
+from transformers import (AdamW, get_linear_schedule_with_warmup)
+
+logger = getLogger(__name__)
+
+
+@register('trippy')
+class TripPy(TorchModel):
+    """
+    Go-bot architecture based on https://arxiv.org/abs/2005.02877.
+
+    Parameters:
+        save_path: Where to save the model
+        class_types: TripPy class types. Predefined to the most commonly used ones; add "true" and "false" if there are slots that can take on those values
+        pretrained_bert: bert-base-uncased or a full path to a pretrained model
+        bert_config: Can be a path to a file in case it differs from the bert-base-uncased config
+        optimizer_parameters: dictionary with the optimizer's parameters, e.g. {'lr': 0.1, 'weight_decay': 0.001, 'momentum': 0.9}
+        clip_norm: Clip gradients by this norm
+        max_seq_length: Max sequence length of an entire dialog. Defaults to the TripPy default of 180. Examples that are too long are logged.
+        class_loss_ratio: The ratio applied on class loss in total loss calculation.
+                          Should be a value in [0.0, 1.0].
+                          The ratio applied on token loss is (1-class_loss_ratio)/2.
+                          The ratio applied on refer loss is (1-class_loss_ratio)/2.
+        token_loss_for_nonpointable: Whether the token loss for classes other than copy_value contributes towards the total loss.
+        refer_loss_for_nonpointable: Whether the refer loss for classes other than refer contributes towards the total loss.
+        class_aux_feats_inform: Whether or not to use the identity of informed slots as auxiliary features for class prediction.
+        class_aux_feats_ds: Whether or not to use the identity of slots in the current dialog state as auxiliary features for class prediction.
+ debug: Turn on debug mode to get logging information on input examples & co + """ + def __init__(self, + nlg_manager: NLGManagerInterface, + save_path: str, + slot_names: List = [], + class_types: List = ["none", "dontcare", "copy_value", "inform"], + pretrained_bert: str = "bert-base-uncased", + bert_config: str = "bert-base-uncased", + optimizer_parameters: dict = {"lr": 1e-5, "eps": 1e-6}, + clip_norm: float = 1.0, + max_seq_length: int = 180, + dropout_rate: float = 0.3, + heads_dropout: float = 0.0, + class_loss_ratio: float = 0.8, + token_loss_for_nonpointable: bool = False, + refer_loss_for_nonpointable: bool = False, + class_aux_feats_inform: bool = True, + class_aux_feats_ds: bool = True, + database: Component = None, + debug: bool = False, + **kwargs) -> None: + + self.nlg_manager = nlg_manager + self.save_path = save_path + self.max_seq_length = max_seq_length + if not slot_names: + self.slot_names = ["dummy"] + self.has_slots = False + else: + self.slot_names = slot_names + self.has_slots = True + self.class_types = class_types + self.debug = debug + + # BertForDST Configuration + self.pretrained_bert = pretrained_bert + self.config = BertConfig.from_pretrained(bert_config) + self.config.dst_dropout_rate = dropout_rate + self.config.dst_heads_dropout_rate = heads_dropout + self.config.dst_class_loss_ratio = class_loss_ratio + self.config.dst_token_loss_for_nonpointable = token_loss_for_nonpointable + self.config.dst_refer_loss_for_nonpointable = refer_loss_for_nonpointable + self.config.dst_class_aux_feats_inform = class_aux_feats_inform + self.config.dst_class_aux_feats_ds = class_aux_feats_ds + self.config.dst_slot_list = slot_names # This will be empty if there are no slots + self.config.dst_class_types = class_types + self.config.dst_class_labels = len(class_types) + + self.config.num_actions = nlg_manager.num_of_known_actions() + + # Parameters for user interaction + self.batch_dialogues_utterances_contexts_info = [[]] + # We always have one more user response than system response at inference + self.batch_dialogues_utterances_responses_info = [[None]] + + self.ds = None + self.ds_logits = None + + self.database = database + self.clip_norm = clip_norm + super().__init__(save_path=save_path, + optimizer_parameters=optimizer_parameters, + **kwargs) + + @overrides + def load(self, fname=None): + """ + Loads BERTForDST. Note that it only supports bert-X huggingface weights. (RoBERTa & co are not supported.) 
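+        Weights are looked up at ``<load_path>.pth.tar``; when that checkpoint is missing,
+        only the pre-trained BERT initialization is used.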
+        """
+
+
+        if fname is not None:
+            self.load_path = fname
+
+        if self.pretrained_bert:
+            self.model = BertForDST.from_pretrained(
+                self.pretrained_bert, config=self.config)
+            self.tokenizer = BertTokenizerFast.from_pretrained(self.pretrained_bert)
+        else:
+            raise ConfigError("No pre-trained BERT model is given.")
+
+
+        self.model.to(self.device)
+        self.optimizer = getattr(torch.optim, self.optimizer_name)(
+            self.model.parameters(), **self.optimizer_parameters)
+        if self.lr_scheduler_name is not None:
+            self.lr_scheduler = getattr(torch.optim.lr_scheduler, self.lr_scheduler_name)(
+                self.optimizer, **self.lr_scheduler_parameters)
+
+        if self.load_path:
+            logger.info(f"Load path {self.load_path} is given.")
+            if isinstance(self.load_path, Path) and not self.load_path.parent.is_dir():
+                raise ConfigError("Provided load path is incorrect!")
+
+            weights_path = Path(self.load_path.resolve())
+            weights_path = weights_path.with_suffix(f".pth.tar")
+            if weights_path.exists():
+                logger.info(f"Load path {weights_path} exists.")
+                logger.info(f"Initializing `{self.__class__.__name__}` from saved.")
+
+                # now load the weights and optimizer state from the checkpoint
+                logger.info(f"Loading weights from {weights_path}.")
+                checkpoint = torch.load(weights_path, map_location=self.device)
+                self.model.load_state_dict(checkpoint["model_state_dict"])
+                self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
+                self.epochs_done = checkpoint.get("epochs_done", 0)
+            else:
+                logger.info(f"Init from scratch. Load path {weights_path} does not exist.")
+
+
+    def __call__(self,
+                 batch: Union[List[List[dict]], List[str]],
+                 user_ids: Optional[List] = None) -> List:
+        """
+        Model invocation.
+
+        Args:
+            batch: a batch of dialogues or a plain list of user utterance strings (interactive mode)
+            user_ids: ids identifying the dialogue users (currently unused)
+
+        Returns:
+            results: list of model answers
+        """
+        # Turns off dropout
+        self.model.eval()
+
+        if not isinstance(batch[0], list):
+            # User inference - just one dialogue
+            batch = [
+                [{"text": text, "intents": [{"act": None, "slots": None}]} for text in batch]
+            ]
+        else:
+            # At validation, reset for every call
+            self.reset()
+
+        dialogue_results = []
+        for diag_id, dialogue in enumerate(batch):
+
+            turn_results = []
+            for turn_id, turn in enumerate(dialogue):
+                # Reset dialogue state if no dialogue state yet or the dialogue is empty (i.e.
its a new dialogue) + if (self.ds_logits is None) or (diag_id >= len(self.batch_dialogues_utterances_contexts_info)): + self.reset() + diag_id = 0 + + # Append context to the dialogue + self.batch_dialogues_utterances_contexts_info[diag_id].append(turn) + + # Update Database + self.update_ground_truth_db_result_from_context(turn) + + # Preprocess inputs + batch, features = prepare_trippy_data(self.batch_dialogues_utterances_contexts_info, + self.batch_dialogues_utterances_responses_info, + self.tokenizer, + self.slot_names, + self.class_types, + self.nlg_manager, + max_seq_length=self.max_seq_length, + debug=self.debug) + + # Take only the last turn - as we already know the previous ones; We need to feed them one by one to update the ds + last_turn = get_turn(batch, index=-1) + + # Only take them from the last turn + input_ids_unmasked = [features[-1].input_ids_unmasked] + inform = [features[-1].inform] + + # Update data-held dialogue state based on new logits + last_turn["diag_state"] = self.ds_logits + + # Move to correct device + last_turn = batch_to_device(last_turn, self.device) + + # Run the turn through the model + if self.has_slots is False: + batch["start_pos"] = None + batch["end_pos"] = None + batch["inform_slot_id"] = None + batch["refer_id"] = None + batch["class_label_id"] = None + batch["diag_state"] = None + + with torch.no_grad(): + outputs = self.model(**last_turn) + + # Update dialogue state logits + for slot in self.model.slot_list: + updates = outputs[2][slot].max(1)[1].cpu() + for i, u in enumerate(updates): + if u != 0: + self.ds_logits[slot][i] = u + + # Update self.ds (dialogue state) slotfilled values based on logits + self.update_ds(outputs[2], + outputs[3], + outputs[4], + outputs[5], + input_ids_unmasked, + inform) + + # Wrap predicted action (outputs[6]) into a PolicyPrediction + policy_prediction = PolicyPrediction( + outputs[6].cpu().numpy(), None, None, None) + + # Fill DS with Database results if there are any + self.fill_current_state_with_db_results() + + # NLG based on predicted action & dialogue state + response = self.nlg_manager.decode_response(None, + policy_prediction, + self.ds) + + + # Add system response to responses for possible next round + self.batch_dialogues_utterances_responses_info[diag_id].insert( + -1, {"text": response, "act": None}) + + turn_results.append(response) + + dialogue_results.append(turn_results) + + + # Return NLG generated responses + return dialogue_results + + def update_ds(self, + per_slot_class_logits, + per_slot_start_logits, + per_slot_end_logits, + per_slot_refer_logits, + input_ids_unmasked, + inform): + """ + Updates slot-filled dialogue state based on model predictions. + This function roughly corresponds to "predict_and_format" in the original TripPy code. 
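+        Only the most recent turn of the freshly prepared batch is inspected here;
+        earlier turns are already reflected in ``self.ds_logits`` and ``self.ds``.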
+ + Args: + per_slot_class_logits: dict of class logits + per_slot_start_logits: dict of start logits + per_slot_end_logits: dict of end logits + per_slot_refer_logits: dict of refer logits + input_ids_unmasked: The unmasked input_ids from features to extract the preds + inform: dict of inform logits + """ + # We set the index to 0, since we only look at the last turn + # This function can be modified to look at multiple turns by iterating over them + i = 0 + + if self.ds is None: + self.ds = {slot: 'none' for slot in self.model.slot_list} + + for slot in self.model.slot_list: + class_logits = per_slot_class_logits[slot][i].cpu() + start_logits = per_slot_start_logits[slot][i].cpu() + end_logits = per_slot_end_logits[slot][i].cpu() + refer_logits = per_slot_refer_logits[slot][i].cpu() + + class_prediction = int(class_logits.argmax()) + start_prediction = int(start_logits.argmax()) + end_prediction = int(end_logits.argmax()) + refer_prediction = int(refer_logits.argmax()) + + # DP / DSTC2 uses dontcare instead of none so we also replace none's wth dontcare + # Just remove the 2nd part of the or statement to revert to TripPy standard + if (class_prediction == self.model.class_types.index('dontcare')) or (class_prediction == self.model.class_types.index('none')): + self.ds[slot] = 'dontcare' + elif class_prediction == self.model.class_types.index('copy_value'): + input_tokens = self.tokenizer.convert_ids_to_tokens( + input_ids_unmasked[i]) + self.ds[slot] = ' '.join( + input_tokens[start_prediction:end_prediction + 1]) + self.ds[slot] = re.sub("(^| )##", "", self.ds[slot]) + elif 'true' in self.model.class_types and class_prediction == self.model.class_types.index('true'): + self.ds[slot] = 'true' + elif 'false' in self.model.class_types and class_prediction == self.model.class_types.index('false'): + self.ds[slot] = 'false' + elif class_prediction == self.model.class_types.index('inform'): + self.ds[slot] = inform[i][slot] + + # Referral case. All other slot values need to be seen first in order + # to be able to do this correctly. + for slot in self.model.slot_list: + class_logits = per_slot_class_logits[slot][i].cpu() + refer_logits = per_slot_refer_logits[slot][i].cpu() + + class_prediction = int(class_logits.argmax()) + refer_prediction = int(refer_logits.argmax()) + + if 'refer' in self.model.class_types and class_prediction == self.model.class_types.index('refer'): + # Only slots that have been mentioned before can be referred to. + # One can think of a situation where one slot is referred to in the same utterance. + # This phenomenon is however currently not properly covered in the training data + # label generation process. 
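+                # The refer head defined in BertForDST has len(slot_list) + 1 outputs,
+                # where index 0 means "no referral", so refer_prediction - 1 maps the
+                # prediction back onto a position in slot_list.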
+ self.ds[slot] = self.ds[self.model.slot_list[refer_prediction - 1]] + + def make_api_call(self) -> None: + db_results = [] + if self.database is not None: + + # filter slot keys with value equal to 'dontcare' as + # there is no such value in database records + # and remove unknown slot keys (for example, 'this' in dstc2 tracker) + db_slots = { + s: v for s, v in self.ds.items() if v != 'dontcare' and s in self.database.keys + } + + db_results = self.database([db_slots])[0] + + # filter api results if there are more than one + # TODO: add sufficient criteria for database results ranking + if len(db_results) > 1: + db_results = [r for r in db_results if r != self.db_result] + else: + print("Failed to get any results for: ", db_slots) + else: + logger.warning("No database specified.") + + logger.info(f"Made api_call with {self.ds.keys()}, got {len(db_results)} results.") + self.current_db_result = {} if not db_results else db_results[0] + self._update_db_result() + + def _update_db_result(self): + if self.current_db_result is not None: + self.db_result = self.current_db_result + + def update_ground_truth_db_result_from_context(self, context: Dict[str, Any]): + self.current_db_result = context.get('db_result', None) + self._update_db_result() + + def fill_current_state_with_db_results(self) -> dict: + if self.db_result: + for k, v in self.db_result.items(): + self.ds[k] = str(v) + + def train_on_batch(self, + batch_dialogues_utterances_features: List[List[dict]], + batch_dialogues_utterances_targets: List[List[dict]]) -> dict: + """ + Train model on given batch. + + Args: + batch_dialogues_utterances_features: + batch_dialogues_utterances_targets: + + Returns: + dict with loss value + """ + # Turns on dropout + self.model.train() + # Zeroes grads + self.model.zero_grad() + batch, features = prepare_trippy_data(batch_dialogues_utterances_features, + batch_dialogues_utterances_targets, + self.tokenizer, + self.slot_names, + self.class_types, + self.nlg_manager, + self.max_seq_length, + debug=self.debug) + # Move to correct device + batch = batch_to_device(batch, self.device) + + if self.has_slots is False: + batch["start_pos"] = None + batch["end_pos"] = None + batch["inform_slot_id"] = None + batch["refer_id"] = None + batch["class_label_id"] = None + batch["diag_state"] = None + + + # Feed through model + outputs = self.model(**batch) + + # Backpropagation + loss = outputs[0] + loss.backward() + # Clip gradients + torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip_norm) + self.optimizer.step() + #self.scheduler.step() + return {"total_loss": loss.cpu().item(), "action_loss": outputs[7].cpu().item()} + + def reset(self, user_id: Union[None, str, int] = None) -> None: + """ + Reset dialogue state trackers. + """ + self.ds_logits = {slot: torch.tensor([0]) for slot in self.slot_names} + self.ds = None + + self.batch_dialogues_utterances_contexts_info = [[]] + self.batch_dialogues_utterances_responses_info = [[None]] + + self.db_result = None + self.current_db_result = None diff --git a/deeppavlov/models/go_bot/trippy_bert_for_dst.py b/deeppavlov/models/go_bot/trippy_bert_for_dst.py index e69de29bb2..bd683ca1d4 100644 --- a/deeppavlov/models/go_bot/trippy_bert_for_dst.py +++ b/deeppavlov/models/go_bot/trippy_bert_for_dst.py @@ -0,0 +1,236 @@ +# Copyright 2021 Neural Networks and Deep Learning lab, MIPT +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +from torch import nn +from torch.nn import CrossEntropyLoss + +from transformers.modeling_bert import BertModel, BertPreTrainedModel + +class BertForDST(BertPreTrainedModel): + """ + BERT model used by TripPy. + + This extends the basic bert model for dialogue state tracking. + """ + def __init__(self, config): + super(BertForDST, self).__init__(config) + self.slot_list = config.dst_slot_list + self.class_types = config.dst_class_types + self.class_labels = config.dst_class_labels + self.token_loss_for_nonpointable = config.dst_token_loss_for_nonpointable + self.refer_loss_for_nonpointable = config.dst_refer_loss_for_nonpointable + self.class_aux_feats_inform = config.dst_class_aux_feats_inform + self.class_aux_feats_ds = config.dst_class_aux_feats_ds + self.class_loss_ratio = config.dst_class_loss_ratio + + # Not in original TripPy model; Added for action prediction + self.num_actions = config.num_actions + + # Only use refer loss if refer class is present in dataset. + if 'refer' in self.class_types: + self.refer_index = self.class_types.index('refer') + else: + self.refer_index = -1 + + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.dst_dropout_rate) + self.dropout_heads = nn.Dropout(config.dst_heads_dropout_rate) + + if self.class_aux_feats_inform: + self.add_module("inform_projection", nn.Linear(len(self.slot_list), len(self.slot_list))) + if self.class_aux_feats_ds: + self.add_module("ds_projection", nn.Linear(len(self.slot_list), len(self.slot_list))) + + aux_dims = len(self.slot_list) * (self.class_aux_feats_inform + self.class_aux_feats_ds) # second term is 0, 1 or 2 + + for slot in self.slot_list: + self.add_module("class_" + slot, nn.Linear(config.hidden_size + aux_dims, self.class_labels)) + self.add_module("token_" + slot, nn.Linear(config.hidden_size, 2)) + self.add_module("refer_" + slot, nn.Linear(config.hidden_size + aux_dims, len(self.slot_list) + 1)) + + # Head for aux task + if hasattr(config, "aux_task_def"): + self.add_module("aux_out_projection", nn.Linear(config.hidden_size, int(config.aux_task_def['n_class']))) + + # Not in original TripPy model; Add action prediction CLF Head + self.add_module("action_prediction", nn.Linear(config.hidden_size + aux_dims, self.num_actions)) + self.add_module("action_softmax", nn.Softmax(dim=1)) + + self.init_weights() + + def forward(self, + input_ids, + input_mask=None, + segment_ids=None, + position_ids=None, + head_mask=None, + start_pos=None, + end_pos=None, + inform_slot_id=None, + refer_id=None, + class_label_id=None, + diag_state=None, + aux_task_def=None, + action_label=None, + prev_action_label=None): + """ + Args: + action_label: Action to predict + """ + outputs = self.bert( + input_ids, + attention_mask=input_mask, + token_type_ids=segment_ids, + position_ids=position_ids, + head_mask=head_mask + ) + + sequence_output = outputs[0] + pooled_output = outputs[1] + + sequence_output = self.dropout(sequence_output) + pooled_output = self.dropout(pooled_output) + + if aux_task_def is not None: + if aux_task_def['task_type'] == "classification": + aux_logits = getattr(self, 
'aux_out_projection')(pooled_output) + aux_logits = self.dropout_heads(aux_logits) + aux_loss_fct = CrossEntropyLoss() + aux_loss = aux_loss_fct(aux_logits, class_label_id) + # add hidden states and attention if they are here + return (aux_loss,) + outputs[2:] + elif aux_task_def['task_type'] == "span": + aux_logits = getattr(self, 'aux_out_projection')(sequence_output) + aux_start_logits, aux_end_logits = aux_logits.split(1, dim=-1) + aux_start_logits = self.dropout_heads(aux_start_logits) + aux_end_logits = self.dropout_heads(aux_end_logits) + aux_start_logits = aux_start_logits.squeeze(-1) + aux_end_logits = aux_end_logits.squeeze(-1) + + # If we are on multi-GPU, split add a dimension + if len(start_pos.size()) > 1: + start_pos = start_pos.squeeze(-1) + if len(end_pos.size()) > 1: + end_pos = end_pos.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = aux_start_logits.size(1) # This is a single index + start_pos.clamp_(0, ignored_index) + end_pos.clamp_(0, ignored_index) + + aux_token_loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + aux_start_loss = aux_token_loss_fct(torch.cat((aux_start_logits, aux_end_logits), 1), start_pos) + aux_end_loss = aux_token_loss_fct(torch.cat((aux_end_logits, aux_start_logits), 1), end_pos) + aux_loss = (aux_start_loss + aux_end_loss) / 2.0 + return (aux_loss,) + outputs[2:] + else: + raise Exception("Unknown task_type") + + if inform_slot_id is not None: + inform_labels = torch.stack(list(inform_slot_id.values()), 1).float() + if diag_state is not None: + diag_state_labels = torch.clamp(torch.stack(list(diag_state.values()), 1).float(), 0.0, 1.0) + + total_loss = 0 + per_slot_per_example_loss = {} + per_slot_class_logits = {} + per_slot_start_logits = {} + per_slot_end_logits = {} + per_slot_refer_logits = {} + for slot in self.slot_list: + if self.class_aux_feats_inform and self.class_aux_feats_ds: + pooled_output_aux = torch.cat((pooled_output, self.inform_projection(inform_labels), self.ds_projection(diag_state_labels)), 1) + elif self.class_aux_feats_inform: + pooled_output_aux = torch.cat((pooled_output, self.inform_projection(inform_labels)), 1) + elif self.class_aux_feats_ds: + pooled_output_aux = torch.cat((pooled_output, self.ds_projection(diag_state_labels)), 1) + else: + pooled_output_aux = pooled_output + class_logits = self.dropout_heads(getattr(self, 'class_' + slot)(pooled_output_aux)) + + token_logits = self.dropout_heads(getattr(self, 'token_' + slot)(sequence_output)) + start_logits, end_logits = token_logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1) + end_logits = end_logits.squeeze(-1) + + refer_logits = self.dropout_heads(getattr(self, 'refer_' + slot)(pooled_output_aux)) + + per_slot_class_logits[slot] = class_logits + per_slot_start_logits[slot] = start_logits + per_slot_end_logits[slot] = end_logits + per_slot_refer_logits[slot] = refer_logits + + # If there are no labels, don't compute loss + if class_label_id is not None and start_pos is not None and end_pos is not None and refer_id is not None: + # If we are on multi-GPU, split add a dimension + if len(start_pos[slot].size()) > 1: + start_pos[slot] = start_pos[slot].squeeze(-1) + if len(end_pos[slot].size()) > 1: + end_pos[slot] = end_pos[slot].squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) # This is a single index + start_pos[slot].clamp_(0, ignored_index) + end_pos[slot].clamp_(0, 
ignored_index) + + class_loss_fct = CrossEntropyLoss(reduction='none') + token_loss_fct = CrossEntropyLoss(reduction='none', ignore_index=ignored_index) + refer_loss_fct = CrossEntropyLoss(reduction='none') + + start_loss = token_loss_fct(start_logits, start_pos[slot]) + end_loss = token_loss_fct(end_logits, end_pos[slot]) + token_loss = (start_loss + end_loss) / 2.0 + + token_is_pointable = (start_pos[slot] > 0).float() + if not self.token_loss_for_nonpointable: + token_loss *= token_is_pointable + + refer_loss = refer_loss_fct(refer_logits, refer_id[slot]) + token_is_referrable = torch.eq(class_label_id[slot], self.refer_index).float() + if not self.refer_loss_for_nonpointable: + refer_loss *= token_is_referrable + + class_loss = class_loss_fct(class_logits, class_label_id[slot]) + + if self.refer_index > -1: + per_example_loss = (self.class_loss_ratio) * class_loss + ((1 - self.class_loss_ratio) / 2) * token_loss + ((1 - self.class_loss_ratio) / 2) * refer_loss + else: + per_example_loss = self.class_loss_ratio * class_loss + (1 - self.class_loss_ratio) * token_loss + + total_loss += per_example_loss.sum() + per_slot_per_example_loss[slot] = per_example_loss + + + # Not in original TripPy; Predict action & add loss if training; At evaluation acton_label is set to 0 + if not self.slot_list: + pooled_output_aux = pooled_output + action_logits = getattr(self, 'action_prediction')(pooled_output_aux) + + if action_label is not None: + action_loss = CrossEntropyLoss(reduction="sum")(action_logits, action_label) + + # Increase the loss proportional to the amount of slots if present + if self.slot_list: + multiplier = len(self.slot_list) + else: + multiplier = 1 + + total_loss += action_loss * multiplier + else: + action_loss = None + + # add hidden states and attention if they are here + outputs = (total_loss,) + (per_slot_per_example_loss, per_slot_class_logits, per_slot_start_logits, per_slot_end_logits, per_slot_refer_logits, action_logits, action_loss,) + outputs[2:] + + return outputs + \ No newline at end of file diff --git a/deeppavlov/models/go_bot/trippy_preporcessing.py b/deeppavlov/models/go_bot/trippy_preporcessing.py index e69de29bb2..8375cd34a4 100644 --- a/deeppavlov/models/go_bot/trippy_preporcessing.py +++ b/deeppavlov/models/go_bot/trippy_preporcessing.py @@ -0,0 +1,964 @@ +# Copyright 2021 Neural Networks and Deep Learning lab, MIPT +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + + +### Implements Preprocessing functions to turn DSTC2 into TripPy readable format ### + +### Funcs ### + +import re +from logging import getLogger +from typing import Dict, Any, List, Optional, Union, Tuple + +import six +import numpy as np +import torch + +logger = getLogger(__name__) + + +## EXP ## +from deeppavlov.models.spelling_correction.levenshtein.searcher_component import LevenshteinSearcherComponent + +class DSTExample(object): + """ + Taken from TripPy except for the __repr__ function, + as it contains data type mistakes (e.g. 
lbl is a dict not number) + A single training/test example for the DST dataset. + """ + def __init__(self, + guid, + text_a, + text_b, + history, + text_a_label=None, + text_b_label=None, + history_label=None, + values=None, + inform_label=None, + inform_slot_label=None, + refer_label=None, + diag_state=None, + class_label=None, + action_label=None, + prev_action_label=None): + self.guid = guid + self.text_a = text_a + self.text_b = text_b + self.history = history + self.text_a_label = text_a_label + self.text_b_label = text_b_label + self.history_label = history_label + self.values = values + self.inform_label = inform_label + self.inform_slot_label = inform_slot_label + self.refer_label = refer_label + self.diag_state = diag_state + self.class_label = class_label + self.action_label = action_label + self.prev_action_label = prev_action_label + +# From bert.tokenization (TF code) # From TripPy +def convert_to_unicode(text): + """ + Converts `text` to Unicode (if it's not already), assuming utf-8 input. + """ + if six.PY3: + if isinstance(text, str): + return text + elif isinstance(text, bytes): + return text.decode("utf-8", "ignore") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + elif six.PY2: + if isinstance(text, str): + return text.decode("utf-8", "ignore") + elif isinstance(text, unicode): + return text + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + else: + raise ValueError("Not running on Python2 or Python 3?") + + +# From TripPy MultiWoz +def normalize_time(text): + text = re.sub("(\d{1})(a\.?m\.?|p\.?m\.?)", r"\1 \2", text) # am/pm without space + text = re.sub("(^| )(\d{1,2}) (a\.?m\.?|p\.?m\.?)", r"\1\2:00 \3", text) # am/pm short to long form + text = re.sub("(^| )(at|from|by|until|after) ?(\d{1,2}) ?(\d{2})([^0-9]|$)", r"\1\2 \3:\4\5", text) # Missing separator + text = re.sub("(^| )(\d{2})[;.,](\d{2})", r"\1\2:\3", text) # Wrong separator + text = re.sub("(^| )(at|from|by|until|after) ?(\d{1,2})([;., ]|$)", r"\1\2 \3:00\4", text) # normalize simple full hour time + text = re.sub("(^| )(\d{1}:\d{2})", r"\g<1>0\2", text) # Add missing leading 0 + # Map 12 hour times to 24 hour times + text = re.sub("(\d{2})(:\d{2}) ?p\.?m\.?", lambda x: str(int(x.groups()[0]) + 12 if int(x.groups()[0]) < 12 else int(x.groups()[0])) + x.groups()[1], text) + text = re.sub("(^| )24:(\d{2})", r"\g<1>00:\2", text) # Correct times that use 24 as hour + return text + +# From TripPy MultiWoz, adjusted to add centre & pricerange +def normalize_text(text): + text = normalize_time(text) + text = re.sub("n't", " not", text) + text = re.sub("(^| )zero(-| )star([s.,? ]|$)", r"\g<1>0 star\3", text) + text = re.sub("(^| )one(-| )star([s.,? ]|$)", r"\g<1>1 star\3", text) + text = re.sub("(^| )two(-| )star([s.,? ]|$)", r"\g<1>2 star\3", text) + text = re.sub("(^| )three(-| )star([s.,? ]|$)", r"\g<1>3 star\3", text) + text = re.sub("(^| )four(-| )star([s.,? ]|$)", r"\g<1>4 star\3", text) + text = re.sub("(^| )five(-| )star([s.,? ]|$)", r"\g<1>5 star\3", text) + text = re.sub("archaelogy", "archaeology", text) # Systematic typo + text = re.sub("guesthouse", "guest house", text) # Normalization + text = re.sub("(^| )b ?& ?b([.,? 
]|$)", r"\1bed and breakfast\2", text) # Normalization + text = re.sub("bed & breakfast", "bed and breakfast", text) # Normalization + + # Add inconsistent slot values here - Incorrect one first, then correct + text = re.sub("price range", "pricerange", text) # These are very necessary, because there are discrepencies btw the TripPy slot values & the actual text + text = re.sub("center", "centre", text) + text = re.sub("moderately", "moderate", text) + text = re.sub("medium", "moderate", text) + text = re.sub("central", "centre", text) + text = re.sub("portugese", "portuguese", text) + text = re.sub("steak house", "steakhouse", text) + text = re.sub("turkiesh", " turkish", text) + text = re.sub("asian ori$", "asian oriental", text) # Cannot remove the space cuz else it will mess with normal asian ori; probably need some more regex + text = re.sub("bristish", " british", text) + text = re.sub("asian$", "asian oriental", text) + text = re.sub("portugeuse", "portuguese", text) + text = re.sub("sea food", "seafood", text) + text = re.sub("australian asian", "australasian", text) + text = re.sub("^derately", "moderately", text) + + + return text + + +def tokenize(utt): + """ + Returns tokenized utterance according to TripPy's MultiWoz code + """ + utt_lower = convert_to_unicode(utt).lower() + utt_lower = normalize_text(utt_lower) + utt_tok = [tok for tok in map(str.strip, re.split("(\W+)", utt_lower)) if len(tok) > 0] + return utt_tok + + +def get_sys_inform(response, slot_type): + """ + DSTC2-specific function to check if the system informs in current round about specific slot. + + Args: + response: Response data + slot_type: What slot type to check for + Returns: + boolean - if the system informs in round + """ + + return "inform_" + slot_type in response["act"] if (response is not None) and (response["act"] is not None) else False + + +def get_dialogue_state_sv_dict(context): + """ + Creates dialogue state Slot-Value dict for current context + In Trippy, DS is generally given and then converted to an SV dict - We create the DS & convert it in one + In order to get an overall diag state we then merge the diag states with more recent ones taking preference + """ + sv_dict = {} + # There may be no slots & slot values, though it doesnt really make sense to use TripPy then + if "intents" in context: + for intent in context["intents"]: + if (intent["slots"] is not None) and (len(intent["slots"]) > 0): + # Only inform since we only want slots where we have the value (in request we dont) + if intent["act"] == "inform": + slot = intent["slots"][0][0] + value = intent["slots"][0][1] + sv_dict[slot] = value + return sv_dict + + +def get_tok_label(prev_ds_dict, cur_ds_dict, slot_type, sys_utt_tok, + sys_slot_label, usr_utt_tok, usr_slot_label, dial_id, + turn_id, slot_last_occurrence=True): + """ + Creates labels of 11111111 for where an slot value is uttered by user/system + + Adapted from TripPy get_tok_label. + The position of the last occurrence of the slot value will be used. 
+ """ + sys_utt_tok_label = [0 for _ in sys_utt_tok] + usr_utt_tok_label = [0 for _ in usr_utt_tok] + if slot_type not in cur_ds_dict: + class_type = 'none' + else: + value = cur_ds_dict[slot_type] + if value == 'dontcare' and (slot_type not in prev_ds_dict or prev_ds_dict[slot_type] != 'dontcare'): + # Only label dontcare at its first occurrence in the dialog + class_type = 'dontcare' + else: # If not none or dontcare, we have to identify whether + # there is a span, or if the value is informed + in_usr = False + in_sys = False + for label_d in usr_slot_label: + if (label_d['slot'] == slot_type) and (label_d['start'] >= 0): + # Allow start & end logits referring to utterances that do not exactly match the slot value; Not in original TripPy + # Start should still be >0 not to allow ones with -1, i.e. there is not value + #value == ' '.join(usr_utt_tok[label_d['start']:label_d['exclusive_end']]): + for idx in range(label_d['start'], label_d['exclusive_end']): + usr_utt_tok_label[idx] = 1 + in_usr = True + class_type = 'copy_value' + if slot_last_occurrence: + break + + # This is never used for DSTC2, as there are no sys_slot_labels, however, we leave it for possible future use & TripPy conformity + for label_d in sys_slot_label: + if label_d['slot'] == slot_type and value == ' '.join( + sys_utt_tok[label_d['start']:label_d['exclusive_end']]): + for idx in range(label_d['start'], label_d['exclusive_end']): + sys_utt_tok_label[idx] = 1 + in_sys = True + if not in_usr or not slot_last_occurrence: + class_type = 'inform' + if slot_last_occurrence: + break + + if not in_usr and not in_sys: + assert sum(usr_utt_tok_label + sys_utt_tok_label) == 0 + if (slot_type not in prev_ds_dict or value != prev_ds_dict[slot_type]): + # Added clarifications + print("Value: {} for slot type {} does not exist in utterance: {}".format(value, slot_type, usr_utt_tok)) + print("Other Information: ", prev_ds_dict, usr_slot_label, sys_utt_tok) + print("Most likely have to add the incorrect utterance token to be replaced with the value see normalizations in the code.") + print("This error is likely because of a mismatch in the slot value & the actual text!\n \ + E.g. 
the slot value is pricerange: moderate but the text contains only moderately\n \ + Possibly add a replacement to the normalization.") + raise ValueError('Copy value cannot found in Dial %s Turn %s' % (str(dial_id), str(turn_id))) + else: + class_type = 'none' + else: + assert sum(usr_utt_tok_label + sys_utt_tok_label) > 0 + return sys_utt_tok_label, usr_utt_tok_label, class_type + + + +def get_token_and_slot_label(context, response=None): + """ + Creates tokenized version of text & labels with start, end & value of slots + Adapted from TripPy "get_token_and_slot_label(turn):" + + Args: + context: User-utterance related data + response: System-utterance related data + + Returns: + sys_utt_tok: Tokenized system utterances + sys_slot_label: + + usr_utt_tok: May be empty list + usr_slot_label: May be empty list + """ + # Note that TripPy Sim-M data already includes tokens; We tokenize it here using a func provided by TripPy for MultiWoz + # This is need because of the label ids + if response is not None: + sys_utt_tok = tokenize(response['text']) + sys_slot_label = [] # Not present in DP + else: + # Inference + sys_utt_tok = [] + sys_slot_label = [] + + + usr_utt_tok = tokenize(context['text']) + + # Possibly simplify + usr_slot_label = [] + if 'intents' in context: + for intent in context['intents']: + if (intent["slots"] is not None) and (len(intent["slots"]) > 0): + if intent["act"] == "request": + + slot = intent["slots"][0][1] + value = None # request slots have no value yet + + # End & start queried lateron, so set to -1 + slot_dict = { + "exclusive_end": -1, + "slot": slot, + "start": -1 + } + usr_slot_label.append(slot_dict) + + elif intent["act"] == "inform": + + slot = intent["slots"][0][0] + value = intent["slots"][0][1] + + value_tok = tokenize(value) + + # if e.g. 
"dontcare" skip it; It will be in the diag_state (like in TripPy) + # This unfortunately is not robust to slot values that are different from the text & they have to be manually added to replace + # Generally TripPy's text index predicting is brittle and we should move to generating slot values not copying them in the future + if set(value_tok) <= set(usr_utt_tok): + # Will have to be adpated for slot values of len > 1 + slot_dict = { + "exclusive_end": usr_utt_tok.index(value_tok[-1]) + 1, + "slot": slot, + "start": usr_utt_tok.index(value_tok[0]) + } + usr_slot_label.append(slot_dict) + + elif value not in ["dontcare", "itdoesntmatter"]: + # Not in original TripPy - Search for most similar values with Levenshtein in case + # Slot value label is not in the user tokens + searcher = LevenshteinSearcherComponent(usr_utt_tok, max_distance=10) + candidates = searcher([[value]]) + top_candidate = candidates[0][0][0][1] + + # The LevenshteinSearcher seems not to work for removal edits + if top_candidate not in usr_utt_tok: + top_candidate = usr_utt_tok[0] # Just randomly take the first token + + slot_dict = { + "exclusive_end": usr_utt_tok.index(top_candidate) + 1, + "slot": slot, + "start": usr_utt_tok.index(top_candidate), + "candidate": top_candidate + } + usr_slot_label.append(slot_dict) + + + + return sys_utt_tok, sys_slot_label, usr_utt_tok, usr_slot_label + + +def get_turn_label(context, response, cur_ds_dict, prev_ds_dict, slot_list, dial_id, turn_id, + delexicalize_sys_utts=False, unk_token="[UNK]", slot_last_occurrence=True): + """ + Make turn_label a dictionary of slot with value positions or being dontcare / none: + Turn label contains: + (1) the updates from previous to current dialogue state, + (2) values in current dialogue state explicitly mentioned in system or user utterance. + """ + + (sys_utt_tok, sys_slot_label, usr_utt_tok, usr_slot_label) = get_token_and_slot_label(context, response) + + sys_utt_tok_label_dict = {} + usr_utt_tok_label_dict = {} + inform_label_dict = {} + inform_slot_label_dict = {} + referral_label_dict = {} + class_type_dict = {} + + for slot_type in slot_list: + inform_label_dict[slot_type] = 'none' + inform_slot_label_dict[slot_type] = 0 + referral_label_dict[slot_type] = 'none' # Referral is not present in data + + sys_utt_tok_label, usr_utt_tok_label, class_type = get_tok_label( + prev_ds_dict, cur_ds_dict, slot_type, sys_utt_tok, sys_slot_label, + usr_utt_tok, usr_slot_label, dial_id, turn_id, + slot_last_occurrence=slot_last_occurrence) + + if sum(sys_utt_tok_label) > 0: + inform_label_dict[slot_type] = cur_ds_dict[slot_type] + + # Check on the fly if the system informs in current text instead of prior as in TripPy + if get_sys_inform(response, slot_type): + inform_slot_label_dict[slot_type] = 1 + + sys_utt_tok_label = [0 for _ in sys_utt_tok_label] # Don't use token labels for sys utt + sys_utt_tok_label_dict[slot_type] = sys_utt_tok_label + usr_utt_tok_label_dict[slot_type] = usr_utt_tok_label + class_type_dict[slot_type] = class_type + + return (sys_utt_tok, sys_utt_tok_label_dict, + usr_utt_tok, usr_utt_tok_label_dict, + inform_label_dict, inform_slot_label_dict, + referral_label_dict, cur_ds_dict, class_type_dict) + + + +def create_examples(batch_dialogues_utterances_contexts_info, + batch_dialogues_utterances_responses_info, + slot_list, + nlg_manager=None, + append_history=True, + use_history_labels=True, + swap_utterances=True, + debug=False): + """ + Create TripPy input examples. 
+ + Args: + batch_dialogues_utterances_contexts_info: Utterance information + batch_dialogues_utterances_responses_info: Response information + slot_list: List of all possible slots; Defined in the config + nlg_manager: NLG Manager for retrieving action labels + use_history_labels: Whether to use history labels; True for TripPy advanced + append_history: Whether to append the dialogue history; True for TripPy advanced + swap_utterances: true in TripPy advanced, but for dstc2 the system starts not the user; Use this if the user starts + + delexicalize_sys_utts[REMOVED]: Whether to mask slot value utterances from the system - Used in the advanced TripPy, however, + we have no slot value information in DSTC2, so cannot be used - Refer to TripPy to readd + Returns: + examples: List of DSTExample instances + """ + + examples = [] + for dial_id, (contexts, responses) in enumerate(zip(batch_dialogues_utterances_contexts_info, batch_dialogues_utterances_responses_info)): + # Presets + prev_ds = {} # dict instead of list since we use the sv_dict formated ver + hst = [] + prev_hst_lbl_dict = {slot: [] for slot in slot_list} + prev_ds_lbl_dict = {slot: 'none' for slot in slot_list} + response_saved = None + + for turn_id, (context, response) in enumerate(zip(contexts, responses)): + guid = '%s-%s' % (dial_id, str(turn_id)) + + # Not in original TripPy; Get the action label if training time, i.e. we have response data + action_label = nlg_manager.get_action_id(response["act"]) if (response is not None) and (response["act"] is not None) else 0 + prev_action_label = nlg_manager.get_action_id(context["prev_resp_act"]) if ("prev_resp_act" in context) and (context["prev_resp_act"] is not None) else 0 + + # Move the responses one backwards, because the first input should be user only with the response being None + # The final utterance by the system is not needed in the text, except for the action label (action labels are not moved back) + response_saved, response = response, response_saved + + + ds_lbl_dict = prev_ds_lbl_dict.copy() + hst_lbl_dict = prev_hst_lbl_dict.copy() + + cur_ds = get_dialogue_state_sv_dict(context) # Create DS here instead of in get_turn_label + cur_ds = {**prev_ds, **cur_ds} # Merge with prev_ds, giving preference to cur_ds + + (text_a, + text_a_label, + text_b, + text_b_label, + inform_label, + inform_slot_label, + referral_label, + cur_ds_dict, + class_label) = get_turn_label(context, # context & response instd of turn + response, + cur_ds, # Add cur_ds, which is normally in turn["dialogue_state"] + prev_ds, + slot_list, + dial_id, + turn_id, + slot_last_occurrence=True) + + + # Set to true by default, since in DP the system starts + if swap_utterances: + txt_a = text_b + txt_b = text_a + txt_a_lbl = text_b_label + txt_b_lbl = text_a_label + else: + txt_a = text_a + txt_b = text_b + txt_a_lbl = text_a_label + txt_b_lbl = text_b_label + + + value_dict = {} + for slot in slot_list: + if slot in cur_ds_dict: + value_dict[slot] = cur_ds_dict[slot] + else: + value_dict[slot] = 'none' + if class_label[slot] != 'none': + ds_lbl_dict[slot] = class_label[slot] + if append_history: + if use_history_labels: + hst_lbl_dict[slot] = txt_a_lbl[slot] + txt_b_lbl[slot] + hst_lbl_dict[slot] + else: + hst_lbl_dict[slot] = [0 for _ in txt_a_lbl[slot] + txt_b_lbl[slot] + hst_lbl_dict[slot]] + + examples.append(DSTExample( + guid=guid, + text_a=txt_a, + text_b=txt_b, + history=hst, + text_a_label=txt_a_lbl, + text_b_label=txt_b_lbl, + history_label=prev_hst_lbl_dict, + values=value_dict, + 
inform_label=inform_label, + inform_slot_label=inform_slot_label, + refer_label=referral_label, + diag_state=prev_ds_lbl_dict, + class_label=class_label, + action_label=action_label, + prev_action_label=prev_action_label)) # Not in original TripPy; The action idx the model is supposed to predict + + prev_ds = cur_ds # use already transformed cur_ds instead of turn['dialogue_state'] + prev_ds_lbl_dict = ds_lbl_dict.copy() + prev_hst_lbl_dict = hst_lbl_dict.copy() + + if append_history: + hst = txt_a + txt_b + hst + + if (debug) and (dial_id == 0) and (turn_id < 2): + logger.info(f"Example - Turn {turn_id}:") + logger.info(f"Text A: {txt_a}") + logger.info(f"Text B: {txt_b}") + logger.info(f"Action Label: {action_label}") + + return examples + +### Transform into final model input ### + + +### Because we have managed to turn DSTC2 into a valid DSTExample (a class used in TripPy) above, we can mostly copy the remaining transformations, i.e. below & possibly simplify +### Will simplify the below to 1/4 the length ### +### A lot of extra lines because of RoBERTa compatibility (I think we won't need that for DP) + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, + input_ids, + input_ids_unmasked, + input_mask, + segment_ids, + start_pos=None, + end_pos=None, + values=None, + inform=None, + inform_slot=None, + refer_id=None, + diag_state=None, + class_label_id=None, + guid="NONE"): + self.guid = guid + self.input_ids = input_ids + self.input_ids_unmasked = input_ids_unmasked + self.input_mask = input_mask + self.segment_ids = segment_ids + self.start_pos = start_pos + self.end_pos = end_pos + self.values = values + self.inform = inform + self.inform_slot = inform_slot + self.refer_id = refer_id + self.diag_state = diag_state + self.class_label_id = class_label_id + + +def convert_examples_to_features(examples, slot_list, class_types, tokenizer, max_seq_length, slot_value_dropout=0.0, debug=False): + """Loads a data file into a list of `InputBatch`s.""" + + # BERT Model Specs + model_specs = {'MODEL_TYPE': 'bert', + 'CLS_TOKEN': '[CLS]', + 'UNK_TOKEN': '[UNK]', + 'SEP_TOKEN': '[SEP]', + 'TOKEN_CORRECTION': 4} + + def _tokenize_text_and_label(text, text_label_dict, slot, tokenizer, model_specs, slot_value_dropout): + joint_text_label = [0 for _ in text_label_dict[slot]] # joint all slots' label + for slot_text_label in text_label_dict.values(): + for idx, label in enumerate(slot_text_label): + if label == 1: + joint_text_label[idx] = 1 + + text_label = text_label_dict[slot] + tokens = [] + tokens_unmasked = [] + token_labels = [] + for token, token_label, joint_label in zip(text, text_label, joint_text_label): + token = convert_to_unicode(token) + sub_tokens = tokenizer.tokenize(token) # Most time intensive step + tokens_unmasked.extend(sub_tokens) + if slot_value_dropout == 0.0 or joint_label == 0: + tokens.extend(sub_tokens) + else: + rn_list = np.random.random_sample((len(sub_tokens),)) + for rn, sub_token in zip(rn_list, sub_tokens): + if rn > slot_value_dropout: + tokens.append(sub_token) + else: + tokens.append(model_specs['UNK_TOKEN']) + token_labels.extend([token_label for _ in sub_tokens]) + assert len(tokens) == len(token_labels) + assert len(tokens_unmasked) == len(token_labels) + return tokens, tokens_unmasked, token_labels + + def _truncate_seq_pair(tokens_a, tokens_b, history, max_length): + """Truncates a sequence pair in place to the maximum length. 
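        A small sketch of the truncation order on hypothetical token lists
        (dialogue history is dropped first, then the longer of the two
        utterances is trimmed token by token):

            tokens_a = ["cheap", "restaurant", "please"]   # current user turn
            tokens_b = ["what", "food", "type", "?"]       # previous system turn
            history  = ["hello", "welcome"]
            _truncate_seq_pair(tokens_a, tokens_b, history, max_length=6)
            # history  -> []                                 (popped first)
            # tokens_b -> ["what", "food", "type"]           (longer sequence trimmed next)
            # tokens_a -> ["cheap", "restaurant", "please"]  (unchanged)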
+ Copied from bert/run_classifier.py + """ + # This is a simple heuristic which will always truncate the longer sequence + # one token at a time. This makes more sense than truncating an equal percent + # of tokens from each, since if one sequence is very short then each token + # that's truncated likely contains more information than a longer sequence. + while True: + total_length = len(tokens_a) + len(tokens_b) + len(history) + if total_length <= max_length: + break + if len(history) > 0: + history.pop() + elif len(tokens_a) > len(tokens_b): + tokens_a.pop() + else: + tokens_b.pop() + + def _truncate_length_and_warn(tokens_a, tokens_b, history, max_seq_length, model_specs, guid): + # Modifies `tokens_a` and `tokens_b` in place so that the total + # length is less than the specified length. + # Account for [CLS], [SEP], [SEP], [SEP] with "- 4" (BERT) + # Account for , , , with "- 6" (RoBERTa) + if len(tokens_a) + len(tokens_b) + len(history) > max_seq_length - model_specs['TOKEN_CORRECTION']: + if debug: + logger.info("Truncate Example %s. Total len=%d." % (guid, len(tokens_a) + len(tokens_b) + len(history))) + logger.info("Truncated Example History: %s" % history) + input_text_too_long = True + else: + input_text_too_long = False + _truncate_seq_pair(tokens_a, tokens_b, history, max_seq_length - model_specs['TOKEN_CORRECTION']) + return input_text_too_long + + def _get_token_label_ids(token_labels_a, token_labels_b, token_labels_history, max_seq_length, model_specs): + token_label_ids = [] + token_label_ids.append(0) # [CLS]/ + for token_label in token_labels_a: + token_label_ids.append(token_label) + token_label_ids.append(0) # [SEP]/ + for token_label in token_labels_b: + token_label_ids.append(token_label) + token_label_ids.append(0) # [SEP]/ + for token_label in token_labels_history: + token_label_ids.append(token_label) + token_label_ids.append(0) # [SEP]/ + while len(token_label_ids) < max_seq_length: + token_label_ids.append(0) # padding + assert len(token_label_ids) == max_seq_length + return token_label_ids + + def _get_start_end_pos(class_type, token_label_ids, max_seq_length): + if class_type == 'copy_value' and 1 not in token_label_ids: + #logger.warn("copy_value label, but token_label not detected. Setting label to 'none'.") + class_type = 'none' + start_pos = 0 + end_pos = 0 + if 1 in token_label_ids: + start_pos = token_label_ids.index(1) + # Parsing is supposed to find only first location of wanted value + if 0 not in token_label_ids[start_pos:]: + end_pos = len(token_label_ids[start_pos:]) + start_pos - 1 + else: + end_pos = token_label_ids[start_pos:].index(0) + start_pos - 1 + for i in range(max_seq_length): + if i >= start_pos and i <= end_pos: + assert token_label_ids[i] == 1 + return class_type, start_pos, end_pos + + def _get_transformer_input(tokens_a, tokens_b, history, max_seq_length, tokenizer, model_specs): + # The convention in BERT is: + # (a) For sequence pairs: + # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] + # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 + # (b) For single sequences: + # tokens: [CLS] the dog is hairy . [SEP] + # type_ids: 0 0 0 0 0 0 0 + # + # Where "type_ids" are used to indicate whether this is the first + # sequence or the second sequence. The embedding vectors for `type=0` and + # `type=1` were learned during pre-training and are added to the wordpiece + # embedding vector (and position vector). 
This is not *strictly* necessary + # since the [SEP] token unambiguously separates the sequences, but it makes + # it easier for the model to learn the concept of sequences. + # + # For classification tasks, the first vector (corresponding to [CLS]) is + # used as the "sentence vector". Note that this only makes sense because + # the entire model is fine-tuned. + tokens = [] + segment_ids = [] + tokens.append(model_specs['CLS_TOKEN']) + segment_ids.append(0) + for token in tokens_a: + tokens.append(token) + segment_ids.append(0) + tokens.append(model_specs['SEP_TOKEN']) + segment_ids.append(0) + for token in tokens_b: + tokens.append(token) + segment_ids.append(1) + tokens.append(model_specs['SEP_TOKEN']) + segment_ids.append(1) + for token in history: + tokens.append(token) + segment_ids.append(1) + tokens.append(model_specs['SEP_TOKEN']) + segment_ids.append(1) + input_ids = tokenizer.convert_tokens_to_ids(tokens) + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + input_mask = [1] * len(input_ids) + # Zero-pad up to the sequence length. + while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + return tokens, input_ids, input_mask, segment_ids + + total_cnt = 0 + too_long_cnt = 0 + + refer_list = ['none'] + slot_list + + features = [] + # Convert single example + for (example_index, example) in enumerate(examples): + if (example_index % 10 == 0) and (debug == True): + logger.info("Writing example %d of %d" % (example_index, len(examples))) + + total_cnt += 1 + + value_dict = {} + inform_dict = {} + inform_slot_dict = {} + refer_id_dict = {} + diag_state_dict = {} + class_label_id_dict = {} + start_pos_dict = {} + end_pos_dict = {} + for slot in slot_list: + tokens_a, tokens_a_unmasked, token_labels_a = _tokenize_text_and_label( + example.text_a, example.text_a_label, slot, tokenizer, model_specs, slot_value_dropout) + tokens_b, tokens_b_unmasked, token_labels_b = _tokenize_text_and_label( + example.text_b, example.text_b_label, slot, tokenizer, model_specs, slot_value_dropout) + tokens_history, tokens_history_unmasked, token_labels_history = _tokenize_text_and_label( + example.history, example.history_label, slot, tokenizer, model_specs, slot_value_dropout) + + input_text_too_long = _truncate_length_and_warn( + tokens_a, tokens_b, tokens_history, max_seq_length, model_specs, example.guid) + + if input_text_too_long: + if debug == True: + if len(token_labels_a) > len(tokens_a): + logger.info(' tokens_a truncated labels: %s' % str(token_labels_a[len(tokens_a):])) + if len(token_labels_b) > len(tokens_b): + logger.info(' tokens_b truncated labels: %s' % str(token_labels_b[len(tokens_b):])) + if len(token_labels_history) > len(tokens_history): + logger.info(' tokens_history truncated labels: %s' % str(token_labels_history[len(tokens_history):])) + + token_labels_a = token_labels_a[:len(tokens_a)] + token_labels_b = token_labels_b[:len(tokens_b)] + token_labels_history = token_labels_history[:len(tokens_history)] + tokens_a_unmasked = tokens_a_unmasked[:len(tokens_a)] + tokens_b_unmasked = tokens_b_unmasked[:len(tokens_b)] + tokens_history_unmasked = tokens_history_unmasked[:len(tokens_history)] + + assert len(token_labels_a) == len(tokens_a) + assert len(token_labels_b) == len(tokens_b) + assert len(token_labels_history) == len(tokens_history) + assert 
len(token_labels_a) == len(tokens_a_unmasked) + assert len(token_labels_b) == len(tokens_b_unmasked) + assert len(token_labels_history) == len(tokens_history_unmasked) + token_label_ids = _get_token_label_ids(token_labels_a, token_labels_b, token_labels_history, max_seq_length, model_specs) + + value_dict[slot] = example.values[slot] + inform_dict[slot] = example.inform_label[slot] + + class_label_mod, start_pos_dict[slot], end_pos_dict[slot] = _get_start_end_pos( + example.class_label[slot], token_label_ids, max_seq_length) + if class_label_mod != example.class_label[slot]: + example.class_label[slot] = class_label_mod + inform_slot_dict[slot] = example.inform_slot_label[slot] + refer_id_dict[slot] = refer_list.index(example.refer_label[slot]) + diag_state_dict[slot] = class_types.index(example.diag_state[slot]) + class_label_id_dict[slot] = class_types.index(example.class_label[slot]) + + if input_text_too_long: + too_long_cnt += 1 + + tokens, input_ids, input_mask, segment_ids = _get_transformer_input(tokens_a, + tokens_b, + tokens_history, + max_seq_length, + tokenizer, + model_specs) + if slot_value_dropout > 0.0: + _, input_ids_unmasked, _, _ = _get_transformer_input(tokens_a_unmasked, + tokens_b_unmasked, + tokens_history_unmasked, + max_seq_length, + tokenizer, + model_specs) + else: + input_ids_unmasked = input_ids + + assert(len(input_ids) == len(input_ids_unmasked)) + + if debug == True: + logger.info("*** TripPy Example ***") + logger.info("guid: %s" % (example.guid)) + logger.info("tokens: %s" % " ".join(tokens)) + logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) + logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) + logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + logger.info("start_pos: %s" % str(start_pos_dict)) + logger.info("end_pos: %s" % str(end_pos_dict)) + logger.info("values: %s" % str(value_dict)) + logger.info("inform: %s" % str(inform_dict)) + logger.info("inform_slot: %s" % str(inform_slot_dict)) + logger.info("refer_id: %s" % str(refer_id_dict)) + logger.info("diag_state: %s" % str(diag_state_dict)) + logger.info("class_label_id: %s" % str(class_label_id_dict)) + + features.append( + InputFeatures( + guid=example.guid, + input_ids=input_ids, + input_ids_unmasked=input_ids_unmasked, + input_mask=input_mask, + segment_ids=segment_ids, + start_pos=start_pos_dict, + end_pos=end_pos_dict, + values=value_dict, + inform=inform_dict, + inform_slot=inform_slot_dict, + refer_id=refer_id_dict, + diag_state=diag_state_dict, + class_label_id=class_label_id_dict)) + + if debug == True: + logger.info("========== %d out of %d examples have text too long" % (too_long_cnt, total_cnt)) + + return features + + + +def get_turn(batch, index=-1): + """ + Seeks turn from trippy input batch; None is used to keep [1, seq_len] shape + If index = -1, gets the last turn + """ + result = {} + for key, value in batch.items(): + if isinstance(value, dict): + result[key] = {k: v[None, index] for k, v in value.items()} + elif isinstance(value, list): + result[key] = [v[None, index] for v in value] + else: + result[key] = value[None, index] + return result + +def batch_to_device(batch, device): + """ + Moves items in batch to correct device + """ + result = {} + for key, value in batch.items(): + if isinstance(value, dict): + result[key] = {k: v.to(device) for k, v in value.items()} + elif isinstance(value, list): + result[key] = [v.to(device) for v in value] + else: + result[key] = value.to(device) + return result + + +def 
prepare_trippy_data(batch_dialogues_utterances_contexts_info: List[List[dict]], + batch_dialogues_utterances_responses_info: List[List[dict]], + tokenizer, + slot_list, + class_types, + nlg_manager=None, + max_seq_length=180, + debug=False) -> dict: + """ + Parse the passed DSTC2 dialogue information to BertForDST input. + + Args: + batch_dialogues_utterances_contexts_info: the dictionary containing + the dialogue utterances training information + batch_dialogues_utterances_responses_info: the dictionary containing + the dialogue utterances responses training information + tokenizer: BertTokenizer used to tokenize inputs + slot_list: Slot Names to be filled + class_types: Generally [copy_value, inform, none, dontcare] for TripPy + nlg_manager: NLGManager necessary for getting action labels during example creation + max_seq_length: Maximum length of TripPy input (incl history) - 180 is default for TripPy Advanced + + Returns: + inputs: Dict of BertForDST Inputs + features: ??? + + """ + examples = create_examples(batch_dialogues_utterances_contexts_info, + batch_dialogues_utterances_responses_info, + slot_list=slot_list, + nlg_manager=nlg_manager, + debug=debug) + + + + + features = convert_examples_to_features(examples, + slot_list, + class_types=class_types, + tokenizer=tokenizer, + max_seq_length=max_seq_length, + debug=debug) + + # Convert to Tensors and return data + all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) + all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) + all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) + all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) + f_start_pos = [f.start_pos for f in features] + f_end_pos = [f.end_pos for f in features] + f_inform_slot_ids = [f.inform_slot for f in features] + f_refer_ids = [f.refer_id for f in features] + f_diag_state = [f.diag_state for f in features] + f_class_label_ids = [f.class_label_id for f in features] + all_start_positions = {} + all_end_positions = {} + all_inform_slot_ids = {} + all_refer_ids = {} + all_diag_state = {} + all_class_label_ids = {} + for s in slot_list: + all_start_positions[s] = torch.tensor([f[s] for f in f_start_pos], dtype=torch.long) + all_end_positions[s] = torch.tensor([f[s] for f in f_end_pos], dtype=torch.long) + all_inform_slot_ids[s] = torch.tensor([f[s] for f in f_inform_slot_ids], dtype=torch.long) + all_refer_ids[s] = torch.tensor([f[s] for f in f_refer_ids], dtype=torch.long) + all_diag_state[s] = torch.tensor([f[s] for f in f_diag_state], dtype=torch.long) + all_class_label_ids[s] = torch.tensor([f[s] for f in f_class_label_ids], dtype=torch.long) + + # Not in original TripPy; Add Action labels + all_action_labels = torch.tensor([e.action_label for e in examples], dtype=torch.long) + all_prev_action_labels = torch.nn.functional.one_hot(torch.tensor([e.action_label for e in examples], dtype=torch.long), num_classes=nlg_manager.num_of_known_actions()) + + + # Possibly have this in main trippy cuz diag state needs to be updated for eval runs + inputs = {'input_ids': all_input_ids, + 'input_mask': all_input_mask, + 'segment_ids': all_segment_ids, + 'start_pos': all_start_positions, + 'end_pos': all_end_positions, + 'inform_slot_id': all_inform_slot_ids, + 'refer_id': all_refer_ids, + 'diag_state': all_diag_state, + 'class_label_id': all_class_label_ids, + "action_label": all_action_labels, + "prev_action_label": all_prev_action_labels} + + + return inputs, features From 
d9c2644187d113047b66350c50930da56d8fe8f2 Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Mon, 5 Jul 2021 18:41:33 +0200 Subject: [PATCH 112/151] Generalize JSONNLGManager --- deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py index 071e44d7a8..190acf9572 100644 --- a/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py +++ b/deeppavlov/models/go_bot/nlg/mock_json_nlg_manager.py @@ -197,10 +197,14 @@ def decode_response(self, response = JSONNLGResponse(slots_values, actions_tuple) verbose_response = VerboseJSONNLGResponse.from_json_nlg_response(response) verbose_response.policy_prediction = policy_prediction - verbose_response._nlu_responses = utterance_batch_features._nlu_responses response_text = self.generate_template(verbose_response) verbose_response.text = response_text - return verbose_response + if utterance_batch_features: + verbose_response._nlu_responses = utterance_batch_features._nlu_responses + return verbose_response + # TripPy Case - Use same return type as nlg_manager, i.e. str + else: + return verbose_response.text def num_of_known_actions(self) -> int: """ From 160739fc30f9034e6eee4900be266d275c217c4d Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Mon, 5 Jul 2021 18:56:31 +0200 Subject: [PATCH 113/151] Add TripPy to registry --- deeppavlov/core/common/registry.json | 1 + deeppavlov/core/common/requirements_registry.json | 3 +++ deeppavlov/requirements/trippy.txt | 2 ++ 3 files changed, 6 insertions(+) create mode 100644 deeppavlov/requirements/trippy.txt diff --git a/deeppavlov/core/common/registry.json b/deeppavlov/core/common/registry.json index 48f320b977..a9510b0755 100644 --- a/deeppavlov/core/common/registry.json +++ b/deeppavlov/core/common/registry.json @@ -193,6 +193,7 @@ "transformers_bert_embedder": "deeppavlov.models.embedders.transformers_embedder:TransformersBertEmbedder", "transformers_bert_preprocessor": "deeppavlov.models.preprocessors.transformers_preprocessor:TransformersBertPreprocessor", "tree_to_sparql": "deeppavlov.models.kbqa.tree_to_sparql:TreeToSparql", + "trippy": "deeppavlov.models.go_bot.trippy:TripPy", "two_sentences_emb": "deeppavlov.models.ranking.rel_ranker:TwoSentencesEmbedder", "typos_custom_reader": "deeppavlov.dataset_readers.typos_reader:TyposCustom", "typos_iterator": "deeppavlov.dataset_iterators.typos_iterator:TyposDatasetIterator", diff --git a/deeppavlov/core/common/requirements_registry.json b/deeppavlov/core/common/requirements_registry.json index 9afb597ab0..86b9a01361 100644 --- a/deeppavlov/core/common/requirements_registry.json +++ b/deeppavlov/core/common/requirements_registry.json @@ -220,6 +220,9 @@ "tree_to_sparql": [ "{DEEPPAVLOV_PATH}/requirements/udpipe.txt" ], + "trippy": [ + "{DEEPPAVLOV_PATH}/requirements/trippy.txt" + ], "torch_squad_bert_model": [ "{DEEPPAVLOV_PATH}/requirements/pytorch16.txt", "{DEEPPAVLOV_PATH}/requirements/transformers.txt" diff --git a/deeppavlov/requirements/trippy.txt b/deeppavlov/requirements/trippy.txt new file mode 100644 index 0000000000..6bb320f5eb --- /dev/null +++ b/deeppavlov/requirements/trippy.txt @@ -0,0 +1,2 @@ +transformers==2.9.1 +torch==1.9.0 \ No newline at end of file From 1622a69191dfcc1f36d46e33f2b8232e082ca66a Mon Sep 17 00:00:00 2001 From: Muennighoff 
<62820084+Muennighoff@users.noreply.github.com> Date: Mon, 5 Jul 2021 19:12:47 +0200 Subject: [PATCH 114/151] Fix naming --- deeppavlov/models/go_bot/trippy.py | 2 +- .../go_bot/{trippy_preporcessing.py => trippy_preprocessing.py} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename deeppavlov/models/go_bot/{trippy_preporcessing.py => trippy_preprocessing.py} (100%) diff --git a/deeppavlov/models/go_bot/trippy.py b/deeppavlov/models/go_bot/trippy.py index e1e7180038..b96899527f 100644 --- a/deeppavlov/models/go_bot/trippy.py +++ b/deeppavlov/models/go_bot/trippy.py @@ -32,7 +32,7 @@ from deeppavlov.models.go_bot.nlg.nlg_manager import NLGManagerInterface from deeppavlov.models.go_bot.policy.dto.policy_prediction import PolicyPrediction from deeppavlov.models.go_bot.trippy_bert_for_dst import BertForDST -from deeppavlov.models.go_bot.trippy_preprocssing import prepare_trippy_data, get_turn, batch_to_device +from deeppavlov.models.go_bot.trippy_preprocessing import prepare_trippy_data, get_turn, batch_to_device # EXP diff --git a/deeppavlov/models/go_bot/trippy_preporcessing.py b/deeppavlov/models/go_bot/trippy_preprocessing.py similarity index 100% rename from deeppavlov/models/go_bot/trippy_preporcessing.py rename to deeppavlov/models/go_bot/trippy_preprocessing.py From 42256674610b711759bf978649f160651dd157e3 Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Mon, 5 Jul 2021 19:38:46 +0200 Subject: [PATCH 115/151] Remove experimental warmup --- deeppavlov/models/go_bot/trippy.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/deeppavlov/models/go_bot/trippy.py b/deeppavlov/models/go_bot/trippy.py index b96899527f..b8543e5588 100644 --- a/deeppavlov/models/go_bot/trippy.py +++ b/deeppavlov/models/go_bot/trippy.py @@ -34,10 +34,6 @@ from deeppavlov.models.go_bot.trippy_bert_for_dst import BertForDST from deeppavlov.models.go_bot.trippy_preprocessing import prepare_trippy_data, get_turn, batch_to_device - -# EXP -from transformers import (AdamW, get_linear_schedule_with_warmup) - logger = getLogger(__name__) From 2220760df04f8e41b5dcb60cb7588c786c71e829 Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Tue, 6 Jul 2021 14:03:57 +0200 Subject: [PATCH 116/151] Remove previous_act_label --- .../models/go_bot/trippy_bert_for_dst.py | 27 +++++++++++++++---- .../models/go_bot/trippy_preprocessing.py | 18 +++++-------- 2 files changed, 28 insertions(+), 17 deletions(-) diff --git a/deeppavlov/models/go_bot/trippy_bert_for_dst.py b/deeppavlov/models/go_bot/trippy_bert_for_dst.py index bd683ca1d4..94d5436ac4 100644 --- a/deeppavlov/models/go_bot/trippy_bert_for_dst.py +++ b/deeppavlov/models/go_bot/trippy_bert_for_dst.py @@ -21,8 +21,11 @@ class BertForDST(BertPreTrainedModel): """ BERT model used by TripPy. - + This extends the basic bert model for dialogue state tracking. + + Args: + config: Model-specific attributes & settings """ def __init__(self, config): super(BertForDST, self).__init__(config) @@ -83,11 +86,26 @@ def forward(self, class_label_id=None, diag_state=None, aux_task_def=None, - action_label=None, - prev_action_label=None): + action_label=None): """ + Runs the model and outputs predictions and loss. 
+ Args: - action_label: Action to predict + input_ids: BERT tokenized input_ids + input_mask: Attention mask of input_ids + segment_ids: 1 / 0s to differentiate sentence parts + position_ids: Token positions in input_ids + head_mask: Mask to hide attention heads + start_pos: Labels for slot starting positions + end_pos: Labels for slot ending positions + inform_slot_id: Labels for whether the system informs + refer_id: Labels for whether a slot value is referred from another slot + class_label_id: Label for the class type of the slot value, e.g. dontcare + diag_state: Current dialogue state of all slots + aux_task_def: If there is an auxiliary task, such as classification + action_label: Action to predict + Returns: + outputs: Tuple of logits & losses """ outputs = self.bert( input_ids, @@ -233,4 +251,3 @@ def forward(self, outputs = (total_loss,) + (per_slot_per_example_loss, per_slot_class_logits, per_slot_start_logits, per_slot_end_logits, per_slot_refer_logits, action_logits, action_loss,) + outputs[2:] return outputs - \ No newline at end of file diff --git a/deeppavlov/models/go_bot/trippy_preprocessing.py b/deeppavlov/models/go_bot/trippy_preprocessing.py index 8375cd34a4..839432c739 100644 --- a/deeppavlov/models/go_bot/trippy_preprocessing.py +++ b/deeppavlov/models/go_bot/trippy_preprocessing.py @@ -26,12 +26,12 @@ import numpy as np import torch -logger = getLogger(__name__) - -## EXP ## from deeppavlov.models.spelling_correction.levenshtein.searcher_component import LevenshteinSearcherComponent +logger = getLogger(__name__) + + class DSTExample(object): """ Taken from TripPy except for the __repr__ function, @@ -52,8 +52,7 @@ def __init__(self, refer_label=None, diag_state=None, class_label=None, - action_label=None, - prev_action_label=None): + action_label=None): self.guid = guid self.text_a = text_a self.text_b = text_b @@ -68,7 +67,6 @@ def __init__(self, self.diag_state = diag_state self.class_label = class_label self.action_label = action_label - self.prev_action_label = prev_action_label # From bert.tokenization (TF code) # From TripPy def convert_to_unicode(text): @@ -427,7 +425,6 @@ def create_examples(batch_dialogues_utterances_contexts_info, # Not in original TripPy; Get the action label if training time, i.e. 
we have response data action_label = nlg_manager.get_action_id(response["act"]) if (response is not None) and (response["act"] is not None) else 0 - prev_action_label = nlg_manager.get_action_id(context["prev_resp_act"]) if ("prev_resp_act" in context) and (context["prev_resp_act"] is not None) else 0 # Move the responses one backwards, because the first input should be user only with the response being None # The final utterance by the system is not needed in the text, except for the action label (action labels are not moved back) @@ -499,8 +496,7 @@ def create_examples(batch_dialogues_utterances_contexts_info, refer_label=referral_label, diag_state=prev_ds_lbl_dict, class_label=class_label, - action_label=action_label, - prev_action_label=prev_action_label)) # Not in original TripPy; The action idx the model is supposed to predict + action_label=action_label)) # Not in original TripPy; The action idx the model is supposed to predict prev_ds = cur_ds # use already transformed cur_ds instead of turn['dialogue_state'] prev_ds_lbl_dict = ds_lbl_dict.copy() @@ -944,7 +940,6 @@ def prepare_trippy_data(batch_dialogues_utterances_contexts_info: List[List[dict # Not in original TripPy; Add Action labels all_action_labels = torch.tensor([e.action_label for e in examples], dtype=torch.long) - all_prev_action_labels = torch.nn.functional.one_hot(torch.tensor([e.action_label for e in examples], dtype=torch.long), num_classes=nlg_manager.num_of_known_actions()) # Possibly have this in main trippy cuz diag state needs to be updated for eval runs @@ -957,8 +952,7 @@ def prepare_trippy_data(batch_dialogues_utterances_contexts_info: List[List[dict 'refer_id': all_refer_ids, 'diag_state': all_diag_state, 'class_label_id': all_class_label_ids, - "action_label": all_action_labels, - "prev_action_label": all_prev_action_labels} + "action_label": all_action_labels} return inputs, features From 476907c31ac28ee73c72503a1db9f6c537cf0519 Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Wed, 7 Jul 2021 11:32:03 +0200 Subject: [PATCH 117/151] API Calls at interaction time --- deeppavlov/models/go_bot/trippy.py | 47 ++++++++++++++++++------------ 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/deeppavlov/models/go_bot/trippy.py b/deeppavlov/models/go_bot/trippy.py index b8543e5588..22b2e39dd4 100644 --- a/deeppavlov/models/go_bot/trippy.py +++ b/deeppavlov/models/go_bot/trippy.py @@ -187,15 +187,16 @@ def __call__(self, if not(isinstance(batch[0], list)): # User inference - Just one dialogue - batch = [ + diag_batch = [ [{"text": text, "intents": [{"act": None, "slots": None}]} for text in batch] ] else: + diag_batch = batch # At validation reset for every call self.reset() dialogue_results = [] - for diag_id, dialogue in enumerate(batch): + for diag_id, dialogue in enumerate(diag_batch): turn_results = [] for turn_id, turn in enumerate(dialogue): @@ -211,17 +212,17 @@ def __call__(self, self.update_ground_truth_db_result_from_context(turn) # Preprocess inputs - batch, features = prepare_trippy_data(self.batch_dialogues_utterances_contexts_info, - self.batch_dialogues_utterances_responses_info, - self.tokenizer, - self.slot_names, - self.class_types, - self.nlg_manager, - max_seq_length=self.max_seq_length, - debug=self.debug) + trippy_batch, features = prepare_trippy_data(self.batch_dialogues_utterances_contexts_info, + self.batch_dialogues_utterances_responses_info, + self.tokenizer, + self.slot_names, + self.class_types, + self.nlg_manager, + 
max_seq_length=self.max_seq_length, + debug=self.debug) # Take only the last turn - as we already know the previous ones; We need to feed them one by one to update the ds - last_turn = get_turn(batch, index=-1) + last_turn = get_turn(trippy_batch, index=-1) # Only take them from the last turn input_ids_unmasked = [features[-1].input_ids_unmasked] @@ -233,15 +234,16 @@ def __call__(self, # Move to correct device last_turn = batch_to_device(last_turn, self.device) - # Run the turn through the model + # If there are no slots, remove not needed data if self.has_slots is False: - batch["start_pos"] = None - batch["end_pos"] = None - batch["inform_slot_id"] = None - batch["refer_id"] = None - batch["class_label_id"] = None - batch["diag_state"] = None + last_turn["start_pos"] = None + last_turn["end_pos"] = None + last_turn["inform_slot_id"] = None + last_turn["refer_id"] = None + last_turn["class_label_id"] = None + last_turn["diag_state"] = None + # Run the turn through the model with torch.no_grad(): outputs = self.model(**last_turn) @@ -281,6 +283,15 @@ def __call__(self, dialogue_results.append(turn_results) + # At real-time interaction make an actual api call if this is the action predicted + if (not(isinstance(batch[0], list))) and (policy_prediction.predicted_action_ix == self.nlg_manager.get_api_call_action_id()): + self.make_api_call() + # Call TripPy again with the same user text - This is how it is done in the DSTC2 Training Data + # Note that now the db_results are updated and the last system response has been api_call + # Then return the last two system responses of the form [[api_call..., I have found...]] + dialogue_results[-1].append(self(batch)[-1][-1]) + return dialogue_results + # Return NLG generated responses return dialogue_results From e7c6884ac0ac0e887b38213f939071a7a810dfe9 Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Fri, 9 Jul 2021 19:36:07 +0200 Subject: [PATCH 118/151] Update Levenshtein Calculation --- .../models/go_bot/trippy_preprocessing.py | 45 +++++++++++-------- 1 file changed, 27 insertions(+), 18 deletions(-) diff --git a/deeppavlov/models/go_bot/trippy_preprocessing.py b/deeppavlov/models/go_bot/trippy_preprocessing.py index 839432c739..fb4258e2d2 100644 --- a/deeppavlov/models/go_bot/trippy_preprocessing.py +++ b/deeppavlov/models/go_bot/trippy_preprocessing.py @@ -18,7 +18,9 @@ ### Funcs ### +import string import re +from difflib import ndiff from logging import getLogger from typing import Dict, Any, List, Optional, Union, Tuple @@ -26,9 +28,6 @@ import numpy as np import torch - -from deeppavlov.models.spelling_correction.levenshtein.searcher_component import LevenshteinSearcherComponent - logger = getLogger(__name__) @@ -149,6 +148,19 @@ def tokenize(utt): utt_tok = [tok for tok in map(str.strip, re.split("(\W+)", utt_lower)) if len(tok) > 0] return utt_tok +def levenshtein_distance_gen(str1, str2): + """ + Returns Levenshtein distance of two strings + """ + counter = {"+": 0, "-": 0} + for edit_code, *_ in ndiff(str1, str2): + if edit_code == " ": + yield max(counter.values()) + counter = {"+": 0, "-": 0} + else: + counter[edit_code] += 1 + yield max(counter.values()) + def get_sys_inform(response, slot_type): """ @@ -304,9 +316,8 @@ def get_token_and_slot_label(context, response=None): # if e.g. 
"dontcare" skip it; It will be in the diag_state (like in TripPy) # This unfortunately is not robust to slot values that are different from the text & they have to be manually added to replace - # Generally TripPy's text index predicting is brittle and we should move to generating slot values not copying them in the future + # Generally TripPy's text index predicting is brittle and the field should move to generating slot values not copying them in the future if set(value_tok) <= set(usr_utt_tok): - # Will have to be adpated for slot values of len > 1 slot_dict = { "exclusive_end": usr_utt_tok.index(value_tok[-1]) + 1, "slot": slot, @@ -315,26 +326,24 @@ def get_token_and_slot_label(context, response=None): usr_slot_label.append(slot_dict) elif value not in ["dontcare", "itdoesntmatter"]: - # Not in original TripPy - Search for most similar values with Levenshtein in case - # Slot value label is not in the user tokens - searcher = LevenshteinSearcherComponent(usr_utt_tok, max_distance=10) - candidates = searcher([[value]]) - top_candidate = candidates[0][0][0][1] - # The LevenshteinSearcher seems not to work for removal edits - if top_candidate not in usr_utt_tok: - top_candidate = usr_utt_tok[0] # Just randomly take the first token + indices = [] + for slot_word in value_tok: + distances = [] + for option in usr_utt_tok: + distances.append(sum(levenshtein_distance_gen(slot_word, option))) + indices.append(distances.index(min(distances))) + if indices[-1] < indices[0]: + indices[-1] = indices[0] + slot_dict = { - "exclusive_end": usr_utt_tok.index(top_candidate) + 1, + "exclusive_end": indices[-1] + 1, "slot": slot, - "start": usr_utt_tok.index(top_candidate), - "candidate": top_candidate + "start": indices[0] } usr_slot_label.append(slot_dict) - - return sys_utt_tok, sys_slot_label, usr_utt_tok, usr_slot_label From c374f659d1afdd7585cb11e96f7efd7855cf0d1b Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Sun, 11 Jul 2021 16:52:41 +0200 Subject: [PATCH 119/151] Add trippy architecture imgs --- examples/img/trippy_architecture.png | Bin 0 -> 55814 bytes examples/img/trippy_architecture_simple.png | Bin 0 -> 44284 bytes 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 examples/img/trippy_architecture.png create mode 100644 examples/img/trippy_architecture_simple.png diff --git a/examples/img/trippy_architecture.png b/examples/img/trippy_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..5e5d901eb8e0b1d8cdd921256a68e32a576568f8 GIT binary patch literal 55814 zcmdSBbySq?`!A{pl7fJOw6wG^bdE?!mx8o_z|h_0NQsn`Gz{G!-SE=g&CuOBbe%_E zefRtQ?cdq!th3iy>+D(c$M6WxbKh5duFv(kuQ5PTUh)wJF~+@n_Z~^T5mUZ*@4;X9 z?jez)Aput^L=>6t-TQP;O6=8JXPuoyRISl9k`9y^&F$QAB{e3bZXNjKF4fa);+5HhTpxN;>YUG26dsEzu^?lQ!h z@koV{0*?V+pZcFH^8EerUMm#*;Md3J570ROzQ*zCfAi8OCwUE{@AK7eLf5RGf~#+G z!f?d}ThfK@0cAPAU*o^g01}bvdAp^ zR9y6=8ZJlNchy_>kcof&MjaDJY02Fz8B?U1=cs?e>#q1dK~cH7X1O{Dm6lLS5=mh! 
zi9CCXvrnw6L^geP8+GcJ8XT5q(jJi}P6K^PCb37@;;&)%f8Oh_T2XqpemUpj6upew zDdka6S+=Cu#cjjtWg!DQXXhx>WG|?!sw0UbDPH7RMR9w!14W32uHm_#tnx)de>^Vg7&BwmxH#TMMy&9h;ZTN#hK%hg~VhC@e_L%ES9 zmf>Y)m1UOUB^Cw-8^=lm_^!}-+EpAoSN+XbSD$PgnI699+J-yO)<~>dv&Z*;yACnn zXXoUb)EyYGew&)q)R!rlFW_+_?rQ4i+G($@qz#HIJp+*u+wL)ET1IY83+Lhq{GSByT>Fx!w%bGI)pK+D&2?DX<34x46#ZA90H zR(C&+*s3ktPNSKAbE{=|4cIcUS>6U?SY3!A5-`Iw%&}*g>^|`8+0dDz1Z0vsRT8CQKU1_6C*AISFKq7GHHpmk1apfCg z#(TN(G(ORb#_I@Lh2#!x&>`+3k$bX=`wc3tHCIFg~5(q@~m8=w&A9QCyc#wpW+edk03;dzYrEh`q16 zoGeYMvRm(0o2}s!;J4p~#9DV!`U;^HNJ)P-0kO0Kw@8=8YItt{~BT{{a+gI+tof4{9-K)Sn2=gjHlIK{D#yD^jAu8Bf^ zo6`?z)ZcO=%1w@c`JdZHB2%tD)0cEplYj}7ncXckJ!rUV;36U-@Hexj>SW3bCqSU-24~uwdD`|2x$#uq3491Vy*qc@kz~`K|?>`bqg-Cy$}#+Oe5r z=`cswIS2@6DI{Bt!2j$`5eSNDx;}n1$Rmtx6!rWp}(bE6hRtbIbd{ zErmq4oo?V$IxK^%^8IQ>TC4FzFIKvVOr$!7@tiVf_hj8F1gLi_)~E)jr4$tIZlZOy zNy)s&ryGKXJdrgLc?126Lv(xNRrTLnD}}zCvrs)RdWHPkjDsRY$o%Pxg!PG>g!AXC z>$)FvcAI(;s37IdJBH}eHWYp)CbP}(i~=p~&*K`6LAcH!`kce>^a`HZ zUcDN9VC>)b>!f%oZMud#{Dyi^9y{cCt#8?^h(?`nGe^NGLntBn)LLh&1!a@L2JbcR zmk4@=>T=U^r^Mhga8?fLYaWXFp}d*Sdvou0|6_%MP)A353~cN*fubj|k!7tdISM?P z!Y48wH4H-Uq7oCk_fNj|_VyYN;0e@}!)s6bkP->3Y`Ag@+I&`x;y`4SgTL~0;fDIj zhDgwKRDSvYzftM+^zUHw6wCaa(8pN-QZBck5+cc+){N{GfGi9-b)=xO8lqX zfkdGVcI(&*pFyP{LNcjm)%zu94Lxbz`tC71frr@2{?i8g^hZzmnFN?XOmzH4)1xMd z{Px$S6BGiK`_7juCaxO%eb=SN3fjp0EOUZfDN@Rx9g0?$WrER+*o->YSdW*vKyn_oc-~HTgyHMx_MJ&J1vOy&a7^FR93!wQXJ0wPk=Sc`jQz{(2e-f@0`Tc|@_zUsc9^55 zP@9+b=(7yzvW(NyjZ37Y2Oy;jgAJ2zf>50j$YBYsOc9-yl$3l|ZzqV6Ur4Ba+gO`( z0!pw|@{%CZOo@y*Slhyn`nOB#TVCYkNWcJmN+jK_ytG=>_Gbk#8R(7{)DGwxtwL^l zJ7s003@zWixx)!=aM|9pn?nmhlyTy%1_cF;j?N@)^-smdPN`Q~&Tb8623x8vcFHM# zzFEgKdO|^t{M)trv2{qhD@z}EOw)M={6KcYDr5d(rQtdaS|t!0-812v%UdDHAPfj+ zqmP+chDPbehedU-PU`?jyS=%>*%0|8I>Nxf5UW1rl+-v$_IR?wu;)1+QexbP)#ZI9 z$5!D#kTziLZ(U|@+F)H$>ZVY+vJ$ODGJbAPPz)JL{kE{s7tiB#vlJ*p$R?M_!6huL zBjgR&d<_QwHC=9&Ea({?o)CV%GgWia&_vmUd+2og-s!+9REZ@a@gusuMqRblK~a>% z?mvJmTL{K^WPvao7y}i&>mc>IZRe(<`fSxo>@6=P1;B`i-C9s+D2|DZtu4pdD3~>e z*w)`A`%X5X_Aa|`XW;?+gn-lS%v6&+=%I#36h9fh4I(~w4*nI=PrcIk5cm1t4g%k; zU@snE-3~2JbPy&8UTW0vPoL~hh=3wQ7`=UsOxg`TvcFDs0PhZ&s1pgW&29*q1bPI9 z^q-cX@NYHBm%DSnBwSkf_x)ZXX*V-{kU0qTcY0rGU*c$O4Ov`rT{+MTK5KZ&w*|)^ zmM*a^8Rg-qJ6q7}Aa9%!sOE>myPrTX$5u}rJjP?4{eIWezpi|6H%wb@EG3*`_?!E? 
z=H`Z)fGV=+zc)`WEVDvVEL&2dJffQ<<-sYk+SD3JZ${X62w7d)g6z|%!A6#u_gl;I z1YDo~VW7sHl5&EtS?CMhiPvWuqoaWOmR3)VURIhm0!`8YYDr4G)+5PNy8tNPiVlYBI0t*>-0gQkj2=@(0I=C=(;(PJ~p;D_OsTTYF!EPiz}OaYCAT|c5lFv00rV< zEi>6QZFEX3527SRmpdiDF3df{UYk1lMYMcdwQGsoP9Clvj)>g~{(7z`Kq-upQpo%f zZq9QqbIv5jZkm#)RkHmCG}UK$Lp)0VGxJvIVx6igL(JT zBd0x)r2$XW883v7i<)U)8hwC5lF&LEHF|xm@3x-Z_S|a?V^a-&FLq#4C=|b{z1bb7 zZEE!D&7Icejh?_pvX|9t%~*zt@pwui8JXwB-0eb=Zn))jc)}pVs&rr^6U#v2MZ;;f zg9QQUwc}XVo|!_w(Bs;R=DuWi1<&%!$PDSmCNWQ^7fIsaPPJN&KX(ZYTgU^1;MVZ6P<-WmMAwl7*XUS7}*AJ-ngxq^| z$?x3*Lw#`}_ydDLTAM7I3Y3Lf)T;IgIBi`crbhPWJv%1tYRA8u8Np}x3?fKJm{}TP z*TA+a-GR>A-(HKzPA)B$+l)S+KU9ry7a=Bkf9y4~buGBIye~*vV=r6>XP|9oqTT$l8B-mOvU z7iOOy>l_{&f(k1W8y>gphb1p;?Hp|OoR`kuuR-IC2llgvB}7yI%Hauw&ZFLV7^Aeb z^ui2i78UPx2_fwgsaBs+p}J)egvgV@%W*K!v>GkzOCwxq5S7X5iP*ND}~9=&-1lQQSb};AHsD7 zOuZoSC!M!_@QMn@5~tBa{=uBXEy?t|WWJj=$+R0;E`{tWRK09s+?*KgbjdoDBeL(k$ z^AN>Lyy)jo6+6KzMJRy7?x%lID+EXH@0;yTDuXmya_Ti}$5}Lo5QK@$Y(7);ZCr9P zFW0dQ$CIKD3C>t2u{TG0+oU|02u^Z$b~S5w%d9MBs~zkrRUKIpV*+Kj83#XZF&wJW z431Rb270_mh1^W~DqypE{*OS4F-(JZ z+9B(Y)P*S$f84cty{$=!Ah3;cY7H_BVADC8$bdeDBu_E<8o(IVSv&Tz_&D7!IrcEp zmWmG5kni1t!46H)B;V&W)iP_^e?%2h(ctF_oitCzGwULC9)DA?c4r!EL^>guSia>$ zmxE4_bEwJ_3cooh&(Mefx*yV=#NFM|;hhf(BiLK$<*&D243^zSBIg+wvrP9TylZCi zvGG7d2%sXd1iI4>f{_ZrGFUe91D$BoUUl;uGfwvn+mGZlBz1lu3E3>V-U?z95yc_| zpxW>Y&>Lg2St1$LoM{V|uraq1SPZilHBjcGWG)0;0_yOlZ(_a876~ zOmT3{QPDP0vGgBp()z<32&8zfXlU)RPRh-8&3o!e=Ncc}ZO#-&L_9do+C!SoIoRGZsx& z0_oPdx1<3AQ0IDG$*{G`z&HMR*AcHc6rrjTk}++aEd@|3fK0H^I>go zZ@!(9&Fy_$Sqf!1oJ#FB)uCit!E@Da2tBvp)m5ya39WJ35(kIaK*+E-Y-9w~uk+O) zxpaQ-N18EPF`ML@<`d0c71x7vh7nQxS688+K&u%t$1TyI1D^sCr@eh_fo9>|z+yEA zKGf~GmJU5h`64)ZFLM?Bb>|p~(|MxK;f@QQCMzSy$g`(j!L@vNZ!3>k()D>Nqutt>5||Xf0&^Ab&&ST zo_B|Anl<5)=xnyww$sXYh~0;kfIHSwx4_4H@@=-}fM?O%NduR|nae%wg*Yb8asc{$Wza=yBvtmU4bxt;=| z4C6_DRGhQ#+gm$my>~=JJwCd9QF0u8o4T1i!3uGKxQ5n9JiU2VNZG|{>BGIo_gEKs zQ+ExZyG)toHkE??#A2!Xr5M`@dg|5Qa;9}0@-MtL*XOfvCv0-w$>Yj-?L=)IvN_LE zaMsFP;~>XUXC+1OT&K{r)&dIu+Sji)X^139s zK#wlC<(t|~*RqsY#je2Mgd5QGF%k=DW_l^xb}Ki|Hv7laO-4uZ?APcXpPMXwc7XfJ zRGZyCO>%Hz^;gjSa8>6VE6c%ec=&K}DbU^NsC`pjBRwOx&RJHcSDQw%)S;CiJv#ZD z)51~WRjALbPl|p?VE$Z9vaPDw@I_xRa&(!(Ypk8;=6+~ z*N*5DU^cn4_;d_tH$N^b%ZCQZ`lq6#(3U&e;h>68p>9Zd;xHE~~Pb3puVWy> zMhMmIp87rcco%Tz9Ju}l1CZ{)OeyjXr2tU3Cd^oDOwu?g*j5MJZ6JpS;jf&$&b*q)SS1Xe8R& z_^`hNA^g!!$13#FOSApHHL&@Qn2kelk$yV(#BC2Xc`VvZFzXBmI0eNHgNr9ID)T zQvH44IE6F>T*~G$S(d@XgK9aA-(Y=%jVI}-e)s8%i&+K7NSaZ>bLwoVb$uF)GJe^ z{KU|T?~gY|oqIs^wXq6XhV3(gZSPUyIGmT7H+%#u+b79RUGr(s3u`6#tDT~Hy1VIu zKU^JHP6I~jhM{A!wrVXpI>#oIxQ~!mcpud2%EF4~YdRR86EOj|u+MIkXD#U6&heaZOx z5_@3FQa<`fawvL|N-EhTR;|au0%x1+7;s92b<+A@PazmL@0V`BcMg%>8hgbIvC^|Q zCy5h6jYJ!txlkvWBpa^}54_xCMt_3F*>qd_(1Lf4c@`zELimaRmdsw~gbMU=NRm1{ zkJz?w-xg_l8M|G5ksHsksyQeX!%N!{D6?REYa9ZWA%iw}R2ycwfTY)ioCbmD4^ni# z$15NZpU1I6yOLQaoMW=Nb9OnyrCD8Cg(RmtrlCNW<{wi&oV?pE;73n4n4hlfFEA~y z7gX;Wp}l@+r!=Fk1tqvu;Vq^xTFK4g2d-?W^SKV^q=X&c+op7K9B);4pFj2p|r$+e^d z!d-0*0@bDO$wlNy5nH^K>yd4By1Y9}kwDYN+p~)I`0!G#`)ODyeKl|&@(4l^xBkW` zbPMn)Z<#f$`dK$}oCj!FAdE*w66%SdvFQ$%;S#fXyy(~Zt(&o7xUt43L zF{$N>=Kqn|lC<1xtRyx9oX8;WDBq9GeZ`|mZ-)Of9wm!e>(zo7ML0S+_p7}nUJNz} zl!q=LaK3!F+6nHp`xNV2&Oc}Its1GG{`!RzL3AF^{*T!zZ-|{3GQSYDcYQ9QI&qd8 z`D8@l_bXIuxH|MSp@c?AyT`Q3*Xref`?hOT={L`Bh{kFH*gg zw*{tJAIn>0q}}WW@j%0w`?aIIEZbgq>*xE^i&1q>aVCZGEM>-Nc#z`7_^^(FVh?hm zAy4%Or{7ri-4dOiV(&4NzouVJddXuvMqpX13T^P*Ca={9P=R$mixCzsRkHysHCIKS z0PhzSZ7EFli4oC!5Wohf|7ur4=2nkwn>bN_93B?i<%*>?QH6A~5_{^Vt6O`bK~Wvr z0|QQCKo(lun%<`%rWB3C+Jsgb*X*sfu-$NUeOCaI<@rt@SJ3n51 zv3SJ_;M;z=j!#?1&F=Wn2*-><&YSF&v>lm>kaP@PfBWHWfGM91{Dn0J)U26uJAb0*ImT_hIaM_w9nya 
zEtc5@oADGSEKB2>P(6x`lt_Gfi>g3Yx-WX1u4 zKj^(N6hxk;27)nhE=#n&R5!C0E_0Nv!_0QXFiE6VQ2UIK$(0^FS%#M8>t_% zp`~xCCQ~|w51&frgbD@bfHDxBZ-67glzW(n;!-7Z3U;W`K$+K%0E5M8<#Vb%LR{A{Bz;3cSf|j=$xhq}A zLp2HS(SNp}gAONNqEcLk7@%V`hiB{;?HQ_Z1}*7r--%<_wo6_z*!0l{L&lpoPdw_X zJCewDcN=eem!{ckpPWSPk2)rWgOO(4=NHLYve=kG?nSZ2FQ!TZ{7;)mo~-+3{W>o} zkRZEmG0~GS*_MUpy_zWHQ5zmNZq4Rqu$ff_(k|$-%s8xL~$aH;Kr0RC^5pCURELxKHOBJf3Hx%rI6eED=EedWoVmR}jd{*>QCg}~~K z`(gMzH9h^xAVa2i8Tgo+00;2C_%|Lce^jS*fir1GUlmwONP^Lo%6(t@3Q<64g$*82 zv6Nm`d5bD!#5nszk>DGb$b8*+$``{<4#)xD8PLCnY3|fIme@Kz&Mu`ZNQf&+ zG&n}28s=UKGxY7Xmg$sq%-T#}f2DTH^S5x$WfNm@C!UEs4yn8Q>_x=R{(MmR%w+iz zb$<(AgX;woQ(%32?D%%R)G^=}xBiS7&9`;+U^|}glT+q1nAbK%mQ9BJTQdUYW&m@$ zg{wqg9NGCW#GOP= z)Fg(GM7X+zy5-$@orALbaTO++(ar|YgCsDfITD^=a4YFrd*2OefP`hQQ0|o>K~-_Y z#$CJSM$7E?4&>$>%3C=hHz6M!%Vz}S?W$dmvG=jbu=d}=a>RD*lIkYawm+y`-iM88 z&FjF@mI{mQs(DaWY=!xuyVP|@Jg?YdcVX{fQ)xRl9lcU`E=hb#NMv5S<jRBf0m{fwRPT*y#h1_(^I zDU3Of_qMHE8vs4rsfXPW@$9#HMOTN2%M-XC&}`0@Wq8)X4cF@op#5Fj5_YOqi?eeH zo|L^>#Q~9Dl0cP0k`zdZkqON@)%piMDC?<9lTGu0TF|@u#?toTU(BboUGLmA-0eQl zoXJmXpzt3}s#mlCyt(>pd=a~j03V`%n%lQSoXL7p+f{9dbyLIZxyNzEhP4;x)@et; zB?`@SJz|B+ig<3=tQSmm*DqGr8nrea?D1j!oMCvelK%0d$G)4=M*bH2Q=Kh4j6hdE zRnqd>p-C=X)`Y#)b#^z&`D%f?gdZtq^PoG^`MZI&#_QR0xaXsWox4M|n{BVL#wN8J z63>w$n=|NIBbFt5=XM}N1YADRjNQ|vZ|L@^+68nVROeBCx7T)r?Cj={Z zqrP@Hd-kcs!^jhi7wjJuikt4&J;}mCe<92XOJAQGZt<4Ef*Y?9nN!&szuJr>d&Ls`Sy$}Jb zwDKZx6{TB$=SnD)!3SxV-Q^Id2tM@Kw!bF6J~)RburGhgU0oWXl{~Qgk&12y*05WN z!9PJWdc|2bpFMl*i3mmNX>kyKo4lQ%~4ID{X;=#lcdkO+>&G= zSx>QT^j^>)$-x7+Dw;(W0R3byn;rmNpjHKio-qt6;Oo9TGxj{aMzJFBU@vvDA|T%~ zc#iw=%DO1Cm-b>T80NCPqE|I9A$+pH}9Uvy$J!h?KF$B|>3gXg>jf*z|FLtxdQD}OVJB0k9 zxqXEZJdh;EaGvjig68)sisf1htJ z1cPC}XC%N6A47hhMt{4H{AV8F<2~r_bK`qN|N2$mx3FY-+EOQad7$QskN~y@Ubq-J zzlQ{2SikRd?|;k-E?5AjoBsD699jkrB@@^)w(-LvV$@n0 zO6%Y>TWl6@R&OTdxBt>Oob0tXQ8e%O$WtL%82?lfutqwt*}oTQ0^OUdcS}@zo1>iZ zCX6bI@twZrbcF?z*2?V->gev-ycc0Vp3!oo2Yn}Gd34g{G%|ClJ zfvy?tPM5Egk7UXb_Ajk%ju=CsP+%4dw;#_BcjIuG-kPLHr+&QLjK`FM+q}veF!N(E zsy}ufiU!ZuI^8X`$>1%szRe{Wh1S@Rlw;VZwx`I+$}+PQ_-y~j^O7r{Y<&83&s3}A zqsdtAJCo-A{tzCYG}VGoNR3*N$X5au>0Mfx?>oS@Tl_;~ssUk#;a%?Ade;+3^UO?w z>h1X@qggM8_YBW!JFN5X5pFj;KiizLT*eb_;< zKV7~(Q+)^6S=#RItWl>|iyexMH6ydDU1OiJ3C!ROA-=O{PE~t)lH{~aI6bgp0Je$z zCjwk?A@uZFz!X(1K?*oNHa3ypAx=Bp?QCxpkAx)pTpH$fa!GygG*7MQ^oUrsSYKR4 zMP(SW74gkF<9)IIyvG??tzV&bZCFXEfALTIasOk!K=9T1`41LRsH2U|BJ0wAV=O6Yv=F z{NB%8WB&x!)YP;ijODQsb^vT{oZ62hw>jVFRjl6@LMC9Tug_sJ{80}T*fu;Cb6=zrVpW`tND-ttNbR0#`zeSB?I3* z1H)v@HsF=}z(e9WP4tb7+I67vPSDKB$w@_Hty`l1h)iwRg9i^Z%OIQE+dUS7_8X76 zIF{LC1l`ZSXuPMVgIW%ywjHf9#c`RbX2~Z4-tqJCDM|G9ew}^1HI`Ss{iG*dDk5;< zXaLx1_&@Qrhg85E1jd9vU8$V+%u~+j28`od3as)m-5;NJ-2v@#)9XtwXJ;`WHy`4V zQ|){L4EE#CO%!ekAx1G=N~K0DQCogL9ssD`3}D;eV6(3Qn*bJL0e94^-K%!ob={xy zy1Cki^+dB_u2TVPnfU`^>)MzAPNC^{{<-k~7gYQI22MLP(Gh?K4T%=D$BGKnef1%| z>h&+of3f!%@-4a&4>FznmdA>k|Djc5^Mk(;{j-%dP!24r7IxK~KmA=14jy3xhJHj? 
zcY}fR8`v9~fT~con7Xse15_QSCTml%{S^LVnLC1ip76Dy35xpw%B>Yyf1~)ZMQmyu z_rc#aWN6PXv&j-G2;e5ubwNh;z$ET9C2NoG=bfqP~rLM3Wz6 zugDY7S&()4$Tm|bwd0EgQsneKT&4yw#1+~7pnHGs*q@4ouDc@S8HR8o?Pm`#t?s!Z z&t_)Q3x=io}?LU@2 zz(%=j#EeczNJM=^PL)vZ=}pn6QSMq`{T)|w3a(Jo`K94k)c+TsNo&sTP4#_^i$&~c zLQh{xd!2?X9h-gov#@;E|FQHg(bA1cqY9^c`!GZ~M}8SyC% z;sx?*SX|g<3+o++psh${vEwekN|sz!z%gN3=0HLl=N=~Z>_Vkf)kB|u{Q749fiDe) z^w%0GF-xJ0f)L~ydnu4*4=j26ZO;3hielY_562h@ z+0pWu*XsbJZ@W@r zkqY+2-C&w{d*99bn#XFj`gwf)t0qpKqrWH@nY*ix32!RR>q*I&5~92SV5Hanhz=F@ zJVL1HdW~c&Eu#by?9Y^E*(@tGTp#7_3OKTRj9VR$aJ#pbtTJUE$zsY;2zc6Fw@Q)l zSY(Q%r5X^h=cWdSR6e^Pt?J)~O+VJ=-~90xAwJD{c=V z5ivO$RWa@_4!+?215Q-+I8VY~ykOX(!>vAvtl&=pt7yf)BONa#mvf&>?N+nMr6aS9 zcOMN7y12p~%dvq@&}U}Sr4JF&!wV?pBRMZUA$fP){Hn}q2+J?7*N7d;V3KiGIebag zsgZ7KF8}X1%$2m5jb&4@SKu>I%^|I?@6t*PX0}IKQ=%N}=p%6crwISXhV0rmeFCsk zmZPtJk4RAVW;-0|K!s{W|4IoMvk&gd9ItyqPGReTel8Vq)?RnAG;ko5)cZN)<0CC@_h4+&-n1ySlVU zZ};{O9qxuZZ5%i?-j-3mX?jOI#;R60voCaawq4Yyv|C%;1h?a33wZH;Q>_`BFPhFvnI(CcM`y$2SFM(>J@7^J(FGJ*KbLU-)D z;M0!>*NlGA`5(1rUl+zEAs)@3Io>tJN5h=Z>wFUD@Radwwr->FNCfi_fCMLdUgq4b z74&M^mh(GoaiGQ%K{MOB&?H^1oUc~4DKUN_!1u?HsLsbB;(^hs1sW`j{C=66XAf_l z6Y+IxX5em6xLvo}>->P&>^IKDnlP5<Y@X|SUW_S-+==4rdL8eciClMb z7HZU2$Z7I$9F-XM6w?O`Q>AbA+BXFMsS5uEJ>>JiJo(zL(;~t0g?6xk7ZTP}*+`n- zz?H=Xt&VhOLgjOmlAyAdTqSi49|B$e67+I=#|xP9h5k&N|E-4FIjQYSp9e#H{{v2M zQCvIvXF26^JNgrOyqe~3EmJ+}G@cj7k$`l zPww%P5{S7{yr5B1R34S1vfXB|a1ezGZ?8l2l~=CmvV61)5hi~Hb;Ht=A(E#FX++JG zPc|YAEO}0I*XJ-y`OK^hG+HwH`i1x>HqJaOD%QP!s0N^prDgj9-8{dQ`=5|!k^bO$lYt9y+yLEl>LJ&dj+u@oUm?5Sa1RfV@4Z%BD$-g$?XvoZwQPF8zKOk( zQu@We%ErRL(TsZCAtjT1rRpuy8D2Z-$gXk+3p_1;%`_&^DM=w$xaIq0?U40ElUL<6`Li5iq(Z$TF z2&cC;Z@D>Nk!Vc%hXc^*(F|m1X;$pkf0pr`>SM&*#?jPzv--~(s=o7d{F?i6qDVK{ zMe{qedU%}$5Sfg9IVL;sD1YCKjIAhe=rca)^T!u2e56Z6195u3bC+I}X|>sk*WdM$Z3`ZShW12p zQ{!gc;hYyco|@d^zxeWtf*379lomk~j4TCA7{)fQX)~HRM)U$LSkDE}uQr}Zz#gn~ z8GBL><<^uJYMix%$HvWzmo#J8Puo;-8N@dTrrv1y`>Re53I8RF%2qbMuR#58LX&8H zQj?RD6A@8rO>jX_@AbV%tIdMu_?Vb`rfqBt81QsHTuRqxw?Q8b*7#YreGmCgX+#%a zue1OxRf;f{!(iO}XnNlB%402`@9iXx$n`-_QG=0DUz|`S5ViPpa+#Y0jpv_bCnl_x zF9MZD{b!;xB}0icYwS(d?s2QZylz(sWn}VzG|$&5?Qb9d?m6}{dTo$#a5PJ++7|Hw z_zEAV*?0ocv)THqvya0vkYFaYp`x?~ud`*L-D`rMJ#{7Y`0O8+LzoxQ-&fC4 zwBu+P@>*sc2=A?~T0hPkso+rRG1RPJYW?_y;0(GbW+g3;x<+UAk-+$4UYm2gcpDFr z`~z+;n{1ZN&5gz_LMBtuaOtsS49=VM*i(fxsT-1{;~pU&cZW)|4XXI_MAisY&IG>f zL!qW%8X~ChRjvrFV=ZR+AU=XDTJU#hp6Qks%YuP6o{C;3L2Y!Kvui3Ih5$7QK#yu8OW`c=o5S+Wu9q z6k!aHh`9mjhSaWsn1@9(*Xt8Gzinh@3-wt&cdl zKW{{|+f|UBJ2q0EPwrt)QDZoT)7GfVQaXcL(fo&LwvN`Dsal?D=BF0AZze;9Jc)(% z)1^T*nNItS*>};pp5=yc$4n49J4pU@2H)vjM{<460~VQfS@>t7^urq*5pn^~@Xb=^ zu97a5OpXyxk{gk$YrE-uYDpK+L|fYWOUr8?Zj0IJn(Coet}(gOskMQh-Rm!s%6j!_ zNb)y=t&%$#ir`VV>#a_u`0-bY zcWg}pFPWb}!j14Fs472eSjIq<8dVv-riuH?#3x3Fm@DVFvxm-a4PT06D+X0l(n{H_ z9<_PiT(`UunaH`)lahc{zvguX&cPPal;^xWXUgA=NjOMHSe#v!7y(oRv4W3Nq33U% zPci?KS>K9`lAjw`#J%4%kPoJrD$55t4IQCvK6O@e5(q(Bjb#jBPSDZ z1ip;&)r7K$Sin+{YkG2;gM~@0AVsA5u2sZtoYXXy%Z*j;jN;{wH*+djv1t=QXreG& zkAGOzzdB9#v@uHCU$~vH_yqwWQz7f#x$nocMYjaHCwaIk}*l|Xq?94S> zHlU2pP?83LjT24P>TPX?6*JkLQyezKjRvsGC;{RVYf#xGIV&CYQs{@^fDjoG)bXx@ zfQZFcFn)X_&F)d0IFejke**pGwW8+HH0)x$SNY?*YH;^(D_gS1n;wqfTNP#(B#-F{ zXwt*u4FuMv|EqI6lD)^+60ptk2;UK$dzoQ6!Z-I|(F#a%BIrqwTZg5#l!~^maiqFt z1HyI%DOB4`-t??1UiUvWH`PAfsPf5^m*4WrXV9su^39-?Q_EI_-sY>9uHAb3#|lrp z<%t!xn4Nj}i2O+VY>!qt64u`Ldyfw>!a3_Jn`{)7n7YIt?8EDSCMMphUW?>6o>IHzA}taDOZWH+OU~%a zZ>*&gcD$ZP_Gf$ZMaQ$x-DiH19FX?cpgNB~gzVoMjY;0?Ct^R_*0{(>u~^{$l}Hmu z*;W|cdxI@KQr9mE4Vg|A){1gRoS^w18GhwJvKRt;WXE(#j+J9_I-#MG;6cW9UlUG! 
z8wG|CUw2>G6ez6f|kB*py}v=Njc; zcx>bCp<;$U`&vU3-cJ?kj}iJ&i1pHTEd2g^Au6kEj1q&Y0-Z+MgZ{mTNuz<8PN>2M zpBS3($gsCDWL4Mi;&YD$g(;Vr!_;71hBDEd)P07;Z07>8v2ovewq#(NM8go3ov`T643Tjp;4sw=V8V4KCl0F(f?h&7AKppGY1H`GlrSIYFo-5~Uz9`50*qB;}a zElbiDmrD|>@B@3FA^no_xc^9b|3%tSY2J#Vp>WXTyjEHJvEe?TptDl%=l9Y^Kza$I|z zY@|#@plvF+TIO}c6al?*Jv}WlaUJ2`nkLb2P4mCXcX+DNX?&%9Ya_N5a@`=~n6N%~ z8U!NS~cx`F5IDJMLv9&41x8uX6D}Bh5DP9#S zD)J4|Q{oi4vi7-gM8Cfz{pe#^*%6?d=yFXb)I-iZKG}v$HvK(Qp73e73HR}M*jsLD@g`9 zTK`9&gDc#1@wgBy_*c;WtpxY|R)YVlj2QWYA46RmAd*vIq;*zVAM*l2BHf*^8g~jJ_on{r(lYE74p~&RRZUK9bwa3qJ&eCjC#b!TiM1TYq_Sm ze-ZG-f!YCftC?ax1PEcgn(sdB&FC5HG-St0opiT(?I|y}Fw)A($#rJ!`&L}07@ROX zAoc`2mD*gVQMQ?N0)fBV zeww3Ou54ojk(|Ym;Yh=+l7yMq3@-t!j;1?(6rcz;;{##lF`TYqY$uA-^Nr^RJ>2gi zLTiRD{ZH~I7N}1qag>b%9~w%Hq*d}ovse(+y&PDWbqAQsb8S;Gw*SZe8*STq<16G< zBvWFhubi&FY&-E_R1&s5`eIXr)+wLS_d2FJ&_4zo;RPBIvCr?HGuj7;&2^EcTwGN} z$4SGQ(NHM0<^Ukl)l0Wr$jKW@$8ud%1*r|V?ij2d1Eoq^I=Ig8r)*ffk5=sX0r9u& z4f?;4y)z2-fJ!@~^3ZC>V)3K;BHVEcE*Bpgm!P%&SHEp(k>Jg|1TYRJG!JrGK@4b{ zV%_N$J_2|eEA=4Zbz)6L?&T|A{qo!>SU*553@5HV{I}Y^<`#9fSFHS8%%(E6YXG3Y zQ=O7gj$;Rr|2h`!u{BWQzHb42{91`q6J~)rRQShC1t4>hkurhCb)Ey5{*Pch>E!eN|6=d2qpI55 z=uupWO}8`x3Q{5^-3S7TidclCqHI7K>2QOTlnBzLfT)Nd(%qrN29)lS?#}xl=bZQC z`=0M_{O%a{j&c3#i~)ze_FB(+V$S)@j|897e@{4Vcc=vi3pLb2FILGsFdrk!sp$sv zPFtw?6MrB@n~TVc=ucG=oRw#u%MI6nC@^^^d-w{B^uuY4A-#N?9o|Qt&B#< z?M+v%Lc&jp|5mAO;m~gQ6^ihUq($0^JYO>pk#IW4okQ&Q-XPh%x$;TWPx9#DujJ96 zVDxF^V+dONG78;`^KQtqCaN+a&c?*L$~hLZw-c=1sxW%zm1gz6EI{g@g1XqypT5=i z{_8aKM-JncGXCE);vb2XKZO0~3JU5rkNTo!;Py6C+zI-|-NjX^Pg)56O8fL1Jl=+D z{FV19(aHWbPCfE8hvg0_oJGZ(f6vI=8yvbfrv;`B1+~M&RX)3n0!u3^FzRw%m2_G~ z);yz}nwr{Ww5S7?dZIC^%HsR;sHjW4!&B|4yR_o6X(hH(n?M54>^5t+W4*JXSS;n5)yg^)?S0EMhGIh)s+^R_NQfKWqy8_u3RB3t2_H5G_)>& zc4e%AVxRDmVlZQWj;Xp^nm5>4#6(BuB3nIVFU zRJOKe1W@rt78VQSXD-Kz*?7yS9^1PXDdR4^!|v!}V!~zQhQb0CtllXGg%Nm2fDHw{ z`FlRAy0oIEC#RAEd77KMvAUX7Ol+(Bv(f$90KdS%lc2|#Xo@kuqo`O52&&Wite9c_ zbHKZ^{IyG6qPAD3zn|sfdrmniVLy)^>F(~nlJNXFslEM%%>E_|2Z!&zr1ScRhzM$_ zRhffb`TmiSk+=Ndy93gdY&VnSx(A(1OiZe2DeH5+dH#5-@C3hxamu$uF~&Jr8?}xw zGBSP>wV&5^N`YWl_02KuoNr4ZxQe!UBMp}z?X=3#eo*0wGg=#%v>zWIpUvH_oMFE_ z*037-M3YjA!ftO12NL7y88ZaF;7_VlB>L}}PqG~ZwQ1+G>xuUBeVQLM_)QAj_QadA zGBYPZ4r{2^sa(;2R>a6<5#`?2SrYV3V6h2kcKpV!5N+e|X0iGUG(@y*i)(aq3a+yTG3K$Z=Xrenm^$;G7 zQ)y*KM=m(ud^5j)ze-5W4j1Pvy0x{nZ!L2Xn51Z-@h>m$cdB}*@LZcS>&cE1wK`oC z)^UnqrZYoCNa)NtMkl0T-!|d+ljCZKyt;4VKg2J^14tUUK9+~q1ku9e zx~j(N0~f7U=dU&^$66@K4~6%FQB_YVxQqq8G*F?vgFr`m#L z_C=ML(E|emK)jsUF_Nz6!rBH;k!+EjdHms)W93hN&L0Zbe-$YFNL;e%!#;TLU3?W0 zm~i&P`O9XJ*F(HsMg$f4a`@+%UbIJ*I<1*3jnt4Zh?eg)n7hMm2A z6>=Qvld3X%wqx}njUI-z0fG={(NR$s6%_d9HWmhXHM3aHcSVcZ&1yV+$aWQ8 z!4e?xcy){GTVn^*GjyfmWn~(kQxPOFhXDs2$FE^9^`)CBJ+;)A4|qi=B)~`-*&rzL@UeLnP@F2eEw8AEXd3aGf+}^%J(xkP zvcyDS?NvDF2476o1v8z`JAXw*W%OedBgg5}_Z9|9X4m*>n&Twwhsuvn9J2|Ups~@{ zAA)rLPA0E+X|&FGFWb7UduL<8#z<`xel7nTR{C?dW0~&p;*X!j3vaYJKfRptEFi$} z+TG;Ni>mQ)kGGZ|Ot%D_GD!R(zt=6}rOG0s|L9N+9#S;HgoITBFt;hcKG}JV*@Nri zp*lrDPKqpl=amfnIY&d`p+fKf&p+(^^67qE zWo6}N`iOSEC8~f&px)=FE^j-B0&+tLsCvlNuB&2kHj>>WfQ(=}*Ymxi0yW5W?fUiW zKv!?2GrRxPS~Oz znzMojl6(nhM(CVLV%?kL}V>V}#9m&`%>G)Ztrn?a$Io?(J;EL`LrJ?<`0OLGCVv z_TWmwXk9RLqdnc-4-5@62{15^p*kq-U1+JJi}!fOc-pTed%CLrnwHt0X}(>q?N3Tv zAhB>kRrfVP<|phOB+jkjkQ0K{(7@8_YBaYD??! 
zFBdOf{B-mgFFp1hQIqj`gI-5cv-rzX)bW;5xD*9z3j74O2lAaytnLj5@?NL;w15o} zTCA2x!YHYcW!MlINfUidRdwOpI|>%Gpbp$YYy#_p$E&m5pLz94ll{>Ky3BDi zhf#tPinGpg5JJhtBuvuDe%aaCP<}tn`T6 zYPfGXj?Sy0H+)1&WeuuJsSj~#OjO7J;lnt@H2(VKwq=}u9H+b(zGfwR;shTGPmN4X zj!?f^G}4BuokOwuL*z*t;>Z;~#EFY)jbhA-q!fyLug4o_;h&pP<+XUtGSjl0+&4^X z@3$Y{x4%7juyyj>?@P%^j@AOTuTvxMsi67BW2UM@af_2yq#p0KsttQ6pN+p-bGj}mOIP41#}m|6f|gBt z*|CHDnsG(NH%a1rc@KpHyQCAtQ+*R@{wO=IdydEw#uFsZ70q>@`zP7*3ZhL?*4$8U z>vqXUCYuQ}*Rclhf*Em6MMkS$d86}6gHbC3rv@`QB!+rygrlE;C|k?8M2~rzzwD`K z;d#stQmFs$(*J+E>oxP#=N~pdi{}gB0~X~z`?Y6jL5wm5*5B#2d7iSj=$7kb#Y?BE zk0Xlg7Buq=k_4^iFD$d4e^^;L`hk5>IliR+&B8^8MUGJ7Y4W2-t=}0RR~~YskskBr z%Dc$MAG~_!baIQ0T6BkprKr#8;>QJ6kJ{B@uCAZv)x%RHveX+vVG~f(n%SKbG8;*5 z$mEy0FoH@BjLAh?t#NB)YdjjtjA+Rn0MY7z|7j^Sn<=J{768}`${C& z^~6{CPmGTk{um&icDy3gKRK;kFqk+NEo?e5JKI<0j-`n`UtsftXH^FmZF!P`VcxJW z3BH`Xx1z8`>jk>*Ok*1me;Ev~_Y*QD2fM$hJH_n7K)MRK+i5}cOhn}uYM#ARKJVvCa|~@y_6`;--nAR0gs>Zm!tmH&{o};R&NCAcq)PS=%Rfp|J3UF{EPfU-!Bh8D?uue=<+y@uT&f#%K9BMA)P zzLAV8Isu73)P3#}(P~Iiou{Yg@SD`sAIY3I_25B!1Pdf&FQ6-GYOp;h*sppkl2nxJ ztarY1VqR>pmQaWOthHU1LQi*iBqhyJYdC+ci+~7Kd~jJb z!DZ=7&MmoniNl-T7l}3&7oXI&B^t;hxk(>=%m0*=#1eU*B~RRDJ@(5>%<33R3=$$z zt;wj*KW0Rb$4YP!?{rd$ySOkktIN>>kE59#ap-!OB_(}Zq9jzeiC00;RZ>!7z#z&a%0Rxl zwx&~TNIOg5y*-(fxkJK4FU4mH*?86|)Af(p8}djt3Nc8iOG`DutATrxi3-GEMCqa0 ztkj>aDFrq8@?WD62h;QO{LanVkq;C5Bi5Ht54y5U^X3ETMNqZTZZpJPrXu$Cxbwcf z*iBl8&sc%4QE{#(&&R-^2icl+#sN7cPfm!Six%BdJ#$JhTbds&Vm{!lw7h85QG4|+ z*PRboEKk@cJIiD~RkqnD8t_63;q9|Jq{hbmJn3$httPa>L;Rk-$Et{wBCNOG>x-H_ zkt}C5{Nr;jPpr79ouSK7!X@&=1I#grPX6#@NeTbb`vm35V0}ZQ{o(5wFYM9p@gLNIZK5bT}py(psUK!)OIjq~+ZXR~-F3-c7%Srv0hy))Y z_GlkT&7KrLFY0Ewg02HWEPL()zS6WiWji=5vFu|l2~2lFs5`yx(Y&fSZcN3=VMEM$4w|5oJrb+BnLGRYly!l@!cO`f7d}aLt?BP@Ffs@B6*E&S zP3a;Nzd!5eMMD0mQbd06sFs!IEe(%mqmdS+DPuogcrM$aRhCyHTlB5T?WK8tDqbR7 zT!;13hpB`8S{$34 zeuc-^`*)tC`Z%if3Gt&1&kM9;>dy?bF;LS$SYY%@+4VVSJZTlQU&{V9O{+>Vb;(9> zs8xDUKe0~!cEgbcvAx-FbBv8fVDXYpp`9mlv4`?=8l>pSlm#9y8e}UqIph=uDj|lE zb9ZwEdKj&g4^T+_rAR2%=~{xSRcmlY@3r$h1?=olS>~M#zb9kNgSWfFC$Z*3(D zPodG#Y07^pFNn?8Od>l3V#r5KW= z(A89A(r$l-@}_vaTtb0`)UK$gu(akW7D2PhjG3ulP;QDQpgKo2Mzq?jvjnS+Phaa4 zK60EBf2>T}qXvtFQj(I1MF=dc=jOG-=$mh@0W|y4?&9{DV+bdgRjgJ+e+z~uGiL3% zarxczPfNxQo(+xC3z-uJGniM}M6a!i^~!n@1Zx&tW-vkzof=CZ=ffbp=%N3v$Zm$m z{JzeKubw(2xxs(F!ot{Jc}1j(;+9<46eq=*?W)2*{(;O_!AxHX?_8; z#HhVz)G@uT0+mjet>p?X4zVQ@or)o@a+d(;Ox& zTf1La7PG2ld9{mK`S`9g2%n$f+zX6d6+%CSTBa;xmi2H>ybPaG|G1WjprZp#Lap1x z+}zcVTn03g&^V+lu-MLa`DurEAbPa^l77l%p?U=uJ6lckVMnUqQ*lQ|hw~^C+5*sm zHfmT00>#VRd0gu~GO;2ZIFyK836f|x=-XHngFMEGPfAp-;>FQLOKULQOAKa|!HbJ< zu(Fio!=OP<4!OCxp(7gVv=dLNf6(yb_J7Nd4o$t1ekWagOJ)k?>Eny?#z6?5d?QFh z>Kz!s0Kgmd#gAw{0kp@k4po-`_lI|aZ}I-{r1&`}v#6dim|rGZ^|HWTMo41F z(Vh+FJs)Vh_4~Jp%+1ZmR_gRWY_Ok9t~)_URdBV}Ze+umLClFEOJN?{{Z)U6yOl43 z?ioc7*_6)Mdz#4~M_NaMTSHZjn=igHZ)EJo&;1yc}a za5}UNA6F)_1=_}H_cbbB$k`kaaf`twQo;n}2@+bO?3_Q2jL<7SLD@O*TE+&_iBJvR zBqLhg4vf*((V6-7j^G?+&H)&g zG2p7tLq@n@SylW(#@!9_7QNsjT=N)hq}95MK(;Y-_Ix={_+S4p7D9+a7+%2}iq+43 zqLd{$(T}QPVI5?$p8AqwL;b?QrAwtdYg~Wi)IoR(k)YwRTy_fqV}k!Qh1P=T>oOyn z9TVq1k)D$Iz(PYPc!E)~f27~^`^UNYdAZWNo(72xSL1XlkNe2;9DCA9;F#k7qpH#w zlPJS=Dh1>8h!MgUg^=goW9D^Spw%5xw6G@V;YXLZJ@_hBQp`X|O)B4QyU^a#BEwjWT=?<6@V*R>xeO&%ppU(B zx`;_+hEFJHNM7p`>LNe# ztdq4%@z2warV^P^9o^wf_UQ%oGm>UiFD$41*5p0eXIMrM1y z!(pVCdN>hkG~{a0zqN4Qv^p>*Z;C0!!-%5xRhiB=gc>)A(3<|&Kjx+{T6?bzk`&{L zY`ySUrqIK%uRWzyU`7>baWBu@^jkjv7SqQGz9SSv<-2YWLTlTfWK80AeU7|Gg)9l3 zq=NW`k$u)K{iPQVJoadjNqIi!=Vb_SaY-{AhKruEoZF+?;?@dUD8tt}kM=dc&P|fu z>G{W6_2D~jdHX6Z$;QeG!8=Wx+D6TXr5Ugw5wcfkJ$`I)FWMo)tM&+X;lAw*UCdM} 
z!WdJ^5SeWI6v|fS@q+h0&|__|Hpi5eyAftD?(aE_xNZ>&K3`jPgn5^NHR7g<9A=;f zcFi){N$QW^|MJWphFb7ZN2c+9U6e4pm2!oEipPV9oP@Q)dO_ONKyy3IVJzv>`u)TR zZk_dJ*_P;Ykp}Q|>|cvXNH{wm|j{ zLfkzA=%gm%qTCMrJBFAWB;LJ7!3I8C*+UDmpP6$UnP@RkdbeV5u(fq*cFUeA3z`xF z?~IuGR*oc;;s=la0aU>qV?Y41+g)Tk1t>=P-LhA9apFVNHD`$cM!^$N3FF{5UDxh9 zR-BWNknl|wC@;+Ufpq8Kemv(uMm!t_&YW8VzNn9P(hE0Vl|%YmZt5Pny+2DrCTuzo z`-sgbad@89S#dBb@Za5n61NqvVAUunX)j{Nd~VgNn~(y;;pw{xQAHlqdcU@?sBGsH zrDfoVDnCVsMc*xG5jC*yDk>7INIoCCzj=x>%MJh;Ac_{l4quf4f0CduEiQ}ES|ydT z(2uadFy!a_)JGnp#!T0+>`ht8HLPV>VGz_x_MIp*zKh?W3*B1VLXtz+7M4fJ&T2}Y z2*u46<9CLqqh&uyX6uqX!%=1t;jOPxE2O4F=CnDL$$q=z(A-?{YsfwesL{+?;vbQe z^bG<9sUxIBKfNbL;BgPspUM2&L(1=RmfzrAX<}z<|M+)rS@r-g>Dn*NZby8%#Bl2< zGuFTQoF_T#i`N`hMR6lp?1NQsYTbY-M*E9f%A4_ObW8UFwkmX&=gJ!TV2n6@@{Kazd_;A@&EDoI64^l(XjeN z%|g4u6fbS_QEjxAG#IeUQvV5fLU5JX{OIR4PA{A8nMliM2$0?VyP$ifn}1wUaTbN= zFH9%1$Hw(#b-t1Lvhl~)M1se;_YJk=fMb)7XN`(C)pvO>Nkc3=azHn<5hviGj$doyg zFv__=Srjt;Zf$G(G%+J%!Nbcx*}g2)DnsvJ-wU78BWHe|F*7!5?Upt{(o0@F#yetS zV&5&$Lz7DfI@Ycm1D-rOMFj;#RXs33Wt+FJ18S5ty%xj!=ITLmb}&gn{VT=5A%LHf zC7Q^B!nh`yZa}m8Ba|1Q%}PMga#xqRsDxaq+kfyjyr)RLR(12UYh80W*KK{XP&TGk z`#e#GEd|mLlJ$`4Y&Pe%14-r>A>9q zBW&FDmJF-Yq7SP>jE|4>!(t&3eT_NRmUMSH4dPIA|hCV+e9>Ay;aG; za1O-z`u7)upw*`WkSJ0{UA+^q($Ji>X)h`sZHM*Pf+c72H|_$Mv9{2L>6Vh~fA4c# z84kik5I^gsExY&KB%g)Q8K&hV91AR^%akh*IZqI+M%hp^V?N)aJ#S>zl?@ChuBf3h z*`@jld8PWBH!B9qTz!bi$S^0#xQUgd$rQ#s;gRA6ZAW~jI+~y9yt0Z9Nrjq%XEc*2 zPlTRok({XA$k?=y>E7-xbE(Oi)^G_5Arc$z=HEwfI$l?|w6s)HHNjsVwj~+4*8uW0 zaO_!C9pZ_}+g-SCIh20Ndih&an2LHihTVMIZS9v)YuVYss)2M1joW)Z!3Lk}>kUS< z_$G5^zMZnT&}o4#FS0eZx^1H6^fECqlJabga*$>6Eq;q|55Ou8LDZC46-ilPByGmNMav|GdL&OMC@PlZj0z1Uempj5 zh5N$!dm1%9@eTx5bl%x-+FiQxS5brdShy{R~3`XN3M-?_J!gK`!9Gux1l-fh6}s95 zSW0_eH3^Q$vA_$Ox&U;bMzqh^6J?*cmnbvesTwebOk|S$V(Va$s1nZ2X#_4u^O!MB zdYRWvy75NtHtP?nr|b3>*ifsgs^U~B;nn2D>ft2-#KRyUc*C~sa94EWwAWD{dW7jZ zeD8|{rqsg^YJHuZ>L!U1=Yn4?#5h}yQOGGscln=<7<%h2W_s!flvCsQ>JPlz_1Lsc zLfa9P{YJF#9>vtd>c$Wi(Q+wgN6Jv$N~>`#6YyUSVvv%yYBR){CMVX2iM5`(xIXhC zwM8{fTBUScp3h~j7KRan9bez$nef9eHDO;zN``ncsa1J-r>+bB`lKft*;_#F*Z#&# z!kF|%7@Su3rfJ_E4~Gt^An%e%t(~kkz&G4MYFia6JGy^3ukZ=6;`Ku66?#xNpGMloTMu^gs zW;@IkuJNB$_j>xYys2Pig1|d37QiuVdJ-mnYlv7tF>m4FTR(m*7Rq|@C}CX z_PkNcD|Nr;OgCt+>iE$EoN#BZXXrB}QVOCYQjGn8 z5h3YbaRC4r210y7LhX`?{*7_6c_{}hd)BgYjBAB*Ro-nDPVwjM8Mm;haH>=b%SGGb zCUB3k?zUSWiS-U3?IoATT$XF7&fR^%bORMwr=kGRawPqiox{1vvjTp87(=WMP zEB$Z#vLKh!zP=rL+6-&Ff8UG#u4^({tt3+Wc3?tUf}tv+gL&h%;Qq)nflvDlr|3nT zJpUiSMrY2PvYW|;vT!k4$bu2-XbEkMdgTx2jk(lOsy`9zKyrbdzuZ(==j6p@^ zMG&d#6_^kiEQh`O`0*p~fr)vB$Ks0~rfb>uo&Mu+2e?0}l)+K=UZCR)g&{DWZwYv& zRjyumjs+2URZ2l}waHha#LvMc0(R9#FXO(&^|0nke2J4!N5&v%fPfa!E_Kq;j>!~h zMRKrI58nO6{bu6)#tXkL=4ay-A9485l)HN=A@v4o-_tsozXEBj6vU|7m#`{21c>aJq^L;HxJ9e&F4aKUq7PnHe<1CsD06HD zh`fS`E7gaiznv5s;k4-+QZ1`@r9eCPQ9*fmIf96uid)07cU=^>lJfy*pCA#DQ+1s1 z&`(lL5c{G2%A=@Asr_NoC4hwnQ&Yl)BJ1~M{^wWc=It~i3FEcWY>(bhnuZ6Q7`KsD{aFAG|z)CVNGs(ihR3iFei%v z9@_d4&zWL)KB+E1=o!q|kFb8ZinlJ1xv&;1^!_-4AbDrr!V*vqQL%K!|UATd_=lFNij-cT9 z>&ePwi#8xjZ>3Bv;mb+t|PKE@PA#HtM>8oTjbn+PQ zxC-27E9fp#)yt>7l`{=4B;`!)y@!y@FPA6=2-yn|;NfIkF|V9*8Yp&DjTJAkAB0Wf zAm0ryGb}+gN6g_keh;Q!)uSO+&b*V+ERlec%XXoHC2QTh)^+MJE+4N)Ni%TR=F(qW zFhp8q{aNQ#J#L4DRMj`Z+qbj2*98Kq3T6OYtE!w|kX&m`{PppA#-b1iVmiPN%3DX- zdG$&L-k25933Od>x-O3l)Z^! 
zybvYuK>or7{Zr#@lo}GiI860S;!(f_LZ^Hzt^AC#r}0$YM3Wx+^#c_U<&PxG%g^|C zIz9XC`s8Oj%=(>I6GuIUuOgAmKzP1NbLq=q5>xXTS|nrHIx%u^q!2}oV??7K=hHIZYmjzM{LG)>G zGV-_$GLYBm$AMw9np{-N#Lm3>qgf}Q${s1W+qNYd4%(zOkM%&uDK#w#ol6a=?rbLau@=slC1>MzJZBIJ%4~OF~H7h`k zd+R^ys>AQGh7cDzcd|o~kcLC&aDJmhR(dK$c@a6SpQbnG(a5F|2Fp!v##fI1RA3+@ zv~5THk00{qKTP*lo%dT+)hGGaCy`RzpLl$gdPxeO6zPUah^MKOpo>`$_== zK(LtMt}AIZqfNHb>p|@TqyU0c_9z>UT4BD&!n= zJ3n);`bUOi8TJ_565`?)NZU^DHP1%AOoQ%sbZqQao_UGyagck^_JD??D+`X-<&4On zBk~9Qj_mIcC%dvp!3XbCBRYY{SO2JWp4%kf-=AP>Y2apRi@1bDd$0eMSV7v@PioIy zD5x6q+f-PA?_8hnRSkZk!SEXsv4xgN7BF|ow)ON5q{~5e-zat**W&7`hF6SU#60Vf zBVm|S*x9|RZ{aXbKw%!|KU=M*i8+LYDQR1_zh|@iB^_#Yj#($;H8BRl<<-^M9jh{6 zGu!Th&WB!+UO@1L?gOhUO-A!)C_c&k{rBLw^I;Rvvg#O^TL5gPuogIckjc5CApKz= zvV{i+w^Pz+UXmYan0P)xvgmQ$X4NH+VXiAnm6!SCGoxA&Q+e~sgQr-=MB{qT{i;_g zA!!WC*BM$#4w3Wp5rsQG+)96!G$^d+)&TApd#TxRkeRVX#pG3YSfCHVmj_?NJ#TU^ z;ru8B0(bvrvrBWlD%Id-#0v*zpU>1!Ne-*c;p^U(`UWZ`bn2}!nq+^Lj$wC=SIB!m zYZjWBT7h=LtBmfDymxe@Tv=TPSt`q*wUBdjkvmqXNK8r+3T0+(#I3V!p(r z8<E4e0Z*|P_8%@{>ZIAtk+CS@c`t;`b(Xo8IK8UeVImzU zK68g}H`6OLw0-2vLaFJasjP9pz{m*?-uU3iD&_-${KnB}cfRpM>f$2L8*T3)s2iu0 z5Ar=v-aG0zXM1+aWp(6I9G#u*r+U&K$K+z;pE%YV)=Mo*@EbxpYZzbje0Fj&UuG(i z#&LNT+g5C`y;6@z(3WP>;@GCX)u>XGDl;&v{L6D^o_ z;rmr{9dU&Dj!*|_B!O=se6X58k&h>b3|OuAJrum5_Rmy8cRFTsVzJmr%GGW88?C?s zuJ;yhye|Ru&E#JBGYp~*xR)<%UloG*E_VLX93NqRY2JK#s`<2*lZ>{djHOLM&ySQQNZA_eK@-knYVVT=`;Lb98Kqf?5_;}$s31#i^L6U2@u~OXh zQiMe{NA%AL(*;bMMRW*Ii3I6-G9QdT^WU#?jht}@s>IasGbcdE5>cZ`?6*EL=%2tM zxk{ygSz`S?8W0u{i`Lh`dqD2*FLcGh8r@)p&9W2MYE8a>)Oml4b6IgYu|nR(<%{j~ zH`*R!fPov8@`y zR64Zed!QzF)N=L6!YuuQ23ct`E)WWC#z7NWGZ(-v$5*89ja7Bf&2}~F zvIl5ndUN->{U5Qjv$x$^ka1z4>R6pG!CB2zL^vt+@c^y-iEk; zI4SOO!ikd*`zpycyCcqt+zd4qhlyFS)y?&Q!+1VUC>Xo-++FAx=sfKq*I96(P~PwG z z%>2q%@?s;N$w&$%juMM-;}gRO9`vA5J;t#^3JvG87Yh)!j9+3zcmg~68wIuF=(B_%?q=)ChWD=i~i?iaPWbPP(j!k;W z)WoFYy^zH|#_~EJ&7@jV-SZ}=wfPmRGB<$Krp){DFM*2CK?@k(Fk;+P4X9)k2?CK6 zP&=@!M6WKVT{i%MbltUBK_|jTf9g9YsnPkE7#J7iXPBhi$X(`uoPK2*!FiMFQ5rHa zm_cfOdO8}p8Ge>80~AyNnxDwShx;H&=PC}qG%!qDFzmyWCzQe_Tm>CirmS?E8;Gp= zn#N|NPY?-83R@01ldnSp?^|47*UfgA%K0Yf;NTO?;5k%+6MQ+0W2rlLu1)D)!Y}++ z()uh;P}scz8c^@s38Y>6XQ39^HQ5M%P zwMwd_gc09V4-65p0djG--rwq;6tGA9rS^f~swdZ!gqTuyD<~)ke9EpA*k0V6$+P4R z);wNHb%iT()M_-TVQTYZzjI>e&i3|YrSk&lV^{d9flO8{f;}##d5n*)9kT_XfAM5{ zjRbzXOh`&fYWR4$f{-Ozf0=9`$)B zfZ*D2GP}y3<8+?-w6Tc!Gp4v#$JRV>Hkb9xG5b6HXu1&&{; zJeR5Jpk?xjmD=)eE?jIj)%9xnsy~`z+*danz+n#y2glRPncYO!*~GpHk~{LLZFJFBS!Vjz7V53kj;Lwl<5yJlH&EKQ|!15F(70 z1?GB@ulj6yg=|q_p$A@(lT;3DEdc2BMA%+r)NQ!Z>kulPa{_++^+o@Y6jMcxaGWB> zTXaavhOwEsAwM>~-pl(1_&;Qb=!HC9h&oOain)5Ma5zMJ2h(iL6Wk_db#!tBXkvS6 zB?1PTLs5G->HCkspiP&ZQ~O7_>i0<9cUB6?tI$G8OF!7%J4?(UsIjZzyD87LLd}h( zcN<_Q-IATzfWiI`jy9ssK27C47f3(cP^dmOQcxiRfE$VYZ9IG}7}W+M^c~vCatlDI zOZ38mnaVvAL1+tgIY!tNAzx8NS|+hDyJcu;O?oIb8Jts8_$h<@bT{uYh>?OYlN5sw zmq8pT@L2>``zdr%ujz2$k;6pnQeG~1o#eVQ$S_$f#Nt#-yI|t($^xOu(%F>YIMHqx zik2l|VvHP!WBBuH_8a(c{C5ocyO)MAb}E;$C4in21e&W4ap{33zARP$>W|&i@2LA< zVW>k&y~j;}f`k=%iiWBx`Rz*rf@l{Yt>-#1>HpaC|HD2}`+8^;Fb7>$qk(QGRu)6< zD+R}_V;lJ}j=oX7&%3%vgG1=?cA z9dp>H!R<(+)+)LdC+WD{R~RZ6{D||Y_~C`Ch0x7;JD#vY4I5yo)jXu&f6pz9F&&_516jq`)MEA7UF^ z2!LEgXc&lUfE~)_rUNb;J3Ev^*p3h9x{Xiu;>C-A`BL-#Lg;?Q!Q6yywT41N4V)9J`kzGv72V z|7g)Gc36TsOaM2Y6$9|tu8*$H8ubypgZs&zBmFt%iGy?lBmJOd-aUtJet=9 z(DcG)h=ja*-=_~71uQHiye6=h4QsrIt}ZpockWOzw1O?ez-2;P@M!>~kHfqjw!~pw z@5Q*w*0NuO4O*Z9G?;>5l5n9fa`H!lyWQi*URL!@;GB-vCrO9A`49BRf23RhH9a0dS`mp55Qu@nBX~VuP=enwsjVK9FlhKRaISM-dVdasXMES=wQ72>!*0SUi0A z&~B;?dp+nVX459U==BY&86z{1NlWb(uU}tTXeN9eY4h^8_0FTxQ#G>2XVWn&?-TrD z+|>`KoVnC(1-m3F1*F~49JHn(_jh|O%HsI%E5g;ey1K%Y;!VP|zfpD&_rU~A1WKGY 
z>IEDo;TWMx;i1eF3o*SCryQ%%+Lve5uPG~&dcGDA5a`Jr&1I2>2W9H8TdyeU6|Yh!i!phLJOq&up80RhUZQuqFH8b+l!v_==ks_MXc}Y+h0g z|FZsB?OxISi!zLhT2~u(Eosj`oXuZq^}~5FnG=5IOZ7Ip&!fIcq8CRy&KzOmHLyWC zR;!&N_H)KB4^hP;Yu)zuPud^<(+A1U{0+1H7m?C`r_6s*Ze>fWYaUGzKeZy+`(XX_ z7o`Y#Tl)cMVgY#g!$n;TT1rl4UHX9u2bz_FW5Yt(0en%&FK@2KTox9&qo7a?i7qgS zgPwv8`QJY;GNH9f%=Mp5Vk-52Z`{E*9GX_=OduxpDWOT@@xI}{Z4;&8# zp;`%P+!#OSbkSu$=F?j4R#xB2Vsq;Gi9sdJxZ=LQlE8nz7B*8ocCRp}Gr8}oIfV+N z!QATe>Kl%N{8*?T>dPumIadDj@c-dHOo}x9XAgECi1NFh$eo z0>cIHV$dH3=jtgqjtz8o`decSp&&qkKk9gv9~~7HH54;CI%=Rv`V_th5U5qlgZt|u z!0W=xb9l5FIOGx&6Z3#APxZmxg1cvmw{wUTScYVShmZx41*-F{8jFAcxy!D|pbKU% z#o6G)i~Md<`8rVTTZ_=CLacbx(bPeOi%|n{v>T{hFJWx-9z7Fv!GTu>=tx$lk!%fS7>%B3C!lMMs< z$(yOyKzO-znN_z3HA3|#mGW-|@pm!VSN!~btS}mi7>0XnT-+X1Jei%@&-j#FyX(Cc z7DP-7XYYK71ePuvsRos%MlRR9|8l@GRB-)au<2rlo=2422bX3XoYY=yLIQ`C5S6*f z$<6QTdVBWFVP~E~EB?QQEMqh{m?vpyXpF8udh`fR9E37pw=i(E;~4^uKr7MJ)diae zx6Q>xmcG*I`nX@5A5U*jyULL=ZAtFnX%!=9!=D7G+^c0&^LC zFHa8-IO>nZ4Nu3;&Tg>OwSvd9*Sr7axx3-j21?KgJ(&aZ8)Av+)+8Li!@#QYA@Om& z{Xv6_F?iKS=eo=PiMd65FFBT+wr-|AYW1`CTN0N46YGkC_mE`LA*GO!ky%+;L7>{( z+soKY1S~1O)&9p&`y1)@QeTJ6{!%cM8-Ua~xwxjc*Jf(d)C&GzYow7NQT}!m;ZerovL&Ab#k&8gp(5;XvSt{dQ(-Id|kH2BVfpQY-?+4X^H&){(IT}8ZR!y zzj|N5ZrBj`?1|m4C4`(#O&y_?V#YyN?j!)hk%Ts=D4m87Iv)hqh}a0mmuwdc_=slv5>#0+xMubk4 z?XAb8rOiO4tj^&`!-XmP9{`%ac-7?J;^*gwk_e;6_q$(Y!QIWt+4-=nfv*s-OVEU} zSg~LNE%gqP-OgoO81Jmkec}4|PYxCI%N!Bm#=&>??AMBlVQ`kpWT2(xd|V5-9v&X_ z^Pt@X7DLG7mV(05d`%>h;{&C|>ql}&YP6In;zg#E&6YZE*K{dCSojh_x5Hw(u>>=_Q8xRUd)D#o12@1V-jNeNe&2=-(7!o zK})L#8uOut1=i!x>8(RE0ihxA;EOK;=HZu_TtY%ZEG(GjaDr|TylMc5c%FmuBJI5X zsDW6m0~)zQF%e&#JPXTIzH-_A9xO!0<;!rcG@N%z7fXJV0*hS=3WQx`c?1k*Rf>Tw zuC57^&d1YWEQ;Z`p>n0h*qntK3`H;ejKrUAp!gzzlQY@kI4~k0Yf-hG7Oy&@EW2`y z*nfqG|Fr+}{P%N*fjBWPE)A6b!T+nhua2s6TlHE$BL^XUm^EcP?DV{lqOc2%8YSfS!Tj zIt$cl5T)}P!>E1!{8^F2Lhjd@K7-I@I6C!&`6UG5V7%Z#ux{D^`5VBjV()!S1wM9- zxaSa^t*Aa;TldFNX-LR6c?ud>pq@3Sf*hvFa{>4gnC}sJUurP!8q|4yowdWk!4bsX z1HgGuw@$>XbMWKS+Atv_^M=s2$BxID-NM6vV733|#{Xo?006D!at$EyP+)d;9(6#s zAgOqaX|TXO9ePa^T)L$|>vj14kmfRwO29y4dS!)_Mq~-7|4ef_nbO~Q3>&+l4Fz(0 z73ATbR$yQ|6JVhssija3#hm+XvkglO8qnk=oHnbdtG6de^+*T>sBiHO0g3by7y&$K zNE7v?3M@TU`<=(|#CxF!4b-S+@3x8zD#4ZJQEIBeSj~^L1|PvY8x$UE4#K4iW8*@$ zC)O}@r!lVr331C%^pX4@E>CXDLwG+oo1lDJGMDf@B1?(F-6c*w@eu+$f-098+WPl5 zK@aeL{SfOHwc$5TIdQbeYuDONgpEEDyDVJk;^|z};{gne;%HP;+idsj*&CrdxRz_2 zgcTG(L|Akzyx386m4>9_;rUOKtZht0p_H~f)sH+|Ui3kiNxt@@-(fD`C#8hKOAM2f zpsA-cECiML=cX&A=eT0eG8zuI;pFnPxMwsXny@32xX1|xyzi-=KO^4YjZ>qnqkHc$ z@qsuUDVLpz;jylw&s0P0MxHC2m^5)&%`Lgl= z38$stP18kSfdmExooF^dt8oeFCgDnZYFZvQc?)SVmS<<#TS0)XJRVI&D8N;IXXN@Q zlb;&;6JF}SdsOn+`n(5LogE!_K#M;;3(_M9l9Q7sVm;QaO?C7K7z5?s8b$98@`kV{ zBHn(zb9^Mh{jw_$o;ckB(};At5u&0W3qNkoPn7bFI)acJxPmOlx({g~Z&D-syvouS zNmGy+Odm*s;|tOPn2Y!BUZBT`m-!oemfo{-KxcBB8&n@XW%!9KHv)vz>XxiJhVFB9W#f%*XgaZ`7YD#Dn|CNtRD_c~d*WeuUxM$!0w>r*uyd6XtFK@et*&@cn|( z8?h(R4+%3qfWhorPl-(#;Xa%KyYfqGU1(@%(=o=mikBz}U0`XHkD*nq@LA&^cdNf@ zQgScJA^9ik=r=Z7oAdkYKwB_ItMM$G?P~FWbq2`s_@_bb)@*cssC-v*Erlr5{_VYv zdzj#ez;U$dbl9pol|u^cD%r~*FH(v*L(`-JdqZ?`>L@)Rt#%q9Kw9P0$FIT$ZQjs3 zc!h2dE!4dF%w}X|Oz`v7DL2JysbUt^pKjMlm$?zr^aa4hdZzxLWzqAhS|#n*QPcBQRuh_2o4UxYiRd^&H$ebgRqu8 zeUklNdYY(R%{}blUjXL6`nde|P<`Utl-qrd4ytuf>a>o|q!`_S<3SpxX$js0xG6k! 
zJZ9!9)T_N-gH~nDi}Itqh}tJ3_#)0&d07IOjwiEPs|f$wGAPa0*1!j3y1@aNq550n zq`$)pm&|(x5cdV`v|pN@bEr{EtLM0pacakqOwwu715CyN)r!CUPJOryeA#o%g9-*T z!Cz=scya6;9mw}fhXGh-|8RyU_brogBk;%c8;<7q%sPgl#=TWrRUE|m0Lya{txa(B z@7|)nb8Q^zct+@AtzQEq^xcWJ&LL-5c?2vL%0cxG47`V5%oAu&e57itYlHBBj!ZMS z0_$Pm9Ndt`X@Nysf#E*iY4~KxRx5WRr%iw6= z;^{V{L`)j5`a#O>*Yj$GG_)s#`O+4)WGJ~%UgXWlu0NFkf#VBU&0&voj*71JOau0& z>H8TVFE)aa>ut^$|JfzaLHb>}i?XD@_2K(ni|VhZ%75(7{_g4j4>a%p=4|&56R5wL z+W%W<@xSGv{}s;qw{rdeT>77k8~=|^7|^+s{zd2RHHp0^HGDP>Q}nVe?+F9W>QO5p zxha{t61~bVHw5V4{x333c@hKp#ZWvxycVL|UlyDj!=CR^`{R}LW^FCMD|+ypXIb5b;Oj~4F2<% z{rZ^Ne?IopG8abocLPFBWfv?eO9Equ=HmKQ-u?!KK9M)br6}-2q%&`DFl5e@8e|5@ z!;kPcQ>6Mtwf~R&Ip?$NJ`QHbo5o3vPz#^`$cpTGQLKvFfK6Tc=cPm+rMzNbnj%Fq%cukwbs*xVSjL z+@*OhT*!y82LPR1fn0zFnHR^O{Er?+Z9)D5v{|K49Ub`)adW)WCaECC!GVMpl-7%z z0b0<7tM%Q#68g0R^EMHv_+W*$2E98461mfa0;WN_?oLuC zuYp?hKi-W^reVbDXf^rL#)qU!CN!hYVBOy+e+;Ao=)ps?Oc+!0aP#xMd-qn~>=Pvd ztT@Te)!fn&Ac>7}2;JO3xw}2j@J7!}K@3PxW#u;H0{BDg`gp(L7*5NY(BJKd866ud z&?=D#xz|LG+h2o+&_Z z5)GyW8wkk=P5&|ndx8KXi(Ryd1MhukwKh^U2znSf^h}*?0{`|?XHK7nu!>6%cq1+S z?u?NSGdPCS*bcoicI)T@To(|nRUmU*S5msDo)x^4FS|BVS_{rh^@7w$UF9jzFixbJ zOu{DFp$$+|OACv5tqkzB#Kgi17rGWj&1cz3z0R(5@$aup0&q{xApHtk#c>gA3%Jo2 z&^aZsSKVCoKR&B%2S69ZJ9Tw&Gcld2+b{?UDAs@dLYQmExM~%UU7&XDfx!yj5e%{P z0pL3JgxI3z&V@7sK!)Y`%4cP?n#ZsP0O@Biw;|+!Mt1h&(^N@+zxk%Hh={$_u;&f% z!u|evXM_2}PM{X>@_4z9H$SU^vaXr&+j9V=>;OMYxTxi2!wa}+i`o$!_wbhl2WNoi z25rK)!MiAcKb}5$azSvVDMD-)*$Vv)X{5<@>BEZ~tY>PGOfhltB{~CG@|24tDrP-G|HX z08i=Uil9|FH&VF@P9i;@KSRG<)U+8l>=-}_I4T1=6SM<}v5bt&tT*1SW z;Fq;7N=|JsM8+D~0~}pu?WBh+boUnI5-|-zegULW)%p74wN==SfGr`jgk@(kAT^H( zc4m@{w&p+M1OjOwS=ez3!g6G%0N&|pZ)f^oTmo104n9XiPOe#`FF43hxC^l&?bv|3 z!aGEc3BYFJC8HQ4P44r*8QGI!e5fWrfXFU9_qYFy%&?RFqwGa`VH|FkH27`qyi~BX zq^ghJ74u)`-|w>#y-WG$V*NuBk}4)vQwIYFvG=aG)`RdH99>FkG`x zMrL{377p|13pUKO`wx88BDwh5HTcZJsl5ANu^A+C^|dg`nLmEHbzJr7)2`9BQ=i(H zx7A*q(HEVPZ$6*4#XdQItgc_#e=<2AEYizyUb#4*8-n!4;$Tuu4?KZs0 zWplOOTPp@j%S9A1~-)>|CM6T?^G{#ZCgP3d)4)omeLiwe%s`NRO@xgbNz*eI=NBmP<`#}mQ(cf z!KY}j#UjI6Z$<7Phch}=AfP!rTBM4QF;hRAHFHcTX$jv2@%H^%m_l>RQxiCu^YilZ zUcOxGyq#uUuaRLpks#BF=NFZyFPYlcRV6F;~5znfBp@peWf3d!E?z zy8Sf9t~iJ&6sONi9;Km5gQX|A+*HHuyY@j#WXG%22t}=c>#E)vdmLeGD4i0+20vDH z#{}GV(TW&PC54iq)T&yFQI0IcL-T9 z;tLRT_StznJU+;Ce8|qjgYor}p;T^;7H4u+kc?2dcx%+y=00Oq!E)x+%oe2W7!`gj z9luL*j)gOFX6XC6%>C*toaPB;neGOzRP@7i*G z_hWeJtfKnz)QtH0`xe8KnKuTn(oH*lJ+JVo2eWXGu=bv5gf3(?9=F(MRCjQ;e{DbsH z4d=R2E8kws*D%Z6nMHRUr(>8|m@is;)n)kRMh2<2wl;*;Q7q3Te+ASzdo$`9BxNzb zZr91zkTZ9Pj*zReZScG9wYSHj2bomSQc_tsxB{ZhMlWCJm02e77?BpBw9eWr0s$sc z3o5E86+Nr=?%r+!hr0AMRN$BHU;N@5yf3x9wE7f?b(+Npn3$E5O`jRXwV%}Op{DlV zP~B%A4nbGN?G|~(f^xL_lyuS1>mWG%6)_eq*-&vkVW+Ue*xo{)%_ig8X|W#XVHb=b zQwQPlvwp{i;MXOm^G5k#eI+KeGWHU_t*Fbr;-@0+ll$_~wsy;?j+EDn859mkJvU5I zheK@Lz~i2oIW!g`=6h{TXnY-BKe!T0aCK{JLozcbq3LuZszF{}UZI%qmQj(uYp=2U zI~Ep`1C9G2$qBxZh1MOcdH2Zd^fS7L!I7*qq}2vSMvVJmVx43QWq`L%Prex(lB~Gf zG*$T}1)-KOs|OykU=w!XT+SdVRm@%Wdq@6`Zuz&W`8N-HW-vQR#-x)RHyu!QRQv7I zIBTi)Z=~48OwSb4WGwOTMjwL+R1|}?GKEXkk~=%u|!(F@zS^gv`~mBz&^#lS4uI1IUglk_4+v-Uk`mE)BYN1 zj2fQ6w2GL*y@t5q@9RmjgiAC%>>pwf_g4t+6Gm3>eyrao#tlf^h!OYx0bb>VVHemm z*fMt(uxJzR#~uyPe(QXy^Py9Py&;~fhO7xPM;nug=|!*&B19aOrVkI2Qj(Ju=p){e z-0$w(|aQWM9c^sHv&x!s3SWonrT+p`V+ZqZG!##m`oq6djRC zC5Wl*rIFaC7?I}I!|f}(ESK!(Wg;$Hc3lxF;e<;kVn!jq-U_oh_4r`-2SC!xS4pq} zTIb(*iK|b4NZ_p8sO-xsdVIq-!CF{5WT{h0xHhJh`RV*?l?z{9TS-pWYZq=Cl;taL zOGIGJ{%8QdjyK4&I{bI~DzcHs{IO*H>eh$1gvFO6E0Vw#JU72Ez8!Nwvc2BJK9_l8 z^4*7i`*{8ZnIF1$c-Xl$Do7p(?VQ0E0Aury8HIS|DmzNFnZ~`xoEyglLMcG{Vp2A`?pmTzUXsJb69&c5??z87T&eHQUq<%M-F#AgiD%_)qR;%QMc9$mLX@Nh@ls*HgjC;?6bETq%ah3lM34S;p_+B 
z%K8R6*6_Cir5s{f_)AaAnZ^2idy%q>>3-70?HSH=0{QnLhTFJ^1lbW}GI;$E$9Q0( z%Hln>VhcE!7`|5}`p>f=0m1ZOw#T-+)11Zk9eGknZ?hCoIa6`VqN%Mtnok0~RV`Svl?veX-6y z$}^9ligN3hlX&J>Wn3~+)UEgG0zm`lR|3h`SIp0`TeRC>j!f-mS;@;aN!a{BpJ%GJ zzqcDr&DYbKi}BI5MJXZ9C;14ntQc=-L#YkGzzGdqArUI;mV8iAQ32#lDiaHfA1fAH zM~$(cLfO?YnbG0d$bM_DLCBOTCGlWzlcN)&2?_|fEDdpTc#vyH(_CH2Mi=+~DAs+u zv|X7DIqA$_{cMW60l0k3seY2~lT%^wD^sL_g{L>;GMiLXR8Z$#c88tQ^Z{mNSLl z+DbC__QzQ$Q*T`;F`6!X{Qj+Yl>t}1lgYs$7?GMf5W>fK*Kcylf*F;Z$3cb8{qRfY z)_0>9x;`S4DN`m5X~_gHThRz^RWe-j07t2GGUO61KVO@glBV?evq92Ks5vPmYvl~A z<7TUjqLB(gKEAHFNUmXz072WG`m(aiDrc@7sDn@M`UTxmV>pzEy00vW*bNW9{7Ql4 zHCJk)@$H7mtxWD)(;*cQsj0J?ru_nkGELH7s&Zv41i{K6rgFSo*|$YR1gWs{UE5!T zloYF18(c07udHOZswHPo_sI066XFYWi~Zbh#k+#hFRm^gB?)@H-9AxWq|8R#{nB?t z%t=W5p&I(@TnkVkColTTO0f$mXax`v4MF6LA8xve-zA!NhnlHZUM=BnZb#EbyD**` zE*QWxy&GS<<#RKm8#yqZTTowr!@%Vnrsx8SRN#7RD>KbmckBm*0il{KG_GrRDBG4tmwD-AR{1;G^f>bIho7qj9y+M&{ETEO(tl*xM zPY>TwPXesDwz&95j}Qj>SK3@76;@(Jal@}gXCW9xsGDr78__?mauuzN{I=^E}GJJ@5uee(=I zK;(_8^?e>#{o;S`W`)Bfi(J6U;LC^Q3pbbU6b`?#Se$HHyF+8r&}b54@R-yvpDqWO{OWsTcm$AJnadCF=4Qz&vk z$0^{A(~la+cK*t)%2W$W`hnW#Txq`8^r#SXu*ICv?Q|?@S-tzhg#1w^LM!9a?KkMh zL7poX*=6rIDj7lP!4;g))VuU`d_( z^5sq%!NJozE^vGVBD(47#682em@qRlZ;PLHG@~vHEHkCR<=L!+tt%@lYn6m3A1_CR zpBaf)T7z+BS{efn8SioyHa1M>N9?hPo+?v0IDpa-MulYui-cWM%9sH6586b6>=4~1 zC-}XCmsC!*k<%=#zLg5HU>x6R6I)D9H9Jc70S5!#T_A*-#5%kNPuSR}5n-d;c$>Lj zj^Exczhf3X*4BB3YvVzOr0jDW^diM0tANl7?EAqtp{WH8jj76;byVKj2%q*#GDn2& zRi?H*&%$*Jp`)bWwdIY#s7Q1-L3D&GV8wdB*2Vu$uGbtjP*D>mp=*?f|a zJkFW231tq8tgG|?@`|#!?y{KlCf<62sF=6J_<((fsXZgE&?8a_oNV!}a3hvxl868a z3n#2*l7Pj289KgAzFnE72+1Um9b|b`5}o^&ShI`w-1ScUdN^Dwj7Eo2!Co<3#Q5%h zHc^CqeADcaa#Hp;*jpHLx+FU+W2@*yy@EH~7VF388&;JlCjn9H`{+qHAX(B*sivU5 zyI65!^Foc*tq|snqxPn<7844|J&wE7AEu3Ny3e)qb+-n9cJ@?D&rqZ1&@0#4-m};5 zCyT+hLRNB$JOGE7u|W^TJdUMl7;;DD5tl?91@y(B7SAIo6%_Q5*7 zy_ubz*=H#=BgT;ksqXW|*bRsMn5-!&+Vb`!HhqTsH<8=DadmiDDI{6W&(F;H>y?Ad z?^C{HFvazu`uo{%D(j>D0RbLWz!5>HS!|Td@(KuaE*lAcO3gG>{!oKRZ*VZ#-JsN1 zA~-DxG(`VCYvFoevhcq zc1ZLN_$+DW249oCcpcBePdU+;v!rF5@z8=wFmp^(W`q4#NzcRdT0O#e-PsvI#jOuX zV1s2YO@fQ0{__#9f7E^orlF>QHqzdzLB)%H-Zx_9Hh1*S%!yGCiwm;Z|7s3+DrLys zx?aV8$UAvVCG^0&Zv9CC+;cXf0B<>s_JL3)f?V1qMvMbRFh;zrPm86m`lqqHkwGI$ zJ^We#zdQxClF#f({&S7bKwNg=gjo zSZwfE&jQ{r^m}irywG@QD%Wy-p*O$GW`Jjg<>WF+vh zfk|HydH}-}+hk;u5;Wgo%hm{h2XTRf%nDvPrrVe1W?7Mfx6pH!^v0n_uXkJ4@AiXq zco5})>6H_p1jGd=UoRad-$5Bb6&wxeoPTmc1cJ}BSLF2%J+bk z9yfW`p=i$Mg(iVqZQLPA1_JD*xvxebxKgM+$k?#sg##>U1b zCIS4@k(8XX-@ct+(83yqKYe|a(V#p?PEE}&fKf4X7TyFMD<2X}A)g0R0h@jT6!j74 zzL4L&xw^CzMF`E~pmETImBF}x$VYKj90a(~ntsE`tNa{)6Q24>DD(BFqTB!_M1BGF zawy?hRI-S(C0uIGV<2mR0RDz;))0I-O)C_Qgs`|Q#qam%MLxFgW*%+1-xB!Ps5>@R$bl*u?z z_-;{n-)GG)DLDrG$21`*2!kk}V-ai!+%(JaP9~wk-G{a4c9xb`ATmm&`jT{v_!rF+ z8&78y6#~dWf2PP1-UX-rqzh-}dUBMNpFYdWYjXdTE}5Rc!Tb)y7>e6S!2YiKANfFp zX0io5?dE7}U5~~#*7u3%QNn%)!d%Bd zG}a(|Zdh9}?_F14PpN|^aS;Wt*&`~L+{k)k00sb_q_S%vCZz&anBqfr_+QBiK z3iMy#bx&&fWOtI4?nGcDfQ)Nr%?5)!ag82m;z5(t_j{Mn$$WiW_>eo`3NLS!-(gMr zS6=BUIfH0K5F8_~-05lV@9%GGyXHxMGEZpco{&9sraJaci+txi+cr>XXc`yWQN%e228MZw*aS$<)tO4fh@j`ZF!iqv(b0Y zUa<{J1AuoVSM_ZE5|sWqLQ9|JVZnHHia8+AnN`Gj+lGaLV@B_Z%>W++Aw}n?^mim8 z24H~PmWQFg5cujV6GUK1^}>D*UZMmZL)!gwJZFDZSj%?401!Di^?JuJi0|#Jo8_Pj zuRt8CaWLpWv^!4((_!>3W*j?ogSPl45vR7%qg+ zg1i+*ChMj5_T3r$pI7}M@NTroAyCTz1`sF3vxEV-!rH`(o$0X0Nw`}Fk8o0S$@tu zW|8wJ0D2qjhMHH#YAfwVw9Rk7@fmt)b`7o%X_I@DGNlzkB|X#V5rZ#sdV>2&9I|A-SFWMY^IHbXRNBK3froD>f!$W zE8N_6uswU17h)Z$_4CCDHlKZjko4Nbz0Em{tg5cA_x#>EJWT613RP;5_XsR~RG$d1 zgDn0;uA6!NF+Wxs_rs1Fmw7T05-^rHHPSje*xcGG|I&HAw+#$Gpu)iNO5p7LU1e93 z1tpo&3tJHxHdcGJ5(j+w*@^g2N2z`?>Yse;uJh#H4LrwQed 
z^gp5#oO=8atU|D6p_JBvHb!L^1C=aE3(~dF+sDT@f0jLkwDVS&v@82-lQP{O+uJ>W zT!2Y2eDR277k(Z1<+RU1#hd&LtKv?iO74O3l~RsiLCvFlsh8o+@I|t$A|f>XoZRCz zuBS09-e2{H@y)H{Y+r{d8yp#FFq{QDMYL%6jz~2DrKbvn&+9hl<@;X$8arGk`3%=| zDCM9{wOX5KRC2ri>!K>!IpIKdGC34tepiD2hn4C#A6xT(xVrCOG@NC73jQd{U6(DE HHV*tRNmCK$ literal 0 HcmV?d00001 diff --git a/examples/img/trippy_architecture_simple.png b/examples/img/trippy_architecture_simple.png new file mode 100644 index 0000000000000000000000000000000000000000..6f68cca0ed8ed3c0d940cb20ba066123a2a8d7c7 GIT binary patch literal 44284 zcmdqIWmKHo(k@D{1PG8ofM9_H3m$@d2X}%yBtUSdaZ85~g1cMe!2>jINpNo(cXx-z z?YvoQWuJZSx9|7o+&jh@hry4Rm+m=hR@L)V&6+iX-YQAsVv}K`prGK&%1Ef9prHLg zK|!T>fC{{+7JbWtg7OhXR^pAiyWvh6hWis~szBLKf=?`P0#u9XXhHf8fz=}gXUw^| zjFP&vSn&&cd!_e+F@u)bB*oFO_8vWOyts8E{L;O*>*S^%jy!J~#82fa}1J^y=E)HU$l^h!casIo;=%h=i(R`))oVa4MnU{)jCPlLzS+Gq5&Dv8m51= z63L97+DKQU!I50lHJK3ETI?e>f9U;84~Qu@C*@8j0yd40b)j8U)N4X%{}9w!(0XJdsB+BP(2r zF`J>Gp?wO98%Ad8>gr(RMQA9juMbR?7?qS5@6L7MIoPJ1Kax1bIEb!$^(nUA&e&$Q z$UG&?X#U1O^BDWWX3$QI)ypU?sc}6RDfse_f9B=49;s!e>y~4Zi##SP7@a#XU!~Nn z*bmSA8Fk2LNv1O};$C?wSuG|?etPX^doOx3zd%`xkiAAZyr6f4pYHTVz5j#dG_Yo1 zwcQ-2j_Z1%+S3hL^G@U$kF57!`6Ns8W;iQcQa<_P^usi#dYwCyQw)voGAMIzwe>d} ze#W&NI9|HQNc8o?V6ir~VtsuEDGA1wKm@0Eg7!V5S~yUWQSN1K56H zYisB>Mw!j9J~F-3GV%SBO;XZL8#{w)jcdY+q%C)Rhb zdQ1+cT6*!c&2JP|SGW3TZt$@Gf$7Ih53}6IVt98Vma@$qqo^}To=^V-dv@5Cqz&FG6-+EO)iat9fG z9;0U*6N$TxC?J8Y2X2WIPoJZAHH*FS=X^+!%YO$SPdNwqzS6t`B)+l2+xzP)y=)3>Y$q{nt`d?%v@AO+0cR-rt<`B zwDHxmFMosWqY$Q=D)W9MlhFx{n^{6ezCyZ<%&>WXz}61iE>}!0PSdn3zbDdSxI=N) zV0(XhA@M1lhPkbcTGlQ>R@!|GB-4;zQCyxdtXf(453}LCqN_T^D;T>~#wQ5_dDVOA zH#(?TH3vsB=jYN=$OO&D60~rv4z0mrx2Ke#E-}f;m&XyJ9`$v$MoUO!;~q0SSz>xN z%<0N~YeLM&|Hvy)?pfuF|Haz$@Sofwdpo7O-pid-zFgl=x>Lf?9`R(? z@(&-xBCQzyUBajbu)q|rw{IgyJVs^mbK3TS!I&-l?H9cXUSRL(2@Cm`;qs*z=ib!*=)->w582*YBTE6>B$NH`q7dz-|~S%?IQZ6s}kE zGhA6*5aLj7H&Us)>C&kMI-a@bEAT&(1_P3Yk@M0Lq^2i{I2-MJq+GvtO(lojG>Z0y zANx*oXD+JLEazgUMI}Y0JBl1#PjHw_!>n%f@{M>)=MN5kwvNQAlU?X_hF?Aa%Z1uH z+3bGS4pch+LbOTMCRiEcdmrPUDEm^S0EgdzkCTix8iG)5I9hesgFtmSS!uqK zW-DF-YCf3C?RJ_PQce<05Q7+Yf(0unD%#$?7qk_HI~l}xhC6NA6eWujPcC5potL<; z0stAqh5;PakkVWKu`1ek1Hp-KO>J^LJ{%5Z+0C%rWR&gRQ3*i$$*hc8<5S7&I^^W# zM83?=&81^7PZ)VJprAb1SL?c~Ta>4GvK>Gph*i!;rATG;ectsS6uP&sJh5<7)>?<4 zUDNWUrpF~0*M+7=9UwP%k}B)nZ-%ORGlacPg}l!lbUm%)Jaw(Uz}9gq)&UE;ZjY6C z)$Bd-OpA?;O^@xA0_c>nXqM!=z#Jj?ia>goN3o&*R4ko<6NL?T!ZAjFyhRUDOi> zo;-psKo1hhTi9lA^3Q&JdA5g%1vtU(5j3Oc;mpH-^0=}@8^|7^>hW-R2t8jLrNh-C zPX57xL&0NfTmeQ#MsDttiE@+M+uNM>prD|o63B~g-HH?si(7@@W}mXn9H_xMdyVZ? 
zRA5nMH7+bKK|q+W`7Y$h_ikl)PWt7)!?)Y+*CHXZcGWsd76#4J|`U=9fLSD!IR zPl*ScOCbE|I2pWnPi+>~mh8TP@};Kd`R12q-{Aa-azx@HR;z@@qQ*a4Eh6mFw(1*d z>+5QxKwhNVI|6UMxt96TIPbvyju8L_tETM zz#2#=vzY!#t{a^aLF1$x`zkQ$!TIevOLPH2Tp^+LzVvLwQ{4b$Uuf+4j~twIAeor) zk1^R{qXOS#85p$w9>u-hh3M|XpS1?RSLEwC(W7)LT@8&2w_{`5n_`BugpqNL@PWgn zv5w-0kT1lCc>_lw?dBq2(nc=!tKq@7t+b<)byw%-K z@+$=~BxUL{q;{^3ePhYJhWDN?u`CaXt&ex#=;ud3O-Eo_{}JhjaI z9i?GW5=Ql8E{E8wx2^7*e89wajK<5Ql?In-Uii7oC@3AjVn=}7+-1MiP1#d{a&2RA zX=SbO(5#%blh??Pl7b>NIXO1zf{W%xQ`ZS>e9Jx?7Gau@GQo3 zIy2;ja+PB6sJ^Ig-Y?hg@{_f2N1LTh(|6<+S4nz}a`+Ff2Ol7Fi+CsTwh>h+2uF|f z(%YCu%+P%FFI17?XdWrc=2`a@4JZ7rXKV%W0#0F-=xkY-SnH)t~QpW-H)xmVkrrrtRe3Q~HfNztp$o z`oKISoG)~Gm&bdpzl$D7guBTC$7uQU_LgPFjaq8#7t9ht4ASFQCgny$zSCS8V`nYY za9R$VKyK45^j|u=V}ub*=+_bbgWSc4<@~0{nVowHaGO&t>!(t4VTNr zrr@=AIY_68ASB2g?tLosA<7%?=v-O%>7%E<+j~Bn$g)qB90_fMJT8XFVsqD);8si}pIZm3p<3T*9$P9A&xys}vP9t;3*#2|=;bhz z@SyeW+_sTnLzz->4}#@Kjq53k)=fTIxI1;;+Otu;^Q7Oame@^-u3w!ul{FenSIss| z3U7MPu1xGO!E`5gG4g<&_h{ zc^>CyvsKja`8`(>*duk@7bTe8*eEzg5O`pzm|umZFjJ=CY|y}kX74LcKCO`Ufow6t zSq!e};SV`EIdh#k*wu*32t`M!&^nC<`+M(r_l4#KxCZA`-ywI1u4(%9n^gqJ8Yuu& zJNW~GiDZ5d90a>K+Hb7Q)l2s7Li7??4b~P*EwOG^16?BQ&fF;qqSn;I2Bva5Ch{y| z$kYU%*Ux2tyNq~2NtISsbGEQ zaXYMLEYsAKXF%Sl&=Q2B&2iKsbhO<_&^ah|?5TG&hzB^q4{C@RDUWS;>p260RJaxR zOQ(TVOn`*b>1NE}wOO~cZoP<%?f_VVLg?qbOfEN0n(l$|~=gu#6F$jAOMk@t@*oKYY=UmI!Q zI#VI=qUz7A(~l^eFOtFfb0&9(oFR9U&HUi9VBMrgD$qM&5*o`2@7J$g*E9VJwk|_c ze}<@BMr4rRtrUO_b!$Oa%DU9{Xo~!fY-T10>AuTr!JISjQ&Py-XMIGjCUu1PA!U`F4-KuAB88?mz` zR%XD>&V^T!4;sFXG2Rvlwwf^h)h5>bv+v|Nybce2iux!PghnI*(X?DjO^&bjS^rt; zUr~eFbM8^(B1cKG@t8ASwi7ESH#dgts4gt6K~DD=<`|$VuY7*f(cj+%cL_c~fS|{ye?iS$lgwo)<=KfiQU`r=A;}j!f$Mfes7cx+_n((UoClwJbA|kt+ z%~y`Xez(<-KxQl$j>mBp298Nqmf!iF7e22W8pae4N5@Hiop=ax_UG<}C_8*b`*yvibRV6rtAz*_Djjp1GTZ9DDV?P#3*X z6+g18;P{BVx^$+ti62aLHv!Pt*75OPNyC}=8KptJ$XCT$I-B`+X5O5D^urBD)H1e0 zQ=S8xJ~~Iowhn_q{-`gt+cgvF>e0-<3XVz6XI=_0(6Sjd5W%hvqWxSCYWPT;b#6yh zRn;zvEhLTYIb8Rc4q0CU1i2J5)99Rm8(Sv!LOrz5V)DxlQ&#I%rr+RUm1KCW>R}A} zC2@-Q8CgA;Pkq`u{ZN^pO~s8$$hg%*{0YHZ-TBWaes@)RZtb$jpVW{6MD?r1)%yyG z3|a~|7|GC+$iXn;8ZekmT;6PVQyHcCmVV=YyT7Z#Ns)(qot$Jcw* z%Y(izge)^X{9rkbTa9iRby&kk=p(Cd&;CG9TFjV$s&r{mnJ@+~ov%|1&C^XcY_x&5 z&iw>4g_Zr3Z;{bbcv=*G=M|pny?F6(xA@m7Rd$WZenWEn>d>$pt``z=8tA<*twRM% z;h~E2Im+%-cE9=V#bF6Sd|GH=)P2F^#a{;sxgsviqR4d?8*P^_oWV!=Vf~;+m3e-l zqD9fo(Z$WpkpZ%Y^tt(Laeue}Q`XQ_uEcnqXP0FkN+-K$@!myLdd@B6PR=4T|!s~KA~rKz5UdYX!H$wI!Ty5H^}O^UIG&l*Iub-B?cloiwGIqSA4-q$Q!vhoX`^@E7m}5n64j*pFeCSk}Cd7 zdfTJsP0Z?Xrh-o6-q`GYgeB}?7*oI?N3<26g2Yez-6p^H<3eN&hKyC158IoFF!mq;9*VdTu z8M~AWuB?e5d9!;n8Ul@)4=L4Y)jq5~v0(E!f-feq@GM$p$=zX!=d(r`6%8N^t+LQE z8-Dc=Q|XR#k1(UVx{ z&e!8=>L+M-VXQhaVsmXK1V6(~e5I_M3s-|$`Vb4&#G6ijdFLqR68dQ4rZe5>bNt=4 zZ6s*eZKYBbQ8nYA5snaGbY+kx>iRN_RoBz4Y-YA*oJm?U0N4F#t?>bOamWZNyZ?nr zh5^5XcB#7cF@~pFm*K@{7TTzz?oA&{pLh~t@$HYnK5`S~+mVeVPh>6bGtajKq55o2 zDXQ869|W^KZz+61Vqq)}w-eE*{S|<(y9Z0uPI^BaFC&e1;Cb99$4#lPeet>RN|K6K ziv*u{{YQH^Bgg6MGN1Guiz|jGmuDGO@?VQ%C~WhqZ0FPhZ<^G?reLm?i0^OwDYp7B zrVaycKn(38Y2_yoPQKP2>*T~Q;n%0vLzPJ|&l_PCf{t%ch@h<~ z_6nQ1P1rB^cY;u7x0Bu?4s9+v7Jyh}RyNaC#_wT?Paa*Au2YE0Y8VDJhLz0|RA-2l zvg<@h;qD_OV?}Ou8D>{ znsZM4M4+8tA9xAcj7>O@pqABrqL*|%?d0P#igL6l$n&RKP9`=jg5 zxB^*6T=;3M#1MwoM(-C858CH{N-L)gYeY7^{=!)zUvV$7U>K1^z> zxa2ACg)ibq1W}TgCpim-(Zjdom|08eVW=Tqm3#jAU4xNzpkbWq=ranQFeQeK*5O-e zE|n>6Sxv9QLpGT5MDzBe-Fp@hZ(Ls5RW6y8esaf@JR}viHUv@oJS>#;ZG4yM6Jn~X z0?pXZyT(0+x2)5~Y(Lr1bE7&aG2HAJOO)5TuvzsoKrwW7sx7d$2og=; zujd83InPy%1&O|ae-#P>TMy3+C^}PRwxx3qVZ;1`f%@nKmEd*I?$-dGokhlYCU~qN zjhRHpnmZRJWyiTTfj;|gy214>3k^+-5pR~vvndvASinVV(tpHm&7I4Q)%R_~2Au9QJnsC*h4zwk7g=o;ki85Mw%5ODFG@ 
za$j^Y_O4K>p)72wDwapVhfG|Jv>5`wHPvf-6I7fL8oJ8RZJ0df#_#%gNvT8ric?Hs z!1=pB!$85rcO^h(4Af-J^?iI~yUsmaMo>&N=Y&0f?&V9mjX-r%pOOFOQ%jTl;f@gm{bve9P4F+Th&X$10-#tl>J3f!|3k5qvpI>YO>1$0a@T`@(wofPx3d2hOTLb4Mw<* zZpbV}y_VC`PiQt{3(w&GA%TZ1gr0MO$|?R=%-qI9avoRPc7wRLlJ7wec`Vp88GRh7?=cG zt7!gpAE&&t+ww7nbbbM1NJmG0Y<~e`MT@r*MiSK7Zy-5c+<5py9vf_>OV%>`>C{~y z16RUcjvOOHAJJc3%br&0RC!GpOkxc1)N{8l#iS)AKd>r{}}PJPfIZ{UfWxf{M#XPPwyfeRMcDfFi4<>0{a z{dkJ+vjTq`@hD^K=~Bt|^b4@tP)~`{(Nj|u=qCJSIKl69mz|y`t6%kzg+TOSihUR2 z^Ai68uR%Y~gC~K`kW*4&L2ivsTdSqcyw=8RK4}VJ%J$x`ZsGTLMfw7nU6crb8Xh~% zvrI{droi_ou7lS((b46XI52_h7qIy@41^;)EBw0CLO?8}$yH{8?C{zuN-(x*S~HP2~E`#{N{yP!%e>ap4d*P$=tK z=UI`0!M}77f$;oAuC1*9=*Ruw?e&}hYr`WQ&g-urHfH;!zND%bUS&!Isnp)Ojtn#x z-BC1C-&gEJ=^%Lq9QJFxPI*XLVB7XVL_AmR0g9nHEvoG54wXQ3R zu~3!-gcmpV=`4m*`T9Sp8#qSrXZjPLJyjW(fcPHBbe@zHr((ZoW^c8C%@rw~h^vZEuK11EZZL}A#=#A?nF@~a=&*icfH(K?xVRy0f zwg;x&_AG9DqAiaa7){Iggk$$cAO<`*$79v42>8x^4m+G>v9*_MN#9p)!OO9{@pS~Yt zZFYSqpC}Lrr_1Kq7d9tY1re`$DViLdIFDxx-HanCULdHL|gqSMJ zWJuT~uypt##HQkA>+-YB75K*ksLW$s?H(SILEN3ciwKe)GEjG8m;gz98hOGM?wf`S z2VLj$cFS(%SQHegxG1mHFF@;!X7%>3QRZYiYk{#yFviWN9I5dz!Iha(ji#MIclq7P z(b;y7IrPBK&eunk2CBtARxpE|P0jg!QcX|j+Eito?3+Fn9QnJts zA9X1lWUP6b0g@_X8Ii06A!ZNTAe>?hQ49n2BbMyM7UddQ8!y;*mgEQy-QV#*@_HU& zDACcA00-I%BF-1PZwbyZv{b52o>-(=hZC?f!j~eh>KNdH_&2=_I_w)5%K@m0ORvFG z1clwL3^XS;EGZvUAtI!t%o&=(E5l0^0}rg2=S=b$fNx&Anx~NmJV@Iwk2bh=`c`d- zdZRE*2NaxU*I&A@?&pqr=3)}PSW?y&pt9_KT=w{D!y6yh*I9Yn>~QoJst&Ia1>bdl z_fLh#)wn32E6KYax>2BKqC>E)>h2q}Ihl1CZVd6X^#|R;-XmJcajMA{L-Diq!9(IQ z;fA_e@7v7TT(kk%AC#9OfWVCYuJl>b87m_Rj?qvr*2g(vs9)V|gDI0Xhi@f*$zyZbOTvN8>A*8FKDol1Xw?4NGwO6# zvSUyP8;#345;Ksl{ZT>{Ra-z;fUU6>$0g^4BIe5#y~b19@1acgiFzBb6|e+& zSKsjD+k3-O@&xhvv+ucZ>5Sp!lmlDV>VhE`wXAJhgPobe0{?;O`?LB$2~ohtU@h7q zy=OdcGEZ{aNO<~}C{(NJIWzXf-uGx9Ob;n({rJC*9TClu5;Az_RUMo1<008#D|l6k z+qumd73Vo$auI63U(;xdIA*vdhQOOlM^iZ~ru}?-;Fw3?s%gn5tN;gQ`d;=efd$(q zx!@m<^y))+Q=8IJ6^+|^)i8SOZTjgTJVd`)f&46`7UcAcz8i=#=LtkqMTZMRCL;QS z&zdEHbm;6|s|tZdd5hQunx=%i+fKCRE&XGQsQe&V1{xE~j8WfHm`=bJg4Qp&zk9i=#7cQy@dIZwD*U_mQ2IpN6-x_QfO7!4EBjlIJ^043t(x&xzD0df<*-Ouih-9IYKg}o zEyD4FD-h6^N4>2D z`^dPwR@_a!?Y{4CXDLxO&{0b!$+vbYc=_oxY?>K2wd7^}Hy*T^GyCnZE-#Ie3*h4e zVya7Ca}Gn~%uUn;4z{_*6G$u$C(3ddjn&jW2ItXzJ{W@XLB~ZqvzW@nS2fX4T6osi z-+Kmgl(=}-mv0SMN5t3J0>i!loY++8dZsbfLkJM@=`W7op5baC@4k_al0~R zFw6SUBtW=iq9{+P4Of3t%Fxk>OVpefTE_~94X-7nj^E!+Z(wRL$_}s|Elof}`MR%_ z9yQIRT<$x)6`m-#m+`FJe2TtJfX-T1a@x`>s9G_o+6z3uZCWK>qvWE;<#_7QZ+E$~ zwCdZ$|Kh$5cl18%*`**!lll3iQ;8!sg=i&F)3Hc)oIV?c+| zekZXAH7#y9jGkSW*`tt{QkTBZNKu!1Uut0$L*O|_(4EM%^*ItK#Cw=13cxB;oJz5A z-^A&};)s}$}JQS3u99U>sLPoqG&_xqa zwwC_uTI6}|eY;YA)5Cnuj*C)cIxMp{Hmn8LXy{@PtEFb=RPToBuK47`_*RCi*M6nl zsK~bFO{E=o^M-DbIQ}%17x&KQ)!~Lpe}_UjIZxRV^dkbT=E-IaYm3{rIOVT#%89DV zgGZ6VoeZ0`R(XJZE4R(rH{JrJphB!;v}=YBey@lH*m}IQG=``TlYSUZWq^TduS6h~ zs(|m;6NJmtUR#Kgg4}6J;_L&2yjOWJUJ>%=TIrOKMD3VTpnxoSL;}f1aVkE+8N!fu zPt=!f80g{5NPTnd4*r_l3>H?DG@~&TG-n56b5Ultlbs^A9yH-h23goMq5AB#BXuWjbZ!gRWetW=WD_EH>Y!VSL7F<5nl(aG{OW2OH{xBOu z*XHS3?k-MJb;!w^2HwCugYRZVzpKwrc1=GgF^Z9gpUY6P@A_MA=)sjfZmChQiQ;qk zgVaV3cM}2%wdVGyP@plgUZt_5Fg3O^p)rx40autrRNtFm^J}lscoWDTY0vO}f4k3$ z;fW!q8i^46{x0LkFOlisQyiudx4W4KasFE6^_}Rmw6!G0rwZKDmrRFO<+L*R~Zq4UXdrXpxvoR~`3Hw!if9Ej;mrTMr0TFs

#oQ^z#hUnGDEtU_%+NBvUpnH+2FaSpP1nJ9`5I)i^;^I> zhr=j$XVx*$d+^Z+Ns8+-4@S00tl)JMy~f$%>4vTOW%yGi&Ubd0z!Gnjnu=3Fu0uLSN1u3O%+!p1Jpabb( z_sx3yXiYR<&Z4JNOBM}4@gHs3QEaIK_h;jK6r2X5Frv347MQz49$MtlZuw2Ys+=ha zk2hF$`PYkyQdX{%*>^v(=~eCMGa6^B%IFmKMnAL_S3*&{;TUUIY)B%KQU0W zh00W+R2`!42hHQ>8-Np+Tcp`Vt_WrjHG4MrK;HY5+CmoS=)M6B-wU02X>qp5`u^&S zN;G>!2i@PSYpvKxq7T6p)U14*L#p}m3X$=wH{4Ijz4`1oZeO16*^LICD{?zo zO9QG;2vt6(*Q!E+iy0CXpjG(eScT*!9IZ!~9zrQW-tB~$Oj(uGx>h!aL+dJTZ*^5X zaK9zyrd#9rTcr%DAuQ50qe(O|Mp-jbd2=-w7IjuPLJ=L`P&HnsXl}BcX*{=6#Ps25 z*2;VtH06KZxiSVuRkl_FnqOcgoBD&RGBtSbB?%JpEG2cXGdQ#p4wR^)s0!M9TjyXhBr&x9#mlt(o;o7`_U zDpR6a9yecUY?;TjQWd`N+oMNWI9b%dMlws}|JAU}1ixTv@z*pzQ$+bVj{bn_cf;97 z!-K$h{8YYqI9|&=lz4P>vfu623mB%RrloTP9rzT1&Zov$DDJZ#LE^vfh-|D~2QUMz zFOwAY5%VWXNlad^fseZY%^APj4529>e>nJle@FW2eY&c-1TfA0f2OgcMHL3le^2JT zP*BRS$_hZDN-`_T3}Y>Uen4ke8PWal44@SHZfnWK6k%?{CM%7GVyI=P_U8@?&~frt z=NZbs-9d53K&1pqOI~BlpQs=F{!OGcO;p|N<2HnVhIwO=*q{G?P0ddDTA={EMpg!b zI%1H$d4vx9a@ew5|I<>6a%b=#M*q|7|KHtkLHYmky3GIPU>`R(xsAV!e>?R0t*ku>Qnn54eDD z(g8giw7|`PSHh1h06ocLqm#)1s(9x~4F?8}fAWAU61aHr0||mYMt{S?_{f6#9xCp~ zO$dnQ!6$81YB&fL@Ty@nh!ike4hRYX+zch}K57{C17lf07_9&8iNE0>BcF@=z0Gsm z7devMn{80eHtS1dq@o*47xv2Gyjct^HE1Z+EaNYGh+Y{!|l!Y=%20tm# ztzv5WUR9NwG~aUfF{0jW-?z?bRpk0W{6ZaAPnpM`^_UPqe3!#`lX>muvZW)Qi+}0D zNLD{@I-8q@Sq=vg-kt8v26o&Y$us;OxyEi*s&%@`TDnX+^r>vwc&R~WXQ_T&@D~=N zX0g{qz(W7!PcaW*4GmY>(uX!*^`W*lHsKWfJq(-hOf=k};NWmSdU%?^P5lXJ2r=u? z$*x{K*wWhC8ezdiNbZ#hf}k4toDgM*U!7(|Rvh< zA4bRm8_yFRHn`~Zfj?h&v(MGHAJ8}cOD+bGxx%9R_+w@!r#t*e&tIRhkiRt>{TdKM zMhKm(>n}tAVmPn$t%|Rp-psnq2gQ62lzt0Q$(HJ5u7Sj_%r#vBK4w4H$lv7Z=vbiE zsbq)=Odb>nLL>bRf(TOdsh>aJq?&GxSgOBk{StQQiNlqvS7V2P{e;h90U8%K+7*?( zf&s8qq&wg`_kN=;@eNeg1293)ZKKC=d`XFI05;JC>&HtRt^&?DBoav*79Q3_&HfP} zzBX|vH)LH7s8z@auU z{M7gN*?{mNXlFZ<;%GWv591r$_Jr*?HsB6HY&~^qaHPb z=IfjjZt-z&*j006fSVPozt{cKJI_INuV4kl}|20hYZ>pkRd-!MK|5J$V{~tzej0K%h zh@@#UN({brk+puL{nGF+V#f(4K_^>T$wf~d%dagOe=O|ZzkhP?Z=U}7m;wM=3j4&h zA=N+Hq4)_fIqc%ofBjI0t#6Z5-$68Yz7)`hV{SRxg8pZQ1QltAcRxYUev)M#QS}NB z;~9hs>fcjv>~f%L19wM{+iUyr<80EO)_`L_{vAUUyh2fkQP8KnIqZ(b=~5UvSSjiL zkz)86%|{HNJJTq1`uc76T9Bi>G}bUSmw^5m!Zn+%jPwLLr*3TFb4nqiAwszF0pWbk zKmy z7k>qMszqd;U=?wC#1h{toO-BI=pC1M`t%+P3Sb=LnYBPU9_3Iy0lBB;ZkkwQ5Wdy` z{FA(A;@gCp=0`7CQPrzRv(zt`{*&@w8SfVp6r(wQ!J*nx6#g>CP&}}rqKp2`ic_NC zNW*x_1pPa34|pxaF@=h0Qi#KD^X2=tIUPDcXAen4Mp8TJ6;(g9GsNv{W+5c&xx|i^~bb;l)MWA8F{^HvQW=+JmfROkuX&nau0VB+$92)=0+Q8sE z%7qE@9fc-W!7QiT!3iw!qTu-KM%rJ1fkukRIAs6joYW*3mj^@37e}jCwLV@K-(+L* z;opW#I(q)haRHAqQIC25Sg;)e5H~5OD@7BZ$K_5f7UAR%$~VuMHA~|M@4f$*Lu_sT zuqL1>d~%XUAifONJ~Rf8 zi(NzL?r6~!dFd+=W3StylN0H3n8h<+?QuL*n#bkP9VyV_r&1u^%WGR)@@-{Bq-x@G zYrT5e+v<0VpT??3g9G{pn!hx8#`=r~OJWnjN;M0*IlhuzjpFX7~BQLj(@P4w|);)aBdOR&8G&-V35Fj+Q zGE*Xmi2M15d4``#rCG@)r@8G6ELXGSO!Nl0GQcxSroZpP!dU~Te|r_aK| z;(HpYQr)I6w?gC|zH7z&K8-$kN*RuqHnhx~j)*6Preq@ULBFf;ceml@18MojE8DQd zbC9mK_!m5KeyhGH6Nd92LgatAC#xB+k)9(q?n3CX(@#J@boS_+!u6+48z98%6^V^b3cyFAlb0-WK1ui zOh2Z@;8~DkN`@z0(F0`o0{rr1TkG4~#RR36==0`_wG`w!;HT{E2DZa_tQt-=d%|Qq zR_Zl&O<1y%K+&ncz>>Su6eIvznR?Y@O4~whG>hw zKN{<>vdBfOA-hpsFy`J|Lx$O0g+`HRgKIX%8|-KK8oLB^a@RA52^qeZRJhmXH&PdY zgKGb;m;hsgIoDtN`Dg{$B%Z1~=cHt}wFFIy^gnM2BkJbDg876k6`QUN#d95;%hfn< z3vIN2+v7Io@jSW!-!>u`6niSW8Chq0@Xk+!55va7O7yCXAIkv_Ow>c1L*hN)!7mpA zYASpj^FNu}unCkQLatNn?&yXeu>gPddRa0gA)ZO2P`Apem{#XqrdWW;`GWsP?(GR} zrz$3mxep{MNuc2s)6Zx(*TB9AOo#!5Z~&8!H~0eRDLBph8I78MO?Dk4lLvS=KAZjk zweXdI{H|utiVu)Ul>&Er?S^U^dU|@ppS}VC+W!90W`2gxU26n#>6pFw`ZC3S@&`a< zT()DWp^ff`1lRN`$)CB>ng%NWJEev7NOs<893QA^O-Xb7Tn(C@ibSt3P$M-=oH^@y zJLk$SJnQbuo(wGxb~Z?cFtq%s62&9}`G!}BP%z?+^yDLq}-ywrZ)m&)gw1B>fQ@gusY 
[GIT binary patch literal data (base85-encoded binary payload) omitted]

Date: Sun, 11 Jul 2021 20:13:32 +0200
Subject: [PATCH 120/151] Formatting

---
 deeppavlov/models/go_bot/trippy.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/deeppavlov/models/go_bot/trippy.py
b/deeppavlov/models/go_bot/trippy.py index 22b2e39dd4..ea0ba78ca1 100644 --- a/deeppavlov/models/go_bot/trippy.py +++ b/deeppavlov/models/go_bot/trippy.py @@ -274,7 +274,6 @@ def __call__(self, policy_prediction, self.ds) - # Add system response to responses for possible next round self.batch_dialogues_utterances_responses_info[diag_id].insert( -1, {"text": response, "act": None}) From 09f836ff5b474a2995ac385f93277da1ac3a73ff Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Tue, 13 Jul 2021 19:55:31 +0200 Subject: [PATCH 121/151] Add trippy simple tutorial --- examples/trippy_tutorial.ipynb | 2324 ++++++++++++++++++++++++++++++++ 1 file changed, 2324 insertions(+) create mode 100644 examples/trippy_tutorial.ipynb diff --git a/examples/trippy_tutorial.ipynb b/examples/trippy_tutorial.ipynb new file mode 100644 index 0000000000..d6736d9976 --- /dev/null +++ b/examples/trippy_tutorial.ipynb @@ -0,0 +1,2324 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "TRIPPY_WEATHER", + "provenance": [], + "collapsed_sections": [ + "fbv3rMFngRlH" + ] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.4" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "CKZzWGSSOaK5" + }, + "source": [ + "### You can also run the notebook in [COLAB](https://colab.research.google.com/github/deepmipt/DeepPavlov/blob/master/examples/trippy_tutorial.ipynb)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-NIf_5W0gRkj" + }, + "source": [ + "# TripPy Goal Oriented Bot in DeepPavlov" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "N5PF6RSBOkm_" + }, + "source": [ + "This tutorial describes how to build a Goal-Oriented Bot (Gobot) in DeepPavlov using the [TripPy architecture](https://arxiv.org/pdf/2005.02877.pdf).\n", + "\n", + "This tutorial follows the same structure & uses the same data as the gobot_simple tutorial. We will only go over TripPy specific points here - so consult the gobot_simple notebook for general insights.\n", + "\n", + "0. [Data preparation](#0.-Data-Preparation)\n", + "1. [Train bot](#1.-Train-bot)\n", + "2. 
[Interact with bot](#2.-Interact-with-bot)" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "id": "JeSE4a-SgRjo", + "outputId": "554aea29-fc09-490c-9ebd-32c177f2989d" + }, + "source": [ + "!git clone -b rulebased_gobot_trippy https://github.com/DeepPavlov\n", + "%cd DeepPavlov\n", + "!pip install -r requirements.txt\n", + "!pip install transformers==2.9.1" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Cloning into 'DeepPavlov'...\n", + "remote: Enumerating objects: 58503, done.\u001b[K\n", + "remote: Counting objects: 100% (1446/1446), done.\u001b[K\n", + "remote: Compressing objects: 100% (518/518), done.\u001b[K\n", + "remote: Total 58503 (delta 1089), reused 1225 (delta 915), pack-reused 57057\u001b[K\n", + "Receiving objects: 100% (58503/58503), 37.54 MiB | 25.87 MiB/s, done.\n", + "Resolving deltas: 100% (44932/44932), done.\n", + "/content/DeepPavlov\n", + "Collecting aio-pika==6.4.1\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/c8/07/196a4115cbef31fa0c3dabdea146f02dffe5e49998341d20dbe2278953bc/aio_pika-6.4.1-py3-none-any.whl (40kB)\n", + "\u001b[K |████████████████████████████████| 51kB 4.7MB/s \n", + "\u001b[?25hCollecting Cython==0.29.14\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/d8/58/2deb24de3c10cc4c0f09639b46f4f4b50059f0fdc785128a57dd9fdce026/Cython-0.29.14-cp37-cp37m-manylinux1_x86_64.whl (2.1MB)\n", + "\u001b[K |████████████████████████████████| 2.1MB 7.2MB/s \n", + "\u001b[?25hCollecting fastapi==0.47.1\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/a3/a7/4804d7abf8a1544d079d50650af872387154ebdac5bd07d54b2e60e2b334/fastapi-0.47.1-py3-none-any.whl (43kB)\n", + "\u001b[K |████████████████████████████████| 51kB 6.3MB/s \n", + "\u001b[?25hRequirement already satisfied: filelock==3.0.12 in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 4)) (3.0.12)\n", + "Collecting h5py==2.10.0\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/3f/c0/abde58b837e066bca19a3f7332d9d0493521d7dd6b48248451a9e3fe2214/h5py-2.10.0-cp37-cp37m-manylinux1_x86_64.whl (2.9MB)\n", + "\u001b[K |████████████████████████████████| 2.9MB 29.0MB/s \n", + "\u001b[?25hCollecting nltk==3.4.5\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/f6/1d/d925cfb4f324ede997f6d47bea4d9babba51b49e87a767c170b77005889d/nltk-3.4.5.zip (1.5MB)\n", + "\u001b[K |████████████████████████████████| 1.5MB 38.6MB/s \n", + "\u001b[?25hCollecting numpy==1.18.0\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/20/53/127cb49435bcf5d841baf8eafa030931c62a9eac577a641f8c2293d23371/numpy-1.18.0-cp37-cp37m-manylinux1_x86_64.whl (20.1MB)\n", + "\u001b[K |████████████████████████████████| 20.1MB 1.4MB/s \n", + "\u001b[?25hCollecting overrides==2.7.0\n", + " Downloading https://files.pythonhosted.org/packages/ac/98/2430afd204c48ac0a529d439d7e22df8fa603c668d03456b5947cb59ec36/overrides-2.7.0.tar.gz\n", + "Collecting pandas==0.25.3\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/63/e0/a1b39cdcb2c391f087a1538bc8a6d62a82d0439693192aef541d7b123769/pandas-0.25.3-cp37-cp37m-manylinux1_x86_64.whl (10.4MB)\n", + "\u001b[K |████████████████████████████████| 10.4MB 27.6MB/s \n", + "\u001b[?25hCollecting prometheus-client==0.7.1\n", + " Downloading 
https://files.pythonhosted.org/packages/b3/23/41a5a24b502d35a4ad50a5bb7202a5e1d9a0364d0c12f56db3dbf7aca76d/prometheus_client-0.7.1.tar.gz\n", + "Collecting pytz==2019.1\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/3d/73/fe30c2daaaa0713420d0382b16fbb761409f532c56bdcc514bf7b6262bb6/pytz-2019.1-py2.py3-none-any.whl (510kB)\n", + "\u001b[K |████████████████████████████████| 512kB 33.9MB/s \n", + "\u001b[?25hCollecting pydantic==1.3\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/4b/56/1f652c3f658d2a9fd495d2e988a2da57eabdb6c4b8f4563c2ccbe6a2a8c5/pydantic-1.3-cp37-cp37m-manylinux2010_x86_64.whl (7.3MB)\n", + "\u001b[K |████████████████████████████████| 7.3MB 24.4MB/s \n", + "\u001b[?25hCollecting pymorphy2==0.8\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/a3/33/fff9675c68b5f6c63ec8c6e6ff57827dda28a1fa5b2c2d727dffff92dd47/pymorphy2-0.8-py2.py3-none-any.whl (46kB)\n", + "\u001b[K |████████████████████████████████| 51kB 6.1MB/s \n", + "\u001b[?25hCollecting pymorphy2-dicts-ru\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/3a/79/bea0021eeb7eeefde22ef9e96badf174068a2dd20264b9a378f2be1cdd9e/pymorphy2_dicts_ru-2.4.417127.4579844-py2.py3-none-any.whl (8.2MB)\n", + "\u001b[K |████████████████████████████████| 8.2MB 2.1MB/s \n", + "\u001b[?25hCollecting pyopenssl==19.1.0\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/9e/de/f8342b68fa9e981d348039954657bdf681b2ab93de27443be51865ffa310/pyOpenSSL-19.1.0-py2.py3-none-any.whl (53kB)\n", + "\u001b[K |████████████████████████████████| 61kB 7.7MB/s \n", + "\u001b[?25hCollecting pytelegrambotapi==3.6.7\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/62/ab/99c606f69fcda57e35788b913dd34c9d9acb48dd26349141b3855dcf6351/pyTelegramBotAPI-3.6.7.tar.gz (65kB)\n", + "\u001b[K |████████████████████████████████| 71kB 7.0MB/s \n", + "\u001b[?25hCollecting requests==2.22.0\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/51/bd/23c926cd341ea6b7dd0b2a00aba99ae0f828be89d72b2190f27c11d4b7fb/requests-2.22.0-py2.py3-none-any.whl (57kB)\n", + "\u001b[K |████████████████████████████████| 61kB 7.1MB/s \n", + "\u001b[?25hCollecting ruamel.yaml==0.15.100\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/71/fc/12de89822adaa3a60b8cb0139bae75918278999d08e6dff158623abd7cba/ruamel.yaml-0.15.100-cp37-cp37m-manylinux1_x86_64.whl (654kB)\n", + "\u001b[K |████████████████████████████████| 655kB 17.0MB/s \n", + "\u001b[?25hCollecting rusenttokenize==0.0.5\n", + " Downloading https://files.pythonhosted.org/packages/25/4c/a2f00be5def774a3df2e5387145f1cb54e324607ec4a7e23f573645946e7/rusenttokenize-0.0.5-py3-none-any.whl\n", + "Collecting scikit-learn==0.21.2\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/21/a4/a48bd4b0d15395362b561df7e7247de87291105eb736a3b2aaffebf437b9/scikit_learn-0.21.2-cp37-cp37m-manylinux1_x86_64.whl (6.7MB)\n", + "\u001b[K |████████████████████████████████| 6.7MB 7.1MB/s \n", + "\u001b[?25hRequirement already satisfied: scipy==1.4.1 in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 21)) (1.4.1)\n", + "Requirement already satisfied: tqdm==4.41.1 in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 22)) (4.41.1)\n", + "Requirement already satisfied: click==7.1.2 in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 23)) (7.1.2)\n", + "Collecting uvicorn==0.11.7\n", + "\u001b[?25l Downloading 
https://files.pythonhosted.org/packages/a9/5f/2bc87272f189662e129ddcd4807ad3ef83128b4df3a3482335f5f9790f24/uvicorn-0.11.7-py3-none-any.whl (43kB)\n", + "\u001b[K |████████████████████████████████| 51kB 6.5MB/s \n", + "\u001b[?25hCollecting sacremoses==0.0.35\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/1f/8e/ed5364a06a9ba720fddd9820155cc57300d28f5f43a6fd7b7e817177e642/sacremoses-0.0.35.tar.gz (859kB)\n", + "\u001b[K |████████████████████████████████| 860kB 30.9MB/s \n", + "\u001b[?25hCollecting uvloop==0.14.0\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/9b/7a/54a80c03b555af21680a2f3692947b43a0d576d90c4c18cace0fee1ccc0e/uvloop-0.14.0-cp37-cp37m-manylinux2010_x86_64.whl (3.8MB)\n", + "\u001b[K |████████████████████████████████| 3.8MB 30.0MB/s \n", + "\u001b[?25hCollecting aiormq<4,>=3.2.0\n", + " Downloading https://files.pythonhosted.org/packages/0b/c4/dc5b9d50c15af2ee187974a5a0c3f20c06cce6559eea4c065d372e846b6a/aiormq-3.3.1-py3-none-any.whl\n", + "Collecting yarl\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/f1/62/046834c5fc998c88ab2ef722f5d42122230a632212c8afa76418324f53ff/yarl-1.6.3-cp37-cp37m-manylinux2014_x86_64.whl (294kB)\n", + "\u001b[K |████████████████████████████████| 296kB 40.9MB/s \n", + "\u001b[?25hCollecting starlette<=0.12.9,>=0.12.9\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/67/95/2220fe5bf287e693a6430d8ee36c681b0157035b7249ec08f8fb36319d16/starlette-0.12.9.tar.gz (46kB)\n", + "\u001b[K |████████████████████████████████| 51kB 6.2MB/s \n", + "\u001b[?25hRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from h5py==2.10.0->-r requirements.txt (line 5)) (1.15.0)\n", + "Requirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.7/dist-packages (from pandas==0.25.3->-r requirements.txt (line 9)) (2.8.1)\n", + "Requirement already satisfied: docopt>=0.6 in /usr/local/lib/python3.7/dist-packages (from pymorphy2==0.8->-r requirements.txt (line 13)) (0.6.2)\n", + "Collecting dawg-python>=0.7\n", + " Downloading https://files.pythonhosted.org/packages/6a/84/ff1ce2071d4c650ec85745766c0047ccc3b5036f1d03559fd46bb38b5eeb/DAWG_Python-0.7.2-py2.py3-none-any.whl\n", + "Collecting pymorphy2-dicts<3.0,>=2.4\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/02/51/2465fd4f72328ab50877b54777764d928da8cb15b74e2680fc1bd8cb3173/pymorphy2_dicts-2.4.393442.3710985-py2.py3-none-any.whl (7.1MB)\n", + "\u001b[K |████████████████████████████████| 7.1MB 28.6MB/s \n", + "\u001b[?25hCollecting cryptography>=2.8\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/b2/26/7af637e6a7e87258b963f1731c5982fb31cd507f0d90d91836e446955d02/cryptography-3.4.7-cp36-abi3-manylinux2014_x86_64.whl (3.2MB)\n", + "\u001b[K |████████████████████████████████| 3.2MB 30.3MB/s \n", + "\u001b[?25hRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests==2.22.0->-r requirements.txt (line 17)) (1.24.3)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests==2.22.0->-r requirements.txt (line 17)) (2021.5.30)\n", + "Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests==2.22.0->-r requirements.txt (line 17)) (3.0.4)\n", + "Collecting idna<2.9,>=2.5\n", + "\u001b[?25l Downloading 
https://files.pythonhosted.org/packages/14/2c/cd551d81dbe15200be1cf41cd03869a46fe7226e7450af7a6545bfc474c9/idna-2.8-py2.py3-none-any.whl (58kB)\n", + "\u001b[K |████████████████████████████████| 61kB 7.2MB/s \n", + "\u001b[?25hRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn==0.21.2->-r requirements.txt (line 20)) (1.0.1)\n", + "Collecting websockets==8.*\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/5a/0b/3ebc752392a368af14dd24ee041683416ac6d2463eead94b311b11e41c82/websockets-8.1-cp37-cp37m-manylinux2010_x86_64.whl (79kB)\n", + "\u001b[K |████████████████████████████████| 81kB 9.0MB/s \n", + "\u001b[?25hCollecting httptools==0.1.*; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/25/2e/485131e3aa113929b425f83854fafc190aa7df716cbeb258c875752f0c6e/httptools-0.1.2-cp37-cp37m-manylinux1_x86_64.whl (219kB)\n", + "\u001b[K |████████████████████████████████| 225kB 40.0MB/s \n", + "\u001b[?25hCollecting h11<0.10,>=0.8\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/5a/fd/3dad730b0f95e78aeeb742f96fa7bbecbdd56a58e405d3da440d5bfb90c6/h11-0.9.0-py2.py3-none-any.whl (53kB)\n", + "\u001b[K |████████████████████████████████| 61kB 822kB/s \n", + "\u001b[?25hCollecting pamqp==2.3.0\n", + " Downloading https://files.pythonhosted.org/packages/eb/56/afa06143361e640c9159d828dadc95fc9195c52c95b4a97d136617b0166d/pamqp-2.3.0-py2.py3-none-any.whl\n", + "Requirement already satisfied: typing-extensions>=3.7.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from yarl->aio-pika==6.4.1->-r requirements.txt (line 1)) (3.7.4.3)\n", + "Collecting multidict>=4.0\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/7c/a6/4123b8165acbe773d1a8dc8e3f0d1edea16d29f7de018eda769abb56bd30/multidict-5.1.0-cp37-cp37m-manylinux2014_x86_64.whl (142kB)\n", + "\u001b[K |████████████████████████████████| 143kB 40.4MB/s \n", + "\u001b[?25hRequirement already satisfied: cffi>=1.12 in /usr/local/lib/python3.7/dist-packages (from cryptography>=2.8->pyopenssl==19.1.0->-r requirements.txt (line 15)) (1.14.5)\n", + "Requirement already satisfied: pycparser in /usr/local/lib/python3.7/dist-packages (from cffi>=1.12->cryptography>=2.8->pyopenssl==19.1.0->-r requirements.txt (line 15)) (2.20)\n", + "Building wheels for collected packages: nltk, overrides, prometheus-client, pytelegrambotapi, sacremoses, starlette\n", + " Building wheel for nltk (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for nltk: filename=nltk-3.4.5-cp37-none-any.whl size=1449922 sha256=aa28bfc2e2fd4d22443e477510e9de3e01f2f42bd901754e13e0db0e2db0b28e\n", + " Stored in directory: /root/.cache/pip/wheels/96/86/f6/68ab24c23f207c0077381a5e3904b2815136b879538a24b483\n", + " Building wheel for overrides (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for overrides: filename=overrides-2.7.0-cp37-none-any.whl size=5606 sha256=ad5482f6d5f5bf8b28c12a10638b5b56cc3fef0f5024c40defe26737141f376c\n", + " Stored in directory: /root/.cache/pip/wheels/8c/7c/ef/80508418b67d87371c5b3de49e03eb22ee7c1d19affb5099f8\n", + " Building wheel for prometheus-client (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + " Created wheel for prometheus-client: filename=prometheus_client-0.7.1-cp37-none-any.whl size=41404 sha256=f8a111199601d76cdd13b709abc6de63e226a22ffbdd472fbf355e5430916eb7\n", + " Stored in directory: /root/.cache/pip/wheels/1c/54/34/fd47cd9b308826cc4292b54449c1899a30251ef3b506bc91ea\n", + " Building wheel for pytelegrambotapi (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for pytelegrambotapi: filename=pyTelegramBotAPI-3.6.7-cp37-none-any.whl size=47177 sha256=a4c58b4f0d6722aff243ddc2659c586ad751c51a71d03aad75135cea4abe1b41\n", + " Stored in directory: /root/.cache/pip/wheels/23/40/18/8a34153f95ef0dc19e3954898e5a5079244b76a8afdd7d0ec5\n", + " Building wheel for sacremoses (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for sacremoses: filename=sacremoses-0.0.35-cp37-none-any.whl size=883990 sha256=5bdf55589bf108569395722458194ff39c3d24c17d6f379e2815c33f236e457f\n", + " Stored in directory: /root/.cache/pip/wheels/63/2a/db/63e2909042c634ef551d0d9ac825b2b0b32dede4a6d87ddc94\n", + " Building wheel for starlette (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for starlette: filename=starlette-0.12.9-cp37-none-any.whl size=57254 sha256=8a339f4027c3de180f6cf2db027fc3ed99181164cb73aef90b285057279178f1\n", + " Stored in directory: /root/.cache/pip/wheels/1c/51/5b/3828d52e185cafad941c4291b6f70894d0794be28c70addae5\n", + "Successfully built nltk overrides prometheus-client pytelegrambotapi sacremoses starlette\n", + "\u001b[31mERROR: xarray 0.18.2 has requirement pandas>=1.0, but you'll have pandas 0.25.3 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: tensorflow 2.5.0 has requirement h5py~=3.1.0, but you'll have h5py 2.10.0 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: tensorflow 2.5.0 has requirement numpy~=1.19.2, but you'll have numpy 1.18.0 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: kapre 0.3.5 has requirement numpy>=1.18.5, but you'll have numpy 1.18.0 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: google-colab 1.0.0 has requirement pandas~=1.1.0; python_version >= \"3.0\", but you'll have pandas 0.25.3 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: google-colab 1.0.0 has requirement requests~=2.23.0, but you'll have requests 2.22.0 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: fbprophet 0.7.1 has requirement pandas>=1.0.4, but you'll have pandas 0.25.3 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is incompatible.\u001b[0m\n", + "Installing collected packages: idna, multidict, yarl, pamqp, aiormq, aio-pika, Cython, pydantic, starlette, fastapi, numpy, h5py, nltk, overrides, pytz, pandas, prometheus-client, dawg-python, pymorphy2-dicts, pymorphy2, pymorphy2-dicts-ru, cryptography, pyopenssl, requests, pytelegrambotapi, ruamel.yaml, rusenttokenize, scikit-learn, websockets, uvloop, httptools, h11, uvicorn, sacremoses\n", + " Found existing installation: idna 2.10\n", + " Uninstalling idna-2.10:\n", + " Successfully uninstalled idna-2.10\n", + " Found existing installation: Cython 0.29.23\n", + " Uninstalling Cython-0.29.23:\n", + " Successfully uninstalled Cython-0.29.23\n", + " Found existing installation: numpy 1.19.5\n", + " Uninstalling numpy-1.19.5:\n", + " Successfully uninstalled numpy-1.19.5\n", + " Found 
existing installation: h5py 3.1.0\n", + " Uninstalling h5py-3.1.0:\n", + " Successfully uninstalled h5py-3.1.0\n", + " Found existing installation: nltk 3.2.5\n", + " Uninstalling nltk-3.2.5:\n", + " Successfully uninstalled nltk-3.2.5\n", + " Found existing installation: pytz 2018.9\n", + " Uninstalling pytz-2018.9:\n", + " Successfully uninstalled pytz-2018.9\n", + " Found existing installation: pandas 1.1.5\n", + " Uninstalling pandas-1.1.5:\n", + " Successfully uninstalled pandas-1.1.5\n", + " Found existing installation: prometheus-client 0.11.0\n", + " Uninstalling prometheus-client-0.11.0:\n", + " Successfully uninstalled prometheus-client-0.11.0\n", + " Found existing installation: requests 2.23.0\n", + " Uninstalling requests-2.23.0:\n", + " Successfully uninstalled requests-2.23.0\n", + " Found existing installation: scikit-learn 0.22.2.post1\n", + " Uninstalling scikit-learn-0.22.2.post1:\n", + " Successfully uninstalled scikit-learn-0.22.2.post1\n", + "Successfully installed Cython-0.29.14 aio-pika-6.4.1 aiormq-3.3.1 cryptography-3.4.7 dawg-python-0.7.2 fastapi-0.47.1 h11-0.9.0 h5py-2.10.0 httptools-0.1.2 idna-2.8 multidict-5.1.0 nltk-3.4.5 numpy-1.18.0 overrides-2.7.0 pamqp-2.3.0 pandas-0.25.3 prometheus-client-0.7.1 pydantic-1.3 pymorphy2-0.8 pymorphy2-dicts-2.4.393442.3710985 pymorphy2-dicts-ru-2.4.417127.4579844 pyopenssl-19.1.0 pytelegrambotapi-3.6.7 pytz-2019.1 requests-2.22.0 ruamel.yaml-0.15.100 rusenttokenize-0.0.5 sacremoses-0.0.35 scikit-learn-0.21.2 starlette-0.12.9 uvicorn-0.11.7 uvloop-0.14.0 websockets-8.1 yarl-1.6.3\n" + ], + "name": "stdout" + }, + { + "output_type": "display_data", + "data": { + "application/vnd.colab-display-data+json": { + "pip_warning": { + "packages": [ + "numpy", + "pandas", + "pytz" + ] + } + } + }, + "metadata": { + "tags": [] + } + }, + { + "output_type": "stream", + "text": [ + "Collecting transformers==2.9.1\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/22/97/7db72a0beef1825f82188a4b923e62a146271ac2ced7928baa4d47ef2467/transformers-2.9.1-py3-none-any.whl (641kB)\n", + "\u001b[K |████████████████████████████████| 645kB 5.1MB/s \n", + "\u001b[?25hRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (2019.12.20)\n", + "Collecting sentencepiece\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/ac/aa/1437691b0c7c83086ebb79ce2da16e00bef024f24fec2a5161c35476f499/sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2MB)\n", + "\u001b[K |████████████████████████████████| 1.2MB 10.8MB/s \n", + "\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (3.0.12)\n", + "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (4.41.1)\n", + "Collecting tokenizers==0.7.0\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/ea/59/bb06dd5ca53547d523422d32735585493e0103c992a52a97ba3aa3be33bf/tokenizers-0.7.0-cp37-cp37m-manylinux1_x86_64.whl (5.6MB)\n", + "\u001b[K |████████████████████████████████| 5.6MB 22.4MB/s \n", + "\u001b[?25hRequirement already satisfied: sacremoses in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (0.0.35)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (1.18.0)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from 
transformers==2.9.1) (2.22.0)\n", + "Requirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers==2.9.1) (7.1.2)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers==2.9.1) (1.15.0)\n", + "Requirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers==2.9.1) (1.0.1)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==2.9.1) (2021.5.30)\n", + "Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==2.9.1) (2.8)\n", + "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==2.9.1) (1.24.3)\n", + "Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==2.9.1) (3.0.4)\n", + "Installing collected packages: sentencepiece, tokenizers, transformers\n", + "Successfully installed sentencepiece-0.1.96 tokenizers-0.7.0 transformers-2.9.1\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fbv3rMFngRlH" + }, + "source": [ + "## 0. Data Preparation" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hTpb4EHbgRla" + }, + "source": [ + "The data preparation follows the exact same structure as the gobot_simple tutorial. Feel free to take a look there for specific insights." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "B5oak1V5gRlq" + }, + "source": [ + "from deeppavlov.dataset_readers.dstc2_reader import SimpleDSTC2DatasetReader\n", + "\n", + "\n", + "class AssistantDatasetReader(SimpleDSTC2DatasetReader):\n", + " \n", + " url = \"http://files.deeppavlov.ai/datasets/tutor_assistant_data.tar.gz\"\n", + " \n", + " @staticmethod\n", + " def _data_fname(datatype):\n", + " assert datatype in ('val', 'trn', 'tst'), \"wrong datatype name\"\n", + " return f\"assistant-{datatype}.json\"" + ], + "execution_count": 2, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "I-GPAAWjgRmj", + "outputId": "8db4c1b9-2039-4081-ec9d-758e461f980b" + }, + "source": [ + "data = AssistantDatasetReader().read('assistant_data')" + ], + "execution_count": 3, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2021-07-13 17:44:39.271 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 283: [PosixPath('assistant_data/assistant-val.json'), PosixPath('assistant_data/assistant-tst.json')]]\n", + "2021-07-13 17:44:39.272 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 284: [downloading data from http://files.deeppavlov.ai/datasets/tutor_assistant_data.tar.gz to assistant_data]\n", + "2021-07-13 17:44:39.275 INFO in 'deeppavlov.core.data.utils'['utils'] at line 95: Downloading from http://files.deeppavlov.ai/datasets/tutor_assistant_data.tar.gz to assistant_data/tutor_assistant_data.tar.gz\n", + "100%|██████████| 838/838 [00:00<00:00, 139kB/s]\n", + "2021-07-13 17:44:40.476 INFO in 'deeppavlov.core.data.utils'['utils'] at line 272: Extracting assistant_data/tutor_assistant_data.tar.gz archive into assistant_data\n", + "2021-07-13 17:44:40.483 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 304: [loading dialogs from assistant_data/assistant-trn.json]\n", + 
"2021-07-13 17:44:40.485 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 304: [loading dialogs from assistant_data/assistant-val.json]\n", + "2021-07-13 17:44:40.488 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 304: [loading dialogs from assistant_data/assistant-tst.json]\n", + "2021-07-13 17:44:40.490 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 296: There are 24 samples in train split.\n", + "2021-07-13 17:44:40.492 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 297: There are 3 samples in valid split.\n", + "2021-07-13 17:44:40.495 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 298: There are 3 samples in test split.\n" + ], + "name": "stderr" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "9NYptoABgRol", + "scrolled": true + }, + "source": [ + "from deeppavlov.dataset_iterators.dialog_iterator import DialogDatasetIterator\n", + "\n", + "iterator = DialogDatasetIterator(data)" + ], + "execution_count": 4, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PbSQDMHfgRqo" + }, + "source": [ + "## 1. Train bot" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mVLPJajXQmqw" + }, + "source": [ + "The TripPy architecture without slots as implemented in DeepPavlov is illustrated in the below sketch:\n", + "\n", + " \n", + "![trippy_architecture_simple.png](img/trippy_architecture_simple.png)\n", + " \n", + "\n", + "User-uttarance + Dialogue History (if existing) --> Tokenize and otherwise prepare for TripPy --> Feed through the TripPy BERT model --> A Linear Head with a Softmax predicts the action --> NLG generates a sentence based on the predicted action\n", + "\n", + "\n", + "The large empty areas are fields that are only used when slot values are predicted. Refer to the trippy_extended tutorial to learn more about those.\n", + "\n", + "\n", + "Note that this architecture is very different from the original architecture you may find in their paper as the authors implement no action prediction. 
\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "X59MSkmBgRt3" + }, + "source": [ + "from deeppavlov import configs\n", + "from deeppavlov.core.common.file import read_json\n", + "\n", + "# We use the TripPy DSTC2 minimal config\n", + "gobot_config = read_json(configs.go_bot.trippy_dstc2_minimal)\n", + "\n", + "gobot_config['chainer']['pipe'][-1]['nlg_manager']['template_path'] = 'assistant_data/assistant-templates.txt'" + ], + "execution_count": 5, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "yAACg0IAgRuq" + }, + "source": [ + "gobot_config['chainer']['pipe'][-1]['nlg_manager']['template_path'] = 'assistant_data/assistant-templates.txt'\n", + "gobot_config['chainer']['pipe'][-1]['nlg_manager']['api_call_action'] = None" + ], + "execution_count": 6, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JV27LFatgRwE" + }, + "source": [ + "Specify the train/valid/test data path and the path where the final bot model will be saved:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "pqhscSbhgRwK" + }, + "source": [ + "gobot_config['dataset_reader']['class_name'] = '__main__:AssistantDatasetReader'\n", + "gobot_config['metadata']['variables']['DATA_PATH'] = 'assistant_data'\n", + "gobot_config['metadata']['variables']['MODEL_PATH'] = 'assistant_bot'" + ], + "execution_count": 7, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "rHkNw-9uUJde" + }, + "source": [ + "# Set TripPy hyperparameters\n", + "\n", + "gobot_config['train']['batch_size'] = 16 # set batch size\n", + "gobot_config['train']['max_batches'] = 64 # maximum number of training batches\n", + "gobot_config['train']['val_every_n_batches'] = 10 # evaluate on the 'valid' split every 10 batches\n", + "gobot_config['train']['log_every_n_batches'] = 10 # log metrics on the 'train' split every 10 batches\n", + "gobot_config['train']['validation_patience'] = 10 # stop training after 10 validations in a row without improvement\n", + "\n", + "gobot_config['chainer']['pipe'][-1]['slot_names'] = [] # no slot names for this dataset\n", + "\n", + "gobot_config['chainer']['pipe'][-1][\"optimizer_parameters\"] = {\"lr\": 1e-4, \"eps\": 1e-6}" + ], + "execution_count": 67, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "NWvhumcWOywQ", + "outputId": "961951ef-9373-4714-b4a0-100a8825191d" + }, + "source": [ + "from deeppavlov import train_model\n", + "\n", + "# Training should take ~70 seconds & reach 100% validation accuracy\n", + "train_model(gobot_config)" + ], + "execution_count": 68, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2021-07-13 17:52:17.608 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 304: [loading dialogs from /content/DeepPavlov/assistant_data/assistant-trn.json]\n", + "2021-07-13 17:52:17.612 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 304: [loading dialogs from /content/DeepPavlov/assistant_data/assistant-val.json]\n", + "2021-07-13 17:52:17.618 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 304: [loading dialogs from /content/DeepPavlov/assistant_data/assistant-tst.json]\n", + "2021-07-13 17:52:17.620 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 296: There are 24 samples in train split.\n", + "2021-07-13 17:52:17.622 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 297: There are 3 samples in valid split.\n", + 
"2021-07-13 17:52:17.624 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 298: There are 3 samples in test split.\n", + "/usr/local/lib/python3.7/dist-packages/torch/nn/init.py:388: UserWarning: Initializing zero-element tensors is a no-op\n", + " warnings.warn(\"Initializing zero-element tensors is a no-op\")\n", + "2021-07-13 17:52:21.688 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 152: Load path /content/DeepPavlov/assistant_bot/model is given.\n", + "2021-07-13 17:52:21.690 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 169: Init from scratch. Load path /content/DeepPavlov/assistant_bot/model.pth.tar does not exist.\n", + "2021-07-13 17:52:21.697 INFO in 'deeppavlov.core.models.torch_model'['torch_model'] at line 98: Model was successfully initialized! Model summary:\n", + " BertForDST(\n", + " (bert): BertModel(\n", + " (embeddings): BertEmbeddings(\n", + " (word_embeddings): Embedding(30522, 768, padding_idx=0)\n", + " (position_embeddings): Embedding(512, 768)\n", + " (token_type_embeddings): Embedding(2, 768)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (encoder): BertEncoder(\n", + " (layer): ModuleList(\n", + " (0): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (1): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (2): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " 
(dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (3): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (4): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (5): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): 
Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (6): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (7): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (8): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (9): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): 
Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (10): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (11): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (pooler): BertPooler(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (activation): Tanh()\n", + " )\n", + " )\n", + " (dropout): Dropout(p=0.3, inplace=False)\n", + " (dropout_heads): Dropout(p=0.0, inplace=False)\n", + " (inform_projection): Linear(in_features=0, out_features=0, bias=True)\n", + " (ds_projection): Linear(in_features=0, out_features=0, bias=True)\n", + " (action_prediction): Linear(in_features=768, out_features=5, bias=True)\n", + " (action_softmax): Softmax(dim=1)\n", + ")\n", + "2021-07-13 17:52:26.441 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 202: First best per_item_dialog_accuracy of 1.0\n", + "2021-07-13 17:52:26.443 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 204: Saving model\n", + "2021-07-13 17:52:26.449 INFO in 'deeppavlov.core.models.torch_model'['torch_model'] at line 191: Saving model to /content/DeepPavlov/assistant_bot/model.pth.tar.\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"train\": {\"eval_examples_count\": 8, \"metrics\": {\"per_item_dialog_accuracy\": 1.0}, \"time_spent\": 
\"0:00:05\", \"epochs_done\": 9, \"batches_seen\": 10, \"train_examples_seen\": 80, \"total_loss\": 14.516639709472656, \"action_loss\": 14.516639709472656}}\n", + "{\"valid\": {\"eval_examples_count\": 1, \"metrics\": {\"per_item_dialog_accuracy\": 1.0}, \"time_spent\": \"0:00:05\", \"epochs_done\": 9, \"batches_seen\": 10, \"train_examples_seen\": 80, \"impatience\": 0, \"patience_limit\": 10}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-13 17:52:35.740 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 1.0\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"train\": {\"eval_examples_count\": 8, \"metrics\": {\"per_item_dialog_accuracy\": 1.0}, \"time_spent\": \"0:00:15\", \"epochs_done\": 19, \"batches_seen\": 20, \"train_examples_seen\": 160, \"total_loss\": 2.900639772415161, \"action_loss\": 2.900639772415161}}\n", + "{\"valid\": {\"eval_examples_count\": 1, \"metrics\": {\"per_item_dialog_accuracy\": 1.0}, \"time_spent\": \"0:00:15\", \"epochs_done\": 19, \"batches_seen\": 20, \"train_examples_seen\": 160, \"impatience\": 1, \"patience_limit\": 10}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-13 17:52:40.512 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 1.0\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"train\": {\"eval_examples_count\": 8, \"metrics\": {\"per_item_dialog_accuracy\": 1.0}, \"time_spent\": \"0:00:19\", \"epochs_done\": 29, \"batches_seen\": 30, \"train_examples_seen\": 240, \"total_loss\": 0.5819499492645264, \"action_loss\": 0.5819499492645264}}\n", + "{\"valid\": {\"eval_examples_count\": 1, \"metrics\": {\"per_item_dialog_accuracy\": 1.0}, \"time_spent\": \"0:00:19\", \"epochs_done\": 29, \"batches_seen\": 30, \"train_examples_seen\": 240, \"impatience\": 2, \"patience_limit\": 10}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-13 17:52:45.256 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 1.0\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"train\": {\"eval_examples_count\": 8, \"metrics\": {\"per_item_dialog_accuracy\": 1.0}, \"time_spent\": \"0:00:24\", \"epochs_done\": 39, \"batches_seen\": 40, \"train_examples_seen\": 320, \"total_loss\": 0.09471433609724045, \"action_loss\": 0.09471433609724045}}\n", + "{\"valid\": {\"eval_examples_count\": 1, \"metrics\": {\"per_item_dialog_accuracy\": 1.0}, \"time_spent\": \"0:00:24\", \"epochs_done\": 39, \"batches_seen\": 40, \"train_examples_seen\": 320, \"impatience\": 3, \"patience_limit\": 10}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-13 17:52:49.943 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 1.0\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"train\": {\"eval_examples_count\": 8, \"metrics\": {\"per_item_dialog_accuracy\": 1.0}, \"time_spent\": \"0:00:29\", \"epochs_done\": 49, \"batches_seen\": 50, \"train_examples_seen\": 400, \"total_loss\": 0.01919461227953434, \"action_loss\": 0.01919461227953434}}\n", + "{\"valid\": {\"eval_examples_count\": 1, \"metrics\": {\"per_item_dialog_accuracy\": 1.0}, \"time_spent\": 
\"0:00:29\", \"epochs_done\": 49, \"batches_seen\": 50, \"train_examples_seen\": 400, \"impatience\": 4, \"patience_limit\": 10}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-13 17:52:54.642 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 1.0\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"train\": {\"eval_examples_count\": 8, \"metrics\": {\"per_item_dialog_accuracy\": 1.0}, \"time_spent\": \"0:00:33\", \"epochs_done\": 59, \"batches_seen\": 60, \"train_examples_seen\": 480, \"total_loss\": 0.0066082654520869255, \"action_loss\": 0.0066082654520869255}}\n", + "{\"valid\": {\"eval_examples_count\": 1, \"metrics\": {\"per_item_dialog_accuracy\": 1.0}, \"time_spent\": \"0:00:33\", \"epochs_done\": 59, \"batches_seen\": 60, \"train_examples_seen\": 480, \"impatience\": 5, \"patience_limit\": 10}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-13 17:53:00.297 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 152: Load path /content/DeepPavlov/assistant_bot/model is given.\n", + "2021-07-13 17:53:00.299 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 159: Load path /content/DeepPavlov/assistant_bot/model.pth.tar exists.\n", + "2021-07-13 17:53:00.304 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 160: Initializing `TripPy` from saved.\n", + "2021-07-13 17:53:00.306 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 163: Loading weights from /content/DeepPavlov/assistant_bot/model.pth.tar.\n", + "2021-07-13 17:53:01.114 INFO in 'deeppavlov.core.models.torch_model'['torch_model'] at line 98: Model was successfully initialized! 
Model summary:\n", + " BertForDST(\n", + " (bert): BertModel(\n", + " (embeddings): BertEmbeddings(\n", + " (word_embeddings): Embedding(30522, 768, padding_idx=0)\n", + " (position_embeddings): Embedding(512, 768)\n", + " (token_type_embeddings): Embedding(2, 768)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (encoder): BertEncoder(\n", + " (layer): ModuleList(\n", + " (0): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (1): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (2): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (3): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, 
bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (4): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (5): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (6): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " 
(dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (7): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (8): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (9): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (10): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): 
Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (11): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (pooler): BertPooler(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (activation): Tanh()\n", + " )\n", + " )\n", + " (dropout): Dropout(p=0.3, inplace=False)\n", + " (dropout_heads): Dropout(p=0.0, inplace=False)\n", + " (inform_projection): Linear(in_features=0, out_features=0, bias=True)\n", + " (ds_projection): Linear(in_features=0, out_features=0, bias=True)\n", + " (action_prediction): Linear(in_features=768, out_features=5, bias=True)\n", + " (action_softmax): Softmax(dim=1)\n", + ")\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 1, \"metrics\": {\"per_item_dialog_accuracy\": 1.0}, \"time_spent\": \"0:00:01\"}}\n", + "{\"test\": {\"eval_examples_count\": 1, \"metrics\": {\"per_item_dialog_accuracy\": 1.0}, \"time_spent\": \"0:00:01\"}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-13 17:53:05.169 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 152: Load path /content/DeepPavlov/assistant_bot/model is given.\n", + "2021-07-13 17:53:05.171 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 159: Load path /content/DeepPavlov/assistant_bot/model.pth.tar exists.\n", + "2021-07-13 17:53:05.180 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 160: Initializing `TripPy` from saved.\n", + "2021-07-13 17:53:05.183 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 163: Loading weights from /content/DeepPavlov/assistant_bot/model.pth.tar.\n", + "2021-07-13 17:53:05.999 INFO in 'deeppavlov.core.models.torch_model'['torch_model'] at line 98: Model was successfully initialized! 
Model summary:\n", + " BertForDST(\n", + " (bert): BertModel(\n", + " (embeddings): BertEmbeddings(\n", + " (word_embeddings): Embedding(30522, 768, padding_idx=0)\n", + " (position_embeddings): Embedding(512, 768)\n", + " (token_type_embeddings): Embedding(2, 768)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (encoder): BertEncoder(\n", + " (layer): ModuleList(\n", + " (0): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (1): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (2): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (3): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, 
bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (4): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (5): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (6): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " 
(dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (7): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (8): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (9): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (10): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): 
Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (11): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (pooler): BertPooler(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (activation): Tanh()\n", + " )\n", + " )\n", + " (dropout): Dropout(p=0.3, inplace=False)\n", + " (dropout_heads): Dropout(p=0.0, inplace=False)\n", + " (inform_projection): Linear(in_features=0, out_features=0, bias=True)\n", + " (ds_projection): Linear(in_features=0, out_features=0, bias=True)\n", + " (action_prediction): Linear(in_features=768, out_features=5, bias=True)\n", + " (action_softmax): Softmax(dim=1)\n", + ")\n" + ], + "name": "stderr" + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "Chainer[]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 68 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8CNlZyfSgSAi" + }, + "source": [ + "# 2. 
Interact with bot" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "K9H6fzdnUADj", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "cc96fd6a-86a5-4284-a04e-4994180a6c17" + }, + "source": [ + "from deeppavlov import build_model\n", + "\n", + "bot = build_model(gobot_config)" + ], + "execution_count": 69, + "outputs": [ + { + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.7/dist-packages/torch/nn/init.py:388: UserWarning: Initializing zero-element tensors is a no-op\n", + " warnings.warn(\"Initializing zero-element tensors is a no-op\")\n", + "2021-07-13 17:53:10.142 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 152: Load path /content/DeepPavlov/assistant_bot/model is given.\n", + "2021-07-13 17:53:10.144 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 159: Load path /content/DeepPavlov/assistant_bot/model.pth.tar exists.\n", + "2021-07-13 17:53:10.147 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 160: Initializing `TripPy` from saved.\n", + "2021-07-13 17:53:10.150 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 163: Loading weights from /content/DeepPavlov/assistant_bot/model.pth.tar.\n", + "2021-07-13 17:53:10.989 INFO in 'deeppavlov.core.models.torch_model'['torch_model'] at line 98: Model was successfully initialized! Model summary:\n", + " BertForDST(\n", + " (bert): BertModel(\n", + " (embeddings): BertEmbeddings(\n", + " (word_embeddings): Embedding(30522, 768, padding_idx=0)\n", + " (position_embeddings): Embedding(512, 768)\n", + " (token_type_embeddings): Embedding(2, 768)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (encoder): BertEncoder(\n", + " (layer): ModuleList(\n", + " (0): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (1): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + 
" (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (2): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (3): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (4): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (5): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " 
(dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (6): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (7): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (8): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (9): BertLayer(\n", + " (attention): 
BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (10): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (11): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (pooler): BertPooler(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (activation): Tanh()\n", + " )\n", + " )\n", + " (dropout): Dropout(p=0.3, inplace=False)\n", + " (dropout_heads): Dropout(p=0.0, inplace=False)\n", + " (inform_projection): Linear(in_features=0, out_features=0, bias=True)\n", + " (ds_projection): Linear(in_features=0, out_features=0, bias=True)\n", + " (action_prediction): Linear(in_features=768, out_features=5, bias=True)\n", + " (action_softmax): Softmax(dim=1)\n", + ")\n" + ], + "name": "stderr" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": 
"mT0RSmLwBJEy" + }, + "source": [ + "##### Original Example from gobot_simple" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zjR6FW5GA1di", + "outputId": "2f2205b1-d77e-4137-841c-69046506be01" + }, + "source": [ + "bot([\"good evening, bot\"])" + ], + "execution_count": 70, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['Hello, what is the weather today?']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 70 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "j8I3FqCLA46Z", + "outputId": "97d8e536-4400-4ad2-ea4b-bdd261e4de19" + }, + "source": [ + "bot([\"the weather is clooudy and gloooomy\"])" + ], + "execution_count": 71, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['Then you should cycle!']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 71 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "tThj0f2XA8IP", + "outputId": "9e22cebc-1b5b-46da-dc7e-7b2f18662d26" + }, + "source": [ + "bot([\"nice idea, thanks!\"])" + ], + "execution_count": 72, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['You are welcome! Bye!']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 72 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "kzW2ULW5A_xz" + }, + "source": [ + "bot.reset()" + ], + "execution_count": 73, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "6eUfyIRfBAWj", + "outputId": "4fe4d078-0d7c-4fbc-c8b4-6798fae6a0a2" + }, + "source": [ + "bot([\"hi bot\"])" + ], + "execution_count": 74, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['Hello, what is the weather today?']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 74 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "HSDO8cyRBCvq", + "outputId": "b1b13bfd-f0a2-484d-c5b4-7a767f9ca228" + }, + "source": [ + "bot([\"looks ok, the sun is bright and yesterday's rain stopped already\"])\n", + "# The bot isn't perfect!" + ], + "execution_count": 75, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['Then you should cycle!']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 75 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "c34YWXIfBGKZ", + "outputId": "a3e7ad62-2bc2-4154-f976-7dd6d5407847" + }, + "source": [ + "bot([ \"i dont wanna\"])" + ], + "execution_count": 76, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[[\"That's a pity! Next time maybe. 
Have a good day!\"]]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 76 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "H9uy-kdqBL3y" + }, + "source": [ + "##### New example" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "2UoSBMVhBNn7" + }, + "source": [ + "bot.reset()" + ], + "execution_count": 77, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Hm2tS83GKp3W", + "outputId": "aab486aa-0bfa-42a7-8424-575249f6c6a6" + }, + "source": [ + "bot([\"hi bot\"])" + ], + "execution_count": 78, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['Hello, what is the weather today?']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 78 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ivL1chr1KutO", + "outputId": "fd1e1249-8b82-4ce1-f96f-927ade1bbfeb" + }, + "source": [ + "bot([\"too much snow, dont want to go out\"])" + ], + "execution_count": 79, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['Then you should try hot chinese tea!']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 79 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "sd5HKHkCKyh3", + "outputId": "a68e677c-e553-481e-f104-97a96cdf4533" + }, + "source": [ + "bot([\"no i dont want tea\"])" + ], + "execution_count": 80, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[[\"That's a pity! Next time maybe. Have a good day!\"]]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 80 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Ad_GDanAgSCi" + }, + "source": [ + "bot.reset()" + ], + "execution_count": 81, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "vokP3-JbEvcx", + "outputId": "579854d8-556f-40b1-aaa0-666c2f94065b" + }, + "source": [ + "bot([\"hi\"])" + ], + "execution_count": 82, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['Hello, what is the weather today?']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 82 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "9oSmDlkeNmVh", + "outputId": "67c49a9b-400c-4c20-9363-0c05fb19e2c2" + }, + "source": [ + "bot([\"it's sunny in california\"])" + ], + "execution_count": 83, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['Then you should cycle!']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 83 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wIo3ONi_Nsp_", + "outputId": "73cdcb51-adf5-4ce6-b26b-a1d9916c4852" + }, + "source": [ + "bot([\"See you next time!\"])" + ], + "execution_count": 84, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['You are welcome! 
Bye!']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 84 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "-4De6d9XOGs9" + }, + "source": [ + "bot.reset()" + ], + "execution_count": 85, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "tJJwWd8nObGw" + }, + "source": [ + "# For you to try" + ], + "execution_count": 27, + "outputs": [] + } + ] +} \ No newline at end of file From e6f18b893f8ff95c5e1f461538a48a38af038bcc Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Fri, 16 Jul 2021 16:16:48 +0200 Subject: [PATCH 122/151] Add extended Trippy demo --- examples/img/trippy_architecture_original.jpg | Bin 0 -> 267014 bytes examples/trippy_extended_tutorial.ipynb | 2576 +++++++++++++++++ 2 files changed, 2576 insertions(+) create mode 100644 examples/img/trippy_architecture_original.jpg create mode 100644 examples/trippy_extended_tutorial.ipynb diff --git a/examples/img/trippy_architecture_original.jpg b/examples/img/trippy_architecture_original.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb07baeaa128b5ddfd17092957d415385f6d50d7 GIT binary patch literal 267014 zcmeFZ2UJsSw=Nn4M5)r75Tyu8lO{@wsDOxoC<1~I6_FAlN(~Z-ih%SF5fBiN4pJgr zT0$=(y#zuDp%()Qbpauqe0!g}?;ZcY?-={sd-gr&jQt0rVIB2Xs6Ex(Q-DapLb6{a~WMm`^b?Gchrsk&c@2d z#=*&RhJ%xvla1|+z!`2{K7M|Fb}m670X`ufK7PKxFT%h`e-9J$X=dird>m{XeE-A8 zaU+QP6r;h(EJg+i&!{M%~!*#Y$bgP6FPdCn{Ao#Zut$|CWaPvu=~#wp1grA_?z2gp*Y&ph6rW)%<= z5*CrZAR~M6lA8JzjjPu*Z{E^3FuZMa=YhpTODpR~HV)4nUpP6txO#ed`}iXK-UNLJ z4hj7j7XB&jb9_SLm!#y(tn8fJy!?XiW#tu>RnVl) z*}3_J#iiwyRm$ep_Rj7euz&EkUko6|e|78MJo^W~xaoeKU}9oqV)@%Ih7-Q@!N|?T zd|vq^kDfWp)7QKbD(_D5-H6R7Z8|NfdY{by%wvF6KuV1$P5IlkfAQ=;=GgoHN1pwg zWB;dLcn~`y1O4VPa)ThCqX(JcvY`JO|1}5ywFdr|uK~unV-WtZ-3Hr2_-I@*O}&i^ z1iSbgaHSKo7$uey4~op>dR+3|4Vb#Yf8`7RDS0ws}8hATMa9uL(U zW<5rD^tEzdNtgGiN|eLKOBbPq&#mu2zel)yG&Aqkj*URtX{QSpQ#oeqz?|5(>J6ga zNh5sqxP!N!L#tT#!`{X?1WS58kDuhP+NH4paH3MkZneNP54kFW2!n-gT?o}5B9US+wQ;6fHDRAFg)0mw^71m*2l4$o!;e5ZJsaTP zggS|QPz<;_lhpm?o=Vttxwa0qqcd_iHJ$9U*AiR+4Ktc|0Akv$#?Db9sYvL09 zfs#reI1;{3OAI}+b>0aOpW(6N(RgmupFQ&=9U1Idm;6rnFvN3%6dbI^^_~Ot&5;Q|Ngzkfx>d(dA-qx%dqjnCA282Ya6L} z3@TN%KL!m(h=3r(ER4)CXrKx)bPPIqV0H|W8aM`Vh*2Q4sPgu7Kw}WKe*qJm`BwMM zF(@P&NIwP{jL(@-MOPryI76zMZ|gDW;7XqeYKCBR44O|q5C{kDuhB%bW>H6?PEimt zJo^~5X#yq>5O|M4%M;oz$Dp79`X_#ZE!)U4E};SEjj&_Twp*IoF{lX>cMR%ML$w$p z>4s#zq~y^!w$o`w=nUs?Txr<ptooqUzSNX0)L1w17<8nCzsJ&FL5~)GOYKTA_GbWI zCwe$5TRb>lWG5;odCGc$j+Ry0F{s5Gsd@}bhlrwxyoIPu9dJb` zHBo82oYr1PI8vL!^;a-#;iwu`Es*^!N;QpJv-%h$`S&^$9RCqou-12R_Hf+FJO%`(J0^Hs6qR zy^?%MX@^L{3x~HLVJVzWCh|5J9j2|7y> zPnUlg4qJ|ZkURg8YXz{GPd?UnflF<5{?){j=qL5$d-zEsscn$&H?DLnu@)nI4C+gd z!ji*Au>0aMfIB(_wY4+{r%HIkX@+RzP^Fy7vv#%#wtV=@B|Rw(fz8vBv1SIAEKL64 z<^RD+p-pOr9 zI0muZY)huqkgGa~GNJ0CvOz1#uA}%5iM4NIBOb?;N0k=btbBLvMT^5VHS-}8j1qnK z3!tJ)l3~xPs!_x(n(cw@Ce9wKKcLSCGXLknut2CdN)O(^fB?LbNCXzpr#0H_7u_>b zIp=sjo{OvUt(K`T$bj=zpho>D%AtYH(CiEXYF|r+guTR1+ZLiYT&<>sF@Fi%`;&0L zOzO)*eWj&511L>(Pp}=$I)T6uqwBKmVCKLpV-aJ{Vr9F8xDJUf_9CI+OJx!~*^$v& zxSn+G^tp{z9q|^UIzWaP^`N!NqeC%{!h46JgB^2|n#?1~F%>L*IENhIAiisoGB>-E zxJ^SJxU7wLTR+;Tr9y~DT1WAJ=Qkx>b5*6Y>0H!v6w90$bXg29eq3!7?gvg1Iiu%0 zcANd7#+mGyzgXUE$aO+sg?1OVcHJm{Y)EMtsGL-Ys_jZ_;qNhTxy|xRpV|4U7R`TL zxm~mhjU5|ID~~!%Glano^^tmH>54QqppZ=Hu+g7uQ^$tQRn!f-lg90=7Zm1`FDgf> zDGF*AzOll2?~iA%K$t3ODkl0wkjxZ9BbFN;Wm-*)(CVxp?TbEg8k%?hmBuM%sye)b z9?)ElFV6ElK-oTSR@fmOp{#zG>+>f)rTfM3zx#mdASP&LSw!EXu;e}3XKP%4~itBnWY+d3_Kc%hvA;_KILu&3UByVeuy^&}?K zA3?*ytdzbxJIO%xTzq@pp*GMx3vb7Uq-l~dIikh&Y06~Xx=XzBRTL(ugqIt+*FloI 
zS2_IrP^Nnl;V5nGltg0*?88U^uV?*`MsrFZ|E}t7Vhkhnes~Q^44tbeGbB`Z8+xq& z=1Y7*6OV60wVYm|2~y(M>&SDnkVuS(q54-J^2&h;P+Jl9$?&D0hOxvKOrV4it7jx* zIQF%}p)pb>#7)26hzO6YhS9oR|AGEcDeWYku=pc?%AUI%03P(-A+jXf;_ zW$QeRf!AHQbAQjis=5zuS`g4(x}C$%95se5t*)`2|6n50DVfAtJI@%xk|8{Av8^^# z&NLj~PnS7|{8MW7@AP;5uR4eSAGNUoZ2u^r?f=&vlYg|3e-U~H>kL1QRG|p<@5JS+ z@4s{R!9S|_#T@}M5?_T4!DdnTLYi?Ac9*%5-u4t{6`%AhFQ}!qVz2g^@8PfMWlRX2HBM&)vRXylqnwVuCnkSbgR|h}#nlwwoao3w!fXi~^vz=&Wl0qWWV4?J^DEX2cr6XR@3P>{>j`e+&?N_wiS>&hrXyT ztj19!27C+}YRI80gX#KamVeI8=^A3v`D2iD_wGx&@}-2{9bls2y{+)Cy?Rz{)0cpK ziUe2`@x8aBwe-=r^_rlM51l7=x_;;oZTV0%e4fl=(@U>5_bG<7kmX|#`62um)Qao0 znElFpvDV{$?GPN0!QUrpH&;mBSG?kSh~3nk&8&~xb)t;cQ5YNF1k*LBaI1Tm>X8=} zj}rJIWDA5DUEgY!qHWQ6Ex70lpJ6v$?NFaFO5x1=vi7d`|lRAM9}ciO&16~p8c`H z($|ZEec}vcpPQ<640^JLI^wHbOs`6$CY!Y#gAVyO=zVO)RyhC{?G}oY&hKc_=^K3o z(0?jjW5q%lYMVYoHft_M-KR`txr*~UTMllvxrwZleCREem*aW=xK1-)Jg?F>ei2ep z5-|(rM)J|w7owYHpT+3H?fMk|VfaT(qr0W({KZCqFBOgY}DY6E2X@mPwU|`M! zfYCcQ+C+Z-bW^=k4J&*HJDqSpVpxLH`jE4d7$^*`=m;{6azU?P7=glULwVPO`1-Bo zgUs{igwrna+>W^P=~a$VyXF@3b`QHRcZElLS^Lz(#&b0OdUV(G18kQU;ec>cb|RVN za};RUk#-GeB17lW!_=l8v_M6d0J`?p?8AhfzYli2yo>G1M)@QjDk0r`vYS~l=u=)? zx*ov&ZQ7b@F|w|0N3o+_0NNDEG1!NH1dv zre4dU+Ea3~X5jDf8!cc~s7rrEwY^M#dy)rSdsKn>=QF!+a-ovzg^GgjLCKbwWF%DK zkQBJ)KjEX9SiVN4?jDlHcQKvISl1XHlbQ4g=qc*WOhCqtLI^Jwy>Dx$Y8)>SM#5hW zZj#^prh19={ITv#89NuI0oTHX_ist()waM0qPP&)CYp@1IMEBsj|iOYO63&|?+yto zjUCu0B@+XcjVOJFzzT5+cAab(N~gYb+o#FnE9BMtF4g8*@{C>^m+CKEe@Lwo^$_k5 zGmUvmlSi6TuB>Ia4NR?O3Sr>Q+A)ZwyMI^5up{|lUW^*yqzV6IZAs2k-EV1j<=SjacRwx~ zftsIpeq+RGTfIj&_eC9pxVC-iv8x5lgJ5ZgaxO+?4Q&&|CMLsPx}}`6E!v$(nB`}2 z-MC9?{`5L|2Ti9>Po4t`1k!z7W_4gqX%~i=BA%j`d5b(-%iIWjA6L8N@0y=Bysz>$ zk~^6K4?PC49)n!^g6vNXcm;5^r=6fQx;MuBK@n*3+&*qdnJj2p@s_eD}yYb6= zHx&JvBe(1t5V&SSl=$TlsvY=)t`t=}t}*TW&#HtqvzqeZ+2IP3zQiTz&4qi#q9r?8 zQrBO3oNhup7H3bUtFAhUO8=^5<}<%{kG_R7Y_gDyX3_6$s#_O^sOGQd(z$@Xmfh0* zX%(_U>Va`i%N09`w8e2Dx2FlLtBudoRA3oL5mZC+mfeW%c|*D;8b&y?QJhmMIDHYf zV>x`GHaP3^M90q#p@{CMuh{iYvbjrb%$j`^=K_{wNT-X|uv|*Wi{Dlp?p9iuz)Wir zI9@7}yFAtI2d>{v!rUnMnH6n)(tH(lvJ(2SdiS6n!i^OB20Wt)-)r$BlI~60x@x_f zKD1Xu{305FrPg5I+LPc%`{RZxwNDvgzY>##F^pmdt zEx8G)=wxSHKfS0Dx8>cZzMNi>*J#H(YuMZ;*yJ$NBlwuYPc|+W8>znm>xgU@QBMkS z@C{dzy(;Oh*Vzu6g?|j%eKzC;E=AZ(v}2ekh**g_z(({@shfi$<9HWqoUpuVXjHfp zb2X=I9wmEkkcLuh@IQbR#_zjV;TE=5fHy07s|v`_859?7P#ip}?NETQpCcZw@T?{N zBqtQWechN0dtdO{P8n>@|0*h(b2gWR{3`yLj+UDf6P&SX4gT3cX0lO5YLHc7R`{il zbY8yU(aC~Y3%~3Hp-gTrU77PE&tJ@++RZpOd{nm&F&RC&vcGE*I##~k;Ny=x2F+AN zXYKk>1lypXRUcheic?cfg>8VX&9FLKl)vFHY27PAEh#tMRiZjYh){XmTUgQ5;I)1A zq0wq`Nd#H0-4`zt=2m~cU}8+|uQe-=4j$X5A+zFUvDPIIjb8~TltUa-E%z*e`yz{b zHp734cZ?5BR1RBRF1ZQ{m`~PT-`f9PCV*G2X?zBfiF=6AUK7cqN?@)H(Mjo0Peff~ zcTQu!V$2)SdzjQkR&q8IM`Y3?0h1%EQsM8#WRDU;ZsJx|TCMvfRI5k8&d%Zp+6r(* z9ugH~9spZJIX5l3N`VAt`M5oiB=OhS+JRlvDEj{>E5k08%?ntHe+cTE5;d58-L(pv zA%tUjFehC_6zVa`087aP0yAxlDsP&FGn=>t1U3y{4%r=)xv8jPxWUf`I(^SWcdJ+& z?~BU2&p&z;6Nz<$bGFexP_UcAXj4kLJ8D_W68jAi+TuBNmAqKU{^?g)ADj(2V1|)a z?40qlwPko$vb5`cl<|^w6=^w|OHM2Go&}#WBCct5@7oIpSEUrX%kK8qhwE-XQI(iK zku+1&1Brx!)gbTEE83#^KB^P=?45CeXK$}>+g)m2UGKir(;BR_cze#Vg@oAT+}Z5- z)l`OQA(ziBwP2%4A7kIuJ7fd#Bn$PnhOaYyC*s~d|48iTIko@UD?f`o zvHqo#x{7Wqi7`uybtQOX;I&!@bg<%Yz~X=FvA@<%*bD%(VgKC4VYf5eF~R7xdbeYc z%mBHL*4jgtY~PW0W?Z&uq-;bM(Zm6W1x1Xi+UJzlw%8^13S8@9yCCV}jg5Pm=5&pjuuc zoM^#NUKAk$Mbm+v2b_-V=`9)1!DEoJt@{1G(paCNbZgi{wxpp^z!Pn znvNS5fe**%*zGS0;fO1Zp5``|{+-Sj-ux&j0$V#>quXJp-6l(;xCVxpLsQ>`wt>a* zV~=~&v?vUSSto{)5ETZ<6xzK17(_28cuHbnV5&rdZ!X`N%3*v&;qR`^#HR~idGsJr zZ}>caE8Lny^DiBPKr10n4Q!VKLZQ605$L9!BVDGXfTl$z*Fpd{E}mmhe_|eLrso(0 zj3Aa`Fw*u^3n0R~2B6wk~+dM?FG|^d!jNR-u;KHzi`-57u3!{`T?6& 
z`@NJrOI16uKF+T-&vV8O3uz)LM5iN;UAhTekYfeh~NAf z*D2(q88CjWRfOvu6_R5M7_={P=r*&dC7bBQW^=cnQ5JvH?z1GkVM`$6#ifry8A|kT zU{eZxU41{ex^z`b(PeG9&%`0$_K|oYMRoT}f1n^xL)8h$f@wz0+IDyhTI=%~@OOb6 zvmc~?-YtRt0U(Y4|}JOt|%yaS5#MDzNsAls;*P(BY1k7^k`)3%h%m{ zn31_lM{a16)#d!$;VZgtfuO^eW!|N7<@Go2Uwo6IW(zU-491a7Am* z{)7N7;Be@+iZy*B`3|9=MJQ-0!sZx+qssyNMQTO13Kz$Ig{v-AXY@P}wy{KxZtL*U zL=0yWkJu9+z@-=}Y!x!T*KUJu<@#VF&`{iS_8@iWaaeY=vPe~TlEY)!8Dzd3G5k3nqOE_l3Ut~yBf zEC8_{tR&rm*)EPM%5Zl$seWyd!+gu zXG}omO?J1XRUZwu|Hj^Xhc&rv`@&dJK~ZS}N>o6afTBnzHb6v(fbPS!bR-@RRHueHzf-Fu#UzUTY_CI~b09dq>Y z8)J%w=YP8G`QVhIdrlbD78i(Knj=E=ZJ)qLbP-FX=pnod-BOJ0giB^dp+~>o(P@%7 znOI~hfM=^Iv9}RvialduK0oWDPE(}YQe^Ac_G6xCQ&@R#Xj(dMZ#x&y8b0AAcdWP! zaxe7C^=2a%k8Jstnw}J`&>u(sW?PG z0<8woa*B~Pmv*z|bumT;r`CMJ$vo-%Na?|uE4;`ugTlC3T$xH*a-|40iFN}MTNkH| zO*5X-qh2(|-4?#yQFv>SlgaOmTKVB81gc#)NNac0BbSw^Kyr3E8a^lGaPpY!ZgfYJ zuhcvURVX)&zwU#jvtX_0_oxW~)5ZL*m@B5VJj|Ulxlg9yrSxdNDGMoZ7O}Zd z>l2XWc!Go5EVh6|!$Il`-Qtd$o#m*U>F!);_qjDf$Vrdw!*j}YrnuOzaw+TzK}NF8 zeD>|0SmW3SiChV?to9!1J{@+K+F=pj?l|o8@@=`3Ybtw!nk26F%O(UzDcSalsall+rOVW8+IQy2!{k7%m2J`z@YcMs?3gJp)MLhA^< z*fFYo;rB$|bJ{I#KbfS{pCd<^Gav#pkU1^i=hREEDv+}Bn;;iqhfeT4@ssK04lo(Q zNoFtJzu=vpwEfG3l#=BQLVCmmi-=xifUImn(Yne+bfVSA?`FrdoEC@D*4FbHCf9jYMeeh*+0VP^f_>r1zQB zk7s$3ra_#_7%1y&ozF-PVsOw~DbQby&nWL$1?D(Xg9^<1tqPz<0&SP^Tfj7#Fyau6 z8F&GcX>a%T{uet)?Dqr@N~>Qr`M28iW_|iUzE}Y!qwflt|20=qu9-|>PQFB`^yNi% zfvxAO{X9kUG+|x=prDt2GI@aZ!@e;1cs|}nYMpZv5n$O2F}Ni>^nLm2&D>u-7`J+l zv3(Hbp|^(f@!?fz7HzzDn+$iS#<>^8fcZK-e5*m^v$n>Ce1-ZvCtYS9G5ut!6mNA& z(CssApIzaTU2>2*rup0~)BMXck2aftp8}zY7f1J7K+b^Zd-6nOBoz%BQCq6ftGhsW z)+TC!+xGC8Y~VEEnb?6KKP{&rT(lGr6A|t*Ys(-i^>*TlPJDUK{SsHrZlz~a%_e{rk##Nd985#?>idEzEnU5KN zZf5dgH~DWc5n?mL_-j%dOodoP&+Vl+S0jK>+oSyzelmp=eo<0R(IA@NZ*!E?Z}P}h zju%UKJ93N|pm>JPKokzBrV6r#HpEJ$7)!FiAG)fet1Wf+L|)DpJTZ_CesT#N2rbM3Y2ZnL z4`FMdWf^KL4EF)IB`y}`XQ3&A!_;qs7GC42K}g~-J|EBEH0OrbfqH(9Q>4iPU8U0Y zZN&u96cjV`e!+k4vLBLz8m`nJeXi%`Xkt1;U+X6mD~gEU;H7K+WP*T-q<#Z0*mf`y z0ONr2;@a0Cx+;Q9M)wTCL3x0iyuSfYR)-M}a=6%gD%tAmI@+io*42^bst;gcih$Ok zV~3h|s`(&Y^+1Y+NU)dg1oN?y6!Rk_Cy%5+LZgB-1Z+Vtoo3~hPgG;G(^H4l#L;&etrKy z%>z-omW}=$Wq1vtJo;^*%f7^L8T_F?JbEu7&wPh=8lbudNRKWC|vHpv#c=##V#ogQ-EDjy%cx zv$Njqk>(!@aw`pA&WkZkPZVD>eEHXG!dY=xh*ha1^TG+6J!hxNs*L^s7500IiZ?*E zEq&XDhw2>t-4eOUv0()R?&u22iKh;;4l(A7+q)!EounT<$ZCk}E&Uc&G6|6Jf7nG` z>A=X6sZgxJA3|D7s2ku5JI+$Q4L!yOEUvw;ece7Eb>HnMYXq}!=GF=?nA^IhWdq@T z8aQYmV3vHOcvT!ozkd&T2@V)yg@#FhGA~ko_iIW7bH>Yp1iM<3`kYG9?vq1$6>%F^ zZe6)5@MY`=Ym!`J7MfgdV$IlRGQ^+(9r7G!_}8nJ8(Z~wkB*6n7I}rYoH?YbCKGpM zkp=Z5_Da6ZS6`{_*zYgD^xY1K@OpzY0f9E_op(^{7!cbb5IF^{KvZ)q8*TPM2&6z%Hv$K|X_-3y6$4 z;>T-u`uYV%+3Vcw{6>fvKkw9*T6!9IT{%)cqiKKjl*?*rgyWe5m;v zpkg!lEBph9!U+oczpe#U&r*T&b7|ojlKjvS036yFfw53a6r!&OO9$o8muTNwPsz0V zmY*nYg@PW;c7k#vlhF!rJ+Fy~&d3parc)|cHjgmuFOP5%u5B(_6+sp|OEAimf|gcA z+i?fJA@A)3K{3%z+)f4msY1 zmKcc|4{4lJvIdJ6;ZxnO1$9#o4Og|$x5f;uo@{p*4i%(4E6C*aXHwtEv<=aZ13~&< zEsq7u7k}8~kcGLP(FGd!l&-iqromLKX1o1wTXZ2q&yvWF{w^VVdX0sPI~;Boh8jOC ziRKUyK5QLS^VgqHO8&trxwph}IC<#y3(i+0N~Zb&okqzC8`X!xHn+cXssF>Z{@4E%|14O{NGWvWz{H6o_EVYF z+E1=@p(uBw$=|E{ z8n}16lcLKi?HWm}0+FtxUqcSvz4akVL|;SE@ZOyH%5}dW(cQhfl|`8f45Pf|8R;;; zw*eVgU2|fEiPtxlnu9I-GL>SSYTM49UXNhPd0WJdjXK=P3(c95hi*{W9#TSwj1uVE z?jLH*;NyAv#C}kN(FT|b3Qb&&*EqK!2&1f*CKVOa1`3xl$>vB-Y3)buhogPDeJ>s)$x9qn!XKeT z)5N^x$fz2&gBZVt%VhNsZzL(6=33TO2ythf86>Xy8y^eLZQMR#f0XI;oGQ{weS+%Y zR%}Z5^PL|=hm^uG>)5OR)c#IJ&=@64>P0+Y+y4V4AEl0MZ2QccML)V?#+CG zk1!bzYha(^XAb~npR8MH-vy0w1J!5-gzAFKG8Q-&>nD7Hr~F~L7{}W9u|F5qxp=3uM0W%lub7$%1L?7Af z)cP@F&PQ^BR?gp?5agzz9nfSLl?9w|yc8^Bkp_m{I?|e@|NC{_(^O(=Tjy 
zd*8r%I_Fe<(m}aSv8>0Bn2s=U3jN_^(Z4lTfLi{az1}j6FZkM<_=JQCNEGFj3Z`oi z=MK(K+YN|U?LA|z`JfHg6K9m7d=>(=`RreDoWz?wjHWIhWe=;xhKk>EovHBl|AJQBBt!T zQZ>!_AslO~vj*X2%nJu3a%8O;kooFNXTBC z;TzUow7p2JXS*bO2zIY;tL_<#ee&sW=w4|yfBs#C z3-ZOg$KDiwTk~A%F3l2Qla6_jJT=JI?r^r^L^VOP<0Q$*Atxuv$kI>d(2dL2IdANH z#JZj=;M6ut))$^vvKn-k<_hDP0SC?8s@w}k~k2W z6}JY3Ht~A=U1(j>4Y|4p6w6wZanW_-h40tXPIBmuMdOAtS3W7ddnO>38ye}|T2bb* zsL}>Jlqr4oy}6TMt|ih#R!2pptY)pmu2ELBt~!fFHn5 z9DM>e0(Yq6{cqL5^)E+%GJUjwmS;r(lHku0{^BFhAq=fXC;f28vG_s-iyvF|C$iCmI{fL53Hr3~9u)9zta z%ygC2K8{nLkG#e!EQ`UHDSEJaHvIqt)%nSh(LCN%`ezw-^f_DnA`4Ihc~SsO8@GW1 zCBaDKZs^DH4iG=3`EWl)E*!#>82U$5B^Fznij(@^a?H7RAZUytC={rC|V`OFA;42y9QR-@& zCe*reZ_H@+GN;Ln_!rY*Z$)q?sCKj~_YmxKea2(UR67%&iJrtsl_XXT9(}Q0JFISF zU%BCo^GXi|8NKqlDm`xA*6_ThySk;PJM3iHDR9Vd$NzT5e|9qeW&ekZ1{r#DFQX1` zVA=;jwZNG%-3mm}3j*bZ_V-zFA%e35r$7I06(C59c)iPX4|X#4f; zK#B2&7O0VQ^MC#ekKtEuVoNmXojB3QGU}!3o7w{~5%=Ak|EsypIcH7fjz+<&Ux#dC zBh-;~=oFQcKz0m`K{-zpzietav5AWuWlje*|LyQa{JD|cP5cefeNZdolHXLdEncB= zC}XPNM>o%|n92RAVap{x$U$7G@7AUz(C}pyp{OBX^MLEFyx`p7Nc4U)sG%G`j~zqb zr-R~Jgy=AiwgVWgw?PxcJ&_5X<=;1?zD|x@ec-=GDO}TV{Ro3Y2c3b9^aq`)0P*;L z|I2%gX6-yYv}aoj9R;cg8soY6UdmEElvxWY*3SBOKignYtwNnn4tFn)NtB{JnlhxvwW;P{}{{6iMXGh9GWB09l z-niiWM!~@hA~q<<4W3*qGyNhMGDE+V*Mspw$Ey`br^M+$+m$-l$HY9L7pg2i_R)J4 zv(H2Z0v?3Pw9j4FmwI32vvu3EZOn@uki&?SuBDdeNP8{W3*9pD>_jc?v9o0*bAq2j zF@7HcoAQ~oA1;np`nFe|^S;t`qBFQswlk{gK^vQ-+>QN;Bi0E2hg=HL-#-w2Ea26< zpUO+^UXQ>25bM@_=CEgkmX{nX;uN$IVUwQQK8-1Xc4ss)Jl@5%f13|$XF>AXL}IS8 z?eFhzxP5a~6B4*KvUWOZpZo1r*A0W6;gk^~T#;om)e8kJ)5>Ntw_mT~OZYxIyrsc} zK3&;1J3>}4&n;z%JZ$~7LH^_!6MoU_m3;X}Z;p9FgmYAjR^x+)ip6k#ZbEgJG9Vf>9`Pq0al@SduOnH&r zmri=^sC9oQe6%oVsW$HHnWGUW4vv{7tvg$A3CJ70sg6;;*q?2;3I>e9+=3Xh(mac4 z^Tvhyt5eR)=Bd09co}bTQi3xPWb*TFlJhb)c9j+CDPCZ8#M>QaOVCzW z%?p(ZMIUr5zuKC!W6s9l+(K*S#`i>3pJ1Xm6o(EUKJKXC{p( z>Sc1_?8*u=w*`_R*oy$u{;GUXq#V2b;eXGaM>WISmvzK(52 z4>hR!yWOt)7B{$--dpQNNFNSw(3e>U9eE@P(>i`J0?sw}qPZ^hP_pcq9jr}Beor1T zZ>?-%L;%U(>RQfRwYB2 z>K()4cz-&#WQE6SAV%)<{^D`*83n4>Y6_^(m$yN)x*$1u)$!i8A9!N!F^~s7a|6KK zULmPjK{Klr1^LAh9_2@}1a(5b4&8e?3=Qd%Tw_UL>)eHG8>FG8M=ImRl%arke)9WD z-qo95;;{QtWfmGRKBEg&I}nF9*rp&V4`LI899L=)dFSChtC;V*t!%8Cm2O7T>KB5F z83E}~i+?d=9Zkjs#UA+7h_f!n(zjK6#B2AbPg%t0+4NSuG_vI?u2*uTooznRzx;i7 z!=-ioX?hzO8LV_+7{z7lHptawdS0^(GYI8hV0ZkXJ$OW0h32w?I}dWI%@ip(AijmZ ztq9mt7c8fxD6P;^NJ<>p$zT^J_TRU8`b;`9NOyk7%S~lIxsvMxwTHrg1by`&_4_o5 zM=F;XAzVh#8M^zS88PcUAejQimum>iem@4krSNdK1{xA4=!5 z)_VQ(Uf(Bh$DZmn&D*A__*5tIaReqC3~HHMPwTaV^63h$j8gL-gP4@_#h;3!?pTGa zXu94J_;fU!+R9uCRX-%Q{6;vwO~R?uh4~Kyr4GmbWO@vWA{6~`1i22m1)6+G1qrbo z`@xUE{Z1V89U%R22^c~;$0bnNZDoE;eMS<;AHg);6VynGG%2x)ILDf^`_R$Q@{{THlKM|3XV9G`^6tq3A(rG4{0R^H+0~XS(yYAoI?gJJ^ND}sQdE?yOBFxy z#@J7NQBs={>Cd?e?;d*90- zA4a|6g4@{L1L{x9Kbd&xpdS0w3sg$5hmZ@8Z^(S(svf$UcqR8(sA}&eu}A7(#g|@D z5N9-de39f`#t8!@NcI&mAissmpWiheui;aCe_5yGbtu!e%o7P_0riM=L0?&rCOonR zM^;=*Yvz`$zTY$sj!)3UkgYe@Dw0FrDu&@)dBk6$TaI)+Ja>+|r-HfB#+=jFIl*9~{+z3WUEiYmGHws5*J@dK@F!Ee zviRz)%}LH6X?oKO$Kc`gNkAWhzoHk-azQSY%@qV0$8q`D%kx~|-0Xdf*IU4gFYOl3 zLBzH(GIH@>DhsSTuC{gJ=44v7Ua-kT8>yakpF7^uU#{51z}|NYe`5u^E=}~Upf}iN zAnI8xC|TkX>OqgKYXZ-OqA=+qg|l~uM?P$zRDG>xO1txKr<$YKp1T&t$TVT+{cZ4f%nDoxakWc=GvUJYhOOpe{j$@<~h%UIxqgrTkBccKF^nK6{QIE@SC<1i@+orvp5`Ge&+7$ox)XG<5U9g^>@OY;>SZkQ;}WTWEj>{k=_(KgqJ=VFDVZ zY2hc6dWB>cJW0=9>Mb{ap00a+Ra5|WhB)^m5VP)QpsL@cL`igdd`*I7-f ztAvW$9nI(3H=^YP$N(C5nkE1Sw;w71?l7%|!QVBzvY~P?zt(c9J0d>f2D+y{`7a_^qCrn=;#fA9Z3s+W8tbgYmQUV<~->x9xD$ECcjl{3?SaRwrEhe ztHa9#s8}_1J-^(H%ezzp#&ORieb0KUeZ2e%Gjb!+KHM+#!%e^81J$0q6A~`XieUwf z5~&BT5Da;&Lo|>TzEm6^PiomNA{fFd>tLH?)&3Ne?o 
zUjRhHE{K}d+fk^feCwO!tmJS-VWgsPak!OQRaMQlCw{x=;!BYPk32)`Y$xrRX-Voe+2l$lzfRw#g zIr%qgZBi|(*d#YP__3s;#9V(!)t%@Y;WK`=Y}6~u*o{5L0GNFd3X#0iZ+ILG0t_l` zVR&c(yV{Fa+(i2wRYt;}gcmtCc9TM&YwE#@ zE;fd-Ukt~+tvXh@mKbbZassws`sl*6*(U%~^?(MDO_c-GZ6HmfSA|~P13a(`oD|Lm zx>qwi1?#Uad`7GvPuv5n!wtm7t~}qppwU5fJweHVzDEVOxY=HU*GCHcW%OP}2?#A< zW16C%F#(&osuNKcBh3ziTveZXB?q>;IC}!!cN=up?ty-IA{oMoC8zZh`JYawxrK+= zl)Cg2F5!WNjiLgTcj?`0FeJK|3JNfLMaeaAfYuq9=19f`6op;TUlBs|teLT`XwgHw zZAl74OLM-$fsAw{!1`OnXPVmaI^;5*7cIJu%c1um2G}k^WMa#J%`ke`TNn| z?#pSQ;OuZY)g*zS6kf84hS~Zx1AZfgr6gH*?oIbCWAtxCU1AGC961TIHYe3NrwmLl z+73c7MIVpbFZ!`3G~iz$#g;eL(W}5YcuA;fd6xooa33Mzl|dqcMWJ8~J??zV_G*Ht zwqgnj=DXDZKTSY`D~WCbVOTY9+Lyvl5kHzDtG997N`0j583e9&lWu{Z<^u4> zZ_8&-$U~#2^FVZ_3)GvsvO>8h9rdasTSQd+#0H|>$d|L(R_Ey$w_D@@)|oC!4R@p% zR%_(-8GTDvvySNR>zB^osa7?H$*V6%tzH6V{~|Oi>H+khme(EM%*WYcR1;V0@(6o+ zBtq?wx4FrU)fYA(nPA@VYlh@M-ftN}7s@0i;k;#I)!SVX)D1eVN{T)i<;+=}*B9FH z@$=2Y`<@cMY+!3p2#e`nhjt80?VgygbOK{T9Zmt;xxxffZEqH#ekwJTbj?x6;MfsbLAPW>ZKo+ay}PU)v@IV)Z?0JVPLS z#IRV1KFHddmU8eE6CAxEuj+xH&O_T-_!Oh*fm2{oQ%}gkJ$G=Ni|P|6!pMCMrQUH* zkdMmGSCfco=0#ecl|u~LVJ-~ZP*cq7MP)x>ji$c;qZP>*E#KO6>jQJCf)X%1*k1Ela!H@ z-43DqLu)z-Q#&}((?u3>HZ7Giz?%|aID9p+sXtN#(Zkmpa_KUZ`P6P9r5&X0i;`Rh z16KWydt=s!*sukeT$Op`7CWNDlX=4e%QwLf?3V8bMOT-27Uk0jpafz{nynie27OWl;KsjQMQEM*~4L6q`n8as)tS~ z(EG`x?*xZ>nF3$$%L~T+ub%aMLm;`_hOmL;Yu+E!Al z(7|MJ(L7A*bjxXvghQ=4$r0{tNTh7PXr#ty^`gDaxlD)W(hb|*uBEqGP?j3bfO2sG zc3IL6OJ&8<&An5|v2{bdmAgT`*g2(&hP2}seBbG9nPcZ>5~aY1#>YFlx-2%7%HGm{ zKLCDd4^20ytBGwEK5_MoX){^n9#D5LselL6s&Eid2PEl2JP4Dlb^*6v1@_f8ce*JN23>9pDhCymVcEgwf24+h9ye!TPk{Nd z)GtIm#UCh?P$3cE1Hxh7FUD84y|J!5N9aGR?|f+;I^fi@9@Y|%ehjhOC!dyjA{>3G zY$xp^t6%Lh-PVz+i)Y7AY5=cyYb0HKd27&marjNE`A#q@hRXb!5wA7lyi|nrLmqTl zIG{`)+}(M^>PhqN`;8~$k7%o>0mD;WOnnS?!&L}izrvfrxNn<<>wT`z23SQWv<$31 zG*+$}+(dp~)u!_>%w?g1hTYmrm!>SoCBAAIcDO4-tSEjxkfO~gWPq%j-0K7M^52u) zrPHuDdhC^b)&tHlB>;kCqf)^ z-=KGqFF;4Uh468Q+shDCwQ?XP7xfA7b}6c1 z*RtW~i8q{44|j^sV7gT7gi>>ID%DG#e4}7^Et(jYI$;=OS8w`OPK0wXHpM`>g1G_;n!5f73bp~NiA!}UFORAsO8ks-+ z;I6K%*V>YlWG|Zb8%~XJuD*WfU?^LS+VMOH6o(v^ZPr&+Jms}S6f@5(Ex{Z^wF?^u zq#*JMDOfDq7R<>Pp61du4clgM%zJM=Ng(tByt3H{$@GzRkGt8BCvddUv%Nk&>KdKj zb8YXQb2kt?jh39-ioEcBi<0w(!39d{BTzuvHU0^j>p@aWr|#k2>4prDJnu_?EC{uR z*=>-MW&|Xe2mIdzd~7DRQjp9XSP(C@cSg$tdS`C(nSZ;!w9>WgNc`gn@wSX~VQrm< z4~M=6>@K|T_?f;|NEN4mE5Z(iJ$BT%tS>xw!0iycq6y^Qk>t+U6^o~`b8ia6TNlHx z(9N4r#Tsr=31}^_<8{g%GK!>z<+u;;?&Q~Vbrn~Oxa^kmg2}=i>Nvp=0}M6v8~}!n z*<#lg0Wm5!l1s!NRBkO_A031AJ4St0&1;SN^I2GhIJpnaxh$+>kJ6dRj=|-8l#9xa zCgl6sHaFv3y8B+L3vgQphi-GJj@!8X56?T5flF#8yGZx)S4TKFX8&Xo=a;-4QhV2R z@OZXHZ?DAJm6ESOupNXMEO8U?HXzjiw0fc)YAwlwhNU5jYln^nRtXtPv3}i%9H;O5 zM`|L8=OSVM&2cfgGY0@OEb2C5K`lDN;=Y^o@GBYn)?3SL(X zp~&KSu7-Z)hZf>MDy)N{$;%W&csEE2?O_~BuC(TfY^yhPP3^^B3VOFw#%e;;*4KW< zcMByKj&^qBeypT3e&rKiLExhqc+OUp2yvKHG$EgOYkuknPa+vEf5^r-;F!-YT$`Bq z*W4O8&(`bCcBacJ*rN?C0b{y1ZhvcWpog^}sOp<7Bo`e6L2^OsAqe~l9ZG9!hwF?0FC0_4Vk-Sl5!LJ>MXWx@&aEl@)uxl$AuK^w=wc)oR`=v!!{){=K{bjevH2mX2am z6{s3>+RExH=zb5HA}|l7@M4k zYA(8xYKDKK4Y{H0S8#>*@>m@X-4-boAHeI0Mcpw_%CjD44tMy;q*REak*0I+O}0{B zT#9HK()WneDRRh)NJQP{rs@2R78)aLS)^tdc`(XyzadKa9=Eld>_LZleJ|k6)h5 zS7&GfRRMAB>%V6*P)q4~v?%fESm?-i&28(Oci{cvH4U|0Pt!iT?r0U;@9Sawz0ml^ zcwryp@mEJ-`kS7-{a-06|D)D{3$jjFecR@(If7%y+@h(HeJkAeCgj%YITUVx<`x_p z43*l=>=6&+R*R7g&S*4N`T9ML+d%SQOu$3{T^tKqGoFy7=-Ux={R(^%CLe*rJmzQrwqZW}j0uGH>a&qpPnt`pK!{689 z(x3PqoZ{8aBs#dU{J2x%9^A`x-Yf73<}P!}zOJc1Fq%G<{2b(EOI0;`rUVVK`+34O zRhw?7;xmp5?tg7W#ecKU`Plz!M!yMPC>cpY^4nP)C~fmTmN(E~d4#0R)p?LLQF|~L z`eav+Cz?^0;HQWK(C|^9ptF}%g4Vy@MFrFwxW4Ok%m;zU!yBLLbKZm}xi>?s7wHp|*FWtC3{`5wh%ZRvkb5c-1U74xp3H8YY-NdtdlA 
zv*aoPk6KiTJ>JmyMM&4kCG9s8ilbhn-vK66I1o=Z>srRoE_h{OWQmgDsndyEgqs!5 zOk*(L-fn096i#?{)AQ7v#ek~omzePBJu`dXKl>}l-rZ6UmYs=_|Xyff2( zZRiX;mp1(;@AUs?JRMV--aXIa5ET)d(qMiT%8t1xPifP6bBXb-7nE|tA|D(+wGg+6 zq8e=A80FwS~-taX3`(BQtzRus?&Fp2#AP<&n=rYrXKBmUk3o81ssxm3!JdnOWdg#)3f#W}sfLbvZ6xbk$XKNtyAoa@El3^8iU zW!47wdw&#G(?32UdavaPl9=dw0%|?ium~V#&okzc)~LW(uEonPGe?hHonUuA1dsVF zsA7hG^gwhP-MEGkk18Si<7O8q+$glyf+XVcMRoHUo?4$<6NEq#67kZ_#X)6TVH($ef@xLzA!?7^l0h~`*kYOVGC-TBG$591zFMXw)h0D8bzuA6M>u@63Q1`${0?vo-% zKS!99;UwmSF%{}SV7SW*+BvM7w~!YZO_HAOO)Y{sB8()ZtUrY$-V7-my~}e*PEMr| zK^MgbVwZ)B|83p15Y9#wo@OzP?>f;#YxrSAvK}d?{SAkJNRJID|5PbmT7`sK3MAqKOex5DX5%baA z^B)6dSDDxs{EO2f`}}iE1Nj`|WlEo-o!k?}-zFE0gP`@q;d~G|gHl)RW*6C?!m-ET zyGbg1*TwLW@Q>_ln1+BNN zrjpZxye}y!Jbh5Dr1&o9VSVKTHo^ANOb5K~i|1)?Zhm3??Wf%p(-Ehef^W<3+Us*^7`tDK6t-uoH2Pa=H@99b>uN`RTY=NQ zLzX4=7aB6EaC+>IF`gVVvJ}U$J~AD3hAqS>s7q9cJU1~!kpF3O83{}}c;Gh^XM6Q^3&!iZ;fQ^`-MjyKefdVDT-RvX*q zuvP;Kh+CF%CW%~8@166;OCk*nrvUslJ!!17wlh?c;|4-pdw!^UD05lspHtPapt87f zsj@}Md0qOfw7vbT>vVc+&;%@m=|%*=ES-g!=U!WC6@ipl4sflwbgCQ6k7Nv!>|dbf0Z~C$LxVz>yT{8ZCUB(4d#}JUt+0k*?@7 z(S24&)x$dVH%RoqI3N3T>Xn{t3YpG@7v%aMXftPi7AB_?n$ZQ0b7=i8Wyk!i)M$t6ZeG5?IA-x3?aoVYyi-RrTja$PqgO)QM#xyboT+rz zwl_wH4GAJyA;ti`Yw>XM_X2W*oAA=6$a$du%lp0ye3~ai+Vs^lkmp4glJ^!`EgOQM zk{MP&;k?mZ!<|C+mgn5GiXN7bm@hn!?Q;r77}WWkHh)zvbnS4LLV_+tc-O$s&57b> zR`B7w007ble*CKk#_3NC9V+pe2~)J!hOCD| zdW1ChsbQRh3E2Tv^+mjCHX6c5qG3=2;kSPMysjzqc!CgDXR#Xy%hwTx0ER!t|LPA= z-CJtuhSUTba@oZq{HL%vb~CdER=45dkE1+?eAtnJXp@;)+_6sk;j*IeyTyqzG4k9g z;;eouZ#OUUX{f>O$3(13xD55=McP{C7Zu#~A*O}jC|Q3P@%7AXa>Z?;)YBC(*<58W zyHQ0k-pnsr>>27b)xS!(V+lx$}W^xtq?6Me2J=IXM!ptgjirGU^dtpLuKSSy3~DoLNye z0H}(S%hmr9t_z@YO7PDUrJsCOVe2d`sV*rF+h5zjuu6Z5t_{c9$4!aXBW+MyXMCl} z3%w4Gxef(B{DXGu!!L_ppUGmc-zj(f{6seIY*k0PLHRNLURlD%lKg2`;j2&6wJZ!e zHWrl?s|H01De2G7&e?v2NsGr$^pB>vSg8;nBmC_NQNAq}I|)iGME8K}m4#&;xERIe zdqjy+F4rB8zZ^bLoc_Sau50??4Wf2IlGdQ;qyevUrkZp{uDC7Wcc+A7e>YZrvn^-K zwKyw3;TUzJ5!I7l8<@5-1VEai*j~d~#0*Pryd5hi=8(IuG$Q!Z?Mu zs1Te--kF@=Ypp+a1%`d)p+;FSn+sPa#SjZBn+87s1zj0XQ^aLUF~pyhP0z&A%8_+a zO~NOkLE$OH_f17`tn;5Kf(OWh$Wd!51gf2&q>#Eu@G)bn#jW#fPTc(>M<1Y+r+zo+OTHq*6 zbC8vG$5K<)uZ+dgYL$i7YZo$-gGLqn{(vL5=dlo_P(09&W{`jTXb3h{jb48PsyuhV zv-gPSVac}M_%ku-luSA(S5*S(dpZ3#49LIG`l6Ii3VXVI35Su^OR*U@&Gq0S!cFn- zc8VGW$gO`oue8QFW#SueliJ7P#b;zc*BNoVI9TmbVmi~UPq-lDA{UvP5qtoee4SmH zH{B7H(V?|Q?`cpw9byt9Ikee>^-25lFM428FKl~E)mZZeIfmO33rklQvL91H%Ki+N z5nPO?3AuL>g5b0GyZ}jV>S?$X7!0$3&FR+vfynsnUwvh?MggIqFGL02;%xn(OeipL zOKj^%p;>a=eaT6!6FDthWb10&Zjh1wN_m6+@qX8GkG;jW3znno`xXro_s!e&+C3&a z(xZ{ju?xWK*g%=DxZf7jPE-Bfw{{yaKuXwd?fp;2H0-1pqMKV_zW3`E{@HtQ*eS5e zW)bzaL<<}QIC0pQ<0n(YQ0V5dff{n#b%MDR@y5EZwFHXPu@0;I{8P?+faJrEUP7+! z|L~Kk{nbom9`gu9e-0pB=vCx~{t-YbsWpse^I~8#Vx*-V`3A61-ldE`5+ahX4Adw} zQD=A{m-EWB0q(~K$8bWAibqg80FEoY4zV)2(rtrZ+y|=cc^Y(o*Jj2bk_1I*gV7}A znr*)JpvCu3>a9`jV2UjZCNHtx) zcV+PYQfd}th#sUM%)o&3b8>f)IHC%_C^8AHj4;D5Onzi!QkpxnpzS_e!Cgw_z%Mz(-Mc>1 zUBF}C$pCBDVd3Z+^A$kIUIZz2j6DXqz`h(dar94xR1Oh{UX;dcgKN+5TP`^nUMklY zt{w1RRP4A@CoF@+j=FaO<8uUT?E02}@e7bq6TX-T1|@uWCdFWv*?eq0z@pekKxA$O zfKJ74fR0@-ClWjE)B#JNVmFWJk3WMMY!kLd(s&$^tN*52*2--*WpQFL6c~j%h!-vO zWvm*s9zf<7|EahPdjoO{>5+tKy$~E32UOIivOqnD%#olI?AvcT>NptZ04+y35i6Nz z{#{G_UQzvtmPm(@O7381LJe@>Z(4#AYFOJ;5>^0Z*>>tbp(G9?M=wOe_*rNLIj|@T2-C! 
zJfQt28j}K-TfzEW00Pch00d5Svt17u+5)KdCzHq<@OhZnWO|S7Kc(v*sV4Eb%{i`u zLv`5I-9U=lq|7Cp(d@|4cq$IAl7%*eW;_vf0a^5~cF-P+;F95?b@pYc4f2!Ck)QV6 zvH%M`s@;m8sb*tv73S;cjl^c&fOl;GyKEfyJL7E7i61=zRI^$9I&Z%I3Symk03w`# z)v^?mH19g0ci=~(sA#x6qRfQk3jnN*qCjVO2(RSzf-9Bz!LZ-&VD|p8N&Kb^znc1U zLz@%Ejq#J(O*J&?o@j_nH~!}Kd2N^f2kHn%yOY~R2O&4_7BI=YCqXq(88DB_qyiE;TOk81Vb~pjS2$TL4Vge*_ritr*9jOea7u7n1nC9JprZ zQG3B*1B!53oK{s|RbzpDq8YituyYUSuKwjW&{ya@RIo!1mBThF#}g0b=FM*p@4v9m ztwHR{m(Nxc2uU@s1L$pp_Ms1`=V#tg%W6XVdo3C=F0!o&UpvKLm5f~L!g$o1UVMh= zZ3~I>OzhTsWq^CW>YC=2G8H6}Tq#HcTp+@fcZTF%7s5i6jJ@NN$M;UN5pH2Mc(DHQ zjKfLo{^c#6w4m9UQODlC>!lR+I@CVQ{p#H~s}7fS$>4L7=E3=ut>;2Dc5^|)`jaeu>Oxhwp%Cxf z>`8^#fcYJjndt*I9pRtU-B~WzdJ-RzG7Nii0(J|z3OnkJT6hG#$Pc20<^B5S2y&0b zHgJweP{84}KG2f;EHw>Jh?JtzjI|3mBo|56a^uE%O1B0c@_ijZj0Gmd%F-l^lrZI z1J&@`Sp)nJqdVLzWy-!FfPNjjI(V#6yJ=>n_zzb5gF{u{WQykBn1b*N%e?!@cwfH5 z!^h74R(I~V+QwZmBPdbcn453&tsh6Ck9L{s{us0=J{4B}#chN(pgt3%OQg#r#EZ$V zo|v9Oq;nXaCO6bwsvp`%m(QCY5L$V9Ml-aXldoa8>VFaTo>5J$-P$m2R8)|rAU!Hg zr72xnL<9r`1f-WJRR~==gs4a_5fD(S^iHINPNHVOLaFU86up*$u?r_$mPCKG|kcuh6yd zBlfn7S`L>PoIosqbhyc9fKaAiXF9@(HCe4|AUGl~ z06h&e!oWDAxh1pyk6;H8DuQ|AQx_pr9(ibVZRF0rBlY6_KZ39KU*AuSIIWHtdd$rB z$W3$eS!Lt1oSdP7v!%c7pTU1VmY)Fl2u?kdblw!cjVR4IuWY1b6l*F&hh_7oXx_fC zby7U-=yGSIq;z*@TFpfcD(EaD`lAyZi|n*J?cCfpg9pa`b7EXT7JrXdo_xD!{;odkF?sl0T8fbn z>}{u@@RZ0e@qrlQ*26aIHII{lSwVdbuWorRSi z`~9c6Wer@xK=9WKddAf6GuJUi)e_Aqq0O*)KhH?6+qt4%b~pCJohL7~TyqY0%&!9q z!MB8<9gDlT&`=$;sn6=Do%TCo0fHP|w#^LMG71~?7mH*KJ1f1azwMV#k^sW=Gj?u3 za4C>MmI6j`{OuaTxlRU+$|;E{=Wf&_e<>2}vZqvka&$+rO~0|Sc6c|Zy8fdEG^}qt z#QNSQ?225(8Bm}Oza9vAySJ~PNAzcKcCjrzQ0N0*V+lbYASkWjgiAm))V}^7KiOKW ze}EaC0s+5Ex6gn68cB-8E94P;iK?W>11SK$b;b+2!fMRw8iUT6z22) zG&Bd+%otZp4(S9mhu78s-Tio*he3kqH3^>O)jugHWqfxW+;m<-!jJ#g$7LD>!sFs9 za2gr3Y)}4SNLD6fcoV=&{qISzGS%#Lh1)=Ypc~8ftcEBzn5fTcN&TIJ&Tnt)p!ny% z&O_Bj!%q^;SZ>rvc7)wwh-1qgpy(3<4i1bM@3H;L^r0y8mdCi>`u;SuL-$==^2aoHLvWU*{FB$xMNB)Yx0}_>)li!V&b8Wt2K5KzC}8!;>^BzX zNQYx!oT13S3ckW|dt^T-h#-(e5?HW0qZA=#%tA`UDwL{?t!vGob%M7UGyRBpLs`JMv=X81( z?Dt$IeINejXq;)yNkaw%Kx}`122>7tEQ1aeZPK9?v7{T}j}6<&f<`MmANK#D-_7t| zR8<0xdNmrSGQ9S%Ty2TV+ObKga5^XOwt?sZ2`o^R)@zkv`8Z**BrtW*2z?FD5%>@n z7+$7~5Yed!S-*hwyUdN_2L(GBuI)%Si0uyR8UmrWF3bNb@Ju+6%c!3D60dO#zC1T; zQW8~eCbX^2^Ro3a#r}nM=!Al_PXR#$==*eJ?G)c;#p5cKQxUieCc(zg*Ky=fPoFUp z^Wdr^?-#{4U7K^ib#lH(=5_6#8Flt~9z(cS4;*1;Jek*ID>vu}PGOk!XCq*o0sva8 zfoM7N20#h@BtY8&6mcD`Xx3rK%MtPU#Ms&h<2$txF8BMDU8B@NC*@`+7>%eSI;&B_ z;sq{0Woe^9Tl*D^AF#6WTjy7jL&;PvW&rjB>imyzz3mtQAIYS2}zARXbD%Em%B*nxR1R#JGM`b|W#0Nm@54gWh&j0VG5J?@K z^Wcx$s_|*#(|`Y;FH(+YKC+~%;)K6509di+sSncujQgI=0~JMCrZ+#E)kD%Fpj&B_)aiey3{0}&XD$3#9t*cRrJN3lH!){YRLGRr5| zRlkJKRmU{NU}Q@*2a6xLWS5^DIy|Ua((weA(txi+3{SVn9EPbQTy5^ zngPS?he|Qg>g0J*QD4s$RgzOo=L+R(GFp~?T=upLwdw2L|Hw6>Y4k-fH=i zf@NJKPXiQI8k-m5@<=?Zy_GktmIQtGH;6Odc?cj_X*(t^JDW37V-yf7Ji4Z3 zR&kKL;w)WiqT4XFVLMN_LYHC>xGU3*3G=Qm4QTGwZ%*oOL{zuolI z$H5wJ3wG9GHm|IPx5hrt930QS&wBAy0*-3tlEDb-ig>i-?%4{d0cw=)YdWZ}apRQI z0NRCdfG|ph243~~UV+@cHOn9(R2z7*3Eau~H4DJd&;YC<>;zyJ=4dt*xY3cCgWI`Hb=dtj z`o5dq{NE`@@Fo_#1b7)iWo&j!ufk}D-!gVuN$y>{k*E1!B?TL%B~td8}gqgu`-i0fwy63OV_!(>Ny($5^$@9p3keBohEo<&r~^(W_{3K&=Mk>wt;Ok{;cYy=Zvk431{tFv-MFTC-r-WOJ*-PM03O7g1Yx;R3lw`d{JBW33 z=ZDb{*;JbqkFH43V-X4Hbz6%hX{y#`hzGfCV#QGJ*D2;Q zW2SX~q^oB!-}X8xUYnaqyRsv5yDI+S#+re*ke)JoIS zmTA+UO}B88BLH5#EUHq5zBF>}2LL&=Kj~rq=3jrx$u30ewHYpUKQE6vUUFV6fB#1T znU6o1Yx#o#NpoJ)rTHONvv4x3bl3e(ZOGfz@Xk%jY^Kh`KIq=8c2eOzc_Kt_!N3Xe z#cZs*6af(0Arq10Pnt>qmRTqP3Yh~Z=z&3pXX#&n@}f5yTgFZ%%INnAw88FF)zG!y z0&kanq^pMfe80(U-Qu+c+$cjfd6IsEO*7#m0i+Y_^!hnv%KC-ZMYn(vX@9#nGUZ<8 
zvXA8MF!7h2QB_GY5s?D~SO4wk_zMpI{~rbT_1u5?Yy6)SPD`@L+B4gZ7Z;AEpT2;QarE2e-l4+8d&#pbYnV9wzRea zcIlPk;qD*BtYNQmYxMeh9%)|<^|Zn$jbr|oP|_K9(&EBE$ZECNIP=<)gbN7^^Tk}5 z!PJ*>-?Q2lGfJ+G_x-+U$jla8m6(*cQ(%>DDX)-~s?+`)aMOndtS=WG3? zX^7%~#`(&gL@OU=sByNbHodcl4eKy_?b&$O7lVvV@~sB3|6+tNv=|5EWrFt4Ghu{3 zv`k_&t-kF}sFv*ugi>s*IlvtLca{SPl8Oss-vHGgHQt4PVWh?HaTWi2@cLgK8}cuY zy+CjQL{Wx|NnyN$9>F8GF)@>6)ew`fN_Q&?r_`2LI+!+4rzbdgD+m`6V2yLm>aAvV zp-q#@G#+Ue+vWi3cMloS+Ty=dDb1gbaX#j{Au!d=pmA( z?leHD$aJDJyD(TS{mtQ{W`H{R1F49lBZM!Rn!=|v7LFs`MB6TZ8>EraK^PLHLF1O_D#pX?V#$FoM|RP(1c zO)TYNH|E7OTt8G7Zr}1!q>h_--2S+vNnimNuQ7ZiWT%Hvt{$=8-_CwmHQXQcapCgA z_xL#9?dyJnZ8~wQk;8*q<61y=x+{^b1SFV{(XHXVb0B>tkcFoU;=_$cI|n@W_PvsL z^%LKUlBoLyDvBG^ZZW8J_>Emb1@;~?$3U4N$FH~5-qri|3oD7LFB*FVM08@`({*o* z1Ab`etQ^Bw=fcXZ_|ym zZ$b_p&h4UeROt-V#A|At8v<2R4Rgso=)X8f|D~7|YRL)D;`@ik{hFH>I|DJ?7syAp zTOgVx05KPvL3}>`l7Pt%LY_y7sVgZACeGWj@~S-NbcKeY#xk1E+GCNbac7#OTN4(W zG5-ZD>dNK5_DDQeG6!pj80~`Uy|(jEH!g${`tiDi(5*&&0yH%fqfLMAPZh+C7)IG)Zz9eraBD+E4j}B)9VJ+WntkpKC`p2K1F*;_6maQdg&qf>S5&81@wV#F zS7?5Y&6L03bw^P?1S-9FUrX364&cZ`gkL#4RTqr}a%G{9cXaY1y4I>KlO&h5scAC@ zLUh6O0*CR<^@j=%aDDM5*gq)>MuhG*yzrv^@ctZn&|x&V=fU|D2G&8v4ej5kes05J zvjntrsXdRCd+Lb@lJpWLt_?cp51XYo=+|@r=<0b84w<;f=HO8{9)rPY=XW|t>!+~- zDOHN&BH$prNqh;UK`2*1Sfy5w1D^kroJ{mSwoS57#y?e=0Z8Ye`bd(Mp+7OSo*AHC zMzz9k@XABPBSnTz)!utlwX7Y){INPnO!{Azl)YpZDLi`e%SO=847?p?coIY=i;aIL zAluoJ-jnGAWGe?(Bj~D?K6WTw&WZziix2^sI`Tjc5fXmoE{Yv&Ri zDN(p!{HsKh-GR(gcaXq1E3H12d6AJpRrf*{`N)SHI=^Gr`ng*n;|5RdRA-vV6?j7aD3HD$Hea>1f@-5!JAiX zB@sOQJ@(BvSFGA?j=;;+cL;`yUD>qByuW1S+7T;^cdca8#OhRJfBexzWs<5JA6I!G z&BPX$^dJ(G`CG?4sRa`PP$u-e7@x*&D7&mLU)-{4BeVYR_RA_pskDuD68#oF-nzp_ z&xY+P5?toqntHOQg|Abvc8Nb2nVu0pQryn_USz6br(=3H{{HAs3fhgu6P;n3YLuui z-&6gMYEj#cH&Eph^JACH>{yF`%`n_kryVj-C_j%PDIV{Ic%O0C?1`JJzm1g*vuW*w zOSusY(c|raK8Y?2T%jP~aHdo};h83`o=qHnLZTxss)BKB#Jd=w-nT>!>v2`&Y~Z(q z(c_8d3RAFO5E=4R;B6u}s8M#|&{3^u^QrwU;=s>_3HY276O0MM6gWD2n>D)5aW3i`D}$V(KJFl=RiK6w3w*?llXwExi~ z!~1{_eW!-!Wh|8B{w9%6!euDr2jK0kjyl^m#yl>y4ld)UGmhL>l<(x0C|v?FY@6wq z2>!S&t-<_=*~e8ap29?*T$&z5vV`<BfQ|CR>`!^XrJx+_`r z{&XRMd?Pz|>uQESly<&q*)(@_FUv|@=``tb%5&pxm~l6V?8=U2r;GC^9g;$Rc8L9Dpy)j zUjpu=)ov14YSa{K59+e`a;j{uJ;(Yz>br2pv#;04)DG^3q&a0s+L}30%%^fazexX_ zg$1~s#*T2B|IW!FsJ!SnlGxzQT) z$e!Atz5ajkT!uY!)iq&2Ir0J>E%!T5U%u>zpy^=A0)@6*&`B>$404@3{4dHhaw)Np zv3gw`DlLB4p}9>n1S+$B37=ybL?Y99FvDa9AIQ^DTh*~6De`c`v*5e; zK?)`bPcCbzx-3W>DV(daRv{m+@0Df|O`*c)0sY?)yb|5sZxyRcv)vK?Cvm18a>5at zLS$A#`evuOYR+$~hv(k~WCTM5?cj-8JGE=lZ3YkuLinLSOvl4Nue>(aGD06AJBi-C zg_MtZ2wjJvL{=fb^F+2pi~up}nt)wvAl+EN-LdZCjs1!9T)9q-`B2KTD?Dx+Q)TuT zS~-mp<6rOWa?CVu&R0>sKe#ZGb)&AM{^PylkyGE(gobI0-}zGf0zi}7QKo?nI9-5p zse@rXEGFLVowxN4XY1R`+;36~AGiC++3{1|oFhCTJ(UCVRePi)3@!cg4>6;-`crx< z3ZH6;a8f^BHJ3TT?yZBZ^iCl&Sa|JEWF9c0HG|$anJ$7Pvy=D(V>Vb`$MOrm=pP%^p02~1 zfIP=4)y5qN70Ggh4}a3J$H0r+i!yw;UR}J$a-rk=+#6Yonz5v#TTc!t^zgzOVY}Yl zKYXU!dqxSlM($fbJ85&Foq>Elk@Ak)yzyv{n<@#M22)VwGS+XEzybUOC;TD zkHi<7fuc|lE=V5no&j8WyZXRC;Qti%UHqO+}qC zRn9cP)&f;bM}W>K4)$a@ymvHNNC9E*Nfql>`1}*6!c32AaawD6RQxASwKD%D;Zmtq zM5N)5bzBo6sx+a*Ypb?4%3Vd)&aFQe#>^!at*&lw&VOI_WG`!mY@r$&=IUB9KU0>K z&>*`}ybe@zi}kFR0U3vdxv;|#ly6m?xpUtBZK#*=r#LIw%guo<4nHu|aziRszt4bj}(f}R6`Y(cXRp|{gY;}rDzQF_H%l8uQC4(hqO zhE*2ns$$9>|8kG8&m7QCnbGfH3|gzNRJP`c`g@8Vs1B zLR71|O7&MZ8j>M#6uNDV?W-vj>b z@Y^K|lcAh5War5yfRZi}vzPm^vaKLTJp2<$E_#0wAYgi$!rkv8_OT*s#xgBZ&EQXG z%BVqMzMRQQ zPUUU`{CZX+GWW#%2C>h-jmu_ldut)3`;^N+cQh|%R!z;8fqv2~9h-EByo+=^&yuKX zRbay9RFt-7=1+wq8^7pZUVNfpUPN*6-2j{(DbqxNE}Db2Yny6^aw4d0=oHMUo9Uje z)2?hyUpsoJf4A^mZ*C9P6{SLcgUPwBg@xrvij&{W11Jq@oJ$xxX5hkob3S 
zh+24Py*V%&j}EFJ@B@_$^EOB#N;W^|7RI&xNg>4PDtfhr27qq@BF80wuUuxWa1O=Wf+2_FRjYnT|Ds4>{dIc@<Vug)_A-S`y77c45@EfzOXbXi?m zDJi^Iq3K>oA4}f63=p+Z6nP&Nf{(zl^7{RtCg zx4br<17rE$=b2ebA+dU8v(o&mJIRYsbx->d3=h)MPCZxlT`@q{6K@fMw$VIzc&Itr zG%R!Uwzk{XT=3ZgG!pqC@G>EY%-|B8ext#5^3ryN{X2LSpP34HZ!s5k1WC2~0!w_< zAPzLK=RepBb=tc7y^WWT5DqV(0y}UiKB0fLc>bOf;qUo6!a3e`z_OlUFAnZy!P4U+DV1azTV)70$Neu(^Xh7TzAkER=~$sJL0ruYuM0gql61C*Ks-v`T@ zhFC_HFZ0zGeqKD_{l8o7##uhoT@yidljwiVm!O=@BlkGLzaA=kIxze zu zc?Yx}vX&WaEtF+)hSybLh9&e(zf`^5cCM-aeL8jJwyUczhpTVdG^euGCs=W1!VIPr zd+z9Nb-!*bF0U*LFpHm-@uV1)h+W}8dZ<^NQB{qMrLl$bD&Kb(z3rA&fvT>7YDE^- zK3eI^<(@8-TQcBCP>cIyRXqUEjP9JCBvKUW%6z=?xNKtazUGvuHu>_}%>w<8jag%J zF>}5_%3gr>py0?0HOd!qqERRFN>hU5I(r%FdO_Ne=Ei5$-`WzU8U#}$REDO$G{@_J z$5KkNSSBSp2CIMUc}@Er!XkC(td|{<=0Wlj*c?SV8;;|qm+h12OWPV_rDkH)GcsAq z@>}QVjzq3)$OPuQ38jrjAIPZ6M>qFmj_=*!jzTyh{8phxS`tk2yHWX9^}OOL6_(CZ zNH1m|5Cj&W-%LGwn`$QbyVe3|EZ3$#QQ?sq$Zv1}yurT{Fo5_HYoS`uOy~JcvRXa} zz)1+hVf|A{8N`p)k^ISw!q-JaM>+mwrv3h%!bLh6APqmuuJSuHML$hm5}Cw6n$`Y| z&O-)a-U33#W=(u|t9!9a(2M6D1*XH6d!ONRT`Yl2>(*7UGEtFJPw96N=OHZ&TzX-!LM4Pe670TCIVomQ*bW({ z*H49?Q%y*%m;hol&C(4v%%{UNYF$iaHJeaDgaCVJlzQ)H+M7}76o5pC165oq)l6o> z>Gy6=#Q<4fti*l3FTWSPK@XUD;-1)&t|0UCcOJMq`Sz#u4Z`CpAY!YzUD_%BPnq`{ zlUA`VE6YBmOjEtMY<-kzXMWT0oe?LYclDW#4f(Bo zS7L6aQ)$)aOEu591%L}2dXJw7GT78r5}&4g&9-{K!?I3?kzTu*emBC)-EbH`!OT@T zs(eI}dVK9H^jH8-m9YR}B=}-Q@*Kc#qu~uSTeX0cj#3RSbL!V*daMteUbotF%Dq}; zR?~!PA;wvE^}#1UzXVFGti6Jj(P}!QV+a13){(d&nN`p{M^=rv39fTJ^l* zFTR7*$MlKHsW|bmC+jN@Apu|e3|H=W@)@#B;NW2KBn-SDNa$X={!DV6yNS(1;lphm z){b)5H9Ru2vw&!3nY3S=PU~qpxxisKD%{N`ELg@X)KME%b5B=enA{lUSWpj>0kn#Hs1B0>5_Qj zp0AP5{y@hestUJ}&_1*Nrlixii1flP?F|2?9B9Kh5*BEp#*)<_+aK(g7G)Ba_$P(N zttk&a|5AD9N;&mMpH(E|M5QmL+YbeBk1q`YV+0TRYT~ReVx5Q2nLh3*xb@wj?&?~~ zT>JNq%P=gh5dd-unRu5EM#1GCqjuj!iE!-jd&%ngL_CT}iK{tvIq}@)eOjD!Hxiu@GY8jNyQJ)C zaf{$o?1T1Q{Nev_u(CZJ)|{C(s3F=;C+I3ROc@ocT}!YkHTAJQF?nadV(bfdK$!+Z zBVE_7nN!VlZTlYQ;ctGaGfOxmd0h%E8XYl50o(1tS>^FVJQY@f%VCNdV7r{O6xrZj ztnU-R_|^D*}pxHX&}ZWUlQBA)N`W6RI}0k}#0dM$h@PVLk%Hf`{@_OV2xr~Hd# zWMx|$TrzaBN=F&!hZMtfO3Y~nWDn>^ke(P5d2f^0IK@$?gz7>Pw<5$gz!9&5gEYgL zpjU`|ADf?jXyS8Q0Z3Rx{rK;>il$TpFoPr0dlBfHM^S#Y2vBG8S;;pUap+}RU)(k$3?x@$mOkX#`{^(HaUrUkn^{Wdx?k4Pv_!ejoKqBF>_4yt zyiAl>HQFv6v)zT*=@Bf;NSxzx+Kc@l-d>XW^5(>CjMyj!X5gFMG*Tyu?Jqm{{udGp|mKEbfw4oX#C57Yx2Q z=oLy^9K@;l3eP5Uhi27d5g|bPhsnK5$KU*+emwU*FHJDqMzD7$QdeDXDcEa=ssUn% zNHXK)A)a_`pucI!b&07g^G8n>Ec=CBRUK)fj*$9`1*OZFg&Miskdos_>1zTI4P1I@ z3A8tB%D6E(LN@2~GM$LCH#5(|lV4r@Oy$7fb=^wb&T$>kCKR4$EYl`F=52c1X+-h$ zC(l7@%tJZ}_fjn(eqQ7j;KjZU779#>Xb;013OjYovq-#n%C`fX&#PUiU))Jx-jFh7 z4E~k!Z|B;LhkulyyVPwdVln08^8J?N3A1~+W$e{<#as^M`x=R&hEz{WtojzW-II~Y z)r?)vZNhh22Stx)KZ(W?K0tCT81NS@1aW1E(q9OEoyw4%C)=EqR=7TagDmu~^UncF z=SZCUoR;^DWVo+F>f~0H)Hl#Vg)%5O0K=fOnt!c0yJi+9LN|(18-nSRm#}ZMA_z)5 z&Hr?0e_dk~6dw7cgKq}d;1e^~L=vvg4`*HqrT!zVPd+jf932=me$@@fzc91XTG>C} z#B7{vxyL=3X=~}OCcUZYH|Z?O!d6zY1QYZ%j$!nfQctyUat`2pS1iBUUu986ynL%C z$!l<3)TazHKiVFb!RTQct@pC3q8#;ky0J#A1o}nyx2Tq=5v{-gTGEsR#%Z@+zo9W4 zz?z9_iYz(Hv9|&3>4!3SY|BVTCQYW4tlB%+wekRY626auZ{ZYp@VCNL!z9jC^^!e(g?G6ujemi~ zZbY2W-5^Am?h$}NFm%vBw&5yMnCL7+fw@Gbxp74q=N~^rcd5QBru&WdN6Wxl5TSvB zxZM=!eDq~y#>g}^Pym~T%3QDb91%3nAIr@9bWnvaE;_yF&2Y*ixp|GvA!Ly4VN zJ?BCB;lR+wdy}xU^Ublx_A9eA_bT zcca}-i$=qd4BYoXcA$<70f_GA8R5a8e_Z?O^cJ+=2<1L0tA{|uhJDQq@2Pe9R}Y)o zuK?+Z0B+AhYHVQ{U#&6Ux1N~o`9xqp7L8xOG}`#1s<2x5Uf`Q)OY{lAiBaO_$e&$Rb$t&|MOP++9PSszDo|{ZTdjt9^Ht+%5D30oljOm zLqIuLg}9u+aXX%2h|)?{E|&;&MmTbs|Kc8f)fB2T^xUR)-)du3YPa6`0hrO>aA^L326HL6`u-NY zUnAhKZvQlfok_Q{STmQM7kM;|KTbSpotpPu@h>KB{IutJZ0F@@P3CLrZ=?1v4e%TE 
zkY@cp6!1_eQfelKB}>^m0ioEzs?c1op*_)|TwWitI@hki6S8=VQT48Tt<7uqE9c`c zp=|vN=@~#B*cO^9Wf2nPjQ+(Yb~^sKewrTclmF=8M*_>^6c!>cvHuuDVPWEvv2-i4QPt&2>Xhm1rq{!SzKsj#lO4$3 zj8Z*Zs?-^%IfIiBcRp>l*~Rol#o&DTquN#P$7>husLzU9c@j_$K#qf)lf^7^f|3zOZzhJOIgOwJp6mKk77GY zJ>ZuVXs3J*7M3xV_>+jL&YXlS^sv)z7DMQnZ$OAH#v*mGL0 zEoFcNU3lUn$!#O>llS-SeyfDZ9^$vCpc8s-k#Sv zW~x+Pu=L#MG^k^WoW1CBYt#FzMCVt)(5bu9t!c#TYxM5#-ha1~FCK$iiO|C$+XDq~ z#F}mxls5O;@pX1zU90I(#C};enI)qI=xr@Yh$Nr=B}vpDB1xSm9l}GetIUl{n?peL1^(-|Fld#z;X1_eDVt7gD^i6;v5_ zD%0I5mNtQg-F2t^`P14kN{boJaqkNG>E6x#NA7S*F2&_rUEfOgNQFIE3nj@Q&^>MLzhMZ4L+gV05DrCe{?NhuHLyjB3A1Dqo_}@cn zqSp{Dx2XDDQvyO;@$|x2(=Fxm=m4?Y&9NG9HgZj23(beA zzLiCWZp4C9c@QF#X=EKj@n+ByN}Ys67fbCwcK=(T&ey?U&w- z_B||)pIFa^3FVEc9PX#;t$CVHS2Z2YfCu&`(A|rODQ7}Yr03;$_DCQ0&wCe*boLHD z!ms@#_;Vq-J^%?>mRa9^e{ajNAG;U3#N#_}{@`?_qc~fp4QY~qTc@yiu8n=i_lNJG zB){bopNT$+;tc2cc=trY4?LOvb`L3{2l%1HUA$aFc-yq;vd+Mbl|#-Zfq0@S?f35) zlM&d;27sGsNXvMM+~JraYH2_OL`YQmg^PgMisBDixxUAOxVKFEC0cs*Ba_;k z5fui9gXe2pl-8{ECRvacN`i=W##E|q0%%v_vzb$xmTGz@b2iuM_^(`4LcL z+g@A|;~A9C4)q3T#+B0e#Z5IQn~gJ&9FXdzBHabwkh@cwILTR^%b^Fh`Kzo1{Nxmn z)@h-gYtKVTQl8YxA`zr#oii!~c_6hTSG9Kb~C(WgSkq*`1 zsbuq&ugMx#ZqzMn%F*~;T`6Io&OYUU1FCKCLibzUdyR&RH={2zDVgHHd!q(Ak(oiM zMFunL!6|Q`OLtgwC!Kv(Gw?6-!gDvysz2myb*N1X@;8c7GRCKsFunJrsx;s#FNMMx zFVt%x-QGB;v(4l%FU%W2zPfL3>1}{%X`Fg0uawUo^I+E9h_ z%3Z-Ah$6?_h0ALf_mEn|GxI&C{>#Q%GCD8g4Th0UO-@5ZF3$opY``* zn-SF_<}9lA<#x3n{bex=|K5kBA^~UXok<}l7+E-t_9$_DZFMti^lWCMdP=xSn+h-g z<?~78~5%O<#qx`6NS=oOZ`dU(7;}r`FCJ!1-a6~&-sCDgCfJf_ zKxjKJEaYu#%Bng$Ebp$<{~}PKKxbj}dW6h4Dsdg!3}%#5yB)XsCxwoYN9UbzHU8fC zFZ!Gx=P3k+O^-FIq1f(zf{}tSQGZd~Ta3COs3&^V?9}(QtF)Y())3s92)jJBJEuwb zu-JV?ctjA7oqqc9iM$)Aj%@1K#NB!DyX7Lo2wU)xS`~W=r|pF8nnryHS8_^9ng1gD4{u)8`3BVmU_dnAzzU514Ut>$iYY?e46r5d`cxlZaw5MP{%Poa61M+CyKc26pFqov+YQdPPZz^8M?!O- z;)ARN)yHXiYrFVfewIo)vH=Fabx3ip8Ivv6;mYAUZ4STrb)y_6&N|p zU1VRqV~@-K#rCajs?QD`j_AFneNTY(P}bk8vowQla&8M!#f&VSRT;3azZf4j8Zvs7 z@nH-PNU?#kTH9}Z^*q`mGP1ofsU6K9x0BH$*7$>!+D^8{*SF7qY#LK(-g<^Y=kvGr zv#P(!QJx^mxRaaC~o{w9wCq=zoEP<(DuVNMljLFlLe z;NiZ$z3_#Pi#LsB-7f;I3}5t9OD?CQH?cY9bF<2F4L7LyXSD-MjbT_)f83;cxdF@V z`5@hm--%V1Ntw7w;Sl?SD5#M8VSE-gmd{KGdE^In2{Fh zZX)s~Pp}7OM_!mo(z{4&u)@X$TkD?l-WArgH4R?~ zCxL-#TbD4*UNM|3VKN2U&Zi48uo*%FEA@-7BZWikXCa@AGem%j>u6mLdx6dU6Z4tu zocm?1Zzz-F&_U@D=&NWrfj_>*yA_Bm-LahffTKrEn*s!ffi9~-~6;}g9? 
zMa3OG)4?(By#$q4(j~Mkz^|Gl#&j!&^vKdB=<6`e+A4HUACjO0ju;!5447$R>nt=% zPPNp0s-J`CvKl_7`H8+HpmAsEJ9oqYUVHmMBvKf4tAP--Sg_+_W6$+lAnenF+;E2X zR(-$HFYwjbBo8)9G<`uqi?L(B&&xbY%1ZNdk=_xrDJ7A&NVgC`U;5e6!(*I^XrD z_a84@2UnQq2k%a8sp0n`st4z1rP1)1HN;t0s4#qB*Tzdxj)?ZhjCAyp1I|t=6 zZPtLwXTN#cIN5Im-Iia|kNoyYGSHOOjcDlJk)iU>K~ zvZn8Kqvm-{<4Oa>sE(B{1RWkE5jA&KY)-gEwx>1nHn7`R_})8kQwFbL1-UcuHo1dP zTC_oneKS@y_yDn|NeN)Oa1JulEc_o1zM6EMc<(Km5}%l`r~zSwdK)IcYf$BEQCj)o zaqs6&6bF4rlZC8gv*nM29He)3+6$bqg#dL}v>{YPY`R#ib!u6G1-DvU0u+;WpfjLC zBN@9{%;4{(GOK|+1Q9Yb|A!+NTJ}@%))7nkrjdJ`3EqGF{KNmfAs!hXDk8c%L3|8J z*xwEGws-HBC^DNahc+Zazd`D}Ew|xf7C8XXWBqrcr{}*BJw`}1P`S7R1(ytl0zRU^ z21Ii{03P=Tit2s^NSs})bga)(HyrK)1Y_LuO9`v=&N|iTx40%QqRD8$D~T{hO>O;} z&zrj9L2fRBd$uk*ZC-WB2E)dIL*& zm;5Xc#5)!~fk!Ul{o!or43F;x$x}l-j$GNd5049vkdt6NJb%J@AzsP|ZB)5ZG8QYE z=QnRhDG~hwd-Xx=7xTD%e@}xctZ6w)4#~u#rfJ^Y>oPd!UyLzFJjOc$#f!MeU&(@8 z9^-+xlNZVE7wq{2HyPfItgY~P|18UNJUoXkseWjb^{{GmqSA<&UAKPdAC_$|=|4Xo zc+oJ|Cx1S$vQ*Qy-msMu(ozpA{bHaqIoTZiM0pDGxYKqoVqqI&_3aS5zULHp*CFvF z7V|Z}!-lcGrr`FPL>ouP^?PW0cZ}bdwy!z=zUr$A(lZlvx!;3YJB}rDvp~Ad9ZZ{I z7k;-Hd=VXj0>rxt#P^1NzyGv-Rtf zYh0h^gSH}$63&wYRO)li;z%ZV3QRhfPb2oC|0K8}^{@nxx>ylwoyvBy39%)rEfjZ{?@{$>QXE5iCYFc6SMh4 zP=YOB96+K-3Fv!Ard}xf&Du^IpW;38sNE-MLMljAj=gN6wFj z|EkY)`hhx`;5(dj%@fRc-4lvK4)~phXivo4mt5=cdq4Gi)1aMR>HbA}#qB4^e0usK zmR(18x2W;HRYDOi@&sdxkZ&ZEL_NDhdJ$0@77Lq$5(L#Ik83A{}W_X+os;8kMG0=}HSouaPb-p%)R6 z4xt1RdQB)HKoY;@KKI<`?0vlVp6}Q9{osM*A#2SwGUps)j`5E76?N|#yrni}a^@5l zPj@O6X#r0IR8b}H-fu~F8~q&o5_db$dmXYp;d2o+KUP4OWCj>lC09S!WXxnVg+Z(l zRWaC&?|#)bBby5FTt0F%g{$K=6ZeygKC}(#S&U;qKzH67==t|u=Os#~kRV%Qo^J@Q zc{gNJ;#C9|hyyRyigM9uD&|jQV&7(pycVqY-vsMrJZGAgGkQ+dyFpWT*D?ccGVPPr za?~*^H7GE+KXc~ne9ag;2XF1O+P4-O?X|b12<%5kaMDe5`@j}JEdboc0pM-0hMt%? z&8$){X?aPrWVzkoQn*0vbnQI+v^2^&h^+?wjcZc z&Hxz9=*!T{t@3iuGCf5*Gm}YlkE+7Fnu|#saa4j$3zAJVYYn1pN}?Za?z3EbZF13R zNb;nVI{wsmbt|x{1E>laQX|2*(jX#RbUNmJW!dMt5u0eF&s2INbuxd<{s6R{o1{cp zssJ>LHKARVG0t~K+*6^<$+JeL=sGNMRW!dLKl=>4&+)hs0h7>aPe9@vmx_+nO)b}i z2x3g<<|zYq^Wv=X?dn{h*#&Ft>jAeD3rdXHXo~yAjbl`l5^E+er^8N1y z%g!`30QJac-buEWhl3T4F4a#@Id9rmep!w2DV!oG6aD#EZN{Fw>lHLeOgI{Q+<&8s zD+S6Z{);0Mnq9hss!EYbu3SRnrkvjH#D=>&sN5J<5rp8>#V9qtPPKy_>4?b%#n{T? 
z*8_J>`^@w=noeucCE7(tFms0M-R(Xv&o^y69!Kc`F=7~)kL{_WOW6!RSo`tVBXQkO z27rbs^S?~dC}d~ipr07;;PW$OBYWupKPMiI&xV2Q6S$z{B~rOe?|9mSZb{)b8X6$+jz4t=KCtP@t{9HL}j zhIU9zPi>#J?BP$kHj<~YLq&a-;S3c^G~32&+z~qqmSe=JDa^v?u{t(&KE~#rLA`?| z0irtgXuBL4j`3D%sCIDIuStm6&h@a6-X1S-3@B*}IyYJk4^@$jkZ0dtKP)S=tL|a6 zXTU7x3x@R2C|10V!pg+X9}3`rq9YTS?*p(`sEOTVXEYhqLY&=jQ}uTtzb8?ZTnjEc zA&BNhRRzo(E~nc4+M|$%%can{Auzt|Csj?MR$AtEs^%sNm7^?^eWrz22YtDj9Z1C@s*4eV{-KRY3~$FT`(I~%VF&7Df153<*_qE)Iy(UZvZ+R-dupxL+hWY{gI=*?5icb{!V4g;$cN;r zE}X1wEu9=FQ1Nr3(&oi}=;7Tqp02?*Fm4pgff&{soZxmmg%TdeAq}r{NBBJ%3iz&M zs@G-9Y#8o-%oI+RLe}J_4x7R5C(jqp-#8YIIZCP#yGJjm-GHcvilfE^Q;qfvProC* zhWDEV-V3zfsxgBTk4DM`5NYI9^LmQskunC!U`A|jH_Z?C7R8|)WVa!=rX-)p$_uwJ zon=22ld;~(Oc2IBd{Nji@a|1v3$JtRuVlNWZcO%T_aw8K^!joJgp8W>rnQf38a}uq z)e>gscxf!Y_j#sa#$so>-n5zEfmK}Mdr*?q3sXxt7IsP15yEG&cHk!cMl(Ixo2o(} zHi$k@2=H5GbMPa2pELl{1mwN31`-?KxtgbHb(p8>S0af3r^Bd!yg7&D*)$A>Q0vtn}f+r|7tnr!MErcY$W=ao!4Kh|>r58h~W+p}|vjnM*Gj zyKb*2X&uWFy*mi0GSv+Tq|KS6=d~B|lGeJoPw-cf!ri%c{IPj1O=BTem(>V*2o!&E zpXJdZcZq2E!m`+*Y4g5g^K3?%rCp4sYaSyf=_W3^12^57ouoB+KP+~WN!}|~`Rw^i zLHtH_Apy6_%mSX`L2qRszE&giF-OZfRh}xI!&G1MW!4^_q*C6Y`p$4WmiQcf8Yq51 zI?zO3USMk~g3iYT2rT=eaqoRfzIQ1NZ1T?xa|bl&eRKIPeluZIdXHk~J+4)mor2-x zpEimSgOG}Qm1TbHfP($ByTbzlOV&~&g!|{ow=qYCv+!E&540NmPJKIy7w(p*oT(V` zj18HUL(~`}Mm-@DVzzGW2d;0defgie6uY@~pKMm7T&`;c?{VPeC^SGA)>YJZ%k3Sce6u$_pi8!J;7KC`SbkF7g)>9v0-z*^k8&G2 zbH3OY3qb4WO8aTkBPmA8y>isYC5Bus4!`nlZlx5qM-KL7N&{IZdU_W0MlHlUJ(^{Q zdimq6YeTO-xqhYb^kb=x6Y;M7rtpE-rhkjw{wto7QnJ9DZ4#Wfurq_55xe~zQcLsY z53$(p%kch9iGJCOnl7DN*S`%_d5SMoF%uKOIh32YjPRJIThOevH%7zT@71N+ud0c> zce(8G9VEOHOSD1)lu838z=9q~ZRf~xf+dJmznWe74 z5CFcRzimo}aB%oqFDsU>OW}|eQ=Ct4=dbtJUH~g%2WJZ!^eu@@yXTxW^ zL&O6w_j&=Z*7yk-u4GjA?qx^L9}VwCc0tKA?5@3BP_5L0(3)+cE`@QZI$C9G``B^| zGL`MfDf#_g2Ukq6vl70$#;?D7BXW+jfKd_RE_s>$df6>5KR1YX2O0?4nhXgiyildO z6O-x%?cC)#qFM|2E@x>HdV7^2k;kS{R%g3@EhjN)Ez6@V0O4yhmYAntx+VLM1y$ZO zz*&PB5~qlgZsUfh?|VbC?j#RCDn0M)`KE*V$E$huG1wgF{Ccg5EIzZb$n0C@6;|V_ zCniz}u8I{?&Bo<}U#R?-qvpo57$E)VI%D;y0ZQ?Fzi6*2j*LxEdPb6SUM)#}5rt9!27n^yg+_ppboFGxk_cl*BMDL$6G?>XI#QQOw;hf!mrw_Yl1Dy)0=4If$@AB=Qm8e6974)vwM zcSXEbVq@2hTmS6qyN}@w*~iS9dzq%+1E|pMFKPnK%=M&_HP~M#-opffEYVfkhCkfM zL)oxKK?79gIXd7{Q}i`0a@4HgFg>x-+)y~KFt^%U`D=vr%|prDXCL3(u=wWoh(<5a zejlek4O0w4S^jEb4N(5od{N?X@5w)#SZ#$53+|6gkYq~C9Qy^JFld zvWnt3hGL8X{PFn!6U67n@uB^tis+o+jXIsPk2_e8o4?a6h`a}qpo+g3(v@t|aw2BRyMAXf%*E4?JW(@NqU3T+8jb+ zIU|T~!zK@oXTNHmMPF17(p?{rK|@eMS!kZ}Ce<=km5lkald5sz8H>|(m_huqejIl|BUo7J z>`d=eraJra^!e2NLZ~%wIAOz7VzY1}iU|l!vBPD&I3Ok>C9shmHGT4aNf}LZ3Utk^ zo`r-jImta(Ma{$;p&cdGZY(Bk)R)fW)=?lcn~2&yrTb>7I`pK|I6`{Npe7vLA1DO z82J422*AkG<{D8hhYLdK~yDNA!Zug)Q z{)R&~8!N`CTMqyu$~`wt%*?lvmDIunpYoB>mnX99lo-33b6P2E!L6CxRfAi}Y z2Dy5cQq`~BG|Y=%zfNbk{k$FyC>n6#7|G$r07t!+#lEO4PtxsuyR(z%T}{gKyhj&{ z6aPWXvT?`U{_VnR^)XsmY0_=T=72k%4XTeGiY(0*)UZ%T-Ejgy%$e z==_YZ#KrQ@l_qP1<^JKOH?o!WXQvK4FFT<-AhQ5ZJ|jRoAKMPyZ$<7|83$VUhCUhV zG)O#X1iSXVF5obEp~jtX48UmNO56Wx(Q6G2VP8>Zk?)KyU%d}N>Rk%6tmFNugjJt@ zJ*zzCi3rkf7!oZQkb`S4H?# zwX582WLh~2NwxWw-=}9AMOk{bDgTqW}?@1=?iAcz4yy7O1jom`qJM8Se6T ze?7-fb!$BHBRnyPnFz+vgsX^7gEOu}N*bRedA{xc_T=Vwxp>Who81GgYdJFeCQ4j4 zFBN~+*X*?SN=A-lmM(QZFSu7GuSQ>_>_KS`ytq#WIv;g^FREw2QI~6M#Mk76`D14~lxAd^ z`oXxOj>ySO8q&G&h48Tk*yK1s4=%`aB5Zca;wUqzb}Rt)F@d)c+Nq{e3>i*7nd=hC zHi&FT&iW2^>n97%`{fY{$FT;0Ho-UL3!nZ?f(uCPI65wVOq#U1w`isJ?GAejb$nnb zl|J~w>L%vQ;OVbW2eHqM0C2B!k#(wZw~T{`KzGbagQLScY1)rko9&ed!=dS!b`!lj zgw1a49L@$O_I0pryJY6&#mB7{_o>zl!_<6f-Umkeeb`_dIg~+4k^C}k>Dc!Xfns(< z4_?3lB>Vl2B9Hpv+e%TJM8%V>?!DCP@+9>oq0t?1heS*QVD{>f(p-CwXr}vii`f`! 
zL1tKyvSL64WNk{MM}|{gd^7h+*>bc^gz5@_3y&?7S;2gkk$hjEK+c2eNc7CLTM1zb z=F^<12slV8 zMe@MJmp?{#DM<+A_#@kk2#zc)0H>ZqZG>Yi+lSYrdNzo-nv1&Crr!APjdhyd-?Rp| z6zN}ViA=G#r_<*1ZeedZP^PIbwmgS-EQkpZD_Hk}iW=)~-CVvd$0fz5f3 z-`StmlU1%&9NT8NrFhL+buOv-Oh*}pFEQ?x>tK!FdGgW|&k$D#DaL+fbH`#|YTo6( z=%a4XN#uIw4wPK*=t;Q_j`B2L-%l1MmQd?ir_|Yn7K(wpAVXut>-WljS=`!wiJzrP zLNc%aq@tTbLf1!jg|UW829g9ce39(O`v~9)S_OvE{{M)uaf(~g$VpAxmAf}HEuV>99tWSK;osm}% z@b?f4P$s_156~ELa^4%dXEvkxvZouCNGjX2L80GqmlVN2{-m-6(GaWJZ!a#c-(@7w zxA)n2A$+DQJ346S(Dd~|Q95~mq~XUu3bvt=o;g z^Y83+6-xG)b4n8Pj#~O}#?00~T)WQs!phE`U+G56GCQA2@b%)G?(q>b2|_nidp9Ce zz3Qz>EF4(JQiPNU%xA@Qb#B%_o&W_icJmd=1R?H{VF6kgF*=On6nMgcdCH;k@OEO@ z%s|pDrd~e$PI|?6*H;VUX1Rnq?5}}~0 z(4IxyF{KSiKd(wB&5oyWNUs?dFEzl)0$g9yZ4_i1C~xt+ya95zgUaSs0O|@Myubj; z(=*(&lV}@xpvS%9MJnsMwW96C``1g~AOe-6s{_A|z@E^(KWfkhdTm3V!Xlm)$JeYL za19L~RbY4&i7qc?w$G>n4qWG*c+hC^q1l#1yEM4|{Z`cGp;?sOkJJt5ydLa`Tgd__ zw%i1SMs@Kb6g6--AnD8TlWNKrf`IFlGrTF#-q{NTd)Ylo1lmHJN_Nz#0k-QzAfvU4 z*(R~aQm(}k4GJjXI&9FnZ%{l7TGt$Dm_tHF-VTgXjSmEf=DFJ7DS8y{q=^% zfvUWJ{x72yL-+r6S@qEuKdB@z?Gx=4z(KSv543mN2RJ;O0W$n=b0{o8-YI*y6HUCu zOHu`No%T&ZeZKTK{E;pBwK@1AaY%%=OL>$SI4zUz2Vxg0jTyG@4~I$lTl zUNa*&4bF4zlqdxX8(yIFC}TYM6ItE(G_HHmuKtuN^{ll-ziGSAxrO_KIrVK?HX ze&l=IU_VrKhq8kfl86PACVqjUzJXMt(4gC>YEl7(f^#+G~=&`_=~(7H9?!~cV@-A?QU zqD1>(fGIBIT-NHlQ)9-P{91OGjfJnFG^XCDJyMo}jFdS(t*DEcMP1&Ui*nTX^td?Z z$B7n}cAqqg7TX)>dY>-kF(9lq@$T#6yl>kDlo`HQ#>-uvq7A@zPyslx--{Sh{mz2r zXLJrFB%u>Ef|Mjc$*`ptP!YpSQ96qNX|CJr?b{$+Rm#QcK;2(6`uEzA?363OBx1<> zNo5HbW#!E0po`F62^^dp{1n(7PCKuocTSE#*HJp;Q{y09Fh$cj6uQC9NO>hv25ki4 zC3@505&rsSqh`9l)4b~W&qF5E9w$yVOOi*Es{z~}=?Vy}hEFM-O^U$Sr% zrT<-@^5pL#C*VB>nz?RLv}HUm;&eBm7u$F$vmN+TZl+I--cfSg~7jVbgbP#Wo% z=lQ*5|CKt02ZDE354Id{qI>2xEXwTQc1H5SAc%E`hh2lOzya6*X^q>_yLcclo10z! zNfnucVI#$W8Ic=85|ji@i%s8(><4A0<*wQ@(A67D2J=F_yBHoA+Nm=^!fdZ`cEx3Gxy|Jp{lj0*kf-JjDTsWWkHe3sC+^1>zW>&~HJB z4cjkr{``Q8KL_;de{zExNBO*mY|}hlu$>%$qbF2ke*2I(2E{o8-2|P3|7NptLWD|D zI7RhXKA^3c&7A0%4L0vD7TpE70O%k1 z3~zs)H+;pdX#DT=?e$?O9>_nte=!3EWB?g<{;pI=e zf+i2ZxCX=w!5-21lL~NQ2?UrZAn%HDukzNOSmt)Ms9`C)9h0#+YVzG>xh~-gutmiI zyWW4EL+A;x4Bb+>?2{=Umm60qKwB)O*?mVOXRy`cde6l-R~`maR!S0$$_ute6<D3z2&KbalZtzx;bD|N>C^^R?Z0bhg zr|JxKz44V&yR$beXDk5s`y1WWKlgc{-+0GF2?)e2{Zc5w^xh*w#U?i&n|R1C=$AMo zDPUQgL(TBc`%)na0)l}vKdDY3?*Qj;=3k_-zxAMhf4}^mHtiuothMYc2xHQ~mF%dp zkI{FDVRLdnxU0}5Uyg_vV&PVtDy~SMD}d~I(+bDbm)p;>4{pmiT2iL8hBh$vTh7h` zCfQ*!>~`MIPZn|m#6c;=Pu5dy@^cC-?gtjk-b-N}H`+OOEmiPw&q{lk-q|ZLXJtNS z11gq()tLoe)1s29EbR}B*6!Fpf6?mn@k&G=PCOVCO5)71XOmJF>r5(jQS8vXXYe%c zJ%fl>MUx>`jJgH&(l7k&ei`WQXQ1M$mGNK^;7xK5iMsWk?&cgrK0IS*$HN&vwa*vl z?!K>eM2b$m@hR)`-wpBWb@{D%@g(0IAI(j8*+LC$^B~^W?L2pxVkIp56mM7Kr8w(T zIcM0rzq&svV^PbRpGAlMr25w1s!&8gVx4z@P^L=yzYpcVL+SnPJp~vQ@%eIFs>OcI zNX3KQH96ymXZ)`!H7xG-aJ}*^2MXrPa(a*4X5!8YyXwaD0Ab#nw%-_y;a1C$;Svl3 z=bSb->pjnw8saw9c>dg_yBzV#m1jvJRi7e;^3?Ur2l{YEw+c=_QYsn1d;dp+m6v!6 z-cblp`APLT7kQ*D@GAzt2W<3x^9STm0YLrKUZlDF0jQ&74Y`+S#Ta3U{3I{&XU?9D z91a|zfSmoN3z!+XpfD$K%uqmio7{*Jg0lV+_r8x!+5a*3_r!Y(=>c&06rh2J9EK#! 
znr@ejm63!kV|Q&mkni+IgERme{6@*UDgIb%yoNUlMI4mr@i4Lw|Isrv5t)JY_;OkF zOY6Y0Wt}-|ED$QfJA#f>VU(!kFRr@!9vA?!-3%~Vu^7t<1Cg2L8Oa< ze~NEhbOx{wKMnvl{069SU~6UvjrUEwQz)k;c6lpfy$_JPe>#|Ol8rZR>R#F-d|)}u zdhlk<-M z?}jvgst+n**a4a>^C)&Y{}>ezL8$%`^t?X1d1B?(sD23lW9$t6dHcHuoBxcdZO^*!s&eU#{P!AJ6mfeV`a@CCLtHrl_xoucFgK z9i9HX7Kp`r^_`dVn&9qsTU~noB|%~WJ7W~Digg4%aT3$7gBjsDh1Y4tYDkRWG!>bwJSt6U=~^IVK*8)%if&$9F-6`r1; z)Qh;J(UhcEyYN|TCoZbabiV}5WG{DcJFlj^z|PY>?@J8jvYooW+6^wzVur_wS3gh8 z1rH&#$)P0G{0Y)kVi~@q!&tLm6(NLwR08i0l$4KYW4+ns!+!M-=T|3G60ri`4r_HC zNeg8}xphc{ZUACyOH-o0?x1F;ew&vAp5YM_bX+f*suEhd&s0)4NiXqo?oJK07mpP1>{r^%9MI5(@C zytXxKDgu`gEF(4RC74&ZP6E6N;Au?tVdm~%^uY}G>F)tL5?`7T^}(MQJp)Gw`3UfMHvktW+*_r zi0H39>M^Cw_#{O&&NC8PHNao-=~i9hqXqVWXSivu*F%j+S|Cjduw{}!cI8BGqhZqh z9CM~IR2b~G`JF|RBH=X+{tN2dXJvMcH_qHmBfM@MAX$jn9jY2~?tYb`7*SSLgGxVw z+@xZ61&ErIw?}}7%9Z?`bN&5$5RHhn&_u<+QNe_mFIhCk%w<09ovdJ%Cgu4)#xuRp zdB~K)mM8Be&e0gfbIfFeYutIcb^u6?r)H8J`y`p~+B*}cb8%l{&5e7(RpluVjKq{T z;T_&EM8vtk-b&{OpGlC(LjBEiPU(f%p#3{K`m=A>b=C9Ij;l$i!$Wg{T+UssZRi5* z>+-nt?@H%-`T;8>nA|t82xr}bV|RJrjgz%izA70xXbTidCENWJlNL*^^_h%O&9eaW z~*&;IM#nf9`8`|ucAB@yd*cu;o(xqVC zTJWTzWp&Jrts3R$gl_F2vl6k9@w3JF&kK^;M84WU$k_fO-nb1J(jXK5iX_AsBNHt? zJJzNwwQXdfV99?c`s&BQt~-xiW8B9Q!Hq&H(GqRjyT)W0uH*`>V&|Q@h0Wca+kIe; z7v2bQ1g?oAuL*IrqBdq{`H>NM<8hA(f0Xj&k`sF+N3|8tvh3?_#}xPjmLn9}WNvL4 zM%;MsY3p_622bvI3xb)Lf&a=o#n*iljcg&yVmqpQ%(No5|4@+*Wtu&m>dw`M-5|Pk ze4e=EIF+70j!LzRK{GWm=E!w`&a964B`-9=?$xI%Kj72~dH%RG?PR^>*{3pLg~A1I+F!J$?&alJy;uWOeQT6pTEwL^13Ezyt-eUTs^t{EIRr6 zM_ulamo3ibw@K~66M%&L!uG^T=}-iyo1^#|Nq6jIaTL`}HY(#W={?tw6q8OeUT2@R zX()1K0kJd=&}{x|ckXDM^)yRTL`GR~I`4gRc*mx{#vp~=r_ONHst!3MD&1JWeI-#e zuF0PHRt|ORsgq|_YBDsBGv;s$0dkMO9H1>;*KPQg%7k7nJC>XMIe zBd<}%k@FU(40-e_l!W;4W`5n#(jdll6wkZDwPb={?U$X-eAI&p|NLmVo(tZ>If5vJ zA32x=D)r$c2;lN&Y9bvdS4Tv}WxkX;i8sNkwQe=N@3495buwRI`Yx4w1RfR*W=1HW zgMPqHIPL4aKC=B-I*DE@%UdnNYS>!bP3;Vc7>abcIsuc_aGa4}H~X&gy`a*+L~y-| zFY!Q5= zNW0^Bi_k%IEp(XWsLphBSFgq^^*Lv+p}K;7ZLEqkra!4lR}}U!7<WDeA<_%`d^`4lVVmhEj|pO~Ter=7?@wQ>BV;e@2t{#Ftu1g&iupcJ5hi8!Ezw2iYx6CAmg7#gT9^O<)@wTJ=hTJXnod3< zp;dRik;#qM2g(XRMcn_7B5{$Ayv$8Jvp@zpL=uP&RK$0pfE2_vibU-OwPI7o6h6k! zV=3zgMcjw4g95Cd@>mR+(Fia0M!L>VJ0|mLE?qFUeg+4hlinE|jNvz=EzbE&MMaYZPlnouf-Zs}v6JYQi$OK8_0O*13iAx(hsZI$&u)Jy4XG+U}n4ojHJXc08`J$ZLku>-7!?{Ym$18nv?T%-S?tTJw z7VJVj%)rqI%Dlw zv9xK-&AoSVjiL8}Z&)20`>1yTkZ%$V1cQzU;zAO`3(sB6?AkS{wYX;=-4V%g+5612 z@lz`CN9tujNE?Y@BWiW&X?d3tb^aLJ=e^RAayu;J{LZTrsO%X{DklE}I2SnDtshHJg6OclB77QWDJE*c3&RB zjCdTIX+6JgBuoix(=a;+OQI#UFRjPw5qV3Ak-9jZk9HX6Qx2KuMyjW@{MGA5$~Vf5 zyi8hb-)$D82%n`$hG!<~5gjCewXr!)`e85&BR2yzqp2Octik`ztw?F=iL!a#>9{fN z8t+F-73cZ)DRD^U{`W%*MKqXO{G3|nW^`eUNk5DG{S$}qmcj9>7z>Oa=EtQK4zn3re38Vfz`h+q(GEF%7l5+IhZD--@%Ee2U77!!a$}800`Iy0i@4IKul?3)9IFX;`$u*Y(Pv4|BTm#9$eEK@3hZ=uHsHGrm6n=U4V-VbiEJ*OYq^t2eY|4p$NuJRtV_|Q3xQO;yh#E}v^KsQ-R@D$4 zw+S>5;l9XO^0pc@VDhK!A-dzMTEVR4H4rfEI;6O|V^*u_m4dQx}3@nM9crOU!GBSa-nJhS=ql4)maIH{P?wReZmwc@m+I zD`;t!a-^Q?e%160&)F_C?|*L76X5^1yfn-sR1^7@@0M6+$F@@WYR31CB98d!S`8NO z7KHfVHXvevj8zfGBeGAILK*k^K+Q5ERj=t zo?|rqv%!1G)4U5Wv)j+R(XdR7cfcy(?`j~>VDzOR$yFXEnE7avD^~2jUhS~ylA>g7 zMSaXIX?WYUmkF-4-GZSqB@jTj{cQ6W9B(^}ORg=@Fn|x!ug1;2m7=RwuhlC@O0LBy zk87N_@uqAGW+~e{+l&o1Y%ElRgEA3TQ)!iM$Dj70UWo9C5#xveHgZ-C>>c3Tz2}@S z`f!uDOMEQpL_VpNaPRc7d~Lkq_V$wkmE~hLo+!Ue-fnPh5+xir&##sDN8#T2GHXeohT z$1XMF)LUwUguztikRzL>bG~qGo;O0WUrw(Py>5qOd7f8pP};-q&{w?N{Hf}(gsf3a^oOJ&jH2L?hp}$5gkeS$Vsk%JS*? 
zSQHA|iLQ=$hx^WW62c*#R0PNpedCyILfRy+$xmGSu&J|sQFB0Q!%1aH0sg6EftLf4 z>IP((9;xuOIB$mZ&1tPf+1gBVO*65vuiX*kc{M&hg0*|VwB1j9q_v|Vkf*6$gS^~H zaRf5|8fUbG?=tuDmAYn$e(R<>(aS#`wu-Ff4Uu_QEwmxc%W6S%wiv+Y{Z(fX8gQ3c0f-tUjLxj ztr^*81aoA~^baAU0{V_tnmrxXYrBVLK}7ug02MKHxQ(J#gwbKMFsoSz;>D3aHa4=( z&GldD(MV7_6PL)`(pyMkJN|>n281DJZHOP8O8|J1&x09TId9Wa+nG%cHX-2$1vw%o zC2u7}*I4eRCC8*?oulF)sFvH23`u zU5B_`K=mN*W$ZbM{OD)ayE_$()$eavkkB;+5os6#5;r?O&%HOxS2N6PJZzO<`dSZS zn}n6JXvh)EI+UWaH`&&$Q#e^4|Hc_E)66oC-l|(sYZ~ZFroi{cL`IHGBZvp>$T~tE z222f5sYN5(B3A(CJo z?%){1s#Wpo(fg6U&SPDWfl9L{lU?_k`@gj*uwSS+n{DM~i{)b0&PN;v3?Ub`1Ix-= zpk% zS$w<1N<9+VC6a;*-*m?4a6=(Pe{~{Q0Nv5gXqcumrX(fPGlpur{Pp?bw&@kxo=R$t zkm*G*mym%9JDxqEWr!r$X@D0dwS;&*0t{ZYu){ye{->QNE~t@ zc%eYM5&3$2BWe!ucVL$QFUD{meDvUEc&7w-5ct6MA%hPcrM(D%xeJsNzWhCpe+e1+ zD*=BM4=eyT%59watb~Qorl-A!f4}Vd=HqEq+6VVv+H{R!b2tE0MhI^gXipao!WjY; z?G6Ak4CHDca!LQ6XHL&zRQP7nf zOXytj@So_b3((6zEmN%t*slx=exa@RF9kXjya00aH-A!H`TxbuuzLU!j+lWEfynk@ zBXgT9#8K+al<&1-*Vy?_$Znz}?<#PmkJlMf=RZB_a;8{OK1EtD0B|4OAjcX!R>xPQzfP_cvAqUx%8~v-+ zp_F!?co&1Dx0gM#O~dmnp}MA7BCh7$o4U)t&!3rl0BmPCxRU!%D%IQTfD2~@N+p&_ zs%Bkd364N~Js*2`;j_o5Qn)i-i!>ku6!Cqrup^;yF!4s+pH1(UKFgE{Paq>M26T2d z3rB#>OHd%N`VD*?h_hd6E4hI zPxAlAsA)#+5B{vsD2@N+pMdZ6uQ%Q+zHzi@IS5P-E6Uw{)4;`_RImTD0jks+S3_~Y zc_+e(3yPo+I{eBZfct6kBe%$xAu|THyWXbIms?&JD1v`gPWf{c zPJ`~^^M|@84Jxl+N_p-pbFd$g%}Em7FZ)E(81 zmv8xQ7(P&QGv{eCmR?#nED&0?Eg#bQ<1lk}^9B%G0t@v+lz~p$rJw^2fY)JR|{{62C2RJ?UOA-wb=e=C?~H??+(u2^>VF%!|Qou!-3#P})0p zWA7w%nGUb6htJYXM$gA`B3?RnJ%Y@xgZc=UlL<^@B6P92`+d?EES;XB@u3{gSW{Vk=C*w zI)W%z+q|)2H4*JO%hbhiIcHH*_q{8a7TD!(Q>cjUnDIAFj#wbe$C?TY-iHI&c_Os! zfOXB|*ud*zugkTrp~XAz9;v9RO+|Mhu*BZ^8UIdT6C#1LeV${6hC4X&liY2rTm$*K zPwDk?Uh%dzdU|F9SCHoptK4p#L{O1V;Ot(N=hc2e4sO(1^xHnkj-Q{&O?qEhB+@)} z>XmEE=^Fz5vd8=gXX71RttExJzEh3pp+24FUUHWwER4cJlt8H6=}3vV4?GrWQDGx-;^18aQ|j2|cQO#IzRg?hOYQB1XmXKP86lWG zb(x_ujF$7oS%$$0+GDS=Sw+PcS)pE?Os7f*1C|p|Hc3599Sb z#@nh%EI+9@;qBj&|KAio;NQVhK&$un@MFCIx|G(`PVzw~@**)}3-8%%G@q*_a(UF1 zwk6NXeuM>=WtjPHGs62MThp}Ddk|RiCgkz0$ciYB& z>G_o<)iw>RXjVWvPvR$JpmuWtayqT%$RZcvwWa`?(e^evo)|4ODsXb|73v zV{b;wD4v^1`dBVathXWV+0Cx0?&QuVF&j5q0&Xp}!YPtImR#*bqAw?>@d4)Kg1s}S)lHEJJsy@>MhcMdO>gsIY+ z*^5IqpeSrRMH275fyT~IxA>Vsc!WmxmWDSSSurAkZF+p~x06QC@YyM4jy4{vkQALm zkkll)xm7+BCyziL)Y!a17>`esZvkKMZn6T4GzRI1wlqAPA(hsw1}#2%G%~b>o`FbY zpvxM$oTHJ}=<_~Rb9HYJ9Hx-l`Jwi6hZ_>ofolEP{)J1`mt)k8qq-Ir;%T2Iq}?+_qqGBm>u1S*Opkn4!LCN>-*xr<2WFwtr4)zp z;|BAAB(+C$FOHF3>kC`R`R}|Bh^jd&0ME3{_FLRETDHD))eEKF*%W`Z)+Ec@!}@Vr zF!R-3QFUr6fL1iJniI+VLXN{T_N|Nr-E?1_Q$em+qj~3>bF|{ul2z1cZulrY-8GXo zFjM7;ByAs7Zo}okL{?JV91@7|tkZN}`4Nf=S3pcno`q9uM^O#6MfnhB`{60Hg@)vg z0xRTNLMwSH|3TD(tL45bi}9d%$G$TF7`qhqt7(=c6dQZKA1guYV9L+&J$wED5!-t^ zCV4I8rVyWZ@;8Ptk)-J}YHpHuWEvlXwWL|)n9N$7oS0-^%p1{KKRrhme}`SkI^E#W zbott%+JO;B4xki(y{^raBSYrrW`q+pFOW_)CkpDmxF-w+|E9x{OZb}>at zDOYxxj9RvT8dQnn-S7soR`akHz-%PpL9lKet~Z50aIbTbp}jfo{_c>lKy|#d+(?ww{=|{3!);T(rZ+ZUKDB4qO#}$(rbuH z6GHDjD$<*DsgWj~NbiYs5$U}Xiu4j{0wLbdz1KQtZ}&Q5oqNZP({O|GV|%{TLUUVk26a`SFY95wr^TsA8cujL$UQoao3gBfbvyH*8lPV_%1-$LH&DKTt`kFEgc~-_0?5=5f-@y|7KX z?#C3XJ>;TaX=K2+h#oLg+{(~n(r?`=qWPrQsTF=a+T#T#P|ZH>-)G@$>B73tSM~-} zjNkNV0E<>z9n$G%8{7~JvAv+>6doQvI_#pYFr6qgtKRYg(NJqLyZlpH{uI~rkN?-y z?0*PKMKGYwuQ2v*&hErwmfx_!DLO5))(nqAM7kc(uj$>Lx}TkH-pL^!{n_m5H|3Br zVBT&;!}SwMf-nv;ze-TYKrdOLm_|w}aQh*ggG~V@(@aje30J$v&k05>pJSh|Q3v$# zY=|&;;$+^ko$}husym}!{}sxQ77V_|E%W|i!hXzRK_8+LM6jo<98=TnuNLX^Tu49!t-0`TC0+-z>GwQV^2RLV)l zpzWxxMEd@k>P8HH71U!7N64SZE7Q2CR(yD` zGkeT=vdjw;(!8}r=qqDuUUmUiwnb}U#nz7bRI@pssth8P&dXmSZ@(bqrJJL*IfjtI z{Gz$fXz31`jtJLxUCK`8vdi#bRk1SU)-h0yiOd$Nmb&|ZEE<~TM}s?qv1f)i$x1;m z(5qITUVj0 
zQgt_xF^!WzkB%lj?dUBq#{?dPIuq5_iLs~T45Vv&5>)_yKGppK5`URHxOwf_O{#Bq z9O+^ z@8P;VIZk(3p}Jp+4%3cKQLh~;85meoslLQed%a^%@wz(?4NoomJ&}V$#36d)#J>8X z+)*6Lqnf?r`!l;SbxG$`%Tyf&eeR>d#T2L;;+Be%xZCLW0eJokz-9Tz1pFE3S$LB+ zk!jTuV}vXFzNa%xfO;3M)~N>+C2wZ=KY8C-CwpG$mDxKsJNKzsm$M{iQ76dvaBF~B zKnT}`ZJA^D@{2k|oWa=}$C`}VPpnbvX9sgvtMjKS;%^0g&8C=|&rM9u5vguu@#dXI z^8>R2f%#4M;nQC3ZvCF?FN{EAQGop%A&@VigX=!#BYjl+>@$YH4`nsjmow;HsR}u_ z^;?xZ`K#B)aZ}$^M16T%;Uq2yMuK8L!D!yr*${ihi)D6l-}fwbb5M0~>#;4>ZMn-* zkq>w*JVg`4JLvb&I4-hQB%P1&X}T-74+DSW$MRz1ri>;xJNC#m1_3kup~<%(9V-t*T~&au6!<_cah|;e(NJP2`_pfbNr zimO)LkqN>~KM&DJ#r2 z{VM#*rd@KgcLc4i&;tR%jj#DNwVn|6-EpEtB+Wz7_!+#oA$&)xYt`q`&N61Xg-HQy zgWV3>5FU|Rd!}$@d4;|Ws_3JG%fP_Lk1#-IWbnb{dzVvA#q#H*wa<$0vzuk#i`;OV zBauG7{9Z}x?OK;Pzl@UDx}RBbRSHxyhfdQWmu12oUoUuir+&NSun!)IA|*)U(t!4~ zbeh6FXR-=QXR56_zqDUfbwHGNY*J+S^|6u-RKmon%SSms1s(zooPiea#~3dBW_#tdTsF84JEii}cq=ZqxG}YUToghu2wST}P9{EK@BR35xA$xkZ(m%UyL2wcD~Bd^ zVUz9aDFvH=f{V}69OyNx=ni<}$?T<#*~g!-*@hH7u@sA9gGy!=apLzY0!gEt%q%{K z&tYRnQUFexwnEuFVo$P-nAgjls1|WX7>-d%YQ33Q`wshFNf5J_$j6Jm2QE#~by`(_r@9M2mZp)hcAgWbE z*3F`|$9B&qn|br>!8Kbq%k z0fizM_gPdv&+wvl4#Va)WuJ`aSu>uG4Xf`Q2f3DIMNNOE@2EYpbA2pYvR7RM%q#_C znVQiQ6^&H$_xb`3<1Ot!PEy=3u-Mb2Z)RAE9`>`kd-XF~KxWS}N;fm#yJ2Mg*kslf zNG18_3mQRl*I}oJz*Y&4)WUH}A=wdR4d>V+Rqj^@@`b$cWJmDE znxt4-UO-rp78hNgH2fS!{j~Y@4PlJ{9gWW#dnmzm#Xu=5!$X&r$8gAb^1ckK~-W(BL>R2o;)ANO z+&=Sk29w=V?$q`%>+*>-PJLc1z>f)TK`_ATAR`*IM-wSIGGJ%ru2=I1$6B}1pJcJy z&XqFU1mI0zrP2X5j}5-!jV!mRF8dqp^vCbW4Dnf*+078&h2AI>c&8EVf3Qlpg<0j> z@A9sPTv#gco5J5`dZT!)Jg>u*!b=GRMX9tHlYi;taCfo);#ha9dG^4tobpV z<}{dMHPxvmpavfPPbvp8`;C;%658oWCQ7F+eYut5eD|Jnbhv4{ALzA=XeBmi^W6N3 zD+RrWfBVQ?D-@%t)FfX<;P66JuZ-@~Bs0rf=k}SCk6A-Q5>D|3tfe2tMx9~ec{woOnT4W zvun#kZdqD_(8PhUVhX1@oVbuo6-=WVzh$%A3@fS*l?-$KI3;&4IyaCc#_I8hmj~UgtOE;+RV_GONQlqaEIP`(|(?>#Y&K%3?O? 
z#S>RCp!=*mMq)nS7{yKYZgX+Q*uP*g6cd2Y_C|OSV?RjUVXINKd^ne8SXp7Y$r^Mn zPeQn3A^>^r4q7{^QLxU|_WO{_;MzM9Acm#EdhBqFI=uM0fAJmI=HaA4WK@mT`Y7Ch ze50|dIUAWd61^~c^$6~6R_ShOIz>5sacl1-?g^?-Kx`sgOrws9JL%NKC)X{<%jDeN z=8-$>Y@a#Ayv-rnFC00r*!)CK?$AWajgOA%ruaLy@ZLlty9wEtzO$T>hvZ4qF+MR4 zbAi4GGCY-LQ@bu)l;i@^^A=&nT!lb<*jssyul~Sbm5g@o5vA@-y!-Bsm)IBQXESGI zYNbqZZGm288^aVK{1!9xlMjY|F_xOO_F22j9?L}M^nI+pyV2||6n264{1HsCC4SIPKWh{X&N!#5Hd|W++ zH5ZT<#`udE`bwnpUr4t=D?V)JB$yD_f}2KE>if(Do_LSgZ7bZ#@P1`IrO>jKUG!`# z^X&X4a)wL^r~a@ezR|HHj%k^IC>I?M<0H)Q282Bi?$pG~VnDKcD|wDB)0U|70o%f) zaG~p{JlmRpG9~12<^;MKM!$Wk=#+I@(q4OBXiHS1kw^=-yv2u}P4J^ znRfT}pv*rPs+O@lS2)8XTG}T2QGew_MICA-f!{md{u%vZQ=uJ>NkpXV{FbTN>QY11 z7vE;D+;`T`w%60?1d)$BYGFlz@(!)jWk~cpgB%>Sq}MTjndB50*eHlCCW!g){=e3+_0&n-<_GvU6) zKBcx9VR)w6T5v%=@`R^~GBGe|2*K?u-Faa+NHI8k7tN9gtvFK}al1vBO+U;A3!kmQ zoxG+KXEmp~xDBO2EY0oRWzg{oz1OpOqB#Q z2=oIYN0_>l!f{uHc~v06(mxB;yMqgspt&O>aExI?>p<;YQC|$e8ZR2{{Ju+=WyT_I z-X!wH`B1Y}6>xT!Wb$6F-!B#lwMLB%PDV$kDg zTUUZoCAejv<@6I&i~ZD;t|><&pYlJvk!tW5%2x80BHAgDq9`f**12%$=%*ex`#bAE z*yA239x~1kGlvf9q*Fw#M>YWnvlq)zlHxSEz&ApL&)1Ov?_1%O{2x*JpTDh&1UqXU zo%bAOvo?RBxXA348kP_F$Q64XZx|fJ7~fU)^7C!oRM9!eN7z-E9Bbq&3^b=ubg~8; zb97R5B3cc&*7~0JQx+}b?wqvO=P(>pQ%>Ei>7KNevsA;#P~B9C7->U+hg_zbiu7*} zqf8F?idPc$N=J|3FG{cpVo6p@q~!fj!6W^%a;ViyyE?dui^Ry_rm~5Bxi@g$YxUde z7m+LB$;RvFN=XdfT`~SuKnikYv3yBnyahfBO~9|+28>cxe@Ra>_M^EPes5d5TYK-H z3jh($R3nZ0%E$>5Jj-5uLY>n>WTj0*3f3mgz@c-LWmWiU=;clgzd3a!8?lg0cP>+- zhA2ebH#+XnsU|Z}scH98(tYsK*^Gz?1PjXB&2jF`6?sg zd`i?;m5QVsf01)<*XPZ+@nu-2$}K#hkB^)7XrL!744m7|( zeKI-5v!K9OrYd>#b49`ow)lz1unE>Wf?jYgN-_0zpK+H%UzO0nax5ajTNyQpb{jXb z@219#N9l*&$yI3CTqBz#DWSNVQVQmohZni6sv&EoOc_423B3-GfYD7nVT=Ikv&Te@ zY;_Zdh=7R@TTCt*Z&OqZZj^z_v#Xaz;Lh>))tHHN3f=8olJO1oW~;gZNwF4|v?MBF zqVo|t3H5?Bn*z3Yq0r6Sx}`}?DDKG2!WS%ahSE5;8m{czE1ub^JKp&o&sTOXoO|Ax zD;6Y5d8(J&JPG)x&}kJAvPqTb9JhRA+2r9BYD$(GjJEd>O`bc|jRxxSH3&FHhL}%0 z=b@Ro8Ap|Vq0b40#DQ$Q@rz{~&8xc$fZK)3&nB5HzovoY#honblSG497S_icU=RBT z8ZK{WPU!mkQPF{2`fHOO&zB`z!|X+BL^D6xbib+4E*|Z8nl=%)p7SQ<@w#fqn*v_3 z3CE2Ig@&@cTj2qp&oE$~# z?+0DW)U`M6RFp%RnK1!)_(VmV2pC}NcL7;in zkWBWEMj8bDq(%oj4VeyZuwJf=i~k@R(63uCNWn0pqXZi*H6x2akrf^VS zA@ZZ&Bz#w>3QId|^o@?#vgx1|uXI$YAVH62dq}1zYlcgx8d%LwtMi2?X(!iC081?2 ziz8gd1F0Wc0OZ*YLC&Wxj*+>5@Jcfnl-C2mg!kz*8=PS_z%zW@O8Gg#j`P`p3gYCr80K z;W`bm$|KrJOf2Jwq&hNed_^^JRZcup7OQW3OCR!_t`A7c7(EMQsraCgw-Zh{yMkTGM=HR)0m~RG@Iz$8>kOFNMP81XAz2j z%qHUh?jrn-@cufg|L0ab;VvOH7!A~w<$=~(_7;j-zM!QQT~s(mKC@6vWAEw#0)DdA zP%1qe?B^x^b!F`8PqG97bPFtgCsEDQi*gr71-xRPqWzD0vTCXXi9k*X?e+y?>RIUf zr#=(vBA+2S`YnYvz)7t@O{BdB+$I#DvoVzcOy9Vl&sA2D_GtF^MUNvC4T5$6uZTct z2@t5AU^NkmZSS-dinxWM9S~cWtpY#$mqSoM>j2~2 zAGH(QXTJk5MlSijBNvhX@ka0 zikp1kZBWTmdy)Y3yc_WqCj^pw0RwM-6-x9Ax`ez`nUDFGn+F}iO(0tVU4C(2LiExh zp?}EH5T5abC!Et{vwFe}NP)T&!Q}&cK^&p{C6p?Gg@MQ<2kbAq@aOZZKepmTGeb@< zQNG{?U=+Z?{28>vdk!_h%cy#fBy%yvsy^L#P=yQhqQDz4Isdsps+2gVh5Um2hbsCH&HLX{v;U6X`;R?rv9!uDH|0Mh@M*V~@4*e`BWWerSxSB?-VNByb_4M>_x3+WvVO6iT2SK;l)x4(|xt z$Dc>{%aPQ}odE!$3I&KH{hz1(39*03mH%Ng*=;5nttJz!^Fg2MaD~$^XYm*5u-N+p zVOacuFl4~E!=C;?mV^D*(xLFJjr=L6S+E=hOt5qTiil)|0k#Dzk|>${FU3WCUiS0I zZa_CA>?B^Bn5jXo!)VHF*m@L$Vfj5md(=s!m&lp#g%G8@9kWt1j0WrPvuH7y19e%Y ze^xlZ07hYJ<(j$+LO`~#*oLsAUGqfP|AxxQ`GL3L{~2)XzZm|t)e!okb51-cMT292 z4D$IMO5>xke`boAHd3s~?e;1`v2LFvtfd5jhBU9bM zvfv$i(j#kNgniNG2?+8kKhd9)tl%y}W9u}G(xIZ*k(=Bt{}L8=Nf24dK(#e5Wa8_* zy9tdvzq|G#ZqqnDW!RZg**b3?3*J)@0VvOs1-26-LQp_|RLfe(a5{F1?=Z|D|Ai?T zqSK&02`uc&49%Q7PJqxf_V?|`eA*Alo%C;fE&su3{<&v{#Lo!)%bv>f8a*Q8M-QyR zshZAwiEh_!6O-W^Q`;}%@9w`Qk8qH%*3{H?=t~%cw@|(y^doNPG|G<3gEqtjfB<9Y zM%4A6BbeYB_yCRuMUVh57$%AlU~HGG?dH|#Q30ejO~N7IyJP~6mTdKgnOS8#ssVa{ 
zTr#=m$t)o|!ZE4HVd1E`tL`O%m<{-pq#bs0=hJ(U$A@()9OZaFLL=eK%l<4t8n{A@ z(|CcDa%#s7f_!i2ohjxE>NZT%?ji&Ceej*z9HhQj<9hjqyR8o!e~VOM=z4eX#hb0G z2glvn02+RIFeM4r_>FFlZHt9^yhmjuN_ZW9ezbKOwezfY!E^^#heyitf_}}B$H@K~ zXo&kkF#q{`-+DIXj_b;xPlay~g2sOlY57v===be4OV z%IxUy$uoC3GNVc{&hO0-eZTbp{=w>%8)OVs7C|;%#VI#?v{LQ6xAs>0n%hAH?N8c6 zB-I}CkQ*v|Md@id^K^C3_Q7F5as)vKvLiu-(dvyJX1P|!YHO{+{)%gBx!!qR1;nv`R-FRDj52ZDgxPN zhSM%ak&Q6G=xxIw1QQ&^)P?F0h5V4{W9RXQ+cGc@sMT=Jv*a7yq1JD7jh%ppNe?)C zcZjw$#)NNlVd+b{J~SrzQrVQ>jmTc)?nya-CGr$77AFB`&Ci!_*c$>d_Zmni{r7m-<5qCoc64P9XVc!Bv(Nu{(7n3_fZFz z2fI|KRYF)OYF%Y*TCSD%aGhl^ZQ_7KU)#Z$oW;AZ(f$XMmcX96fl8jTNy=(!L~|u8 za&n`L?K9UgtV06dt+Newd6}as@~o%!8%Dih`+{i%hn$k|KTI@!J1t%k@<<`*uF9ma zkXed!s}N>v=cI6SSc$FIcMnJDMPOQ#h%nlIpeRXm4)E%4J%D)?3&j!GS!nWFTY08q z!vN?-IkGHV`Q3wR#DUyALq-E8_&vZW-Glu274VY1xVFL(x3uKfzLhYag4Bgzl^Si6+r(gd(#NFPDYpcj zC;-T51T;S5jWyacI2&?Y1Vr(UD$rh~&CwKB-crx~;obV{ugO2Hke@?>$2Mf0Qly|hZyNs3~(uejf_REwHkN&DC@8qyk%+pgW2az1^?WZUA9_0O|aXm4>rsREm~ zLsQnwI9c|0<|pCL3H|NAX$Ssth<>avKj|wi_D}irms4hx%1I8$BB$;;$|-qeE`H5^ zwfi7@U(mzECc-tQxRl{jIAyA(;o)(JbO{D@EC9SW@!)^Qd%J?dT~KaB#TQ`2Xf%V(^X=Uowd#D}y^5D(H-#w+GjJRD^UqGEbeh0K8giQP-E&hvFs{k^Yg_6p2G|#j5 z#s*)!_*&(G8^Zjw%e8s*nUafZ7K)OCeWi;Z^J;io!_&nA15d#OFuv;DrG7>l!hNk; z__EuN?F%H5k&^epNIN_tfv)7FC7n*I$XWkCHy!d>~lI$tW3zs{M$ zfB%$P#*V|;ENQjo0Ppa-z@mVDtgwLs8!;c|7;uvXr-l7p2cRUvOvN1lH=7V&9! zQ$E58cFUHl(5^b&;IpdfAvQA_ZAiU1b$#7)rQqI8!PwhFdZ*k3@ob>RinTb6EKgWN zRY9x^?=-t`bPJ@W&JxYK78)9#+wC6Plc*`)SyKBuvij!|Apg%j$ohBp?0>q-`B{ej z3(#;G8ur3KEG3U@+2Ab?&@J+7tBUnQe_Ny(7abVm%mm<^Y2BUg=x>XSIdfQ=RAe%w z7CZFi?bJ&tros%X<0@J@FtgqCd*4%<|NYN|4Y}aZNDpCNhP`0YVBF<&f;%femgyKn zeq_t(0P1j*cT1*$4))4$h^F}PY7P*DXO{xd=#67Ip!SD(6P5_bCjZhmy6s-z zhrIc9WzY@)*r=36p@#p^bASBi0f33}JKV6G+x`Oq{`HpjJd^sl z3<=5a{m&RAytL~(%E@AV!isuGtU!C?huj*~GF0Fq0=*c3iZAxDJDv36pTaW*LzAi)nwI@|@MA0p@W98lvrJ%!2?sA16Jdnkg z9dHz(RH5%wn>^&W8}P2D3q3+IeZaU|W7FZ(eB_@fN{$Et_(3`VKd9h$eo(3>3g8C; zl-ehK-}ylWI?KPiU|-bc`vULxN5Jo=zac~Apxx&T&P`c7l$%O(9Do|OM@x#ra) zf4;?6Yhgp$$W3cUPiQkc-^D%MOU=%fIfKWsGH#n@dJWgO4q_hqToMEsv{0M8ppjup566zqJ6#^6O0`!p2ZNB z`CY%IwgUUM?B<7V1`NlGj&jbPmX=LGwbF-BOCi%;bz`Y}1k`JlJHQX9w^S~ZPs z+YE)KRRg7|en~yASt9{RI@X754%CaO0}Q>4Xjs}`J9Yl(*a>{o1D5JfJtRq9FyPz- z5)VWL54z0b`bAhG+ZrJERcqGB1lE-e-(rZ@>y?gL&*q=4=-OFm@T1Q}3o#z#M<_-_ zd_hPZ@EJJ+>E)C94XwD9!>5sjG~R)QM#f1fejSOQ%lrxn#6a7SaKe30>0cAg|9STL zKb!-weG{^-J+up+-CC7eqHQ7N-7AJczOTZ;0*5w-+f{cN)|Q?TgeXzSW zWATNtG9P(gAHbdlLLjkDU65tRF5IRiOn?2EPYZZ@I+k)QFvDt38M+!~3s~Ds6s^e= zm-ocQQ3XOR_K9SsJ#$ksADWkPw<%-AMJ5je{yY^*@I%lT3<uhz=7G&DN z-;pk24K3l`g-o#2^RkSu)jP3}A@zOEhW1IO84rbG`7oW6)J7b#O$~&a73m)*to$Z<40uACO{80o5jeVXdJnG5#NQe}lSyTMZ{e|-J z1`h9ckM=(E%wU=tEtKZpvOZM2d}|o{M4i;3VoHs=$j(mOi=;}e|*BfRl za6a05$hw?=H=Lvm2>BGhKs|rE8~UT-Na<5!lb;85qU=@uy6jJbnwD-k1$#%Py%8nF z7t`8(eLK~84j%;hE0WGtzp~meS80+y56N^!apR`r$S7J3Y8Pg154bAceWR+W~N0;G^{$X2J5fwVR322$I0MoOVhAmg9?oITQ2wdR_(9T5)6jM3Ah;QRoWe12VTSDW)?V6!I4}YqK`5L_iPh|$ zcq%_FqwG5%=pTv>V{lV7{79CE^#%az7)XRt5Ff_G5YJIz$~&QlDCEX`t%{n?0PA)BqD z0y=$@G1G?q^y@KVl6{w@V;ZT{Lf`=IynhliIK=ddpYYflRFnY?F)(Qq`n0^O{389FLFr8ktGTd;&W zp{g_UdhPU}jPn>xbo4=$Y5e4|@Nw<$-p}+f9T=+mIuYxs=W+A9dAmN>Hzb7)#s!edf$SoHSJPRh`r;VrG6xmKFB16spm zaSgm`9KhUDo+)7WS(hMtYK67g&uNFR#S;7Gpk?-}Tp6?;b>;!2M)Bh1_NtVaeAYOG zT}F#QCHweIf!`R{f+42`-Q$&u@Qf6T55wvwbE7F5IgU`)&AAvI=OW~txDnX>usQEr zN{Lro<2Kw*`r*FdiAHR@;&@QQw34|1=zP;;8e{lWFFUz*+jpQ?UQKejmG5CBX4J9f zd$fJ+=eiG3>RXL-PepYEpH5H|`g$ClVY|D?_%elEcSTCvK(}umW)53eSGGGM`rQxr z6!jR4k>sEl@Pw*fBnjUb+^R-*2le(mO826z1m4Sgxi4|47(;+{#D>D@ogWubTxRrm znsGdN{Z`lWCq-l>^XWbYxm^+G#R!uRkPdA-Q?uX8OM&@94z@f2_SY{=S~(axd>CD_ zG!BpAihXp4lT^)ceF*^?d!5jkN@9QJQ$r*&RWe^51o*T5g 
zdFC5r#k4o?iX@0`jaPp_ygw?H4l zUo^0hL-y|m4$ZVRaDAod3Ko|>m<4aW!`F$hV3Dzk-3YJwL(0p~G*VMycdYhPECT@6 zwK;nU`(1ZkeRv@7)Iy*L(X%7G?jp*CK$TGzNfG}l9<#nG%88Z{Gadcn`c&?caJFx3 zBYd<)lT4a{aglGz(vvYwfx^T=#SOdzO`U>YQUEqn)Dl!RsG_4OI@EGgr}qJ~}DDQnm1|w)ZKR4IdXf3ZpgoL1+om zoU4~w%HGW_x-74K&?iOV%**PXXJ{zjZLTK}?$f%a#EKI3=h)KWqI)rS)LmrEe$3w5 zldFW?wE=Y|C!w4~M~wWRH@r(%>d zPnz65PsgRkX?Anq22k>|72h{N-XzBnmzyjB5^L$9*Q!c@^{;LDue!7*oG&SlI6N_T z&h>n}4mu6%Nq0DD)#y^5%tPyY-**jGUzxRV1b)Nw*7@#(OhfYs5fV}-kvp=mr#z@o z*n2{uGvbGb%|Odt&c_VKO-E*t&Rxuj(t0G>)eKT_8@v=sk%5O!o*;fnoSu-n6|rNS z+$QbT8*qM9Mz|zK4qF=?4wuv14LhyHAKkly9LL+E^gK5K{?t3chDuIebz06z2i5kb z_7n1EqQr#uA4pxY)ii{==K(cMraEhM3Fh#2SNhz|xr=!I5*sIL!eVE$)jK;;<$USa zK&11?ktagOpUI;mgf0%OzS6LlH%)mKqRI8p^~|S6RQX)K`W7C?kOAtz?vdcs;B(MT ziNg5G4xL?)r+wd^+XjL6r2zpkn}X`&^DYxE9p{1V4ow;;4XX^-)%*mso+R6&MTl<0( zmI2xsBn-5wO|M?o?U9;#fR3>Ix{&0(k-PntSa>=-o9+8CP46m}H%yugqgh=1UHa-~ z`zi3^bU#+pDC4@-9dLuNL8?g1{gm^hni`>~T;Yv(mkb{b&3OKjx%mW*4s@WkfIN{C zO=KCukGzb-eF*H`7CK*3`GGi5ztUGk$+PfMEtsY}zs`G>fBs%-IOEkV6j`)5CI#Mt zkC4vNJx|p2nWRD3EIWlSw;+W2esGIS%?8{|7%t#WuP0>HLu6tr+j92KJn)(DN{HNb zcLFay`muqvRx>8j65iGWZRJTZDVlrlCsXbF+n36S-|B{YOC}UcA22Mwh1`}5ZNZH4 zPw%wFawBLbiP$hR1JcnT*ZjqgmM|BEeAe5c7IwDR4_?smAFeb(C1OxjE)(^8D#-21 zNk0Xy`37m~Z>k5TP+=*Mr(3bdJLsbnvf1utUUyPx-H;GD+NwQfHs@@0h_T~0u_EA4 zxXbDhgq^Sk!4`y1N<(PQsG~OzyJSGxOqBx{*Dz}5AB zd`#rJ0`1hJ?V6n+&swQZtuO1dp5J$S&k1Ogt8}s1Dvg(4?R7juss(t^9`t-cyU1gx z^N$7-$nlQ&aF89h8p1C3yHM$d&6@GMVu73hm6>2@5tg%K9Lo?KjUkp&zbkmI-}g!F zv$O)(qP0>JzcDP7&xUjWqi}u)=YC>CS3b((bvg}wMXD0KNwF5#ahExO@!-3n>R=!} zMwC{jOC<=!QwVJ2$Ua)A6I_-y0N5<`vpy;_q0N{VXy#1fd&|(MyMv>}@A1 zHJ;EXoQaF8EtwaTT4E&cY`^$P_bNE>GjwaQ7-aXQJw?zc*)s{2q0w>&D5g$8S$GHMOCF zK*u01T>XsCf)5N>yrW1s5q~>nShp6@qhm;DesHZeq4(C2gztxC)DR)mvIcsJ#;_{r zh@F8pBmC76?Y4&)tQ=Bj?Yb*A{>3i+N1!ZJ*iKM6#jV(1#SF`2sAx zj*lsY7k&RB{m=}qDD^t?bjxA!lggb&@oZ35S`)ki-^@0$J|$E+y?&A$k8okfgn2Sm zl_B&LzY~0ix(*-fHFS~lj3S)=%&oRdj3$NaCp;hprTOQ z#-6=%E^zBXP3y1E)7LJbkVf+PT#N}3BZ^q=CqiN&^7zO=mikHq&b{YmNlLdesgDsn zDNm`qK6@#EuOXo+udt!$t;v1B*I3rp%w=RPVb)QrC6^p1MqAVqjTGY$PnBN!MV>%s3r*2KA|Lifx~%l9r%rGr@_CG}y&-qF!I0 zNuX8yHCU~So#|awnsyoKD^n!drWJQ0-j8P|7JYwncRMWbYwJo#>XViVhC@|H>)jgo zsaRF_tlSRF$b553bxMF@uUTrFl9yu18~YlBCli9%HO6M{RheUb=Zf4lEcV!r1G0wN zwSv0mwvxKLxj&Koc|Tfzj{an?oi6+$Sxl|wzZRVKsTr&w$Q1eX2o7rnn8 zXkW=v$sYFVoKwpa-VesH*8*P`R7BW7cFT8j!k1=-Pym960d2L@A;M=pB^%{FG#$Rr zXLRQz%;?eT<3he7R}i2qc|LYbP_*wwz3 zvgNZ${8r|sXZtkI({;>S=XS8|icE?D!w3k1493+N06wrcXJhw2W1F#;P;n77sZDs^ z(l4$c3eT?PsT8`!`El??tk)fs)a~={-%WgQ6>Fa^!O=(TZfqYSm(O5hAcxl|cV~(; z$BdGe&1)(L#L7#p9{6jAMO`vkv$;%$ulI)=qc)jEopz#l0Ny_JJH^G zEYmLgqu?_DuBb382iR01P~C1o(*K!$j&@0Wcl!8deFn%MdrANL;S-iD9)j2 ztFd^%s0v}DxaP4>YF@+M&We~xrs<{vD!K~0XN)Mq0T7;t!T~0Up;+>=rq|l%)9;cC zFQJ1?T}P2}#Qoo>b3-2|dQR{6>xzF_fw}1@jQ+x(G#yk3YK5Pk_Ef5<==|vIkXqSK z1@M;GcdD`5QPiw5=t$)a;uZ~)v79bk1u~>>6SvdrDBCa+v z2_%G$O9;_txpfbW8JiS--+C?&J>W=fJah11bU(m$cm|eEOFVJlx zk6vmDC{1WGP?aiPhA>h%iY*@{+e#2~Um*2d3Vo8Vl^sc0>O3%Rp=BZvyoeQlsy&uE z?&h+ZhyPfn0HA2YNDYLbg1Gb*CRts3vi2N0RRnlPw~3Na$F32#aHUvkZH$VZC z#n^gYJp>|9PKS!C-tGk$P<%SJt><0}vK(9{czIpHw8DoK&bD8Rd zk4TMuDf@QaWl6-_%ciMSlv z6zRfLKNqCMn&F=DsL)PQ? 
zyCEz+cx(;1-+nl%p>nnHi`rT5`MFi#<4yoGN3jrVT$IH^LHzuc_BhF^F z`#o0tIAMhe6^$Wbyd>1Ek60mUPn5B=Bt+rTX}zd&?%4h72%(F8tEy52mtzQ9j-*jR zU44>Tmgz>OesSNlq1Gy|xbo8F?+wn>r_vtvOxqdMcbm{qw{Z0nqc)@Eue|zs&k^cR z?G_&BJo)LR>j=jOt_8@)SW3G1s>@HhyLwE(SBYoyE2fOz{%$i|0>?}|bm)E{vJ+Lb zoQcZCyDz6q)fw=OAT5Y`QDe?Qssc4;93F@< zCvXvENm@VYZfYsOL;Ns3zqFYDolpucN$UX-wm-ZaLq+<)LHPI4?3aG;3g8LF%l9pT zvQ|e3C-s4TcI_O88&^IV z&@;ujU3uR-ngHtTVxae+;p(URoRjvohgA7}eI1^>IjEgg@zAtsu}Oe$qxUK9gateh z_hk8q=_8~;fUE2GpLD&2-%f|U1%RZ{b=7)4^qh-4o-CsFS*dFgNUn`_9>l_>-^FEu zq%7~F-1TegPxBZvWSbO?RTRQ2xF`l)(*)O2p`>cP)4(7t;o>ciG+iqieWT@VKFyw{}oui8z*h z@ulZ((26qDFJ+2C%u4{@GKet5KKE>en`1q`hDa&0k3Bl?FLOo=O(_-A>h$HiFX^BJrH`!OuV&mfZJ?X-FkYQdElD9-C^J?g_VWK#lq7t3g_7L5&YA~9j$7CE(d+^`p=xU(&&u`if{B; zzk9lWu-*G$<4bkomt#OuyEzpi;6P)k3?+!=W;#xavui)7`RbHaSuRmKdq5!Rlc1gx z-p35ki6_gMdW4$SXy<&rjYc!OUN627K9iy^&JD*}go7+8FXf zp9mbsau)?&Mm4EWYzPyw*N5c;Bxi1yTbT??-{P>ok{oy735Q4+z93<=7EDtn8p1|q zZVynTra!6r3omnMAaXlgsF8WhUrg+nYCnzbRDE$>hg%!ybBD?;ilNgZ#&-7@z2hU7 z5^ODNSLc?Hg@EZha-4j?6G32MYu~Hy2-#xd75g}dyhLyka=8=~pY_<-BhH+aPW)8V zGi!SaJ8)IF8dxa6F6kYbbwF}+ozo{tcb#!jDX;INMf8+*o}ZLNv=%0LoDQ*)25a6^ zEcav?&d4%{kGIeY5QX>S2B^b#v?-Fwu|;)y-cu8XkfXK^qu$}8=gh~KzSa$wG-%14 zIo@ig#%68Y_Cs8}&=DVj$_-GE)J@AP;#wyw>`{7?v3DT5LQn_HzJwVVbPWy&8hRf( zvfc(g=oENr(x=;|e`~_OUFdYOZB>F+koIrUSq9Zt+|l*vD$H((h0BF*%_mk*Vp1Yt zB(a@WbCZTBWU3YisgR}v1L65{Ct%~exY49#w*5#-VJvy*C!I)c>rc8ZKyUWfGQr4O zq0}~-Ne2W-qP&2e-UIS9(p}<&X7q3OBg8-a^F(AMkP*}r{ zk@H~TSxMQw&ro)#aPRHZkhZp<#ij(ZDZx0Tm%1Tr)&nmWZ)anpT>2%?z6fPO(xVW? zlReWtioHPNHOJ!2K#|CgxVbm4rY+4xx!9zu$4*{{XEcZr8{%g44(?yam5tfhX*EQ( zxZC-&cQb$Tmx~Lwc23>DL^qD!;X1byq}$`I*u z9u{k|V#)jJBhQ(tqvB3;TrNJ@8pUdU^nQ;!6{<#V-Yr_;Rq6{Eg#FKo8#lHci>Dv<-)rjujT5J zAK-0SYfXs1mOh)F({P9S9BJd7K@KGpCt`fWJ8b!?NU)f4L2un;b7E4ai;L*{u56|+ zcYhF|rwN+2+J{64HJ9qMeY^eJsFbgO5=8sM-QQ@9^aZWpCc0(GLROGReHvVE@=R>Z zXji?_TY#e2W+kEo$g6xw26>fVDYk^$|18D!9EX}M@iC$`Rf4bPRG7dq0Jf#`9x<01 z41DTvjsc+TZIQ0nSRN{h~F9*ylhI2Ux2k z2gw(vANy7Wvh^uIxF*DJi@c2VVc21oQ!mF+oIxSOM`|i`c?L`reC#+#ns0=kfOe$9 zBLfeUxZh-xpOsyore25PJa%P@WQtlF9526~i=K(IS~|9Jxj&LRYwyI7KoNDRF_l5= zSf^R)g22>lf5!bKiGXR99owI72)mLr&^XD1C2_XoSw-ernLpcMeBWFykKWaWhGADy zM*!K??zq~(Mqx{BRF@>VL?g9|a>FT}gyiS&rQ@RkNux|YW{(wzNjz?G#X8+87dhe) z;r*z&Q?xUn@59CZCtWUYLH>Sew$S*_ZhIz$LwD9*)=%NVz<6cXZFpqu?v8r&uUO|f zSsa`o8rvSV0T`BQu+bb-Xi5SZy1WfYhMRN6Yt`q`YnxP_SulLbbyJIhh@xFd!Bay$ z*{DK8;Gq1vuKBm?z7X1BwSyoPM_Q+Fw$OBTno;{__n@>P%sX{4y2= zlruaRYRAVakexE7>}JJ%)zuGVSg$CrLlM&cE$Y0$7={G)?j=WgvPkI?H!q9jZl@MG zniI-zWo3AmJwMycbx7?Krhvvr>%lVxvMsSM2{945gQq7nHO}HHytKtf{qkN0HNk#U zF2V{RAw)>)V$nlVY!Fg~&{<>>KdI-g6SS3K=yr|Qpd#qX7d45F@!7eBvHh9u2fKUY zhC92v4THvk^ZwXR-)VxByJT4c?6^$alnbv9#Bg}cAmZBsUt(4=dqH&1^YF^^bnHGe zyqGW=XJ9v!a<94ZH-w?|R=C#}!!y#N8W|k-oMYDMLi&+Khaw_p^)G?`THR z1)lVww~VwPcC^GEBfFc0SlA89r!fu>PjkIE$sinc{xCb6YOsx1>=c%b>~B6)?j&_> z02Jz(c_te)l$ORwtlL~=9Jrk4aP06@iE?Mg*^T^Ng$*a@SN??HpaEmrGEhE=CSs=6 zgJ{RdcadsSF03PD7j3rh^4t;L+T`|K!EpP>bV?V*=@@shQ&38l-8Km1KG*_Zmemcn zLKxYOJ=z&?sOXzqy3iy%W~D2h6yL@9N)svgTg3ro??8mpJ zL;5LKkEAtET4@xFXt{mRuO5@6H{@#9Kj|jtD1OAits7Ce2RGTUh)Ohuw%Pxzp8amlc1t8Rs%bD7oL1DJ^< zwIwg*Ugq;!MaJY1;y^Q&TlMlDTa*VAV`8S;$fS*jUAZ{tD$zv*4^x-bLrBD71hIj($MV2U|j zqTPDv;W}{cJE0S~7uTKNCkx%YZDoO}>EGXz!7ap=K6)1sA5^8sM$_eq05t`?pe8Xx z&t~FD3WSm*%LTY3xfW0moKh`C@c*fbR^9~UI_@c|i^6sC5j=d4YMy;g{>c5~WmmCX z_u>E--07V&)#b4}HdHW3gu1fpzO7B>nvo5Lmb`7?9F*p` zMUD(On(M17>niX}HdoHqDogW$s;|YVICC@+24kcckiCiMmPtku^eKU;ADz+WC0p#t z|0;dMxGrfr&6YE|MBwIo?~Ah~qnvj*;BW&gcmE$~kHi`W{{S3?$4(vYsRhPgyP%eP z(29ZWJaG;*CUUu_O4w8|NDXDjjlC=Q!T^1VNeE=3Tqz^7ks@30+;vwue?#gMW6BFs 
z&CM>_PE-`wmjzv*7^=E8N2ql;?uk#a1$(-L!4Iil21f;Vnm9qj%wXBgqYV+lUef(K z6)NZMng1BmX_|QYq{LDaHUCzz)(&p?9^Z>V&$24edY!hVr zNvAnsiU+jV$hOV?2M$0N|W9nDWN>Nt*J*w9O0h)+& zF$JNu4tDd91)K>r`-Ti~r73n#0$~Q?mmktg_^m}H+EawovE1=rpQa6{Kx>%6h#Q?*pVuR{35k)-O@pGrp1#PE!dS z@Jnulo?Ik4!kJag2rfq^M#AB(={yztu4Pq|jcxLWj+i~tWUEd4Mn`Dy>$kF`e!xCn ze}QJNgHAz?`zSTi4pmessh=iEhkmmk67jtA^2YP{1%~CfbiHryAKGyawR6v2NC`-r zqoia{)Ym=f36PUiYbkizFNdKkIs8(2KBC?PGSQ>!%{_`j%>^iKRLkv5&3ue6r9f3s zal5N_GqD`BFYrqG4mc;CB1BG}k!EV`TO=*+fI5lUY4*y_vg_}}N zkg4JwXSZ5iUNk_-fgZE55Mt^AdZc+7Na1WjRb0j@zb{62FWXjYT#H>xox{+ zw%Cqf7P1w~OO`e0+k|l1kT054A3Hs~awo>2h`7|@sLlBHsablDnS^3esz}4^%lNv~ z04tP#^@htIlV`YC+Gl8K9lX>&$g_w6spCs1EV8qz#$gns7T9@w=go&`##3w)!CQ8= zO-5YZ$F$1sHO$6&iuPWD)w-h3quBBdk4csFlU1Y8O&12q5TdjS{Rbfw{v&yE?k{6jyIH)nB@t*pB4&AsRcZ-SM_ zTj_2UbgAh+o2-S8Lbqec2}Epr`r!|K6u}2MK30~b81!`hR{g!j2UXP;Xqn+*QaqPp zVXT}f3wqanvbGTV4OE0YNN(5DBKVO5=13Y_U2xrbo33p%7{yt|Jy(a7zyGpTfL*)k z5(X?*P#EP2@n}L367sbe3C}Dpe}}x$@2O@4(=GE|amr-vJ@+Z=^&q{`#(|e^5=O{| zW;PE(%2xb$lz+meUE17gu22u=)#K!;2K(_%Q>Eufhu@G_zTO$8TmbsAr~8VTj+L{> zEXv$*%X)F(>r3rZK^L1&7Nz8*^dr9qsU#dV${o~%zLo-!4X`+Y92~7f&9>^ z(*p(wya(5~#yV(4fr*`fjsZjb03T!ai;vOzE6wr-7;HXbZa7UGsX&AQ*UqwFS&U%| zS2eR^1*Mtx=SU+X!p^>8_YH-wjIn-i+=XS|M>Xln*WYvrfwFomt1%f3Q6zYxqNW{L;U7G_aN|a)buUq>`m)NmhkDnn6&%#DK z+WB&0p?39VmVVjzQXU6VEbP+`mu0H~sS$;;T)xf2h6f z4@}DSrxY9CU)*2z^MEa^*dH}Bo#-AKE+c3te_*ewWfo-4VJ zAbXbzs^3;Wke)M@-jle7k5#DGIF{F-y~^jdKbi=(mSe&1?7JZS6R6p0EHx6?jcm=D z33m`Bhf@6kjrUv1>>o3PO#hZ4gtEX)@3LeseP(-({-H~e0&wjcbn}3XlN|U_UhnwKB9R+lq>rJV?I&o{YLh$_-yZ zUR1ApsHd){6LtG$-^ygs1^(ld<>-}*=@&s&_!A_Agm05pnoi0j{-o0_87@aL_c>p*#=+`wa+tPk7OWP>-il2Zhm^lpxh>a`vch{_aq zW}P~~E38l^+bK%jrsLTpsGOUf&34}^&}G-}@>g&MV+iYX;ZZfHTcx7gTQ z6^>C(tGWmbiv-IjOEH{P{-V?LK(_YOeJ==?s?k*MF;tVG{4yvOTB;F>%|JC{f6)2} z4AU>hK3{AYnvh_snws*Q!r|b|z*?eSppLHP)(~OO%AhQhw`xUdOG-7;lJ|3NIghQr zH36#r$P;8wlCV*v0m=I*QkIZ*?dys zXsvW|4y}@7XZLMQ9)@z%8iz{Gj_Q4Z;-azpv=~?QH$cdhO{49Za{=!VvaCzorMh$Z zhx|ou>3=n6j%%M@hBE~ALOZcZrmJJteFUCtfh&8Axza6u|&3q=# z`+nXs97UF$Lbae@>}92(pVu9jMlm@W-nir6!=qVI`X*C^J!D2hRL4a5i?UE+ZHA9* z3yr5?$i>5nNv~g4!GSiEXKg;xaX2}_eZTXKF)t)xLlGu#sY>}tH-Y#*4xcUBVCqgz z?tvzzP)1UTz0>gL0L+`Rv5=*%loHn>nRu+MmTU$!u#pSvXR>x#M!KlmWrCjf0A3ShXN)XsMA}Z`~-s|AEBOhGf;(EYY zgfwHrZF$J~BuoeT#3qYUiIQ$gRsK+=JUm}L=g;E@c!Kqt3_Bv}7c^3Q4Y!dt1PL;!^ zbHR4kE%qO}EF2hD{FC&;pb7?nZ1d>Ei1GysKT<_wTi*o;@@NCyV1LPXjNZMl48dv=A1* zcK}eanYFq*w%n{a(NnMcGnXC_aNXTJ9C$?+kE}d%?3Uo;$s9pO#W0F*y)#= z!5sa5VdbR9Dg5=G0E??i4SArS<0x?Qf0-oy>vyjC(a(Ij&+6<@1b!C98Oy${M>BjO zMK4+05BC)hB_5Bfzd3y~Ch)c|=IXnWLfEKiQOb~5aA&7?JqW*qW`*pgHSXe zU>uOk-cewC$YNEXMNg{s-N#uB1zaBh_oF2Z`%ZyQy%nd(sPWUD0WzA#Pz)tUK&M?7 z@S7|kkRj>;liX_Nt)YGsWQDLq2>Eok(Jo-f#KGBSiq!=Ez!?j^2@y*>6HB)CQ=%RM zK@Ek&8b#uo=+hEK&BMuTl;d;x6nrR!<2m5zIx~OrXH#{dz`k4}(tlE{=8xTwiiT zDNTdle^C7mvE*{+6xOsXM?S9*`7lpGUBjniYa!+rB1Bx?yeSF#M#sE`f&Hb1%u;f( z|B17q_v5^d#EOYA-o6b%CiFB!ojkKkyimcKr6BTz4MwnTlc1fnSHFd8SSdAtnJDK0>DMg&a( z+ZSsO-vz^_{<;r`eywP=13+wepkt#0B?DW>egrhT`!i)FG8wFlr#R@h}19PB#Eeh)1-M9LHGBcXX*l` zsW+FFwQ-vwF#+~kzKoV#``k;soD63s`gEFK!szk}B1w+&w}B8BP{x=EjdGCFKCpei z>%-2`+AgUi;|HJT^-UZ0tUH?~s%owO)fGPT>(o92=kFhP;Ork39K0GrU$7fA2=RJS z?nWSedJjoOx2h_L0B0a4=|%>$^TMybF7toE?=Th@*VG2w@gMfb7W`{W?LXaoE;@2r zcG0pe8-vd)o3eYtuGCB2)+MNW)Q>c9EX7<=i4(FXf?JU!RKrg6>1^h9yb(P1`w2?vQs zT;Ve?=8hQ%)6!+t_}A(KLhKKCFMP_9gHCnj5{qi23Ji=QqbeSLys4ApyPDh@ZKhGF z3uX7QvY_>cIr&G6^%Sq>cJTKlh`QqtbefK+KNuXKog|lSd07(e2ZA}dxs*QLKfl+1 zGD^8Cz)3)V+0tJ&bqXkG{myFr&Px3<_5aaX{LfyRU%uoYtQOFJ_??vkMZNx)L75R5 z)x>Iv?jZ~CBQ2u`iz-7@OU*ec*E6sq4hzyaF999PUA$Txaj5h`ig%1g*=s9aevSrYNMI)PditXBFoaOv`HGd 
zk@70A)k~(|zAf=NR%xZPj1A3*JgaALX1m>L=C1L;FeRT~$>nRf)*JJwj<)W{EVPp( zYo`hQ$P2<_r%vqh!T+=)rs`j?6bdlRalicOzhK7$|Iy0)7Z*&(-vL;q4;I{G&9-r`;eJjSTXqu0mVxWep__vwxvIQ6}r>E^(l zK95>8^&u}S%WE%hG=vp!vVCGK*D$%PFXx_6iy98m^75{EJ)jKkQ=v^eX;m_+WJ!!^N>k4Ob!qZzTIWeDLh+V^0?za`Ra0XhoV>bnYUt zllx8X_;F|6EIQTxBO-u? zsUFG0Fhwt3j#HU4Y45XNQQb#B-X#}F*WFVyY8DbHt+y44!NU-FrTE8jbLRfEM7)84 zq}+G+Tqz~2v2HL{-sWFeb&0>f<2?UEQ02e8j{gOmW$cbLuBwfG*ROh-Al1EnGF)bb zuU}wWR^&A!^x~=wm9sw0KDUf=C+veP@$)A2)$x0KkDeVH(002P zOgVMLVC7wn<=NthC&q*f!MLn@@8Yj&aGJdO$9UlHKim66Uaur62In~_-AVQTBt+kF z`=p)lxFlTr6i;FJmVLJ{-% zI$hSJMD$km+YIl|X>S0)`3BqLL|OpMmjvhq==5W8D&sE&*Ct(5w6}-x{$>2e22uPk zk0MTy29PsDs|`(-a3KLh-HiI0N$90;R+4eQkWeMCX@7ZWAqxVYW5HInPo$4>ow(nc z(vU0?*R@?~sUpV{J=9lHTkx@v?TM7#AHG4>-ij6&c%U2lSFrlOzI_;otr}&Jt3CSO zQ=lZVEcvm8aw%xpqm@aG&srC{uu9BXZ~3+)0er>I&rhlU#ZCPFX0u!(K6l+vXC+z3 zr*m1|`o^&H@DBYT&i8M3eZ1NppuKh<@vB4khs&WH%ft>u0bDbQXx>h^YXA88%a9J{ z&z~*AW|Z5!&b`z4OG5^I2IrELqsNf)k9RX%T=&7iNwHdDF?V0;wK@_{=sPb(9>2R< zRWIapPU!HT7kAcXX1k(X1R-0x0qLRyuxfOGYK#Hvc9)MMXf9VV86j?`px2?@ef<}# z&y}Sz{Mv;^z?_j+A;dCJ>XRW37XAPPFi8N}2XCnU2a;*e(LjFlFzS~Y z(x3hhp}+oZx8A>boB#Cd)e_;myW~@IXn<;e`5S%s9{aCD^7O=CMV%bFLD&HUvZM#0 zG?ia2S`rJ6$<@Da-K&W@4ni|(x%~p*GanJxV&9V!+TwiB@Q*fwY2z5DCoDX z>6Fy8BAB9|em;uwRHkKaaMkYH`-F>=K`x|e*b)}zi8{V)%U}DVeN2nEMcvaCiK{uo zdnaT@^PKwf`BTUB4;-!MX~Q0*@%!wX(S}IS7Ca+4*0>SkJXB_d$R{49!=K@vI)E?t ztkF-f`)#pe)a%Uymt2SKPr^aN{h|?PUwN2l(4E~r7`FexHQ+ve@*Q>?-zxe(!G;|k ziNZ{Qu={}gay?`U-jd3+Z}A>eguIk{)fM5&8)JUo%6uqpFskt2F(5Yy4#L0Bjm$#6 z&D=&e7Liabe6(Y<8o2+%xmjo$d}ROAKDrld#_2gZ|?re zb?7e(v3vA}P1jWsyB9&4b8KIu+^vvxvi8f|W6!3Zz&JBO5W2cFqKR)HAWPGH`W^V? z<^zs2qm>K0-*4C6s4YQXbm_*f+eHy_6BP$WO|hMThC{3cD3puy-&D!<+<^|`la0Q| zj(tR+kJG+ji1Ro4RCO>u#giKv6Rc^|W4qIyF+L(*JPZuapFT|pojELYm1|pr(=Eg5dx6*PU$hl#}kgyC~rtph)_=Ta5kPRg5+YI@OYn>fRMp z;Q9=i26Tc|f6^uYxa=tP1}IC(&C_0_ zxzlJ@f6}$*{qcG>bc4qH_jLvyLM8tZ4Kj#XsnaX$^hP*|5VKa9G(Sg}OR=51&wk!_ z-M|N?JBT!yu2-?FA#oQu$z(H5`f{%_cCdGD6r8<~@HC56N7ZDXtF!k@QKg|;>HO@R zH$DT*PyUbg(BRiH^CJJZT8sbaezpG3ICIfcwJ!|WB0R@W#E3petxEpm;fGGo_FI#0 z_WKHEUF=lt`m7@=Wd8s(m}nQr7cBZYHALr(np@^3J0aLx*yoi^=nm{MmKzw}Gq2YruAg}*Sf_ffFGeHU3}=GK7d6-#9md+jvkyXH^eEtf`< zl12N>(p4@U7p}>>GApN~VxlGZ%5-r2@r*d)bD@LXojo%jPd7kYSimJ z;m?l0Uy^EQiai-uK|D zqy3-00fCXX3;z^k0o|2q4> zJOAG~`~SXOn-4rClM50Y&E5?lxwoaV_?X#Ep=jnt-_4LAa{)~@O_4iW2a7pPbk1tt zT-VRiFU{q-q@}Uor@_@_Nd}qRl@HY1Hp>C)$M6erQ*;%V8h=#r=Ks-T#M-mX!@~h4 zfc@`-kN*2h_J0Hp9VW4ietX21HlWz{V$_0(wej>Nsw9-D?VJ#A|CzU?`bkgTM|FsN zxWWo6MhL9~VtYH}eMM#Z>EYs4F~bC_&U05ETyHXMy)zZV%68obu1|k3+^t=p#CuS< zM`A9bTfA%;6K)d|sc$#%e4VPy#gOQ6U4v? 
zNf|zQ$*8F(t%sh%lsL<@+ph1SKF6}yj*x`9xS!a%}&;2A*1_4K# zH#KxebucfA0@)YR<-u1*fO25!Z1Qjee-$x0JsXv^SQ&45JwT=FfZXdhR);BVR}Z{C zII?JoDw#~d!C8OOJ#q<6-Wh$7IgMuZsWk5Q_~a~`N7Kwl@1}nUmyjuVnb50sS}v(=r4IK zM%t(4&P<$C{qW3Id;2--=R*hmpKY=PdIWync}ZacxBFlT)X!XWIOEeS$Z>nbRkZbE z`+Hf@8?HuM-`YhQkG}e0?Z}}| ze3OZGPmCAaFhBNbl#Y(Iyw51$-9sKR&nHjz@u#O%pQy(|$n&WsAH8X8mwYV)qq&B3lB_lEe}TXih|hA8Tb% zPT|EKUnf^*)uxiyt*_};jy)CA{Hhq`oz6x~!!*IjXCK6dASFEWEqxHJf%K_{gYsiV znuN7W&NJ1cG7N9V*Co&E`*v3+7~W+#x4%w?*n{Bu z4T!Kg|4oC}KtoHOG*-NL-yEkrK6@Zx^r^^uHlbxp^a~14k>jXZNO69?`Dmv*SK8cw zm4;$dReqe^OAOuAs_M+LRlN2I>n+LJiXBW4)*fiOpv$#Z0FlH*h;3D@8%7!w*e>;1 zeA+2J)pzQ-+;x_M!e`v$&LYeu&4; z?I55g=@KTJ#llctb^rC^vg`IU))8A(-&8X%FZ;Ea^2J1=coX&r8H5LTqAujf!vDkG zdqy?+zWbgi2r5EEdW{N*Qbn2q5>aU)ARxU&k&Y1QEhILi69ECGDjg{i=`8{2AiYTq z5Q_AK5&|i|dH#E?J?qSzz1N&GdtS}jugH29q>%f$@2h;jp9{w(7ndEiTq7%LQ|Ws2 zO6H(UsbS!({FjT|a|x6D46BWDQ#z*P7bS2{OpFmBJ6eMSZZfK<{q8A2xlD?dxXPTO zK&soXce(QEdh(-0p%o*hz^Boq!y8=>(0vURMG7%leNVFNU#7Dv9D(ioNm zBpl4G|53v6|MJ=y1M4n|seaQ>xs{kmdtvgxT(7n_c^rAc6-$UCd+3e8cdNqQjtD~4 zyyhquWw#vhw+C){7)E1U`g(@4%=-Jsfa==Y|4?Q0A1ROi3)k2K9}r#-!)=y4eYX+b z6KPXTC^=Ztiq;BkD|2dd$e}^Fn<^U-4fzwjf#5ta?KT419O^wUXwiZ*1_!AqqW4c ztoE>CDEqnwh+I#B(EmVq)pZuJAAA8BvBp*L{Eqm1_CmHiWrc^~k7gATdtX<&oYy-Q z@In1tT&Cr|j5Kaw8iBSi=9Z$|xV>9A7lvfJD|n|5_7-tz2(pkre}T~TM)A)9hz&Tg z43_LHv_>-%`?l*-&Whh$b4_|#6!_xyN!m$z2evZ)%Muo4&re1}VwwQV_FtZDmluWB z&7UjwY{AOP7By~yzk7Q2RX~eUZ=mv2Il!EOTQu5sB1+h4@YWKYbMaCJcP%=PYNbR{ zpM7VvMJEdOJ3ZLbxwIG=r*f02+8-=fh2@7or8Q}QgC6J+oyxzwSiE~4-w}zOy6RW` zWpL_Aw}A0#eIGaE`jZgOL@(ZzoCXR`VpnC;b$x2O-Eejss;{#~$X^duJF=+XNrP<7 z6uM%on5&5aVBVkkJG!ia1E zBc4;t771!tf(h9fAd6<_m9zF9_{)LR)n1O9MdCe}HRE~JCw~MX2A}*Gez|>Ur|EB} zef!s7NXpVFg`?+uVsfL`sw8dA>Q$}KNrA}F$w`y?%;SbRNSZm+XLR&I7yT0bw+J8E zfMCA4XtJS-ji&N@bCLdGa3nI$H0CLI-D(%$bLdUn@|k*O#KgdWF9k^Nc2}t4Ib=a= z;#zc*4q;TgE_!M3SI9f-NGk`=;RlNA=li9vd%u^P(iA%d)g89@JzGD2f)IpHY8y1q z+o#dhXfz`*xq_m&p3Q@~8U*fCv6m)Zdsh^stdp%*Jv`mxIika$*3joED(`ekckVR{ z!zI0GGL4|~nD&h>;4Z>Eaai_3Yq**vb`BFw5F5RE^Etm;&c`#eT$x|NpDy4ppD1y& zNr>oF@9y7F&i`Hehk%X*|MW^HUhP`!z!c(fe3}xkcdj;%%6bYKT8Y<3I@~Fob2j@d z*B5?1&)+1tEbIZvbF^A+8e~AmO2jlh%DOyy07TqHe#;(mKf*p`4sw`!u3r4D&$F-M zNVwO6w$DL-+4uEtL@Jlk4+{(4@0Iu*{Dg|_VAjw@!0{#eKkgGZHU+9{-sM5v1Am@< zx%D-1H5jB7$5jb#VT4|!1HFt3Rk#@&qX~ox)nRn9gGZp6)=?YYq*7F=k)7{Gyc2t? zEwjMF$#XyxBCBIf0WaxNxp=Lu6`+! 
z@4JPk^QV(qlH82#{>%kU;7!8uDJ+RPrw0_pW6@Zb&##KkAq;tbHfQQtZ)@2Q8xMzN zNnV{P4R4#1LQZUql6UC3V?ZeMjBGOZl72G?GqAL}2~3N4JgpJwRW1UPGN^Lddl8qB z^dq?aR0h2Iue}KE6I`%>+}JhGPpEC*4%Hv3c1t&&q=Vg3Ckpr+8%+1XbLo+yA5RCA z6g`tqY_d=SG#bB3{JXZ?&16wScL{%F_L3kvHwb7MjK?AF54yoRf?#kv~S0e^REPmhi|MimL6+?N{ z{ab0_JD*f`Ql;%^&}KAMS<+Dc)3)I#m}& zRO=pk3Yym{C(K!?DyVj}3p&PxHgdme-{}ONva1DIIfwm#xNb+)A+kg(K?TSoo;O{G za2xVAGaVkzV*3}owU}JLR_@!piHwKxCCPC*<_F zg#SjudLus?g>^yx0k=SeFwA7+l3aUZW@tMU7{ce?@O%D-o%51ZiE^sk#n)27FC3CP zSG;^@7{mis3KFX6fz_C`KVCiBltwyFGyQOu#*l-#K_+tVhC^OrrzA9EEXU+z7HY@J z3eq-wM(mCQiZG6US^{tP+}<#FiI}raoqip%iXOWdVCr6$~$WS5UE8M~JW zVQwXQ+WKz#senXx1-rom9{~A_z*-v-ER$p1D|#Q*y~-m1w5bw7Zrf_13}Ew9xfmC_ zUVkMj=PuG1tq9@2HdzY?hI_o7D|`2T<%Nx2>$ne~Ne{jQes`T;cX>&!v}CH%;_my3 z{zX19;!5~{r^lX}EDqm(m_f_%iJa)q-Gu*R=Bpb9(}# z8$^Q!q9!M86)S`C5r z&X92VC)wzO!8_{1Jcs*dXF1jJlOoB-$;5~ybi6uHiDoJzWJf7c%I>#yvX)F)8-%?= zbM5n?1r(E8Tg8(vCL)MEyEe$`t}WOi?meUuQ$~zV}&&z$4-|fuDk$ikg8`pn5G#BP`#@+$WeUUT68ls3k zIs>`5xD9CfoYU$e-L3gszM{)y!T9@Q@Rb^#*)FzNlr;siV6kgK1>WCu zHxcjXB7MlcyC_@ormDBU>e&JJBX&vm&`yJ}UdUP~wW-_3pta3S3(^6=lo1*_g~m{z zb=_)OE!8YXvau>HFQ`nXUy@OR_XVFU+MbF>iDZ z@4ems`|?73h`=w?m3+y}Ozc_g*!<1alJm&lXma%l_;J#kq?Up<#XydztNsZf?FS!` z0%TEAdxTvREsK5{>Mno)b{(l&9tHAFmu9fWsJ9!*HTsM4#r}(GeLl>OmfyX-t3m?4 zwhuvsh&ON#BICl{F%viaLcusAy=rWoQ`EP(WfRh9g{$2M?N^MaIzuMLWemnwLo}<` za;#@^!Y(|U(=tl-P|Tj+_BgW1e%j;BB^@Ic9lPfK;TbDfbJN6P8T1P*gc!^gPzNLN zkZCS*Nhjozx~8wUlo5OOcRl7Lc9P}us+%C0`>zFK7cv5rwgKY?{c2Eddp5xk=(r+~A7nWa7u*&CJTu^WCE6s}< zXEmxVQKX>$W?L8;X-&@?qET~FMr!diw`0+gQY@DTfo$5QimCM$O0rq6@wL|zvYhV| z;Jxe;`>}ZwP>5fn9S-DDIH`B*8tYOv7QZ&lNe|c!$gVPq?T=s(w9g3edbS{PAuCIz zis(1};hRWy*`{?Yd+NT{|CgmPg3@HAgs?r6^RE#D>6Y$l|l1?Z`G(THa!uYv`1Eo{~FX{b= z21FuGP;V4gHM|&xz_%TNJ*Oz^&pRCr@~DM(kI_kxg=}QSQ5%wv-Z#%j^eXfy)u>!z zyG80e{x(sb6WKX=NjpIM=a~o9ESLRP>j%jas4esYTWC9ugQ^$k`CYki0g$q*1f-ej zl-P=S2%Qmt$TPmVF=%ouf$=CXHg12E{V@IDC+BPwEXDr@{DEG?eJJnr4#fXEQHa3KOIN9Dg%2-RWryc^#P&lL+Muaxh?7K zMF-R!o;DGWgaLdOb_+j^UC2TxM$1J3#_J~5#dB26Fv5opnQFNU8qJ=*f|kecIBl+E zF#q8rfE*=?#r93%gwUG$tv-_?g-I@g{thuuN{#{GeitcI{|kcbdeG)B^0pfE<-FB1Vk38x>pt6Xd= zyw$(j<4vhG*;DudmcJ}{>fOvR0QW$#(|eR5yoX;fw>;8nW2tTkj!{5Ngz^DHLfT23rRKTSWaGcif1;` zgaT$PC3WY9)VByF@(Z1D|GWCF2!Tl_R2y=7{#jjZWunRrj!O)^%q;;QZ|dRG_ILjv zNe>9vZ7qtD8{m-Wh_kUJ!lmwRr^FRt@)dbb?w9U1Jhhw+POMCE81dqpuKP7w*=TH| zeQP_fA%rgC_%|mV>DIM_!K8r;{Q0xwZ$@_Kn5Zyy&+3#*?c+(l@ zs`~J_#5HW$@Q(WVmkK)Gd_j3uPYxU}qXw{m($-ThIt!dl`b1D^-HlAcMRG3Mr+zI* zmrbu5&l>oa&0ZSr5L%su$y48?^YP0JZHs;(8{v&25R5c5M&JQObvh{BfDjR79j4NO z0N0Q1E>%F2ZrT>rfu-JMd<;4_aNO=w>Famo5z9$*wg zWDRR^efW3H+yC^ldo|wtRebrA?Pqzyg+nJ6zo0c15&nab;F`e}z`y8*kR5Nj;!^H9 zouP6R@-SaQd4V_i;(@vU{WmA&TNDd%;^YGK1NJ#M2F#4O0>35@5wUfM;%HsOak#;X z27P(*FW@@q9u+dL*UD8qFQ2}1gF=n_n|Ex1R){`UfkC9Ceiqg(r<6;juXxY^s8Ckn z-xV<*vEh==-oLXSOh0<;?~`7WLyO?dNcI?+@$tpV`>`aV zvc0YDfDdyJV~y5hr=X5zp>+L~Yw2Mrafcz+GxiytH^i&Va055i=QdJ3%^UMiWVqW+ zclMU8lrzh!-?ScR9FU$xTSb~Nie{CoR3*6F$e0K)>v%a4_LdRL31E$V-`H|(9JSczZ{6YF{QRVK|GRdPe z<;DJ@mo=rXTzGnp^IP}ypU$oEDZr{%;sM<$cc#gZ|7wk_uKrD;;3Vy9sY)8_@ek9K zf~M^F9B*a3Bad1yU0>Fn*0KWce0#XwI$aEl;J_*#M~tqrgk`R>d;OtwRa0WJ{2%B3EQ&r3-RzY!+UJd~Q7$ji_-xYupJlOPa{y~f&1(esOroCPK+`H4L-0cHmxPXF>m7UZ7 z9#?}i3**g&hBYP$_UPmW9t_VQZD^AaXvr#WCJA2{g<+=36KWb4HXAL`N+51aH`5+P zDb=b={OTPH1W@mt4XIy9SEERK8S>#*L?4zF4cnzkBP4#M&P#ecp0QPz*A(ny*!dQt z>%CRa6VYL z9vld7Ykp5KnVG(RGOY(mnmMRY?-})kb+4@ZZSRk);j%fYdPEg1V%O_ZD(4~_q+Np_ z)9gysFPeu+UvjI#J5OD`zfgaT$R>Vp=7DKb1uY-qr0cn}R{H_pp~9-@ zt=s#P?IWQ@L`MQ$_6jFNWknL-(a+y0Bnv>fJw(8S{-f9CcGft(I#Vi=a`DS zyEm?LH&%>2G`gvOs>O=m_V^#ce&UNnw 
zYohgd%=WF)1B`>k5>go7MGs;KXy6z^L2Iy`1|U4?+Xa%nKr8GX@dt(hhM@U_`@LJArkbLoOUxdme;LpmMd!^ox`SE1SH! zzwu_yd?1d!C=1EGX(pC&s^=Q^CKW;vgeww3R4#=s`kAKc2~b2CT!y)f%X`y8|7KmH zWi99RuoXnWv0lqkH4wjmqOvYvY2T|L@4+1C!%gHn3@2erB~U;FB|O*WKKYXCVJh>5 z?%`4=L%j{q9rwt_UztbAG({imqM$6@Lh{jQW=&kPzj5vSO5n^u*SZjJf(!I;mL}Bx zTk7Nc@9#@dgMkci`B-U{P5k+Ah8m(wYFjp!m7NV4rP=fY6Wu$pbbD&C9_!3iSSMZTCoZh}K?} z)H`+`@$oj^$UCHOR{q1Fgfsf2oRhaQnhM+Xq(7#y>s``qkL>MkyYgw7V>3-RUoY1p zZsMWWgMXWv;sWsT(>$-yO6WHbgYrRz_LXFC{>(Hvs!0MCb~WZhvMAWu?I<{gef^vh zTk4-LclgndX({TQ9uze?u+BK?7s-!S_dpRzK@Pa~ln|wkzJU}AX3N~?{601;zrFGj zm6tPfBByQ9c}X72nWNj4H59+P<+N3O3qk1cbbr_e1-2YEsO6K*;}nG8Cim*ju?jF|pdb!s~e!tymmPLsO zYFOT%IkyHxg%C=(Y~55kR6MBLGcUf#h~*nP66p+%IC1I3ktGV_w$<&FzZK~eZiYc+i2R9};X$d&ya4YeIJetf*d9*oEWyIoU890`y-g zkaAGcl5AXoTkQlVS;8iaeMi4J-Al}N1wXAfvzJwOS8B{TO)cnmVndg}q08k{BwA%{)+47#>`A% zD5Jrh+Mh{KwdT8*Y!0`550xx$G6}uwzM3vp%rm!BT(>P4e=Cfo8O};!B=;FrkdKLK zEjW-PkqOYPGJj#O$azq1W2db+6W5(&)9%IaEU*5uNKclb-XiuHz?5>iPs^hVZveY- zaglqLbj652Wd;IrV=J0{z@E{kk}lr0GpFA@&;oCNUBnFXh+a*!k@5}R?LF8HtFF9e zlp=H3-0YQ3O(|UeG|6F8aO|j?bbEG-VX#n19DarJI4F>$S3$;>_7jtA$D3U6`h@r_ zFvb2`C@qyY<*V3DtJjxJ0Hml`g=m=q{ai6#X{?Eph;9?mC@J2sXC+KDN+Sg66Z>a6 znllpWbt6AAKT&3^dh^yuXabPVHh%htAw|~STCogqLU)c=&Ui65vuiLvR|O-LE zhmOWK1m8M(iB59udG{<>v|^bc3z+~Dbm@c6rO98P4+=;!`TllbP z|8g=Mo!%$N$e?m%{h&*Q;s$?8paO$%3g2Kr@P`xBd)em?n$P%$c}+fwU-HQGT1bh% zzNU;1MlE0X)6(M=;l_Kr!S`uC(SF|g+C0X5yQJb`Vlmha{9$r`PP{OavOY%R`rx`F z3gU{96p4M$aXh-Y&$9r!m9HN2Gyuz^aaog4xR@FZck*?) z_6_!ReRM<4gY&)HLs!cpiPwgzlWCjK8kYBPQOa$y4IUi1rKHcsY9BqETUi;gZQ6ds zC|SsKnwi}Q{N*&pp1iUMo<`rJrNXV22D#um!*Cau?|(TnLt7JFKTgHnnzxK!t*J+` ziM9)hTQUUH&fgw~pbP`A3&%vs>?I zVra1@a=}oKcH&;%=>PCqelNb#6j5XBu^-|ZhWB`t8bCA>f!9A#-C+{qNcb4?w0Z?8 zpa_np%MMpb$$hYw8HQin*992f)xv7qlNURxJb8v{ccx-o)Vlc?x%a!m0XmTt8ALG) zVqC<=VI-{JwuIXFXN|gYhDosJfd#X_(g_b)&KUJ-I3S2Eg2BAarb*6osadJ?nz5Ma z>?)i-c9S@A0>0;ltx}55rx*g+9CNBpojy(N9?IVH>F)vw^90krbFpt#7&9izCOZ+l z&#(luYyr*|F#9&(b!dq!0kr_|plymr(7TS4BOq(%L7vs%cWuaP&`wsg=2FdEEfKSv zlg{!!b#WQ|DZZ|?Rp}ypC@y&}50{gUI+jz=leN8(HBs!s)~uH1X5t$fNo^1d-S zfhf4J+tDNK4;BiNyuJHkQn8y!ek0`;gSjamgB08P(LJz+e6rXv@{M{hwV2uZA(=2&DaH@(d|wnY{l6;W%T` zIQCMVg%G`_`_oRxJfiCao)X}rD{8fR?uKq3xPkwN!i-YBZiAWVIi#ZLF%WVYbq`!J zyK+P4*m1H)8H;YB1T>868Zx~5Wx@x(`T#J_Z^#XeVIL!)pCOT3aS#sJ_u8#p-unsCJghPA9*?}d;t^BQmwHY}zNk-ubLbRQ zoa#kxCNg${t2-iEL9Flxm1M1F4d*w(Gq=`%*xPN4Du!z{u|m2T7_u~A$tNT<#XJav zS5Bqykw*qC$hrBODh*-z)ai0yZ#{eP3R6~atn=~=cTJnDZ>pgx;`V@-qLW$h%<>j} zZNH-Qa=w`f0vg>w)v!3uwhszz7k~jiXEqOQOoYh@Y*9bmrq2Z=H2kBtAz|zya8;Uy zD#u)gU!i?IK!M5oT@wOpZDDGy#v44UX9{9dj(A)Xq@&qdUUuKVVW=birWE$!34I(| zwf6c7IllmIJA&Y~a&O%^0G(}J{3Dm6Oo>Eo^ImdleWm((R_L`tmEU-DxSpn`FtyqZ z+CR1Jo0XBV-U%z854pCzTwYsoC6?9l3m<& z+8I=1^J^gSyAVNlC79oQJ9-)GguaLvfmThvqh2A(nUW(4$%1X6CQ3tJho04kd2>oB z{r;(ZK``T{mS*hr+iTZUtEzt*(qyEj=z_47<`T#Bk?oS;&PnJ*Rw#5wKx!5C2Ss{K zl<0sl2KDKaQQj{xcL^Hx{&|~nTvDfR-MDrqK$d}tg-a{jn|1qxT{ zE4nUlJ?G=E|AO}BG{$a1gN?GAJd!dVD8#+U9qr1kRNw^Z{^S0=J$(H_4GRXhx z;}s1lIHZbPMi}hSV8?uH^C(}hT;IJF`P#jI>XEc}8I z&!&KwXW_btG|Kj^jI7NoqhzrbYRlDxy%o#X%%{*zfAf;l!-Ua$%~+LM@A-Av8Tb)3 zbr$Xq%!L&vmm!Pc7s(Zib|QV9B5Gu$lt#tZX+xP2nsc;L9YqatkKdLdxb5A$SP_-3 zc@XU@)(r)eBh@{=!&M_%Fzkfv80p<~&4#h<)iLd0bl zO5YmVZ1AhOzrZ%H|C5S}r&vn8n8sL<9}ta5;bc|U$ zT=Z0TM~IHSeEwl#a4;S$KjHc(ZtqsnqLGQJMg8z2Z`1eRekNfTQvcrW>3o&7BZ-R} zOoX_jr*=D`oLC28)>i8?vA*QN2h}5976!q?4@YgTp1aC!I7{0STKITpwK;+mM*f0F zMPY=S8;G@SQoA2gmC-^sXK=VKpCcD%NB^_T9c(RwwiJsJW9$0*M?Oe5-0*sB5C8Ri zK5>#6#vZBAg7q?bV?DSHqNsYza}#FTF{1IVO2T&}UPVa{b@OQhC1Tlyb9gZqfN|D` z_oa{Gsv`hF<##eOJb^o< z+37`b2Q17*@l|`#FpId8ZPY2otWzx9E7;12791BDjX&xDx(AZvV9+l#8@W=`jv2xl 
z^Et1sQM%Wu@w3)m?!`Rco_R#!wyKmKea|Ojdfk#J z^DGq@Jq(wF{F=hBC8h{xqbO# zVyEaJ0368Mra+L>CmVT$1sBfFYUWYz)sH*aNea3-xp4OIu-2Pw-M=8ez`d|G4sifd zQCugO^&bW&7$=Zdmo=<6%;zIZ&V1D8`nHrAAckvjW(<_6y(4^<`!JmQ*3sCfLm%6+ zBWr{zd}nuL9PC}lb_w1Syh%sOX2OVtTRi_T@M>_9p$`DIrP2G^5X1Dv!zOd$$MGwn zdY^jt9$r#9^SJ{4Du_`+SPD!E9iq$9`=Nq}D?#_DHyVgW@715<`IY*hyw>ioI$&;| zY9RUi`>dCuPffLoUA%pHa@Nm0V|&a@yreoia~hduV;TSS>q3=9!`?7@d0JU;3t;+25qXy$ zT94!65PW)siC+*Iziyb4D@?Zhr{Hh5OY)Jgj~SQ}eLTkhmW~|V5h<3EGtBlG9V!kT z@v*j>J2|8rx%5MSfLd|9Z~&TB6ol+dvm8+hZhC-(r!1}B*uM~)FX;EUubU-yIq-B- zb}NFPTuqUs4?+2=0ifiW!46hAkqz8*8Z=UI&Tx9}8&d_(B-@QSfg7(BMpwG$vNX1H zvYLABa{Km10ao;bkYb7PefSP;V=xi+bdnPpN0{v5d2p|em>P8$ zGsDkBv^~YN37T5du(=6V51!VArG8Yf1fQp0p^sr%5&UkD^Exd>)ksd)n!bF?pkH=A zXGR2=1Rb)pC~NMgJ~1i0_$tADWXH{clC0I&LM}-5tzqW+B_vdSA2lKYwz3Ql4TVBL z;8)1Gi{N;BCh`%1@}QE4jXap+rLeZo3&gK`vK_EUJJtr@W>S!EU0X2(U2SGvgmuAU zpd3_Kq2&hj)S=vIvdtwy8q9v8?03S)_c>Z#uPYNN+FP%gJ^r4TT-q))@Y@cUIkv;m znLl_!C$*4TPWv^0h0}!ZKau2~0xZgum;Lw{=uGN@_+Z;-MGB7B#!VoDRAZgH^>*9L zD(^LxkQ%G{0nhX8hgjaIq}%KACbDfr{UU2R#{p{E_|JUaPmH4X*CYIQWY_V z8RgL$u5;O=md(4&-I=R}HJbzPjhQ^znYPx31lsl&%C?o{gsG4*v4#kp=Fj)jlB>Qw z{1J8TlnraAE}!$-ZFJG~eCJPGb{Te~`>Q@Q>jUAPY;u5nl3}9O@$i7x)4kROULE?=}rTDh~dx{!>ai^%B=H$33a)vn--@ zXeqK39zD%MawH=QsIn7urVa2#Oh_x#<9JcMZ zTg%#ATK@cV)%E?$ZC>{nZ^f%*xNrku1E8tOsltM$Tm@>k`oFtp-K-Ax%pa4XYPzt0 z@xB^|dG%?{p+?1Xjla`l^W^$e@3cf&6X|fqD3NUhQwD|FNA!SeSX!}BB7g-aT!#p4 zX}Ftz)Q)7Bqt)QsKCGLo)#X2=Jml#8!}Rw3Y44A}b#AltLR?WG`UtoNs1hr1LC@jI z$i-%m?awi%5q|^wc{`ma$xybdiN-IY#awPf6cTTOG*5(qZNchVjmfHhPN31w=pk40 zidW4Fti<{shI9>AS`mhg+_zPofZ6?Ls68jSgdJeMm#0F zK~?i2nW=ViR4#$8C5QEUWJx(D<&P(+(+=GYDi`R`FrsxFS)*q+|`zKddy$$6ggnB197cZ zzDYo7?oYK%-ahYd%^QnQ=j}Y=M+IA#7U)!H8}<}16xJ@rVxkF(hg{_P7c(SsC%j|j zSK(aj#nX8&e!ltRm~kOg+!GQk`9KZ!Np(@89I24@4Ra}| z;9pd0+_|zQ&Jx+;??EU_Y;DTUj$$j1_gRdKwxqUEuZl>fU>4B6C<(H#C15HDM@uSi zy%I+|LOwST6o-mw+q1kCsyRg)k5ZDdI>t!oVaYvEG_DenN(@~BwiiJU>yCw5g8JUP z)!&4iu&mIO&M8MR7fCZ+#?eiyy-wSY)w%tFe-Bb|;J`J+*7zgi)s&qHsLYF_y9g}< zV7{n0EfeW{m$ePa2EXI7guO`iHL89qb8YpnwfH`>sn5g0@POZ?tlo98lY;_l?-Am3 zK;&Gd^2>+|N99UiRH}XJ?=-Gt#Ieas+MAo}{Csub3h(c~9lAZo+W67jFua!Q@`}ZD zpG7E;hM+e(jZrcX7mP)VAop%1RDbws8Gmv9cZ0CyW`_lmC!`TTr$qegKK)9c!*S8C z;@s|oSvC>iii#g&8l#;21+iMq@E;wMf*|RyBJE>9oeQ!w2O-gku=e7EA6cwe!edn} zOg`yzRA_!()NjNS*v;q0@k@nRTJe)?>W_z=h2JA8hIF7KORH}`i|)L` zcU<7~%&1$4TF117S9`-FPJnhGrzA-4$#F}7waU#q$ zy-%V!$Oozih>Ce0V`g%4q>{B~I($w#d#p~ZJer41jxx}dfyWy=n4^^!-H3C*A)7$F zwPuyw2|4dfcFWbe#CCf_ph+b#&`TdC!}a3m5i_pI$JbV8i;)o6fn%hfbr%5>3sMy0 z>|qyiIPtprXn33G4edG3TYIPQcZ6He`L5rMl4}}D5I1Nimk!Ot{x$G1`y9aDhVWpW zaMP@Kp8Nvp<$CfrC;XfZU@0Rqrx(xIR9#NFI=z^$ma8Sp90IQwtn7kOgs5EPRKl($ zdHXy3!N^NRno*~9??yo3;|4v?d^yfk4r<4^SvG9>ij`zi_155Q_9`i#0B z+9a*5LHqmnC#h5WsavOjIsuXq5FTan{>oAuWk#WA=I8rmzSc#Dm56P12e$t%l+;`} zTCD@U!Ty9rU88*gj2TMAj*mmK>P|~x4x~;^bqfYf)izS_V9IZnL6Lw&!Ik9;lT)9} z%_$L>%rSlZ^VF&e6hTG8}U1FCBS$z()^<^SyAv`#=>C z7mwq?h>~%J6=N924f`{M8ZniV`Q|d4FQQk@J&c|^yImBhGhrPT3RMf8g zFENhb^*|wAg6vU7eN6kL{s6W(bH`uULa(7_LbNhmU%ltTmyeS1dS|VEsBWW+{uX_* zO>Nj3PdhZ+B+6b|33T5)2vOMI$46|_2zX#QZHbo4gmyZqlzsdT19>;haA_#Eux zDIgvy`v}~-Xd_LhrC*!mNHpur7OT(^d&Aisy|{Gz-f;&%>Mc|x+NIbpwGfWoZl*x` z2%>F$u*E-U*jMaisYAw}d0SQo^Xn_0vgLDaQ>0q^a7&K39R* z48A`cV$*$CWDQq7i}I7|q0?218S%h74w;N7*WlSuHE&)riWiX0E{4vdxYjW&Ba8I} z+V29F{WDd=QfKiusx4(TTeqWW5yY z$sa?@0DAZ}0`q8C89BFIpt-ZT)XwVRiyB@9&)JW~t!GciUOJ^tPzCvyt?rsAY-Bjn zOikSB4bau+XIvk#I_Go$9o3r|wSl8h$c2*YBZ%&_*JjelnuGawkiA(mPdxBYVhH{2VT%yVv^*!VbQ(}t)W z-B&UlBulZYyv;98DB!_wWiUEw=pbL+{?^Vba7g%lRbIkEI)#}{`8`lX5 zV4^F&e=jmKe*OI>!$Ik++@pMDy4YlDS>pkPZeTcmMTumM2=zqFA@B>hHxN#BUdm&7 
zo6Z9IeUql}U^^d`iDV(5uPJ%AsiO1B@odq(3*jvD%gUhw??}&S7z{U6jFx#I$3oQ| z_i7TxXER$N9o3;h* zEeTD97^`cBh}8(mk`H)7^bC`K#^7Hap(7!6gI7C<*9z*&e7xB zEAl+1-7@u&hk`~SM-Z|^S~4w$&VBkUWXf}r-lzwCIIO26SX3s%)bxo{e^Q6cHg66K z?V&D@0)wbRDgMx)RG_RvTcSN^B`Fwq5V38@KhI^z92?))L7yShd)e{J-3Ewv@Uvt) zH-g2&?t66(XWqVHzrSU9{z7`P3Q|BnIHM7vw2b4UpC;GhS+<`O3flwMov}djdto{| zWs*M)Z~77t;s(Y?)m*z^>Hq}Xe8`Lm#KH1n0X{1sV~!-P}8;YGWmUa{Jm*IUD2{I186Fw_Wm2-kM7a3&+>^Y zW&q`wPIu#`l99p@liG!U7`lCqT*rzhB!P|q>0x&DVUw#R6-;irx3)rf*-qE|-qn^p zBiIVzr1BD3+e7Trm(+Brtb&+|!i|vUwaxb{)WdnMPRT_6d4^9T^_!Yj5BtrO-9HK1 zep%+=ww7gnJ|X9=Rg^jdgVGY9MoS`e{n9u$hETWd_y6XWb-SVx+oW)Bzf$_RBGMW6ZWKN`spYky1G?IhNKD{y1Hqr3D;ibdIxM%H8 zCTGWch8K_D@waY%l$7H|j0ng^5YMl*0ND3xESNknFb;!Nq1eSSFR#GrM*F=L<+W|p zuhG`ZX9cDMO!TCng}p!}5uT9YH5%BA9l#OPr#7Ej{LT^tBG+iDz=VJLHp|ndNvhOR z%(?0L{d?mFtObp=uV!?Sm3c+Xy8O6!j{@==%CjJ&#fVNLR=6^eCt^uW#-oFmqczW7 zrs|T_9PFdEkm_%%dk(kwA6!7CCYYouL_7(E zNmz@zMmYGHUpJvx5*oa~dDB7ENuf0QRbgU!*g2&#A}1+@0Q!ac&=438H~Vgs?-~-G z7nlMJ*7F&Z68--3ZGx01L^U?!LRPhUUvghgz;SZ#PVV^T-rYyh$DAEdf+=vAi*OT= z;cy-C?5N!>f9k~=RUh4n^L7bref+0IZ)*F;D%`OZZi+sF3d8+|;N0KcPNaFPYopbT zifqDaX4>fBYR-AVSFfC3{f^=azU-Ae#M#obR9t|ez$gA@D(+hx2i_HkCS->rYgFC= zL(2`YWbfbtkNGp?H?CPQswhUXNmmycZp4~@I>f<_LJ??{m3IbxZ9VeiO7WVMbDr)<>UNG}; zbOj~l;xG1o(0}kuue5F<{}V7}eE*2b4FzajAK(TQ(V%p@3F&QMg0LTNZMHtvQN+(g zo!B{C;y=Byb^c&MD3axs_1c z)j|#TS49V_)NS7>*PQ6?p=FFT+;CXq0u@NA1MtpO(IE(}J+Xf7d7LG^?J_Q4s} zRk8e#?Tps0AAVfMv^CYxou(0BQZLfSVuL$uz`Plp7I9k=k+Xlx?yk&X5|;d8C}1Py zvTM1airGH%Qhu|8vOYKKgo~}xq^&*OVAlVGviE+Z`hWj`l}cn3+2bgyva&ZJE7{p`l4Kks*&OFo zvR4R&NJuhHvgfg9$j&hi=Y)*IImU4~<9)wA-}@i9?_d1n2gkXt>v@gGAbwQVGZuJP z^^3vh7J|cAh|-2OOHQ2>xJkGGfzed?&#C%gL?4J)315&BSHg_ znE8Lvrr%t}^8v>Ht+JcL zv*22v$vbs-ZJ&(Tez zl?@NTEL(Wg1nhaqYP4xE8zAuZiM#JCzIR9#Z2WuQtbN+VofsG-BJsiL)?xFePTzME zoLyGR*RsiI)ZTw|k{lto`Ufk?M+-2|dMj`dh!yUa&O@3eE)fQ@;j0$hg7m$uB7YW0 z&JP>YoQFP@zs}oUXZ~LN;R&-GiYaZ^Vqry{)c+Z({`)9u*+VwVa!$}~I#?3ou3ZwQ z<}$#u_Aqlqp?TBimFZo9DW~aFi;e&2lylJm1HF;vmN=RT65!uXY2-<60w>*|9zJ-# zv_zM-B7Q!J#s|rp9LSPLswz&Jqb8!b)QaKyK#`n9?3(>OcN&3mpUJV&+{XmoknBe_OY=lLP^gJW95(V_abVlbdN)s#(hp-eP* zAvmYuUY^r5?rm3z8JGZRV|-MvE-k9Q^=G3=+1vYLqLa6qshDpM-?8q0cp?;74sbQn z0H7D{9R3ppzWPbRapq`EvvKl|>Gi8FBZPN6w<7KXu)@T_2U8EG-rFyrr(06Q#(0Le z#!mO$Xa6BsNg`x0aAIvr)b#tj#ihQH{5@P1H+N%Rnw{-NjQY>iOzj6B7In;z$Blqb z#stVzVFRv`o=(`#>?49Dx!c<^rR8m{cSHk|y)t@+rG8C)v%N2TIA$a%PyK3^0j^IgDggi_5dm+2TJGN(Ff#-RGrwH9&8`w_W0VR}t z#uzoSGl56%X}|kYO95e7_y#8<4v3O#eEg1*N&d;yDjx9#34E3 z9qXO_ZV)QD3oR7F;%WSQvvaE5=1WWZBm8)qkDrp{O;*P6MB$WGf5Rt?yqW-Lt`P^% zg|7RQ3}McX^Xc|8#m7q(vRin!tE&~^{T``|L=33A3(?H&$R`db#DirpnLcE zCHma7i-T4>;63xoEbGACj8Qtp@?rPjucx;~DD89+g5S#adr=)Vv+bGSRP^%iJ5fK0 z(Y~K3i}2W7Y|EmRLHP0}POHR6TXK$~wwlgNipM;!DAvjP4tzo%>8i5*D@!gG%x1!S z3BP)a&u1V1ID1TdAUQ~5rp*d50H|td&tAU3nRXql+4YQXB}AW4MC zhN0WgG4E(Q714gXl=eH%qWhD*9ziGz%VO)~EZ)-ix>`IzDn*RFbnGC51Do>Z?mZ>D zw*Twu^a4D@Y2!54@>VO?Yh*pxA0ck)M_I)+ESNRCP)o(SnXA#MA-18NST|ZPr<>SY zL9Y|dtzj;I2?EA$;#}~XM96Q<3^d@oi!<+UrH-;s@0-|vnZNk_?N2GrH^4(c$VV+# zR>gF2b8)7|9_HFcX)EkDfVKfD_1y;ATw644yvkePdQ`L5?l4#DjCS_ko(rurzgopU zy1_j3^*-xOwjTu?8DJ~Bf@;Y#M024>6qJVl(isL|({cDHpy%o5*P+|aTc3htDZatz zdx+Z287J&cnMxI3uetW>;3V^f++%}H3g?wI?PRPe3tOK$D3u~vzRlJ+(dV@H#?M#c zG4_|s#KtK!J%1FdiTSEcZHdel8;v^BMt*580#^Nii61c%-^aI~5>g>t*OL0I!I|ID zKW|lK8Rc$W687}h=d=0jog%z7IF?VSdR2p=o*=eB@Imp^_ywxL9sbTpKEa*Ie{K_*$VEu4QFHdX&%8wE5%U%c;YBACmu(++0$Y4kbXt*@9 zcUSs|s@`MrELU^YSg()uvZ^OT-XS^>jJF0r8Gwg%K%ICj)(qaerLwS2Fib6p$a`Q? 
z!0DVQs(XVWW_?dUfWt(Qc*Jk!AMFj87_h;_!XcRQ*GFh>64)-r7Edf@E4sM*>^qK^ znMZ)rL-BOE*xutEMHe6j!s`1 z4j22|zcVzn7f$&rpzw(Lxqcdp^X%^A!LM{+JHqtS)Re-HZuq?0+PLSWx}LKUZaFj} zf9=Xu_1F57WmIQm`3Gs6==4Wl+JTC(KhVLAeV$+Q7^we^xyp$ZCZ#@H>SKt7J>k~- zM!|P@Uw36hFfi1LTxzl4fX$ql=Zn_?1OsG@rW-*VZTJy)d}V^sn_w2eJWTs3mg)VJ zy%o;hk7S66=@*{~`(N=hv+Hy>>FCgo?trF4zO;|7vf+4F;V9?seT?h{AB(UH?5EtJ zVOBi;RqF$xro8_Gf4|Y)ys$b8x(=ICm&wKMOwf}cy7SsM!aIA!iY%u!l>{yZAN$+j zIF`(=%hd(Fl?6UW=y#L;gN|X>gVx0}aACY$1Q|lQ$%f+BC8-EyVZfvZ4W2KuT)pKT zoeh%((l?$*r)@(U&t)(bIh`>99A0mN_$bDn!WpwVHrDy^`@b+0ER$$|X4e2AMpZh# z4MQ}%l1gDxzZ?AAn#U~_R%pfIH4qTGPSOz0Z}8UjeGf)ATe(V;4Po=HbkyAX<{F9@ zsypfp*NULVgFH8dtH!!=R+oKmYdGC|p^${t7KeA=Yrlx!e5hWp%}Z!X(bPF#0^XtV zO|p!CeBw;++w^ej+7t83hQ30K6P%jEm6W9QFw>T!bh?tKf?q}qJL|drV4XTRPIq+zmE6;GHt{xppXDfxy+i7enNZds=Kpe~ZMKo2r)a4aU7(#92D8_;K9i5@tXdPY5Q~Om&7+V*;d)(0?k|qL-Hli%#9e zr$5IOE*ej|05qR2y7MNA6Fjg*S3x`?wu}H3q6ci6H5o3WC6bOh#6{fyZVE*vJ~wfa zp*ResasCt)|++knkZRl6A3&MCa`qz4c*EK7?G&#^YGy!Wnte8l-q((XN;!LgWK z$=@_S%0vA802}>Qr!+pTlC|#hwp{L;ZDAw*I6Lceb{DQbdZo*WpQo_Xr%GU72(XbJ zAc^+hQo!{E5>qMw{{IWo`tS5xPoWz0+oFH4ctBH6M6;ZjKrPc0A}rEoujuqCy8*9gp)#*ZTKhM|2jnWE z=)hA2P2{fyY-aLm3zk(6wYQ+3a?w$=OTUu}-rGN?`RpImE&u>Gg>J-}YP%(pm3Bub zt}F(3yGr)0gmNmE!UkiE?_xS;TynAV3gyXfZ7rzslxE;sP;juBrLiAv;c5~%(=fG3 z#$5ZE*3MgM!XusCSo}waOEzmePjvsmoYwfU``!D0au$tA&Ow!Emu(f)?BE~8Q9-r; z=w8U|^I66j_UYbKh$WwPfn%)oz8!buqTSQjLv9-)#?AoDUeQLhvnxL!M%9R) zz+EJ;VIdr5$)U&MAM369?qLZo8c$oJQ;_EXi2)jm~957H8km#Y!8y1;@Lt9 z@>Tj%gKfw;5cG*wt@#up;NzIbwY;JJuDYQw${BQ9jdx4?B5e-DOw)qu5yIl27m1^l zxNGcK>&`55h;iA=0j(0)0&ll9>t8|jyaA!3+(*-JJk0L-)XCE`#d(U#_`ef%$xRRk zP{&}*Bn!-YSNhI*|7m+h#aw$9Hz~VDh=9cnnS+b6&EnVZ3)IQKo@?RJSuIC0NkgOE z*3z#5lH~aNkukA#bM0ndqLW5mnA5*`Jajcv`{^s@!^Q+M?$%E98&}+EtI#B2hZr{r z=qR%3BxsrfrRSO}De}5q%dBFQVsqT0bUh=(2i}*)oBnD7^B==)XiO}t2!Zg&;kkNv ziM}!C-uzGI)$VI1HPYSs@uwxv*P_ns1}WFCWd{6d!sJ&tUaNmM()Aa#GN8-Z3-oRP z6aMc=J8-fC0|wY<)<+fP5>`4tv&hSDN~}`VWb0An>=6NX6u6($`LgUM?H?@;E3s&!Hk?h4PM@bfgE)UG^c78cADuGV9IWAx zuIDXn1NkkKD(?8DGlbfT^s0^``jNOR0fRx9Q2T6yH^)%2r9*f9^p&5V3tw93D5QQr zx3v61!JL}hDeD8js?8kEHEoASCsprr^foqekm8-1bRlr@KIX|0vF9)Qlg~eTcHS$# zvvhs|_!=nEq_ejj`NPzKuGD_@n%4!wt!nwI(y1U}|ZTjml30_PQ3L zC1g+$jT$v1WRa3A;%Qe2I(^j&y(etm09#+kRH>2U^F>TzjlY}sf-Yz~`}I(_8t-l= z10@)rj$>JMfI6-&F38kxWaGZZVMI9ltF}@J590TlhmL`!P53uLES!iO-lc+~Uz2Ml zIyxaCJAYpQc4_;#L}0g%G946>$4|r_b%c&!dwP^Qa-Hh9N{7b(S-DF+=9DYgw-qKN zog`5Rz}gy0AbZ2+96*Qs0aU3L#J747Ds!+O#NGfXUV5#ZC*+h@dC6C)XZN>r8ICM` z)V~PH3Km(ekEshKEWf;5&i~+(c+>kw$X{Z6GnGOa*~kk~9-Lj2`=rTYiu%maGM}@H zDVU&L#y{q{w#>AMQ<*mWz;^p_Y?_iNy=Y2h=B1$i7ync7_eEYJEn?b{%ni|?J}_H* z)cWb%nQXGO$m@NPZjH~Gx1XhwE?`5=SruX)XdnKhRX6K|xD=iM!ITbD_o6Wp8naj1 z%oH9Gz$wR)o}KZ!zo$6lor+QO?Cn!V*RA$Wb;B+cUgxlaYLJf0D7u6V- z3EAfk2NRn~_V{U2d{csSyJlV^JN^fMea zlBH75W{WduD*&ZIEXL>VBG+u^+nwDJ%L}zk<;GKG&1QXTwqI|!rDS!nt`_#R?Z+1? 
zG?G$R0e-zGa zEnn#psa$TUExu+3F@1IoOp@Qrnave>)4#L8u}Hi~Nl7p-U?b0HQPG|gAB5$OrD;Mn zrznuME7<5SK1_>z_ZLLXODhL&7PXG(UVsU|)2*TJLpL-g17HtSd|W#xRwtoVIZHId zb$f+#`b*xVko^5h{!v9(gx+Os;}@4j)GieuLs2<7w9b7wBeZ?`F+emOI(Z;DPP<5n z2A10VtKsdotI;-OBXfBk&!k6k6OdbUAJ$V2s?ISq3;$(TL+C$8mHjZk6wa&ow;=f4 zmznI}_4b={$z&f=Zy&PhYwcQPI(>Mprpj8!B@BnfCOiieXc=vcxbg#u8eu6Tiz)`<5=&`u2FyI6s!M8R zAlThZvU4;}wi7)q(6R79c6a|of&@M@`hDF#8}=Jiubc%a8CZj_14M*Q;TcVpfI6Jh z*K}qMBj540>*|Wb5~68(*?)sf1IcFLSIkJVrBIvM2L4YXJx*nr;BK-0f=qJ}k0lkO zt1mAL2ENns4wA8#NjB8b_;ws6RAB%p%)+}ZVQl}9%)6R?oaKpt<^92ivE5v+nNf3+ z;=94k(bjtzhPzc9QI~uLWEliK6LnTUgDz8~G3l57anI-VsnBklM&MjGO>^c}c*NeL4Nau*Rl;-86*_o03tQC%^)nOt{8%-W}&<~(wVk^gmrzvg~ zUTlz(W%he)0-5q{gF~l%;oTGFknM&i+q}jeBEo5L$=a^|ucIR(lMO8A8*myuXVSFV z$`$=`@6fASW3$V>v1vTNxjdgn(w$RUJ*1)3MWAX$80Al^vV1!rmNhn!@Gnzyzx95L za6)36GS~f`gmp(FT+XQqbJ+Z9K>n{OxAR}|{C#B4#x_uN*%d0?KuY%fHdYaCxIG(i zqhs3Fvn@0s?F&yv=}`C3Vs`iVT}XesEb!)CC%Isem$&)QvA|7*lk|0C<;+ylkHKIv zZ|70x!IVnGmnK6oxAO-4>wn}g@1!*SZvsXSnXWuA^{hQ+;l#k}>~TTBoR~T9EX14s zXeuO?-HNrWc&BkRAj;Q7n9KJ01rLW8FE4kWZ)HnF*E7Ux^OCZR@hy!r?Pgdl7d~eY zGYNLF@XR51|H0z2NRk}&)P=i-vHR5K1n$@sPRFNtjAC)NOQnLD>};0ulE-%vAf_Mpf6WiyLe8c+Jz96kqC7+1 z@2>*!lSW3nwE5fTmH7Q0>`l#vX{wul(mL8qAzZ)pD>7t&;pLr^1WnzqBzyhNIi=)( zieq;MxSCtrTi=OV4wOj!8hk~ExC)%dnx~E}E{|a;52~y_GH8{-dK@8?~#O0@}8O*6f)bmJ}iJJa2K~HA`nb6KrAj(PsAqY7scq zB9jZn@%C?6a2pL}sQj@9XJ$&(Zho0k&W!J*QdWO#f9TSjfApoq)&R1%vh0ufM05Ip zUdqCWBc@H10t(~XtXy3P{KN$OL@!#uBBZL?jY`755D(7W0(^asBG^%ms;71v zei_>I=R*~)Z52xXQ$_zub$9{DEo%XrmY?8*hcGT7;Le>RK}(DzPdp~bWUr#HEJN}( ziUZ2Lxwtkk+r;gA8Tx!FIZq>*tRM>`)?4D8k89!I{ub4zxX*$&Ly`KfNvE_OV)swr zdaOi&e5J^dVya2k+!fNVEqKp;Q4ST1?+#=FKd?(Xinn|J%iwQ(YH0xbde3r@-UUpUR6Jrk59nCPXytk@K7J6wa+2RoVrOci%g2 zi`EIy>4fOfL^&2lZc|YDWDZCO7wzxDnV=)>=DaX|BK}zG>6UorqD4c_-lHb719`*P z4|h&{>0d9+{{mmrSuFtshX6{dQDDd#TyPt94e}ZSyi7GkpKB-i9DQ;^xS(_#Th5Aj}<0gbDQg(J#y|PifDSVh88Kl>^2YT2jM4C zuv(18wSTm$_}oOz$QAx(<;aGOw~fhuelNL$oa0-XF89(FD{X&dvWKnUcac3 zVZd_ilQ6k|=TtT60aQDMV}e*kupfXk?L_7#Qj{=$uH?rAv(Z$io>|a?=`V76>@2uj z&m6i0E@}l0i@&$mT;jf8v@+XC214Lz=~-jv4tlR4y~PV53-rx6mNg_jX==3Ft}(Ja znGzInt~I?ECKxN;YJ6_+lY%j!|GX!i;!l~N0Z1PH1P5P}L?m$`S)q<4J7^ZDH?+8( zs?l$$m$A*O+TYQ~KFS=g=?*hlWhMaKt;zqbo6=mj{f=l2yHVAgz&Keii_6B35Nx>EIn+@f_=Ws%)?M3a-8wS5^Au0z(54Vh`*|@x2z)P?M^iyxb2tVU2cj z$^0KP96q?`nb<@urR2Z*!}FxM`-Z`xVoCK8W7PM9X~Ym9op8FjI*4lkauCg~R76h- z=oONRclo`LYiDLYg<9}c9$v4_QsGbIPZ4&1LXhy{rL!vqSy<1v zu=AvQr1Syx+&@yT9t}-#w@{9Io6y}jp84FMm_gk)CUnos+E^g9%ag8b7+KVgAt|~h z{P`$`Ax6^{f7aURB51JS^ z0(cD#ghAQjveOo7J;ffc8znTHA*fHc%C0c+)Tp@sB@zUvwZs?EQ6y!G371Zbr`I1w z=V>td|LQ8Zcqi%6`S6QFy!&?Tas+G-3K$HuBr#CS?NAtJ=TGN9C49k6+;TlZDc$~} z{&d5O?=C)cvMMMXu8#6LI^DCe$dhxfr$s>yv~DJyM)d&Y-^wC(MFXE!TurjKAb^wn z{q{?LHvSHF;jL*f-Io>#%aaU#YIjxCs?#M#IIVfYo2*2ecfCaYR&|Ud8M@CZesb4; zuV3*m^7uznM}z3QD|hP7%U7wYU3l`^_WAx_#DQ3S!>Q1s0d%1X-mOQw;)P^i$3Hy7 z{hO)&oP*h0uitvw)buC+4TrFO2KrSpdncxgwPEZZ9B*S08&In zErbjy6#%|@uI;DQThNPjmS6$|cjT1%bK{Owu~A=Rb(aWEDcq}0`S1@{nq5gs2zJ9Nb|=wYc+%}p z*=4%JO9{R7A|iQbo(=@VjL+dOH~aW<${-$qlR@oSK5Gqqj9Z>a~LOcyHjS(eCc&GQHjA=LiAHZ{JS38 zH?pSUR{8B2YqFfh=C2&&4YM*Hr8zqa|MZ@Y-mNd~$ie?{N#iJNSX0@HP7B+GvV^g} zm$r3SNTJvBwuBjkg2zM#uQL|VO%1h7@K$VRxO#rGa^+mx2hzI79(-LIKwZ=s&36Kx zGN=09XJJX7V?}1<1Cyf?#Bz99!v`o?UA&l2WPRFvAtw;;!OsYVu^1g7-lOwIA5B+( zE?hBr!KhDO-M**OSE8yq&Z2edH*c^lv(s1wY)pUA)S=e4&2+#T_v*o{<0# z`%u|?)zv=SVL~_KCix#UqzLu^Ik5fh`}JG;Y3j#-s0mF-4sU0{4=Ln{cbM7in8Z0@ zFo1fuuIQKhDbD)*=cMWsjPF+PUN|50+G#|yTB6M!D6lY5iLuZ|b95?_V{1fm9UKqB zTeAUc3{qJDo@_G(bp#+PePL;}3;I|=X%!Oq<6(jtgag(G@9Z?@t8cwuwxri7acd7g#d`L;bR^(vD=L-_#M zjJZKWspX}lFtQx7KY)qkX-$w=3@!yUfezl3)xW$i>R6MupK<-J=xxzLhQ}%EN<0tB 
zjrXZ=moX9U+XI5GgB_D#ao`!yp=knrA_t5yTwCW_IK1D!wfex-WjXx&!JN+S*3#0* zU#Y_9d*pk6=oVkUlAIBymev19ch$%BW=!`AD?uk+3aS+%mR9oJ%L;YnSxXANs9b(> zLSERzgPV4|SW$x0xc0cYZ249hmAP0ZvnTq#*ueJOG1#k(rNQw-smS^+TGwj(%fJIJQ1q23T6TR)@#SIEXKlfqIgn$ z+RC_l(bC77nj7|6>pD^%KMYwV16IMfD}e4JhyKhyML}%6V`Qy>i=DLA0j@&6lv&{^VA|u(|}^qPA^P;5+T#X zuRHttUwO|B&C4lUU91gYKOX&g38np|Lt#x~5E+jY ztgldFI~U$$Mv%JZKAtwGSEpZC_)O|TBH(E7yls|TvIWjX0@3;Q5H`}OjnBn#ro6DbmzF7Os9|AkqE^Byrj_47218Cx84azI1D7Qik*A=q zFkyfzyXFFHSRLnNKTii&UUtQ7AFTHnG5g5>>N}!iI^EQxwkqoII1E>jz-#e+XbzID zV-G3c6o1xGI#T+EAo8%zIEEoR1wi`olb=DAPFpQa8CP*<&63hSx@K zU3>@a+QagQm+-31!~p!>fX@9%zw(c)R)5Qt>JbRPsty+bqqW&iVW58R1kt9P$`k!G z8KXlgj6O>@q4s2?{x-TCGU|k~-$S%${^Y-N$>PGHYqzWED)JQ(C}985Vd0>El@Jd% z3{H7ciT~E4O06SyQ!?dT(?;4oHZ`fz6Y&n8Q~?_@6<%%s?$|nWYkCii2V5|%dFQVhmQe?nYWk$lzWk_E}cl)yT@maOV_0$g{#`lJ7 z2P}R}aGsU{w-+uF0jL>6c(#5xD}{e%Fuq;X_qsEBr}e%)x89Z9JUveTKQdwOGtF{ap)^ zjEz_TBAmH2aHwXm8^pFxD-N`#AtArEc zU&Jv1PDi=&o}r&AO#BZAV2P(&2g5u7ohCaVRP6^U`f!Urdt*^sMPPF6Ddn~$Ei4`?ea7M{#*X0Mk$<7ZKwJa>lJO>O|IyXv z03(-y?&G3e9L8Jc`NZM`HVwF7tZQm9q}L$pj|77r1#*^ci%!Yk&r;^re!LjE86&jP z%rJ#q{Rp5#X`)a%l0o#UPJ%XlC!RVy>p9~#lWX~-r7<`hdyXyZ#lfW!(CVN_-oZW( z)R=VaN!F!U%+wdXbz&QYv!Ku{Di-Z=Yc7i4Z1wr?GaIwNtiEe20i#zw|9ZjB`>DGa zCFj&h^6!4Y;LzW4u73~MP7#M2e$Z<8_NEKIs7DezLAqU@rY#XJH+R@Yj6Cm+%4vAs zUA3=t$#|w$AZ9(|{mmSd`BcGyK@hN2-=q|A?-u<>=jQewU0LWcio`&~kW$yE5aa32 zTd;BUlU@YVN5^&cEiG8TK1euaJ-?_+CAQ7MN$1tj}AqKG2- zYu`u_LUq_EDyua2^O$O9rGoQHsnCUIt&o?cvF<;W-T~@E9d4=(aeew#c}+BWcQ2FZ zr*B&gG=S}ry&~|VfW|%fmzn_M?-tMXlv1%Y3)6hrQ}c6-_)#akE6u_0F~_vm5hq5c zPHR3*TNXA~RKHU4g>(jD^da&ssk=(VI*P;tVMZ2&7-M6>8TYB z7HM$0;6*T}V|W4wFj_`r%K!Rvc+M_(C1pfFUU~g#_@L{^RQlW!ZFIvP7v+q%J9zlF zjQiN%qP_)Ojew8uj0sg>K!1!}2~D_js}sqcVx(HJ{RE1FsF3wSc%-C3bDSD4rCd^u z+`Sx>mI`*AAAvC>WA|~KA?^(QAcieCBZWTT&STeq#MW1xGa+Bz%6IL>`zo)$k}2PO zqD-ArI`;7N=cIkJi^ut6+`*&$A6+C|zOIr$yhlmnZQgGSlhCjVc7X&(m5+`~<~XaacF zBI)=zO`S5*%K=9xjH!$Gh=xO&AziP|++0cx_Wcr#dOU>ud=vcx0-@{W#tF+4S6>`v|el z$xoyF@~g^v1%J~3CzKTcACN@}MLO#oDj-{n3;~t~!!YRHte>fM|89wDJ(k9nOQ@FW1a&g`Cq@8{(Dc_y^h8Pb^d>gzdJGVI)tiYywRP4OPVNr<9SE zE8GJsoY11%dPX6_QJx+=zij+`*}L+n4kWKQbXTiVIYq?2@M_vPO2fE5YD@ZW=8Edf zotmA9XDKP?Qq)fRp@8u7MJbX527%&UT?Z)Cf~x>~iGvikawe!ZVUtGeL80dPGam(M z^%$Gzg+CI`Y$w>O?B3CPG&|$l;a=C#-VjYSj^AGsvoU|Rco0AyOWBCUs1#Em0R*(i?3S0%iO^GdboUFY?Cnr zOe%mn@)5}h*fNZF($BYZ6ByR!ic9#Z9>=Om01sJxW5KpKm8ltEJztg!Y~a^uds?6J z2x4dQ50e>9-J#uoV4clp?^K}-EIPQ53hF!HCcqnmGg%+s`6Z*bEZnI3K=4xEEr*ik z%)8_NKG8?`jt+VamPI=gDv5AB2w4A-1-S=N?2x?ZRMg61d6B5J!1v2TUJK@zMXwcH zg=HUtaFc-1fF2>fZ-NOLK#~Z#4!G&5Co?y)k7Pvu6qB;7RJ-iV`fup(CBuR6ZWJj3 zla4}QJ4&4xQlV@mP@&(?&TBclHgy~}TuEp(5iF1tIhDbt%MGJ=vPaK3+MHAjfPAF) zX%b?R6l(B(aVd2q4e~q(y=GDg?NHL(q+PiUxemWT<9Dv|5e8rH=SdgvBWsrM9Q8+w zk-=V#S7|__%6>$uT1^^2_e>}Fm83`oUdD`EwCZBA1(KB$1B^!juhHzCs^PNTLd+x& z{K27P#g9!=lgtY`Fw7UO~`uo-;CN11uri__$o$OoaB~=7u!yr=3+ipYZzVA?O=Bz zx*ozaCg9x)Uo){p5GnZCQ?gsTZd<{vwt{o6azKj_WLbEb)c*Z$N&kI14cH#=BuV~; zi9i{t*}+8&;pJnGa2+ej z+KVLYeNqg5n{1HW$M<6fcYN!Ak#jk&hgbC4(#LZ{kNphCBL*aY=Uey9Wi9|SyMHnz zOZ9Vsm!52hY|j+;FghM+*MWE<0$@X)uu{4DSXoyKB5t6i3k8J*dv= zl;X!d#lM|q2fLZkwHn`;NB(k+2MXPu2Q$IsD2%`#^eTKFLY6&q9GU0&C-ldEqiEpSe zP+PK@)4#i ztl_SZ#fu-@#Ak$fn=G1CmmB5M^$P5rSJ8a1+5hMSr7*xa4Gcowpo>TOu?Gzn9I3kg z0S{e7oh4uVa?gmn7r}U?;Ege7JQ)J(qunOhZUVrj1TcMh@&m%jD;(zM&sspSy?*h2 z5PKzZw1d$tUc<%ZwRWq-ZW*IaeBN0=)#Hdxd)LO!oQLU{0W0$3M*Bs;@2SpWeyqrv z;zQzCO-FI`++HgZ@~W)fy0%od6c{n3H#yRBE*tCN)_Yg;fDw@j)R5Y|=*A4PG)ZP) zT&$_SfuK}+wXfqE*W8+|(GNPNJ#7KfBC(Y;=E)WhwO`d@TN8Wx%HVr=t$$xrD#(Z{ zbW~(FJMP8a1DTid_c+Ud(#j5)x9SthiQs9dB%U^iJ1dWu#RW(m%@;^cFk5MgaW1`b zkc>GL3N75I`&Pgnt@h<;=Xninzi6kL_qK%#P7GmMUcM4l)DJpUk>vuysv-HEs 
z;N3Uif5)YCc9j6GOtEz2eh&nZsKcXu-G@)7&w{=ek6GE*_Y++l{wB8m_fXSNO}q~- zW%bIOGx4Q`#=X-{59&Kj6+k#+ga(?`zGj4nn-OI4THiORhb5QKV;X8>MZ4sUKPzU6 z-}6i2JQYsE7DqMgz$5RVZ-dvrwiaZ7cW3MmI^cMI=<%)@kcMQS)1RSMA$OJGM>QK2 zP_D^c1akEUoO`pmA<4VyNT=Sp?VI>*W;82PT!06P~#IH2gd?V8DaUdE}loJ`Q~5t8hK(-F%GRXk?naT4SWUYuQhdH1=@+o@0XB<19{`oO_$%^xM}T5v zT6SO%tw!TL^Lb-?z=y9jQz0d0Tn^|pIZ4T6-*EokMs2!T8jH8k&Gu<`XN>^P?YtSw zo|{$;LVlSX%9pe^1YQp7$NF^A@Iz+uN<0-pSzs|qh5YU63 zK22uDng?drn*+YE6CaVQIRpoZU8_~kpO4s0#denEsT=t?>p1ECSIAeKYv2Z@QZzGs z^&6UA(`>EF4*|8g&gGP+KZv?IGPn1b71`>s^BHd*cKyT(Ee;eR@s~;GkSexCmvp2t5+sY~KAYwI;=5_@zs`R2u%Js*$ zY)enVfI;niD?GGm|Ld8MG?428KnnbzX2Ex#g3jzc!Ns$H6?H8hfn^wAL$Lw9Od^FJ zUuiQFY;n#)ry)qq!^Yf#-%1@Vfn|G1m% ze#$<|aC=&%9beSb2?ZV$Q)-gMpH1WDre4=2pZNpua6x<4X`jFQ((bimA_Ac_#x>EG zCr!)x*45+`ecYbYbXCVhB;?LWbb_dTzfN614=54*BR8?#eoZ6biVv8b#tz{wx0*s6 z@m7x)e;v7JBS1Vf5n8pi4OWxy(icxZh#zXQ*67=D%pl1mI4UY*#KMYzB*Tzh=+v1l+@bF+L7C$ z?hG;12JMSb>v&=Y;LW(R<{4j1019)QAFVg~vr><~$Y85}i|Ms@qnb&9nWeY8dAm(d zT)Qnw|DrRqr~aeUkvwU(b)8>4IDTt==mWrOWi=uNPAFW?G~@kVIGJDMKe|cwc}L*e zhghO(UDkxzTWbo9ntmA_*KiQ~NJeA>*dHC1c2SGoF_rGR1xW1#Lmb*MZf}iaI4fj! zc4C6L^4nQ9q!N0CIUJn^Qyhc)&(%F{#G>PWvZ3}3c)L!B??bAh+6)|Anv1_XzJju7!@=w7oc_IaWNPy*CSsqjgo5i1_mAD~&S&|;|~ zSdm5{^nBTVGON0`u9Mq=9EEQQ7K{|i7CPVR1$>H$w2y~~zG{1`dE{!cli*yHlXGdq z&sZT>=aF=)Ykff6e)R!%u|BXN8rS3+H5+!#xhD7dlk|Jj`_{^@9i(tfZ4cyCU;Y|v zt1&@HQX2qE(2~d0tTmD}zNm{Eb@cQKKLnDnzw1|xb8E`rXXo8aT^LG*prT^c~AL*T6VCH zH^NlQ+}xh$ck&ijQ8sG&P#fqp-*|<8w*Ub~1NG>Hkj6u(sd6oIRM|SF$+5=D9c*Bk zQKPfqvrf&smpQez;cq`Lbwilplk@!9mMZ2~V5qLHYW~ zEP4s?6}R9M5_HS1TIGALf@_$QEYd>O%%TW@=|-+^rs%kXUPsPVeVl2Kq^fWKy9?Nd zWjbi+2>s2Yvdn8T6q9<_vghB12&%8RoF_jdU&wKFxR#I-rS~06PtP;>kU1C;4`2P6 zQ{hI|p=dSMC8<7*@B2N-{KW0*74J7GtgoKfckF<;JLMok7mIo$CAzEa%?W>r$Y<&H ztus|=G5){WU^5aN5#uo$Z(+tkDOc5JXM7sex8+D*kB(D~Fu z_N8!aADRuhnh&pcEh=|0m#zvS9gRZXN}PDVm^28iCtj4BCLf>KkT3U^cauSsTP||f zr1SM-{}}xqdFdgsGz8W@PRApN5_sfmxQC$rYG;Lp&%2$_L*<6=hHb7v(LOx>y_=qwD``1bV3EI5Vdu6%3D`NP9xt{@XhB9t>j2o$#8;QaP zNIBlF3%Id;bM`Ply$PgLgcEsrKI%lQ^4rLrk;CbqS*`HcBKMI7h)E}~=s>7x1_mLltod)^{=|FO8&Emf!9qWD5axR`iej>5l3LyPCA$jMzE1y-kw|C zQhuPElV>}kxPSQU#+~9n{=S4VJh~sm0dTaEFUxZUKQpUg*` z$GfI%QSp!r?Wg_bmwTV!22~AI4GV1QOL!wH)m-%Vyw9qYmGZ6M|DDAwMf-o*2V%=8 z=uHFlyCwqw%QGiU6KvM*Pdkv_Zq==9=Y-LFENA2E9o)k#jma4|m`f{twYup+L_R!Q zpPw_)h7=LC7LXhoshl48>_9pZ-_9+~9Ote1d#b*xEh7KGn6m}&ejJQ3o1z zNJjTb1!JB8YpNcJg(E{%NfQ$9$M}YDB6sgUXPnlGkEdM2>)^b}EZgekuP6$uS7ejX zY@TfqN29SbUm6dRg&Y)Ay_%T0jI7&!+O~);MMQj>Z7E^XP>LEP*d5i%TVy+;bF2?q zL+dCYbkGWo0XBpq3;2LIJ8v$Mrr;c=)!y@bv2(f9yvBEoSXtHJD5%;V+ll~+H&QdsS?lK9L%^^}^mKYSmKnQl`&jm3-9b}#(OVrM_!Cf~(49#N|0)-$ z4WH}UMt1q)ux*K2UO*a1iE1h@NxractYB_Wx=q{jmtW}34z}`QK&V{|CkbsQA;e#q{t&3W0#l=0geBN-2BgBS06OviPkV0~4fPxU z4=Y6yBDXp z|McMZ;C}RbaNqYi_k;Vv=QxLRW^6upCHgRi(7zExirt` z+>E6h;pXAAg-gFJk)wZ7|5VTg3)-M0+ml?hOOt%chVQNd1Ta(qHi#OTPXJ9|+tnlq zGF1|+=_gLVx$wa070ci2j=%}&`eo=ot0!+!HkIXi{*{RE1DsfqgplEG5yGrJu#!$-R`0MkNHf3qQzV`UZ^MFeOt zE_t&F=jmFbH~Q@F)i*S~E!*ePn%C|~3@vrF_`8`b6WK3-p#2#apj|-9QmX)NS~0b2 z0U|(G8ohz#Mr`dDR5du*+{-Ex+T>ne$#oED(TuqmmgmGvu;|(i%w$g&x)QpnKM#KgM^J5X9vEq=DT_Ml?a z?{v!Jl;LIAK)N0txD1E*kPK>Na#VkgLema=S8?_A zx2vJ+(7V8zt3PLqcg9EGxog8&(AoB?|J3)W(EYxE@y$t^3}R|tC=SU*4xObYr)#01w0mr|3pxcx$5> zpHgfsAHv>ZFsV-=j}(k+bCW)HJalDm!YgBzE<}@}jstW0TLS$u&pUsD;@cuSTSLK= zps$Y2Q-(LZZzMLYFBKQDyT6Z}E%G!p*~Pp;NiwQ|ykO7axww@UhI&`3mI))%bo}es zT!_YI*C|n8(rL-jJ{Nyy6@OD++*DQKsO0G8;O{cA&I(WEYoK)8h zGGhWSM=|Ork!K&uVUMU>>9AJM2zBSJ>C2tU^miQ=9M9dG|*d2iDR z#C3=Q!cuH8upL1=G2C>OGDKZR?GA!AZ(jQ}XiAB@`r2){DBI4SHwBniPh5hTD6KV?z$4gc~rqv?J`x> z9eOL%BE_ktF8xg7ha!g@gJ6y$ttz`tM2$Om=%rPth4BmC|XtssP$=3MsZW$%sF(8*RA 
[Base85-encoded GIT binary patch data omitted here — most likely the notebook's embedded image assets (e.g. the TripPy architecture figures referenced below); the payload is not human-readable text.]
zoWIO2o~8{OjL3}oHU`!2(aNsa+W6Sc2=@;93^vvI4GhLP?_1}rAWNsK9(xy5~iy>0rXP}NH4+xhR_SM#s|ZKYxkdK ziCV@^FR=8VIuUXwo85%AZrYT5f4FSxdFA)v{1n!jvata*NpyIvilSi^@3wVeuI>iE zHkAAt9m5ec?5F(&w|VwosJKn5T>;P^e2I?9zDc%ugM5V4p2LO_@)zIt%`51%dd7cz zye#HaDZ<(f`gyPLRJW!$b2B$3O*whg|1SewzR*~cnt+EU8tM(cY*Npia1(vrVCOa_ zNG43#cV4ULj25sAVqqy6d0;t}+$bX>@cbL_r4j!#B>zwS6nM7~e0-q;b!NQU-ps&3 za^2!v_K?CIp8eDT>6ax}b~G)#H8<^jEzPfZd8PIic6XYX`XWz~M5ic#w3!ZaAx-LX zyP^ej)bqQqtP;l3=Iw*86VwyG=(=QeFTGp77SIW?I}&Qk7NwSuLl(@wQ}e&N>YgS| zXZ_I%q1qcX1sklbA)fK+Q8IIjL_3qN;C_yzUa5bjTreY77LbV?8a-eH-#fG_69*o- zfXqq$2ebX143Fq znhg>$_L4IWmU%ge7@QN(x^&5T`HkSO{ zYJ5W;BanG2GpPTzKEGwGTy8YW|6G?#JFKEC+Rto?Lu$05S?Gs2ApOZ3LX|74>-^5- zS>K#3^d)ZOf~$Qi{0%1w;2VaNrl&!^(^*aX^El?W`k)@Mpmz#itCAj#5@r_Q>AX_f z_HNIA{(WV1pz0hg`2PMTx})y!TUxWaE``nqO*$=?`{sYUE;fCK*=sHvNjD${vz#J5 z4^N2@8lyyRvKdLtqc4}%zslu3op=nUzH-*;5E=rTr zA^U~vQm@vbpGO{m{BsNIs-UPjr9kdX!YYH1lUSd0!#PgGiNm1NW?~_B`0xLIQ~%pd Q|Nnpg|JE}E|2O%60IS!xeE=3.2.0\n", + " Downloading https://files.pythonhosted.org/packages/0b/c4/dc5b9d50c15af2ee187974a5a0c3f20c06cce6559eea4c065d372e846b6a/aiormq-3.3.1-py3-none-any.whl\n", + "Collecting yarl\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/f1/62/046834c5fc998c88ab2ef722f5d42122230a632212c8afa76418324f53ff/yarl-1.6.3-cp37-cp37m-manylinux2014_x86_64.whl (294kB)\n", + "\u001b[K |████████████████████████████████| 296kB 52.7MB/s \n", + "\u001b[?25hCollecting starlette<=0.12.9,>=0.12.9\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/67/95/2220fe5bf287e693a6430d8ee36c681b0157035b7249ec08f8fb36319d16/starlette-0.12.9.tar.gz (46kB)\n", + "\u001b[K |████████████████████████████████| 51kB 8.2MB/s \n", + "\u001b[?25hRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from h5py==2.10.0->-r requirements.txt (line 5)) (1.15.0)\n", + "Requirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.7/dist-packages (from pandas==0.25.3->-r requirements.txt (line 9)) (2.8.1)\n", + "Requirement already satisfied: docopt>=0.6 in /usr/local/lib/python3.7/dist-packages (from pymorphy2==0.8->-r requirements.txt (line 13)) (0.6.2)\n", + "Collecting pymorphy2-dicts<3.0,>=2.4\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/02/51/2465fd4f72328ab50877b54777764d928da8cb15b74e2680fc1bd8cb3173/pymorphy2_dicts-2.4.393442.3710985-py2.py3-none-any.whl (7.1MB)\n", + "\u001b[K |████████████████████████████████| 7.1MB 18.9MB/s \n", + "\u001b[?25hCollecting dawg-python>=0.7\n", + " Downloading https://files.pythonhosted.org/packages/6a/84/ff1ce2071d4c650ec85745766c0047ccc3b5036f1d03559fd46bb38b5eeb/DAWG_Python-0.7.2-py2.py3-none-any.whl\n", + "Collecting cryptography>=2.8\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/b2/26/7af637e6a7e87258b963f1731c5982fb31cd507f0d90d91836e446955d02/cryptography-3.4.7-cp36-abi3-manylinux2014_x86_64.whl (3.2MB)\n", + "\u001b[K |████████████████████████████████| 3.2MB 44.1MB/s \n", + "\u001b[?25hCollecting idna<2.9,>=2.5\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/14/2c/cd551d81dbe15200be1cf41cd03869a46fe7226e7450af7a6545bfc474c9/idna-2.8-py2.py3-none-any.whl (58kB)\n", + "\u001b[K |████████████████████████████████| 61kB 10.7MB/s \n", + "\u001b[?25hRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests==2.22.0->-r requirements.txt (line 17)) (2021.5.30)\n", + "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in 
/usr/local/lib/python3.7/dist-packages (from requests==2.22.0->-r requirements.txt (line 17)) (1.24.3)\n", + "Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests==2.22.0->-r requirements.txt (line 17)) (3.0.4)\n", + "Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn==0.21.2->-r requirements.txt (line 20)) (1.0.1)\n", + "Collecting h11<0.10,>=0.8\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/5a/fd/3dad730b0f95e78aeeb742f96fa7bbecbdd56a58e405d3da440d5bfb90c6/h11-0.9.0-py2.py3-none-any.whl (53kB)\n", + "\u001b[K |████████████████████████████████| 61kB 10.7MB/s \n", + "\u001b[?25hCollecting httptools==0.1.*; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/25/2e/485131e3aa113929b425f83854fafc190aa7df716cbeb258c875752f0c6e/httptools-0.1.2-cp37-cp37m-manylinux1_x86_64.whl (219kB)\n", + "\u001b[K |████████████████████████████████| 225kB 56.7MB/s \n", + "\u001b[?25hCollecting websockets==8.*\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/5a/0b/3ebc752392a368af14dd24ee041683416ac6d2463eead94b311b11e41c82/websockets-8.1-cp37-cp37m-manylinux2010_x86_64.whl (79kB)\n", + "\u001b[K |████████████████████████████████| 81kB 11.2MB/s \n", + "\u001b[?25hCollecting pamqp==2.3.0\n", + " Downloading https://files.pythonhosted.org/packages/eb/56/afa06143361e640c9159d828dadc95fc9195c52c95b4a97d136617b0166d/pamqp-2.3.0-py2.py3-none-any.whl\n", + "Collecting multidict>=4.0\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/7c/a6/4123b8165acbe773d1a8dc8e3f0d1edea16d29f7de018eda769abb56bd30/multidict-5.1.0-cp37-cp37m-manylinux2014_x86_64.whl (142kB)\n", + "\u001b[K |████████████████████████████████| 143kB 58.4MB/s \n", + "\u001b[?25hRequirement already satisfied: typing-extensions>=3.7.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from yarl->aio-pika==6.4.1->-r requirements.txt (line 1)) (3.7.4.3)\n", + "Requirement already satisfied: cffi>=1.12 in /usr/local/lib/python3.7/dist-packages (from cryptography>=2.8->pyopenssl==19.1.0->-r requirements.txt (line 15)) (1.14.5)\n", + "Requirement already satisfied: pycparser in /usr/local/lib/python3.7/dist-packages (from cffi>=1.12->cryptography>=2.8->pyopenssl==19.1.0->-r requirements.txt (line 15)) (2.20)\n", + "Building wheels for collected packages: nltk, overrides, prometheus-client, pytelegrambotapi, sacremoses, starlette\n", + " Building wheel for nltk (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for nltk: filename=nltk-3.4.5-cp37-none-any.whl size=1449923 sha256=46610e717d6cb131a88074f3e94e333b82def5bbcfdb22df0c284f77e9a9e997\n", + " Stored in directory: /root/.cache/pip/wheels/96/86/f6/68ab24c23f207c0077381a5e3904b2815136b879538a24b483\n", + " Building wheel for overrides (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for overrides: filename=overrides-2.7.0-cp37-none-any.whl size=5606 sha256=aa7cfc537b65e68f420abf9b73fecccdb114445be38e40f561ea4ba58459605a\n", + " Stored in directory: /root/.cache/pip/wheels/8c/7c/ef/80508418b67d87371c5b3de49e03eb22ee7c1d19affb5099f8\n", + " Building wheel for prometheus-client (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + " Created wheel for prometheus-client: filename=prometheus_client-0.7.1-cp37-none-any.whl size=41404 sha256=5c6da21f656d11625bb3e5b2dbc6a7c40f78d7444814220199e51adb239f1898\n", + " Stored in directory: /root/.cache/pip/wheels/1c/54/34/fd47cd9b308826cc4292b54449c1899a30251ef3b506bc91ea\n", + " Building wheel for pytelegrambotapi (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for pytelegrambotapi: filename=pyTelegramBotAPI-3.6.7-cp37-none-any.whl size=47177 sha256=76263248541344addd51d9255597c5ea7e74170453b75dca88c0e51f29a6e15e\n", + " Stored in directory: /root/.cache/pip/wheels/23/40/18/8a34153f95ef0dc19e3954898e5a5079244b76a8afdd7d0ec5\n", + " Building wheel for sacremoses (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for sacremoses: filename=sacremoses-0.0.35-cp37-none-any.whl size=883990 sha256=7c9c6f14c9b9b2139642a4baceb86e08dfb7951eb5dbae8706f091e919f69c39\n", + " Stored in directory: /root/.cache/pip/wheels/63/2a/db/63e2909042c634ef551d0d9ac825b2b0b32dede4a6d87ddc94\n", + " Building wheel for starlette (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for starlette: filename=starlette-0.12.9-cp37-none-any.whl size=57254 sha256=768534bf0237d07c2b003f4470984b896935bce261b1ee4929217a82270b6843\n", + " Stored in directory: /root/.cache/pip/wheels/1c/51/5b/3828d52e185cafad941c4291b6f70894d0794be28c70addae5\n", + "Successfully built nltk overrides prometheus-client pytelegrambotapi sacremoses starlette\n", + "\u001b[31mERROR: xarray 0.18.2 has requirement pandas>=1.0, but you'll have pandas 0.25.3 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: tensorflow 2.5.0 has requirement h5py~=3.1.0, but you'll have h5py 2.10.0 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: tensorflow 2.5.0 has requirement numpy~=1.19.2, but you'll have numpy 1.18.0 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: kapre 0.3.5 has requirement numpy>=1.18.5, but you'll have numpy 1.18.0 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: google-colab 1.0.0 has requirement pandas~=1.1.0; python_version >= \"3.0\", but you'll have pandas 0.25.3 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: google-colab 1.0.0 has requirement requests~=2.23.0, but you'll have requests 2.22.0 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: fbprophet 0.7.1 has requirement pandas>=1.0.4, but you'll have pandas 0.25.3 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible.\u001b[0m\n", + "\u001b[31mERROR: albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is incompatible.\u001b[0m\n", + "Installing collected packages: pamqp, idna, multidict, yarl, aiormq, aio-pika, Cython, pydantic, starlette, fastapi, numpy, h5py, nltk, overrides, pytz, pandas, prometheus-client, pymorphy2-dicts, dawg-python, pymorphy2, pymorphy2-dicts-ru, cryptography, pyopenssl, requests, pytelegrambotapi, ruamel.yaml, rusenttokenize, scikit-learn, h11, httptools, uvloop, websockets, uvicorn, sacremoses\n", + " Found existing installation: idna 2.10\n", + " Uninstalling idna-2.10:\n", + " Successfully uninstalled idna-2.10\n", + " Found existing installation: Cython 0.29.23\n", + " Uninstalling Cython-0.29.23:\n", + " Successfully uninstalled Cython-0.29.23\n", + " Found existing installation: numpy 1.19.5\n", + " Uninstalling numpy-1.19.5:\n", + " Successfully uninstalled numpy-1.19.5\n", + " Found 
existing installation: h5py 3.1.0\n", + " Uninstalling h5py-3.1.0:\n", + " Successfully uninstalled h5py-3.1.0\n", + " Found existing installation: nltk 3.2.5\n", + " Uninstalling nltk-3.2.5:\n", + " Successfully uninstalled nltk-3.2.5\n", + " Found existing installation: pytz 2018.9\n", + " Uninstalling pytz-2018.9:\n", + " Successfully uninstalled pytz-2018.9\n", + " Found existing installation: pandas 1.1.5\n", + " Uninstalling pandas-1.1.5:\n", + " Successfully uninstalled pandas-1.1.5\n", + " Found existing installation: prometheus-client 0.11.0\n", + " Uninstalling prometheus-client-0.11.0:\n", + " Successfully uninstalled prometheus-client-0.11.0\n", + " Found existing installation: requests 2.23.0\n", + " Uninstalling requests-2.23.0:\n", + " Successfully uninstalled requests-2.23.0\n", + " Found existing installation: scikit-learn 0.22.2.post1\n", + " Uninstalling scikit-learn-0.22.2.post1:\n", + " Successfully uninstalled scikit-learn-0.22.2.post1\n", + "Successfully installed Cython-0.29.14 aio-pika-6.4.1 aiormq-3.3.1 cryptography-3.4.7 dawg-python-0.7.2 fastapi-0.47.1 h11-0.9.0 h5py-2.10.0 httptools-0.1.2 idna-2.8 multidict-5.1.0 nltk-3.4.5 numpy-1.18.0 overrides-2.7.0 pamqp-2.3.0 pandas-0.25.3 prometheus-client-0.7.1 pydantic-1.3 pymorphy2-0.8 pymorphy2-dicts-2.4.393442.3710985 pymorphy2-dicts-ru-2.4.417127.4579844 pyopenssl-19.1.0 pytelegrambotapi-3.6.7 pytz-2019.1 requests-2.22.0 ruamel.yaml-0.15.100 rusenttokenize-0.0.5 sacremoses-0.0.35 scikit-learn-0.21.2 starlette-0.12.9 uvicorn-0.11.7 uvloop-0.14.0 websockets-8.1 yarl-1.6.3\n" + ], + "name": "stdout" + }, + { + "output_type": "display_data", + "data": { + "application/vnd.colab-display-data+json": { + "pip_warning": { + "packages": [ + "numpy", + "pandas", + "pytz" + ] + } + } + }, + "metadata": { + "tags": [] + } + }, + { + "output_type": "stream", + "text": [ + "Collecting transformers==2.9.1\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/22/97/7db72a0beef1825f82188a4b923e62a146271ac2ced7928baa4d47ef2467/transformers-2.9.1-py3-none-any.whl (641kB)\n", + "\u001b[K |████████████████████████████████| 645kB 8.3MB/s \n", + "\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (3.0.12)\n", + "Collecting sentencepiece\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/ac/aa/1437691b0c7c83086ebb79ce2da16e00bef024f24fec2a5161c35476f499/sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2MB)\n", + "\u001b[K |████████████████████████████████| 1.2MB 49.0MB/s \n", + "\u001b[?25hRequirement already satisfied: sacremoses in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (0.0.35)\n", + "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (4.41.1)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (2.22.0)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (1.18.0)\n", + "Collecting tokenizers==0.7.0\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/ea/59/bb06dd5ca53547d523422d32735585493e0103c992a52a97ba3aa3be33bf/tokenizers-0.7.0-cp37-cp37m-manylinux1_x86_64.whl (5.6MB)\n", + "\u001b[K |████████████████████████████████| 5.6MB 22.8MB/s \n", + "\u001b[?25hRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) 
(2019.12.20)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers==2.9.1) (1.15.0)\n", + "Requirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers==2.9.1) (7.1.2)\n", + "Requirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers==2.9.1) (1.0.1)\n", + "Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==2.9.1) (3.0.4)\n", + "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==2.9.1) (1.24.3)\n", + "Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==2.9.1) (2.8)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==2.9.1) (2021.5.30)\n", + "Installing collected packages: sentencepiece, tokenizers, transformers\n", + "Successfully installed sentencepiece-0.1.96 tokenizers-0.7.0 transformers-2.9.1\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4R066YWhTgU6" + }, + "source": [ + "## 0. Data Preparation" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "K9lF3QFJTgU8", + "outputId": "8405c69b-60e2-46db-c6b6-edb3da867270" + }, + "source": [ + "from deeppavlov.dataset_readers.dstc2_reader import SimpleDSTC2DatasetReader\n", + "\n", + "data = SimpleDSTC2DatasetReader().read('my_data')" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2021-07-12 13:54:25.72 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 283: [PosixPath('my_data/simple-dstc2-val.json'), PosixPath('my_data/simple-dstc2-tst.json')]]\n", + "2021-07-12 13:54:25.73 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 284: [downloading data from http://files.deeppavlov.ai/datasets/simple_dstc2.tar.gz to my_data]\n", + "2021-07-12 13:54:25.74 INFO in 'deeppavlov.core.data.utils'['utils'] at line 95: Downloading from http://files.deeppavlov.ai/datasets/simple_dstc2.tar.gz to my_data/simple_dstc2.tar.gz\n", + "100%|██████████| 497k/497k [00:00<00:00, 691kB/s]\n", + "2021-07-12 13:54:27.359 INFO in 'deeppavlov.core.data.utils'['utils'] at line 272: Extracting my_data/simple_dstc2.tar.gz archive into my_data\n", + "2021-07-12 13:54:27.402 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 304: [loading dialogs from my_data/simple-dstc2-trn.json]\n", + "2021-07-12 13:54:27.528 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 304: [loading dialogs from my_data/simple-dstc2-val.json]\n", + "2021-07-12 13:54:27.580 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 304: [loading dialogs from my_data/simple-dstc2-tst.json]\n", + "2021-07-12 13:54:27.708 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 296: There are 9115 samples in train split.\n", + "2021-07-12 13:54:27.709 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 297: There are 6231 samples in valid split.\n", + "2021-07-12 13:54:27.715 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 298: There are 6345 samples in test split.\n" + ], + "name": "stderr" + } + ] + }, + { + 
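Before moving on, it can help to peek at what the reader returned. The snippet below is only a sketch and assumes `read()` yields a dict keyed by 'train', 'valid' and 'test' holding the turn-level samples counted in the log above; the exact per-sample format is handled by the dialog iterator introduced next.

```python
# Rough sanity check of the reader output (assumption: `data` is a dict with
# 'train'/'valid'/'test' keys holding the turn-level samples logged above).
for split in ("train", "valid", "test"):
    print(split, len(data.get(split, [])), "samples")
```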
"cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "uu56jAGJTgVD", + "outputId": "9c9de6f6-bf81-4266-c140-7c8ae0852591" + }, + "source": [ + "!ls my_data" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "simple-dstc2-templates.txt simple-dstc2-tst.json\n", + "simple-dstc2-trn.json\t simple-dstc2-val.json\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zO4CWg0XYNSw" + }, + "source": [ + "To iterate over batches of preprocessed DSTC-2 we need to import `DatasetIterator`." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "piBBcw9ZTgVK", + "scrolled": true + }, + "source": [ + "from deeppavlov.dataset_iterators.dialog_iterator import DialogDatasetIterator\n", + "\n", + "iterator = DialogDatasetIterator(data)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jVU5JGnTTgVM" + }, + "source": [ + "You can now iterate over batches of preprocessed DSTC-2 dialogs:" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1RSwEH3CTgVN", + "outputId": "28fff330-b559-471e-d8eb-bb8abbc8ce3e" + }, + "source": [ + "from pprint import pprint\n", + "\n", + "for dialog in iterator.gen_batches(batch_size=1, data_type='train'):\n", + " turns_x, turns_y = dialog\n", + " \n", + " print(\"User utterances:\\n----------------\\n\")\n", + " pprint(turns_x[0], indent=4)\n", + " print(\"\\nSystem responses:\\n-----------------\\n\")\n", + " pprint(turns_y[0], indent=4)\n", + " \n", + " break" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "User utterances:\n", + "----------------\n", + "\n", + "[ {'prev_resp_act': None, 'text': ''},\n", + " { 'prev_resp_act': 'welcomemsg',\n", + " 'slots': [['area', 'west'], ['pricerange', 'cheap']],\n", + " 'text': 'can i have a cheap restaurant in the west part of town'},\n", + " { 'db_result': { 'addr': '17 magdalene street city centre',\n", + " 'area': 'west',\n", + " 'food': 'vietnamese',\n", + " 'name': 'thanh binh',\n", + " 'phone': '01223 362456',\n", + " 'postcode': 'c.b 3, 0 a.f',\n", + " 'pricerange': 'cheap'},\n", + " 'prev_resp_act': 'api_call',\n", + " 'slots': [['area', 'west'], ['pricerange', 'cheap']],\n", + " 'text': 'can i have a cheap restaurant in the west part of town'},\n", + " { 'prev_resp_act': 'inform_area+inform_pricerange+offer_name',\n", + " 'slots': [['slot', 'phone']],\n", + " 'text': 'can i have the phone number'},\n", + " {'prev_resp_act': 'inform_phone+offer_name', 'text': 'thank you good bye'}]\n", + "\n", + "System responses:\n", + "-----------------\n", + "\n", + "[ { 'act': 'welcomemsg',\n", + " 'text': 'Hello, welcome to the Cambridge restaurant system. You can '\n", + " 'ask for restaurants by area, price range or food type. 
How '\n", + " 'may I help you?'},\n", + " { 'act': 'api_call',\n", + " 'slots': [['area', 'west'], ['pricerange', 'cheap']],\n", + " 'text': 'api_call area=\"west\" food=\"dontcare\" pricerange=\"cheap\"'},\n", + " { 'act': 'inform_area+inform_pricerange+offer_name',\n", + " 'slots': [ ['area', 'west'],\n", + " ['name', 'thanh binh'],\n", + " ['pricerange', 'cheap']],\n", + " 'text': 'Thanh binh is a nice place in the west of town and the prices '\n", + " 'are cheap.'},\n", + " { 'act': 'inform_phone+offer_name',\n", + " 'slots': [['phone', '01223 362456'], ['name', 'thanh binh']],\n", + " 'text': 'The phone number of thanh binh is 01223 362456.'},\n", + " {'act': 'bye', 'text': 'You are welcome!'}]\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AKTZWtm8ZtPi" + }, + "source": [ + "In real-life annotation of data is expensive. To make our tutorial closer to production use-cases we take only 50 dialogues for training." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "UlappYTbTgVT" + }, + "source": [ + "!cp my_data/simple-dstc2-trn.json my_data/simple-dstc2-trn.full.json" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "tTU9yM-CTgVX", + "outputId": "46c98790-a2f1-4347-aa5d-ffc3fd90fb28" + }, + "source": [ + "import json\n", + "\n", + "NUM_TRAIN = 967\n", + "\n", + "with open('my_data/simple-dstc2-trn.full.json', 'rt') as fin:\n", + " data = json.load(fin)\n", + "with open('my_data/simple-dstc2-trn.json', 'wt') as fout:\n", + " json.dump(data[:NUM_TRAIN], fout, indent=2)\n", + "print(f\"Train set is reduced to {NUM_TRAIN} dialogues (out of {len(data)}).\")" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Train set is reduced to 50 dialogues (out of 967).\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "l5mjRphbTgVb" + }, + "source": [ + "## 1. Build Database of items" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "n597CLhqjqcd" + }, + "source": [ + "### Building database of restaurants" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "JjKbIAyaTgVk", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "0cfccce7-a9b8-4a70-9f80-9b3e9e3b98fd" + }, + "source": [ + "from deeppavlov.core.data.sqlite_database import Sqlite3Database\n", + "\n", + "database = Sqlite3Database(primary_keys=[\"name\"],\n", + " save_path=\"my_bot/db.sqlite\")" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2021-07-12 13:54:28.484 WARNING in 'deeppavlov.core.models.serializable'['serializable'] at line 49: No load path is set for Sqlite3Database in 'infer' mode. 
Using save path instead\n", + "2021-07-12 13:54:28.485 INFO in 'deeppavlov.core.data.sqlite_database'['sqlite_database'] at line 70: Initializing empty database on /content/DeepPavlov/my_bot/db.sqlite.\n" + ], + "name": "stderr" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "RlKg5UtqTgVp", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "6d78e25e-9c40-42e8-8c87-9b47e1cd987c" + }, + "source": [ + "db_results = []\n", + "\n", + "for dialog in iterator.gen_batches(batch_size=1, data_type='all'):\n", + " turns_x, turns_y = dialog\n", + " db_results.extend(x['db_result'] for x in turns_x[0] if x.get('db_result'))\n", + "\n", + "print(f\"Adding {len(db_results)} items.\")\n", + "if db_results:\n", + " database.fit(db_results)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2021-07-12 13:54:28.524 INFO in 'deeppavlov.core.data.sqlite_database'['sqlite_database'] at line 130: Created table with keys {'pricerange': 'text', 'area': 'text', 'addr': 'text', 'phone': 'text', 'postcode': 'text', 'food': 'text', 'name': 'text'}.\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "Adding 3016 items.\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XeJMI9qaTgVt" + }, + "source": [ + "### Interacting with database" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2JLUF2b_TgVu" + }, + "source": [ + "We can now play with the database and make requests to it:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "VRCU_MJnTgVv", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "612364d9-1f42-4947-8624-2f74cd2b6457" + }, + "source": [ + "database([{'pricerange': 'cheap', 'area': 'south'}])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[[{'addr': 'cambridge leisure park clifton way cherry hinton',\n", + " 'area': 'south',\n", + " 'food': 'chinese',\n", + " 'name': 'the lucky star',\n", + " 'phone': '01223 244277',\n", + " 'postcode': 'c.b 1, 7 d.y',\n", + " 'pricerange': 'cheap'},\n", + " {'addr': 'cambridge leisure park clifton way',\n", + " 'area': 'south',\n", + " 'food': 'portuguese',\n", + " 'name': 'nandos',\n", + " 'phone': '01223 327908',\n", + " 'postcode': 'c.b 1, 7 d.y',\n", + " 'pricerange': 'cheap'}]]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 12 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "E_InRKO6TgWt" + }, + "source": [ + "## 3. Build and Train a Bot" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "skZd9O3bKHMo" + }, + "source": [ + "The below image comes from the [TripPy paper](https://arxiv.org/pdf/2005.02877.pdf) and sketches out the models architecture.\n", + "\n", + " \n", + "![trippy_architecture_original.png](img/trippy_architecture_original.jpg)\n", + " \n", + "\n", + "The entire dialogue history, the last system & user utterances are tokenized and fed into a [BERT Model](https://arxiv.org/pdf/1810.04805.pdf). The model makes use of attention to calculate the importance of tokens in the input. In TripPy the BERT model is trained to do binary clasification for each input token in regards to whether it is a slot value of one of the predefined slot names.\n", + "\n", + "For example, for the slot name \"pricerange\" the model will look at each token and classify whether it corresponds to that slot. 
For the input: *I want cheap food*, the output for pricerange should be [0,0,1,0], hence identifying that cheap corresponds to the pricerange. This span prediction is then used to copy the value out of the input.\n", + "\n", + "Apart from \"span\" (also called \"copy_value\"), other \"class types\" (predictions made for each slot name) are: \n", + "- \"dontcare\": The model thinks the user does not care about this slot name's value\n", + "- \"none\": The user has not yet indicated his preference for this slot name\n", + "- \"refer\": The user has indicated his preference via another slot name\n", + "- \"inform\": The model has previously informed the user about the slot name\n", + "- \"true / false\": Used when there are slot names with boolean values\n", + "\n", + "Below is a sketch for how the full TripPy model has been implemented in DeepPavlov:\n", + "\n", + " \n", + "![trippy_architecture.png](img/trippy_architecture_original.jpg)\n", + " \n", + "\n", + "The above image also includes the input & input processing steps, while the previous sketch starts with the BERT Model (BERTForDST). \n", + "Novel things in the DeepPavlov TripPy implementation are:\n", + "- The preprocessing is robust to datasets which do not contain position labels (During training TripPy requires position labels to train up its copy value capabilities) - This has been done by calculating Levenshtein distances\n", + "- An action prediction head has been added, which predicts what action the system should take from a predefined list of actions\n", + "- A database connection has been added, which allows the model to retrieve information about slot values from an SQLite database\n", + "- A Natural Language Generation component has been added, which takes in the predicted action and database results and puts together the final response to the user\n", + "\n", + "\n", + "We will now proceed with configuring the model & training."
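The span mechanism described above can be illustrated with a toy, framework-independent sketch. This is not the DeepPavlov TripPy code: the token list and the 0/1 labels are invented to mirror the *I want cheap food* example.

```python
# Toy illustration of span-based value copying for a single slot ("pricerange").
# In the real model, BERT produces per-token span logits; here the 0/1 labels
# are hard-coded to mirror the example in the text above.
tokens = ["i", "want", "cheap", "food"]
span_labels = [0, 0, 1, 0]  # 1 marks tokens predicted to belong to the slot value

predicted_value = " ".join(tok for tok, hit in zip(tokens, span_labels) if hit)
dialogue_state = {"pricerange": predicted_value} if predicted_value else {}
print(dialogue_state)  # -> {'pricerange': 'cheap'}
```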
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "db9_ozwnTgW5" + }, + "source": [ + "from deeppavlov import configs\n", + "from deeppavlov.core.common.file import read_json\n", + "\n", + "# Use TripPy Config\n", + "gobot_config = read_json(configs.go_bot.trippy_dstc2_minimal)\n", + "\n", + "gobot_config['chainer']['pipe'][-1]['nlg_manager']['template_type'] = 'DefaultTemplate'\n", + "gobot_config['chainer']['pipe'][-1]['nlg_manager']['template_path'] = 'my_data/simple-dstc2-templates.txt'\n", + "\n", + "gobot_config['metadata']['variables']['DATA_PATH'] = 'my_data'\n", + "gobot_config['metadata']['variables']['MODEL_PATH'] = 'my_bot'" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oNGj-ARxTgW-" + }, + "source": [ + "\n", + "\n", + "Configure bot to use our database:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "VanrlHZZTgXB" + }, + "source": [ + "gobot_config['chainer']['pipe'][-1]['database'] = {\n", + " 'class_name': 'sqlite_database',\n", + " 'primary_keys': [\"name\"],\n", + " 'save_path': 'my_bot/db.sqlite'\n", + "}" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6l6H_t1iTgW7" + }, + "source": [ + "Configure bot to use templates:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "209m3f6yTgW8" + }, + "source": [ + "gobot_config['chainer']['pipe'][-1]['nlg_manager']['template_type'] = 'DefaultTemplate'\n", + "gobot_config['chainer']['pipe'][-1]['nlg_manager']['template_path'] = 'my_data/simple-dstc2-templates.txt'" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HMXih1roTgXi" + }, + "source": [ + "Specify train/valid/test data path and path to save the final bot model:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "vTUdrrQVTgXi" + }, + "source": [ + "gobot_config['metadata']['variables']['DATA_PATH'] = 'my_data'\n", + "gobot_config['metadata']['variables']['MODEL_PATH'] = 'my_bot'\n", + "# Configure the possible slot names - The \"this\" slotname is meaningless, but it is somehow part of the training set\n", + "gobot_config['chainer']['pipe'][-1]['slot_names'] = ['pricerange', 'this', 'area', 'food']" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "id": "7ekUezSjWGEz", + "outputId": "d8ed0b63-0c79-4b84-ae95-119900989f1c" + }, + "source": [ + "from deeppavlov import train_model\n", + "import time\n", + "\n", + "gobot_config['train']['batch_size'] = 4 # set batch size - Ideally use 8 & set lr to 1e-4 if your GPU allows\n", + "gobot_config['train']['max_batches'] = 600 # maximum number of training batches\n", + "gobot_config['train']['val_every_n_batches'] = 40 # evaluate on full 'valid' split every 30 epochs\n", + "gobot_config['train']['log_every_n_batches'] = 40 # evaluate on full 'train' split every 5 batches\n", + "gobot_config['train']['validation_patience'] = 10 # evaluate on full 'valid' split every 30 epochs\n", + "gobot_config['train']['log_on_k_batches'] = 10 # How many batches to use for logging\n", + "\n", + "gobot_config['chainer']['pipe'][-1]['debug'] = False\n", + "gobot_config['chainer']['pipe'][-1][\"optimizer_parameters\"] = {\"lr\": 1e-5, \"eps\": 1e-6}\n", + "\n", + "train_model(gobot_config)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2021-07-11 18:23:29.41 
INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /content/DeepPavlov/my_data/dstc2-trn.jsonlist]\n", + "2021-07-11 18:23:29.363 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /content/DeepPavlov/my_data/dstc2-val.jsonlist]\n", + "2021-07-11 18:23:29.588 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /content/DeepPavlov/my_data/dstc2-tst.jsonlist]\n", + "[nltk_data] Downloading package punkt to /root/nltk_data...\n", + "[nltk_data] Package punkt is already up-to-date!\n", + "[nltk_data] Downloading package stopwords to /root/nltk_data...\n", + "[nltk_data] Package stopwords is already up-to-date!\n", + "[nltk_data] Downloading package perluniprops to /root/nltk_data...\n", + "[nltk_data] Package perluniprops is already up-to-date!\n", + "[nltk_data] Downloading package nonbreaking_prefixes to\n", + "[nltk_data] /root/nltk_data...\n", + "[nltk_data] Package nonbreaking_prefixes is already up-to-date!\n", + "2021-07-11 18:23:32.351 WARNING in 'deeppavlov.core.models.serializable'['serializable'] at line 52: No load path is set for Sqlite3Database!\n", + "2021-07-11 18:23:32.353 INFO in 'deeppavlov.core.data.sqlite_database'['sqlite_database'] at line 66: Loading database from /content/DeepPavlov/my_bot/db.sqlite.\n", + "2021-07-11 18:23:38.350 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 152: Load path /content/DeepPavlov/my_bot/model is given.\n", + "2021-07-11 18:23:38.352 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 159: Load path /content/DeepPavlov/my_bot/model.pth.tar exists.\n", + "2021-07-11 18:23:38.354 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 160: Initializing `TripPy` from saved.\n", + "2021-07-11 18:23:38.357 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 163: Loading weights from /content/DeepPavlov/my_bot/model.pth.tar.\n", + "2021-07-11 18:23:39.142 INFO in 'deeppavlov.core.models.torch_model'['torch_model'] at line 98: Model was successfully initialized! 
Model summary:\n", + " BertForDST(\n", + " (bert): BertModel(\n", + " (embeddings): BertEmbeddings(\n", + " (word_embeddings): Embedding(30522, 768, padding_idx=0)\n", + " (position_embeddings): Embedding(512, 768)\n", + " (token_type_embeddings): Embedding(2, 768)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (encoder): BertEncoder(\n", + " (layer): ModuleList(\n", + " (0): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (1): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (2): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (3): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, 
bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (4): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (5): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (6): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " 
(dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (7): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (8): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (9): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (10): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): 
Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (11): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (pooler): BertPooler(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (activation): Tanh()\n", + " )\n", + " )\n", + " (dropout): Dropout(p=0.3, inplace=False)\n", + " (dropout_heads): Dropout(p=0.0, inplace=False)\n", + " (inform_projection): Linear(in_features=4, out_features=4, bias=True)\n", + " (ds_projection): Linear(in_features=4, out_features=4, bias=True)\n", + " (class_pricerange): Linear(in_features=776, out_features=4, bias=True)\n", + " (token_pricerange): Linear(in_features=768, out_features=2, bias=True)\n", + " (refer_pricerange): Linear(in_features=776, out_features=5, bias=True)\n", + " (class_this): Linear(in_features=776, out_features=4, bias=True)\n", + " (token_this): Linear(in_features=768, out_features=2, bias=True)\n", + " (refer_this): Linear(in_features=776, out_features=5, bias=True)\n", + " (class_area): Linear(in_features=776, out_features=4, bias=True)\n", + " (token_area): Linear(in_features=768, out_features=2, bias=True)\n", + " (refer_area): Linear(in_features=776, out_features=5, bias=True)\n", + " (class_food): Linear(in_features=776, out_features=4, bias=True)\n", + " (token_food): Linear(in_features=768, out_features=2, bias=True)\n", + " (refer_food): Linear(in_features=776, out_features=5, bias=True)\n", + " (action_prediction): Linear(in_features=776, out_features=46, bias=True)\n", + " (action_softmax): Softmax(dim=1)\n", + ")\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6015}, \"time_spent\": \"0:00:45\", \"epochs_done\": 0, \"batches_seen\": 40, \"train_examples_seen\": 160, \"total_loss\": 113.78551483154297, \"action_loss\": 24.97793960571289}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 18:27:28.871 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 202: First best per_item_dialog_accuracy of 
0.4381\n", + "2021-07-11 18:27:28.873 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 204: Saving model\n", + "2021-07-11 18:27:28.874 INFO in 'deeppavlov.core.models.torch_model'['torch_model'] at line 191: Saving model to /content/DeepPavlov/my_bot/model.pth.tar.\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4381}, \"time_spent\": \"0:03:50\", \"epochs_done\": 0, \"batches_seen\": 40, \"train_examples_seen\": 160, \"impatience\": 0, \"patience_limit\": 10}}\n", + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6043}, \"time_spent\": \"0:04:34\", \"epochs_done\": 0, \"batches_seen\": 80, \"train_examples_seen\": 320, \"total_loss\": 235.44265747070312, \"action_loss\": 57.47998809814453}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 18:31:22.811 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4381\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4364}, \"time_spent\": \"0:07:44\", \"epochs_done\": 0, \"batches_seen\": 80, \"train_examples_seen\": 320, \"impatience\": 1, \"patience_limit\": 10}}\n", + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6179}, \"time_spent\": \"0:08:22\", \"epochs_done\": 0, \"batches_seen\": 120, \"train_examples_seen\": 480, \"total_loss\": 62.12294387817383, \"action_loss\": 14.066139221191406}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 18:35:12.850 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4381\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4295}, \"time_spent\": \"0:11:34\", \"epochs_done\": 0, \"batches_seen\": 120, \"train_examples_seen\": 480, \"impatience\": 2, \"patience_limit\": 10}}\n", + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6243}, \"time_spent\": \"0:12:11\", \"epochs_done\": 0, \"batches_seen\": 160, \"train_examples_seen\": 640, \"total_loss\": 131.84048461914062, \"action_loss\": 30.95857810974121}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 18:38:54.945 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4381\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.416}, \"time_spent\": \"0:15:16\", \"epochs_done\": 0, \"batches_seen\": 160, \"train_examples_seen\": 640, \"impatience\": 3, \"patience_limit\": 10}}\n", + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.5816}, \"time_spent\": \"0:15:59\", \"epochs_done\": 0, \"batches_seen\": 200, \"train_examples_seen\": 800, \"total_loss\": 88.86528778076172, \"action_loss\": 20.939273834228516}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 18:42:46.99 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 207: Improved 
best per_item_dialog_accuracy of 0.4455\n", + "2021-07-11 18:42:46.100 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 209: Saving model\n", + "2021-07-11 18:42:46.102 INFO in 'deeppavlov.core.models.torch_model'['torch_model'] at line 191: Saving model to /content/DeepPavlov/my_bot/model.pth.tar.\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4455}, \"time_spent\": \"0:19:07\", \"epochs_done\": 0, \"batches_seen\": 200, \"train_examples_seen\": 800, \"impatience\": 0, \"patience_limit\": 10}}\n", + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.5866}, \"time_spent\": \"0:19:52\", \"epochs_done\": 0, \"batches_seen\": 240, \"train_examples_seen\": 960, \"total_loss\": 107.40069580078125, \"action_loss\": 25.030973434448242}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 18:46:35.891 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 207: Improved best per_item_dialog_accuracy of 0.4529\n", + "2021-07-11 18:46:35.893 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 209: Saving model\n", + "2021-07-11 18:46:35.894 INFO in 'deeppavlov.core.models.torch_model'['torch_model'] at line 191: Saving model to /content/DeepPavlov/my_bot/model.pth.tar.\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4529}, \"time_spent\": \"0:22:57\", \"epochs_done\": 0, \"batches_seen\": 240, \"train_examples_seen\": 960, \"impatience\": 0, \"patience_limit\": 10}}\n", + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.5948}, \"time_spent\": \"0:23:43\", \"epochs_done\": 1, \"batches_seen\": 280, \"train_examples_seen\": 1119, \"total_loss\": 115.84950256347656, \"action_loss\": 24.607830047607422}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 18:50:30.769 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4243}, \"time_spent\": \"0:26:52\", \"epochs_done\": 1, \"batches_seen\": 280, \"train_examples_seen\": 1119, \"impatience\": 1, \"patience_limit\": 10}}\n", + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.5468}, \"time_spent\": \"0:27:32\", \"epochs_done\": 1, \"batches_seen\": 320, \"train_examples_seen\": 1279, \"total_loss\": 48.77066421508789, \"action_loss\": 11.36803150177002}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 18:54:13.775 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4301}, \"time_spent\": \"0:30:35\", \"epochs_done\": 1, \"batches_seen\": 320, \"train_examples_seen\": 1279, \"impatience\": 2, \"patience_limit\": 10}}\n", + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.5867}, \"time_spent\": \"0:31:14\", \"epochs_done\": 1, 
\"batches_seen\": 360, \"train_examples_seen\": 1439, \"total_loss\": 104.88668823242188, \"action_loss\": 24.79292869567871}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 18:58:03.392 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4521}, \"time_spent\": \"0:34:25\", \"epochs_done\": 1, \"batches_seen\": 360, \"train_examples_seen\": 1439, \"impatience\": 3, \"patience_limit\": 10}}\n", + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6066}, \"time_spent\": \"0:35:04\", \"epochs_done\": 1, \"batches_seen\": 400, \"train_examples_seen\": 1599, \"total_loss\": 75.50640106201172, \"action_loss\": 18.100648880004883}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 19:01:50.759 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4287}, \"time_spent\": \"0:38:12\", \"epochs_done\": 1, \"batches_seen\": 400, \"train_examples_seen\": 1599, \"impatience\": 4, \"patience_limit\": 10}}\n", + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.5407}, \"time_spent\": \"0:38:54\", \"epochs_done\": 1, \"batches_seen\": 440, \"train_examples_seen\": 1759, \"total_loss\": 204.6051483154297, \"action_loss\": 46.61280822753906}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 19:05:42.577 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4409}, \"time_spent\": \"0:42:04\", \"epochs_done\": 1, \"batches_seen\": 440, \"train_examples_seen\": 1759, \"impatience\": 5, \"patience_limit\": 10}}\n", + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6438}, \"time_spent\": \"0:42:42\", \"epochs_done\": 1, \"batches_seen\": 480, \"train_examples_seen\": 1919, \"total_loss\": 36.40619659423828, \"action_loss\": 8.460254669189453}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 19:09:22.300 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4288}, \"time_spent\": \"0:45:44\", \"epochs_done\": 1, \"batches_seen\": 480, \"train_examples_seen\": 1919, \"impatience\": 6, \"patience_limit\": 10}}\n", + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6038}, \"time_spent\": \"0:46:22\", \"epochs_done\": 2, \"batches_seen\": 520, \"train_examples_seen\": 2078, \"total_loss\": 65.55957794189453, \"action_loss\": 15.434016227722168}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 19:13:06.246 
INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.436}, \"time_spent\": \"0:49:28\", \"epochs_done\": 2, \"batches_seen\": 520, \"train_examples_seen\": 2078, \"impatience\": 7, \"patience_limit\": 10}}\n", + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6257}, \"time_spent\": \"0:50:07\", \"epochs_done\": 2, \"batches_seen\": 560, \"train_examples_seen\": 2238, \"total_loss\": 67.32139587402344, \"action_loss\": 16.170827865600586}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 19:16:51.602 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" + ], + "name": "stderr" + }, + { + "output_type": "stream", + "text": [ + "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4288}, \"time_spent\": \"0:53:13\", \"epochs_done\": 2, \"batches_seen\": 560, \"train_examples_seen\": 2238, \"impatience\": 8, \"patience_limit\": 10}}\n", + "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6022}, \"time_spent\": \"0:53:52\", \"epochs_done\": 2, \"batches_seen\": 600, \"train_examples_seen\": 2398, \"total_loss\": 148.84075927734375, \"action_loss\": 35.534236907958984}}\n" + ], + "name": "stdout" + }, + { + "output_type": "stream", + "text": [ + "2021-07-11 19:18:09.31 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 339: Stopped training\n", + "2021-07-11 19:18:09.41 WARNING in 'deeppavlov.core.models.serializable'['serializable'] at line 49: No load path is set for Sqlite3Database in 'infer' mode. 
Using save path instead\n", + "2021-07-11 19:18:09.43 INFO in 'deeppavlov.core.data.sqlite_database'['sqlite_database'] at line 66: Loading database from /content/DeepPavlov/my_bot/db.sqlite.\n" + ], + "name": "stderr" + }, + { + "output_type": "error", + "ename": "KeyboardInterrupt", + "evalue": "ignored", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mtrain_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgobot_config\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 17\u001b[0m \u001b[0my\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mx\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/__init__.py\u001b[0m in \u001b[0;36mtrain_model\u001b[0;34m(config, download, recursive)\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0;31m# TODO: make better\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mtrain_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mPath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdownload\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbool\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrecursive\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbool\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mChainer\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 29\u001b[0;31m \u001b[0mtrain_evaluate_model_from_config\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdownload\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdownload\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrecursive\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mrecursive\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 30\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mbuild_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mload_trained\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/commands/train.py\u001b[0m in \u001b[0;36mtrain_evaluate_model_from_config\u001b[0;34m(config, iterator, to_train, evaluation_targets, to_validate, download, start_epoch_num, recursive)\u001b[0m\n\u001b[1;32m 135\u001b[0m ' \"to_validate\" is deprecated and will be ignored')\n\u001b[1;32m 136\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 137\u001b[0;31m 
\u001b[0mres\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtrainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miterator\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mevaluation_targets\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mprint_reports\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 138\u001b[0m \u001b[0mtrainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_chainer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdestroy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 139\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/trainers/fit_trainer.py\u001b[0m in \u001b[0;36mevaluate\u001b[0;34m(self, iterator, evaluation_targets, print_reports)\u001b[0m\n\u001b[1;32m 254\u001b[0m \u001b[0ma\u001b[0m \u001b[0mdictionary\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mdata\u001b[0m \u001b[0mtypes\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mkeys\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mevaluation\u001b[0m \u001b[0mreports\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mvalues\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 255\u001b[0m \"\"\"\n\u001b[0;32m--> 256\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_load\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 257\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mevaluation_targets\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[0mevaluation_targets\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluation_targets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/trainers/fit_trainer.py\u001b[0m in \u001b[0;36m_load\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 156\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_loaded\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 157\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_chainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdestroy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 158\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_chainer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbuild_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0;34m'chainer'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchainer_config\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mload_trained\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_saved\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 159\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_loaded\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 160\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/commands/infer.py\u001b[0m in \u001b[0;36mbuild_model\u001b[0;34m(config, mode, 
load_trained, download, serialized)\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0mcomponent_serialized\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 62\u001b[0;31m \u001b[0mcomponent\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfrom_params\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcomponent_config\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmode\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mserialized\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcomponent_serialized\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 63\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 64\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;34m'id'\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mcomponent_config\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/common/params.py\u001b[0m in \u001b[0;36mfrom_params\u001b[0;34m(params, mode, serialized, **kwargs)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'mode'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 105\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 106\u001b[0;31m \u001b[0mcomponent\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mobj\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig_params\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 107\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 108\u001b[0m \u001b[0m_refs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mconfig_params\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'id'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcomponent\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/models/go_bot/trippy.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, nlg_manager, save_path, slot_names, class_types, pretrained_bert, bert_config, optimizer_parameters, clip_norm, max_seq_length, dropout_rate, heads_dropout, class_loss_ratio, token_loss_for_nonpointable, refer_loss_for_nonpointable, class_aux_feats_inform, class_aux_feats_ds, database, debug, **kwargs)\u001b[0m\n\u001b[1;32m 122\u001b[0m super().__init__(save_path=save_path, \n\u001b[1;32m 123\u001b[0m \u001b[0moptimizer_parameters\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptimizer_parameters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 124\u001b[0;31m **kwargs)\n\u001b[0m\u001b[1;32m 125\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 126\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0moverrides\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/models/torch_model.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, device, optimizer, optimizer_parameters, lr_scheduler, lr_scheduler_parameters, learning_rate_drop_patience, learning_rate_drop_div, load_before_drop, min_learning_rate, *args, **kwargs)\u001b[0m\n\u001b[1;32m 92\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopt\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdeepcopy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 93\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 94\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 95\u001b[0m \u001b[0;31m# we need to switch to eval mode here because by default it's in `train` mode.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 96\u001b[0m \u001b[0;31m# But in case of `interact/build_model` usage, we need to have model in eval mode.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/models/go_bot/trippy.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(self, fname)\u001b[0m\n\u001b[1;32m 136\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpretrained_bert\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 137\u001b[0m self.model = BertForDST.from_pretrained(\n\u001b[0;32m--> 138\u001b[0;31m self.pretrained_bert, config=self.config)\n\u001b[0m\u001b[1;32m 139\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtokenizer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertTokenizerFast\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_pretrained\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpretrained_bert\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 140\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_utils.py\u001b[0m in \u001b[0;36mfrom_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, *model_args, **kwargs)\u001b[0m\n\u001b[1;32m 620\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 621\u001b[0m \u001b[0;31m# Instantiate model.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 622\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcls\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mmodel_args\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mmodel_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 623\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 624\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mstate_dict\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mfrom_tf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/models/go_bot/trippy_bert_for_dst.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, config)\u001b[0m\n\u001b[1;32m 72\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"action_softmax\"\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSoftmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 73\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 74\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minit_weights\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 75\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 76\u001b[0m def forward(self,\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_utils.py\u001b[0m in \u001b[0;36minit_weights\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 392\u001b[0m \u001b[0;34m\"\"\" Initialize and prunes weights if needed. \"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 393\u001b[0m \u001b[0;31m# Initialize weights\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 394\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_init_weights\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 395\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 396\u001b[0m \u001b[0;31m# Prune heads if needed\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, fn)\u001b[0m\n\u001b[1;32m 614\u001b[0m \"\"\"\n\u001b[1;32m 615\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchildren\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 616\u001b[0;31m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 617\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, fn)\u001b[0m\n\u001b[1;32m 614\u001b[0m \"\"\"\n\u001b[1;32m 615\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchildren\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 616\u001b[0;31m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 617\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + 
"\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, fn)\u001b[0m\n\u001b[1;32m 614\u001b[0m \"\"\"\n\u001b[1;32m 615\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchildren\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 616\u001b[0;31m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 617\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, fn)\u001b[0m\n\u001b[1;32m 614\u001b[0m \"\"\"\n\u001b[1;32m 615\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchildren\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 616\u001b[0;31m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 617\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, fn)\u001b[0m\n\u001b[1;32m 614\u001b[0m \"\"\"\n\u001b[1;32m 615\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchildren\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 616\u001b[0;31m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 617\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, fn)\u001b[0m\n\u001b[1;32m 614\u001b[0m \"\"\"\n\u001b[1;32m 615\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchildren\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 616\u001b[0;31m 
\u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 617\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, fn)\u001b[0m\n\u001b[1;32m 615\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchildren\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 616\u001b[0m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 617\u001b[0;31m        \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 618\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 619\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_bert.py\u001b[0m in \u001b[0;36m_init_weights\u001b[0;34m(self, module)\u001b[0m\n\u001b[1;32m 523\u001b[0m \u001b[0;31m# Slightly different from the TF version which uses truncated_normal for initialization\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 524\u001b[0m \u001b[0;31m# cf https://github.com/pytorch/pytorch/pull/5617\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 525\u001b[0;31m            \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnormal_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmean\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0.0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstd\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minitializer_range\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 526\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodule\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mBertLayerNorm\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 527\u001b[0m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbias\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzero_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BCHybLSa_Gzx" + }, + "source": [ + "Optionally, you can download the pre-trained model from Kaggle. You will need a Kaggle account and must upload your kaggle.json file. You may have to run the cell below twice."
+ ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Pq-U6mvu-js-", + "outputId": "8da1fcc0-4669-4ae7-a9f3-053dcdfa2aff" + }, + "source": [ + "### Optional - Download Pretrained TripPy from kaggle ###\n", + "\n", + "# Make your json accessible to kaggle\n", + "#!cp /content/kaggle.json /root/.kaggle/\n", + "\n", + "# Download the dataset\n", + "#!kaggle datasets download -d muennighoff/trippy-restaurant\n", + "#!unzip trippy-restaurant.zip\n", + "\n", + "# Move into correct directory\n", + "#!mv db.sqlite /content/DeepPavlov/my_bot/\n", + "#!mv model.pth.tar /content/DeepPavlov/my_bot/" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Warning: Your Kaggle API key is readable by other users on this system! To fix this, you can run 'chmod 600 /root/.kaggle/kaggle.json'\n", + "Downloading trippy-restaurant.zip to /content/DeepPavlov\n", + " 99% 985M/993M [00:09<00:00, 120MB/s]\n", + "100% 993M/993M [00:09<00:00, 110MB/s]\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ldfDa9dUTgX1" + }, + "source": [ + "### Evaluation of training" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "k-z7wZjOTgX6" + }, + "source": [ + "Calculating **accuracy** of trained bot: whether predicted system responses match true responses (full string match)." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "INAcWeKfHR63", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 609 + }, + "outputId": "840456a5-1c29-430f-fe19-bb74eff161de" + }, + "source": [ + "from deeppavlov import evaluate_model\n", + "\n", + "evaluate_model(gobot_config);" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2021-07-11 17:59:26.197 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /content/DeepPavlov/my_data/dstc2-trn.jsonlist]\n", + "2021-07-11 17:59:26.541 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /content/DeepPavlov/my_data/dstc2-val.jsonlist]\n", + "2021-07-11 17:59:26.770 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /content/DeepPavlov/my_data/dstc2-tst.jsonlist]\n", + "[nltk_data] Downloading package punkt to /root/nltk_data...\n", + "[nltk_data] Package punkt is already up-to-date!\n", + "[nltk_data] Downloading package stopwords to /root/nltk_data...\n", + "[nltk_data] Package stopwords is already up-to-date!\n", + "[nltk_data] Downloading package perluniprops to /root/nltk_data...\n", + "[nltk_data] Package perluniprops is already up-to-date!\n", + "[nltk_data] Downloading package nonbreaking_prefixes to\n", + "[nltk_data] /root/nltk_data...\n", + "[nltk_data] Package nonbreaking_prefixes is already up-to-date!\n", + "2021-07-11 17:59:29.471 WARNING in 'deeppavlov.core.models.serializable'['serializable'] at line 49: No load path is set for Sqlite3Database in 'infer' mode. 
Using save path instead\n", + "2021-07-11 17:59:29.473 INFO in 'deeppavlov.core.data.sqlite_database'['sqlite_database'] at line 66: Loading database from /content/DeepPavlov/my_bot/db.sqlite.\n" + ], + "name": "stderr" + }, + { + "output_type": "error", + "ename": "KeyboardInterrupt", + "evalue": "ignored", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mdeeppavlov\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mevaluate_model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mevaluate_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgobot_config\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m;\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/__init__.py\u001b[0m in \u001b[0;36mevaluate_model\u001b[0;34m(config, download, recursive)\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 33\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mevaluate_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mPath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdownload\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbool\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrecursive\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbool\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 34\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mtrain_evaluate_model_from_config\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mto_train\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdownload\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdownload\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrecursive\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mrecursive\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 35\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/commands/train.py\u001b[0m in \u001b[0;36mtrain_evaluate_model_from_config\u001b[0;34m(config, iterator, to_train, evaluation_targets, to_validate, download, start_epoch_num, recursive)\u001b[0m\n\u001b[1;32m 135\u001b[0m ' \"to_validate\" is deprecated and will be ignored')\n\u001b[1;32m 136\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 137\u001b[0;31m \u001b[0mres\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtrainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miterator\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mevaluation_targets\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mprint_reports\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 138\u001b[0m 
\u001b[0mtrainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_chainer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdestroy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 139\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/trainers/fit_trainer.py\u001b[0m in \u001b[0;36mevaluate\u001b[0;34m(self, iterator, evaluation_targets, print_reports)\u001b[0m\n\u001b[1;32m 254\u001b[0m \u001b[0ma\u001b[0m \u001b[0mdictionary\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mdata\u001b[0m \u001b[0mtypes\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mkeys\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mevaluation\u001b[0m \u001b[0mreports\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mvalues\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 255\u001b[0m \"\"\"\n\u001b[0;32m--> 256\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_load\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 257\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mevaluation_targets\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[0mevaluation_targets\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluation_targets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/trainers/fit_trainer.py\u001b[0m in \u001b[0;36m_load\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 156\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_loaded\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 157\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_chainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdestroy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 158\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_chainer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbuild_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0;34m'chainer'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchainer_config\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mload_trained\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_saved\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 159\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_loaded\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 160\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/commands/infer.py\u001b[0m in \u001b[0;36mbuild_model\u001b[0;34m(config, mode, load_trained, download, serialized)\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0mcomponent_serialized\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 62\u001b[0;31m \u001b[0mcomponent\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mfrom_params\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcomponent_config\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmode\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mserialized\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcomponent_serialized\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 63\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 64\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;34m'id'\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mcomponent_config\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/common/params.py\u001b[0m in \u001b[0;36mfrom_params\u001b[0;34m(params, mode, serialized, **kwargs)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'mode'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 105\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 106\u001b[0;31m \u001b[0mcomponent\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mobj\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig_params\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 107\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 108\u001b[0m \u001b[0m_refs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mconfig_params\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'id'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcomponent\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/models/go_bot/trippy.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, nlg_manager, save_path, slot_names, class_types, pretrained_bert, bert_config, optimizer_parameters, clip_norm, max_seq_length, dropout_rate, heads_dropout, class_loss_ratio, token_loss_for_nonpointable, refer_loss_for_nonpointable, class_aux_feats_inform, class_aux_feats_ds, database, debug, **kwargs)\u001b[0m\n\u001b[1;32m 122\u001b[0m super().__init__(save_path=save_path, \n\u001b[1;32m 123\u001b[0m \u001b[0moptimizer_parameters\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptimizer_parameters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 124\u001b[0;31m **kwargs)\n\u001b[0m\u001b[1;32m 125\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 126\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0moverrides\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/models/torch_model.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, device, optimizer, optimizer_parameters, lr_scheduler, lr_scheduler_parameters, learning_rate_drop_patience, learning_rate_drop_div, load_before_drop, min_learning_rate, *args, **kwargs)\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopt\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdeepcopy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 93\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 94\u001b[0;31m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 95\u001b[0m \u001b[0;31m# we need to switch to eval mode here because by default it's in `train` mode.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 96\u001b[0m \u001b[0;31m# But in case of `interact/build_model` usage, we need to have model in eval mode.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/models/go_bot/trippy.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(self, fname)\u001b[0m\n\u001b[1;32m 136\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpretrained_bert\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 137\u001b[0m self.model = BertForDST.from_pretrained(\n\u001b[0;32m--> 138\u001b[0;31m self.pretrained_bert, config=self.config)\n\u001b[0m\u001b[1;32m 139\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtokenizer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertTokenizerFast\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_pretrained\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpretrained_bert\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 140\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_utils.py\u001b[0m in \u001b[0;36mfrom_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, *model_args, **kwargs)\u001b[0m\n\u001b[1;32m 620\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 621\u001b[0m \u001b[0;31m# Instantiate model.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 622\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcls\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mmodel_args\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mmodel_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 623\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 624\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mstate_dict\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mfrom_tf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/DeepPavlov/deeppavlov/models/go_bot/trippy_bert_for_dst.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, config)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrefer_index\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbert\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertModel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdropout\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDropout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdst_dropout_rate\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdropout_heads\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDropout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdst_heads_dropout_rate\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_bert.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, config)\u001b[0m\n\u001b[1;32m 614\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 615\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0membeddings\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertEmbeddings\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 616\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mencoder\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertEncoder\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 617\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpooler\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertPooler\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_bert.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, config)\u001b[0m\n\u001b[1;32m 388\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_attentions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_attentions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 389\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_hidden_states\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_hidden_states\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 390\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlayer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mModuleList\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mBertLayer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnum_hidden_layers\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 391\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 392\u001b[0m def forward(\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_bert.py\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 388\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_attentions\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_attentions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 389\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_hidden_states\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_hidden_states\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 390\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlayer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mModuleList\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mBertLayer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnum_hidden_layers\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 391\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 392\u001b[0m def forward(\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_bert.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, config)\u001b[0m\n\u001b[1;32m 355\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_decoder\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 356\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcrossattention\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertAttention\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 357\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mintermediate\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertIntermediate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 358\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertOutput\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 359\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_bert.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, config)\u001b[0m\n\u001b[1;32m 322\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 323\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 324\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdense\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mLinear\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhidden_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mintermediate_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 
325\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhidden_act\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 326\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mintermediate_act_fn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mACT2FN\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhidden_act\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/linear.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, in_features, out_features, bias, device, dtype)\u001b[0m\n\u001b[1;32m 84\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 85\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mregister_parameter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'bias'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 86\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreset_parameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 87\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mreset_parameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/linear.py\u001b[0m in \u001b[0;36mreset_parameters\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 87\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mreset_parameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 89\u001b[0;31m \u001b[0minit\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkaiming_uniform_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msqrt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 90\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbias\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 91\u001b[0m \u001b[0mfan_in\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0minit\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_calculate_fan_in_and_fan_out\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/init.py\u001b[0m in \u001b[0;36mkaiming_uniform_\u001b[0;34m(tensor, a, mode, nonlinearity)\u001b[0m\n\u001b[1;32m 
393\u001b[0m \u001b[0mbound\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msqrt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m3.0\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mstd\u001b[0m \u001b[0;31m# Calculate uniform bounds from standard deviation\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 394\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mno_grad\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 395\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mtensor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muniform_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mbound\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbound\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 396\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 397\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1wZOqmYBTgYB" + }, + "source": [ + "With settings of `max_batches=800`, valid accuracy `= 0.5` and test accuracy is `~ 0.5`.\n", + "\n", + "Why is TripPy only as good as go-bot?\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ElGD1tnJTgYC" + }, + "source": [ + "## 4. Interact with Bot" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "m9sJXOPPTgYF", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "5521fe8a-c538-4e7c-ee16-bd8ca7962133" + }, + "source": [ + "from deeppavlov import build_model\n", + "\n", + "bot = build_model(gobot_config)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2021-07-12 14:09:13.926 WARNING in 'deeppavlov.core.models.serializable'['serializable'] at line 49: No load path is set for Sqlite3Database in 'infer' mode. Using save path instead\n", + "2021-07-12 14:09:13.932 INFO in 'deeppavlov.core.data.sqlite_database'['sqlite_database'] at line 66: Loading database from /content/DeepPavlov/my_bot/db.sqlite.\n", + "2021-07-12 14:09:17.459 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 152: Load path /content/DeepPavlov/my_bot/model is given.\n", + "2021-07-12 14:09:17.461 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 159: Load path /content/DeepPavlov/my_bot/model.pth.tar exists.\n", + "2021-07-12 14:09:17.463 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 160: Initializing `TripPy` from saved.\n", + "2021-07-12 14:09:17.465 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 163: Loading weights from /content/DeepPavlov/my_bot/model.pth.tar.\n", + "2021-07-12 14:09:18.263 INFO in 'deeppavlov.core.models.torch_model'['torch_model'] at line 98: Model was successfully initialized! 
Model summary:\n", + " BertForDST(\n", + " (bert): BertModel(\n", + " (embeddings): BertEmbeddings(\n", + " (word_embeddings): Embedding(30522, 768, padding_idx=0)\n", + " (position_embeddings): Embedding(512, 768)\n", + " (token_type_embeddings): Embedding(2, 768)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (encoder): BertEncoder(\n", + " (layer): ModuleList(\n", + " (0): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (1): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (2): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (3): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, 
bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (4): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (5): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (6): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " 
(dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (7): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (8): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (9): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (10): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): 
Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (11): BertLayer(\n", + " (attention): BertAttention(\n", + " (self): BertSelfAttention(\n", + " (query): Linear(in_features=768, out_features=768, bias=True)\n", + " (key): Linear(in_features=768, out_features=768, bias=True)\n", + " (value): Linear(in_features=768, out_features=768, bias=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (output): BertSelfOutput(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " (intermediate): BertIntermediate(\n", + " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", + " )\n", + " (output): BertOutput(\n", + " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", + " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", + " (dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (pooler): BertPooler(\n", + " (dense): Linear(in_features=768, out_features=768, bias=True)\n", + " (activation): Tanh()\n", + " )\n", + " )\n", + " (dropout): Dropout(p=0.3, inplace=False)\n", + " (dropout_heads): Dropout(p=0.0, inplace=False)\n", + " (inform_projection): Linear(in_features=4, out_features=4, bias=True)\n", + " (ds_projection): Linear(in_features=4, out_features=4, bias=True)\n", + " (class_pricerange): Linear(in_features=776, out_features=4, bias=True)\n", + " (token_pricerange): Linear(in_features=768, out_features=2, bias=True)\n", + " (refer_pricerange): Linear(in_features=776, out_features=5, bias=True)\n", + " (class_this): Linear(in_features=776, out_features=4, bias=True)\n", + " (token_this): Linear(in_features=768, out_features=2, bias=True)\n", + " (refer_this): Linear(in_features=776, out_features=5, bias=True)\n", + " (class_area): Linear(in_features=776, out_features=4, bias=True)\n", + " (token_area): Linear(in_features=768, out_features=2, bias=True)\n", + " (refer_area): Linear(in_features=776, out_features=5, bias=True)\n", + " (class_food): Linear(in_features=776, out_features=4, bias=True)\n", + " (token_food): Linear(in_features=768, out_features=2, bias=True)\n", + " (refer_food): Linear(in_features=776, out_features=5, bias=True)\n", + " (action_prediction): Linear(in_features=776, out_features=46, bias=True)\n", + " (action_softmax): Softmax(dim=1)\n", + ")\n" + ], + "name": "stderr" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "HVLHl5jlpQMr", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "9253ffc9-1b96-4175-d360-e8acc502dca2" + }, + "source": [ + "bot.reset()\n", + "bot(['hi, i want to eat, can you suggest a place to go?'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['What kind of food would you like?']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 25 + } + ] + }, + 
{ + "cell_type": "code", + "metadata": { + "id": "Pb6AutTSpRUq", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "c1d1529e-c231-4a23-bfd7-a47217f8bb05" + }, + "source": [ + "bot(['Perhaps something cheap'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['What part of town do you have in mind?']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 26 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ZTBEMEIUpizH", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "fa69cb75-598e-4a04-af84-d196df2b1290" + }, + "source": [ + "bot(['In the north of town'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2021-07-16 14:09:36.730 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 390: Made api_call with dict_keys(['pricerange', 'this', 'area', 'food']), got 11 results.\n" + ], + "name": "stderr" + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['api_call area=\"north\" food=\"dontcare\" pricerange=\"dontcare\"',\n", + " 'Meghna is a nice place in the north of town and the prices are moderate.']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 27 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "bW52c1G9ptRO", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "7727c106-235c-4e93-9424-27927220e8c8" + }, + "source": [ + "bot(['Whats their phone number?'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['The phone number of meghna is 01223 727410.']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 28 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "xY3DASP-pxdY", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "8e2b1c61-9710-4bb6-a00c-d988fd0cbcd0" + }, + "source": [ + "bot(['and the address?'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['Sure, meghna is on 205 victoria road chesterton.']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 29 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "7THguiXmp0PW", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "001fc2e9-e071-4877-fc95-08360c052748" + }, + "source": [ + "bot(['whats their pricerange again?'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['The price range at meghna is moderate.']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 30 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "naZd8YNmp5-r", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "2954c809-2bae-4ad5-c24f-ec1aa22c4fc6" + }, + "source": [ + "bot(['Alright sounds good, thank you!'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['You are welcome!']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 31 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6JLME42dAk_n" + }, + "source": [ + "#### Original\n", + "\n", + "These are examples used in the original DeepPavlov Go Bot Extended Tutorial." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "AyLNMrdzZyxJ", + "outputId": "3bb4387c-6af8-4db2-ec21-633364c58f8a" + }, + "source": [ + "bot.reset()\n", + "bot(['hi, i want to eat, can you suggest a place to go?'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['What kind of food would you like?']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 33 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wxl8B_YEDR0D", + "outputId": "2cceffc0-6a42-4be2-f348-706e52f11700" + }, + "source": [ + "bot(['i want cheap food'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['What part of town do you have in mind?']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 34 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "XkZWwPxzDRv7", + "outputId": "af0d0249-4b4f-4ff0-8bfa-e220505f210d" + }, + "source": [ + "bot(['chinese food'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2021-07-12 14:10:38.514 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 390: Made api_call with dict_keys(['pricerange', 'this', 'area', 'food']), got 16 results.\n" + ], + "name": "stderr" + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['api_call area=\"dontcare\" food=\"chinese\" pricerange=\"dontcare\"',\n", + " 'The good luck chinese food takeaway serves chinese food in the expensive price range.']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 35 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "PRd7r-sGDRtb", + "outputId": "5598de1c-bc4d-4648-ee02-143ddfbf25b4" + }, + "source": [ + "bot(['thanks, give me their address'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['Sure, the good luck chinese food takeaway is on 82 cherry hinton road cherry hinton.']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 36 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Zy9sWkiADtWR", + "outputId": "4df322b2-3e2a-4b07-fe3b-be0f24a3c3df" + }, + "source": [ + "bot(['i want their phone number too'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['The phone number of the good luck chinese food takeaway is 01223 244149.']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 37 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "dd2K32hrDtT6", + "outputId": "18c8bb83-79a7-461b-8253-b6de52313960" + }, + "source": [ + "bot(['bye'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['The good luck chinese food takeaway serves chinese food in the expensive price range.']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 38 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "FwKjYMOoDtQi", + "outputId": 
"06c70c6c-69dc-4837-cefe-a8772c52aa00" + }, + "source": [ + "bot.reset()\n", + "bot(['Have you ever been in Cambridge?'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2021-07-12 14:13:57.850 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 390: Made api_call with dict_keys(['pricerange', 'this', 'area', 'food']), got 109 results.\n" + ], + "name": "stderr" + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['api_call area=\"dontcare\" food=\"dontcare\" pricerange=\"dontcare\"',\n", + " 'Frankie and bennys is a great restaurant.']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 39 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wwgP5xiRDtOv", + "outputId": "40f74cca-feda-476b-a8bb-db9432b0f141" + }, + "source": [ + "bot.reset()\n", + "bot(['Can you suggest me a portuguese restaurant in Cambridge?'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2021-07-12 14:16:43.903 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 390: Made api_call with dict_keys(['pricerange', 'this', 'area', 'food']), got 2 results.\n" + ], + "name": "stderr" + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['api_call area=\"dontcare\" food=\"portuguese\" pricerange=\"dontcare\"',\n", + " 'Nandos serves portuguese food.']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 40 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "B2gxspn9DtMS", + "outputId": "22ebe4c1-059a-4755-c6e3-66f5b9f769aa" + }, + "source": [ + "bot(['Does it have sangria?'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['Nandos serves portuguese food.']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 41 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "w_wrfg3BC0jb", + "outputId": "3c1b185d-cf3f-484a-b9ac-1a4aa9327c2b" + }, + "source": [ + "bot.reset()\n", + "bot(['Where can I get good pizza?'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['You are welcome!']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 42 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1qj3A6uiFUgb", + "outputId": "a31a10b1-879d-4fc7-bbef-c53a7be0495b" + }, + "source": [ + "bot(['Where can I get good pizza?'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['What part of town do you have in mind?']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 43 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ouA7sJmoFYmb", + "outputId": "048f2dad-f2f5-4d5b-a7e2-c44d61c26890" + }, + "source": [ + "bot(['South of town'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2021-07-12 14:18:43.686 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 390: Made api_call with dict_keys(['pricerange', 'this', 'area', 'food']), got 9 results.\n" + ], + "name": "stderr" + }, + { + "output_type": 
"execute_result", + "data": { + "text/plain": [ + "[['api_call area=\"south\" food=\"dontcare\" pricerange=\"dontcare\"',\n", + " 'Frankie and bennys is a nice place in the south of town and the prices are expensive.']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 44 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "T1y1Cz_yFoY2", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "0378be3a-61b3-4cb7-f2d3-948f362aeed9" + }, + "source": [ + "bot(['Whats their phone number?'])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['The phone number of frankie and bennys is 01223 412430.']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 45 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "4N_9hOAoL_Yx" + }, + "source": [ + "" + ], + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file From 33258fb63161d02824144e8fab76b30e45a9d39a Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Fri, 16 Jul 2021 16:19:16 +0200 Subject: [PATCH 123/151] update demo --- examples/trippy_extended_tutorial.ipynb | 750 ++---------------------- 1 file changed, 48 insertions(+), 702 deletions(-) diff --git a/examples/trippy_extended_tutorial.ipynb b/examples/trippy_extended_tutorial.ipynb index 719310aa0c..3b60d781f7 100644 --- a/examples/trippy_extended_tutorial.ipynb +++ b/examples/trippy_extended_tutorial.ipynb @@ -30,6 +30,15 @@ "accelerator": "GPU" }, "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "mZ5jRYn-rsBU" + }, + "source": [ + "### You can also run the notebook in [COLAB](https://colab.research.google.com/github/deepmipt/DeepPavlov/blob/master/examples/trippy_extended_tutorial.ipynb)." + ] + }, { "cell_type": "markdown", "metadata": { @@ -711,7 +720,7 @@ "Below is a sketch for how the full TripPy model has been implemented in DeepPavlov:\n", "\n", " \n", - "![trippy_architecture.png](img/trippy_architecture_original.jpg)\n", + "![trippy_architecture.png](img/trippy_architecture.jpg)\n", " \n", "\n", "The above image also includes the input & input processing steps, while the previous sketch starts with the BERT Model (BERTForDST). 
\n", @@ -819,12 +828,7 @@ { "cell_type": "code", "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "id": "7ekUezSjWGEz", - "outputId": "d8ed0b63-0c79-4b84-ae95-119900989f1c" + "id": "7ekUezSjWGEz" }, "source": [ "from deeppavlov import train_model\n", @@ -841,643 +845,41 @@ "gobot_config['chainer']['pipe'][-1][\"optimizer_parameters\"] = {\"lr\": 1e-5, \"eps\": 1e-6}\n", "\n", "train_model(gobot_config)" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "2021-07-11 18:23:29.41 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /content/DeepPavlov/my_data/dstc2-trn.jsonlist]\n", - "2021-07-11 18:23:29.363 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /content/DeepPavlov/my_data/dstc2-val.jsonlist]\n", - "2021-07-11 18:23:29.588 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /content/DeepPavlov/my_data/dstc2-tst.jsonlist]\n", - "[nltk_data] Downloading package punkt to /root/nltk_data...\n", - "[nltk_data] Package punkt is already up-to-date!\n", - "[nltk_data] Downloading package stopwords to /root/nltk_data...\n", - "[nltk_data] Package stopwords is already up-to-date!\n", - "[nltk_data] Downloading package perluniprops to /root/nltk_data...\n", - "[nltk_data] Package perluniprops is already up-to-date!\n", - "[nltk_data] Downloading package nonbreaking_prefixes to\n", - "[nltk_data] /root/nltk_data...\n", - "[nltk_data] Package nonbreaking_prefixes is already up-to-date!\n", - "2021-07-11 18:23:32.351 WARNING in 'deeppavlov.core.models.serializable'['serializable'] at line 52: No load path is set for Sqlite3Database!\n", - "2021-07-11 18:23:32.353 INFO in 'deeppavlov.core.data.sqlite_database'['sqlite_database'] at line 66: Loading database from /content/DeepPavlov/my_bot/db.sqlite.\n", - "2021-07-11 18:23:38.350 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 152: Load path /content/DeepPavlov/my_bot/model is given.\n", - "2021-07-11 18:23:38.352 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 159: Load path /content/DeepPavlov/my_bot/model.pth.tar exists.\n", - "2021-07-11 18:23:38.354 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 160: Initializing `TripPy` from saved.\n", - "2021-07-11 18:23:38.357 INFO in 'deeppavlov.models.go_bot.trippy'['trippy'] at line 163: Loading weights from /content/DeepPavlov/my_bot/model.pth.tar.\n", - "2021-07-11 18:23:39.142 INFO in 'deeppavlov.core.models.torch_model'['torch_model'] at line 98: Model was successfully initialized! 
Model summary:\n", - " BertForDST(\n", - " (bert): BertModel(\n", - " (embeddings): BertEmbeddings(\n", - " (word_embeddings): Embedding(30522, 768, padding_idx=0)\n", - " (position_embeddings): Embedding(512, 768)\n", - " (token_type_embeddings): Embedding(2, 768)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " (encoder): BertEncoder(\n", - " (layer): ModuleList(\n", - " (0): BertLayer(\n", - " (attention): BertAttention(\n", - " (self): BertSelfAttention(\n", - " (query): Linear(in_features=768, out_features=768, bias=True)\n", - " (key): Linear(in_features=768, out_features=768, bias=True)\n", - " (value): Linear(in_features=768, out_features=768, bias=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " (output): BertSelfOutput(\n", - " (dense): Linear(in_features=768, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (intermediate): BertIntermediate(\n", - " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", - " )\n", - " (output): BertOutput(\n", - " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (1): BertLayer(\n", - " (attention): BertAttention(\n", - " (self): BertSelfAttention(\n", - " (query): Linear(in_features=768, out_features=768, bias=True)\n", - " (key): Linear(in_features=768, out_features=768, bias=True)\n", - " (value): Linear(in_features=768, out_features=768, bias=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " (output): BertSelfOutput(\n", - " (dense): Linear(in_features=768, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (intermediate): BertIntermediate(\n", - " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", - " )\n", - " (output): BertOutput(\n", - " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (2): BertLayer(\n", - " (attention): BertAttention(\n", - " (self): BertSelfAttention(\n", - " (query): Linear(in_features=768, out_features=768, bias=True)\n", - " (key): Linear(in_features=768, out_features=768, bias=True)\n", - " (value): Linear(in_features=768, out_features=768, bias=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " (output): BertSelfOutput(\n", - " (dense): Linear(in_features=768, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (intermediate): BertIntermediate(\n", - " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", - " )\n", - " (output): BertOutput(\n", - " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (3): BertLayer(\n", - " (attention): BertAttention(\n", - " (self): BertSelfAttention(\n", - " (query): Linear(in_features=768, out_features=768, 
bias=True)\n", - " (key): Linear(in_features=768, out_features=768, bias=True)\n", - " (value): Linear(in_features=768, out_features=768, bias=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " (output): BertSelfOutput(\n", - " (dense): Linear(in_features=768, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (intermediate): BertIntermediate(\n", - " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", - " )\n", - " (output): BertOutput(\n", - " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (4): BertLayer(\n", - " (attention): BertAttention(\n", - " (self): BertSelfAttention(\n", - " (query): Linear(in_features=768, out_features=768, bias=True)\n", - " (key): Linear(in_features=768, out_features=768, bias=True)\n", - " (value): Linear(in_features=768, out_features=768, bias=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " (output): BertSelfOutput(\n", - " (dense): Linear(in_features=768, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (intermediate): BertIntermediate(\n", - " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", - " )\n", - " (output): BertOutput(\n", - " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (5): BertLayer(\n", - " (attention): BertAttention(\n", - " (self): BertSelfAttention(\n", - " (query): Linear(in_features=768, out_features=768, bias=True)\n", - " (key): Linear(in_features=768, out_features=768, bias=True)\n", - " (value): Linear(in_features=768, out_features=768, bias=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " (output): BertSelfOutput(\n", - " (dense): Linear(in_features=768, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (intermediate): BertIntermediate(\n", - " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", - " )\n", - " (output): BertOutput(\n", - " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (6): BertLayer(\n", - " (attention): BertAttention(\n", - " (self): BertSelfAttention(\n", - " (query): Linear(in_features=768, out_features=768, bias=True)\n", - " (key): Linear(in_features=768, out_features=768, bias=True)\n", - " (value): Linear(in_features=768, out_features=768, bias=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " (output): BertSelfOutput(\n", - " (dense): Linear(in_features=768, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (intermediate): BertIntermediate(\n", - " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", - " )\n", - " (output): BertOutput(\n", - " 
(dense): Linear(in_features=3072, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (7): BertLayer(\n", - " (attention): BertAttention(\n", - " (self): BertSelfAttention(\n", - " (query): Linear(in_features=768, out_features=768, bias=True)\n", - " (key): Linear(in_features=768, out_features=768, bias=True)\n", - " (value): Linear(in_features=768, out_features=768, bias=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " (output): BertSelfOutput(\n", - " (dense): Linear(in_features=768, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (intermediate): BertIntermediate(\n", - " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", - " )\n", - " (output): BertOutput(\n", - " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (8): BertLayer(\n", - " (attention): BertAttention(\n", - " (self): BertSelfAttention(\n", - " (query): Linear(in_features=768, out_features=768, bias=True)\n", - " (key): Linear(in_features=768, out_features=768, bias=True)\n", - " (value): Linear(in_features=768, out_features=768, bias=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " (output): BertSelfOutput(\n", - " (dense): Linear(in_features=768, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (intermediate): BertIntermediate(\n", - " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", - " )\n", - " (output): BertOutput(\n", - " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (9): BertLayer(\n", - " (attention): BertAttention(\n", - " (self): BertSelfAttention(\n", - " (query): Linear(in_features=768, out_features=768, bias=True)\n", - " (key): Linear(in_features=768, out_features=768, bias=True)\n", - " (value): Linear(in_features=768, out_features=768, bias=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " (output): BertSelfOutput(\n", - " (dense): Linear(in_features=768, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (intermediate): BertIntermediate(\n", - " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", - " )\n", - " (output): BertOutput(\n", - " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (10): BertLayer(\n", - " (attention): BertAttention(\n", - " (self): BertSelfAttention(\n", - " (query): Linear(in_features=768, out_features=768, bias=True)\n", - " (key): Linear(in_features=768, out_features=768, bias=True)\n", - " (value): Linear(in_features=768, out_features=768, bias=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " (output): BertSelfOutput(\n", - " (dense): 
Linear(in_features=768, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (intermediate): BertIntermediate(\n", - " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", - " )\n", - " (output): BertOutput(\n", - " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (11): BertLayer(\n", - " (attention): BertAttention(\n", - " (self): BertSelfAttention(\n", - " (query): Linear(in_features=768, out_features=768, bias=True)\n", - " (key): Linear(in_features=768, out_features=768, bias=True)\n", - " (value): Linear(in_features=768, out_features=768, bias=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " (output): BertSelfOutput(\n", - " (dense): Linear(in_features=768, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (intermediate): BertIntermediate(\n", - " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", - " )\n", - " (output): BertOutput(\n", - " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", - " (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " )\n", - " )\n", - " (pooler): BertPooler(\n", - " (dense): Linear(in_features=768, out_features=768, bias=True)\n", - " (activation): Tanh()\n", - " )\n", - " )\n", - " (dropout): Dropout(p=0.3, inplace=False)\n", - " (dropout_heads): Dropout(p=0.0, inplace=False)\n", - " (inform_projection): Linear(in_features=4, out_features=4, bias=True)\n", - " (ds_projection): Linear(in_features=4, out_features=4, bias=True)\n", - " (class_pricerange): Linear(in_features=776, out_features=4, bias=True)\n", - " (token_pricerange): Linear(in_features=768, out_features=2, bias=True)\n", - " (refer_pricerange): Linear(in_features=776, out_features=5, bias=True)\n", - " (class_this): Linear(in_features=776, out_features=4, bias=True)\n", - " (token_this): Linear(in_features=768, out_features=2, bias=True)\n", - " (refer_this): Linear(in_features=776, out_features=5, bias=True)\n", - " (class_area): Linear(in_features=776, out_features=4, bias=True)\n", - " (token_area): Linear(in_features=768, out_features=2, bias=True)\n", - " (refer_area): Linear(in_features=776, out_features=5, bias=True)\n", - " (class_food): Linear(in_features=776, out_features=4, bias=True)\n", - " (token_food): Linear(in_features=768, out_features=2, bias=True)\n", - " (refer_food): Linear(in_features=776, out_features=5, bias=True)\n", - " (action_prediction): Linear(in_features=776, out_features=46, bias=True)\n", - " (action_softmax): Softmax(dim=1)\n", - ")\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6015}, \"time_spent\": \"0:00:45\", \"epochs_done\": 0, \"batches_seen\": 40, \"train_examples_seen\": 160, \"total_loss\": 113.78551483154297, \"action_loss\": 24.97793960571289}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 18:27:28.871 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 202: First best per_item_dialog_accuracy of 
0.4381\n", - "2021-07-11 18:27:28.873 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 204: Saving model\n", - "2021-07-11 18:27:28.874 INFO in 'deeppavlov.core.models.torch_model'['torch_model'] at line 191: Saving model to /content/DeepPavlov/my_bot/model.pth.tar.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4381}, \"time_spent\": \"0:03:50\", \"epochs_done\": 0, \"batches_seen\": 40, \"train_examples_seen\": 160, \"impatience\": 0, \"patience_limit\": 10}}\n", - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6043}, \"time_spent\": \"0:04:34\", \"epochs_done\": 0, \"batches_seen\": 80, \"train_examples_seen\": 320, \"total_loss\": 235.44265747070312, \"action_loss\": 57.47998809814453}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 18:31:22.811 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4381\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4364}, \"time_spent\": \"0:07:44\", \"epochs_done\": 0, \"batches_seen\": 80, \"train_examples_seen\": 320, \"impatience\": 1, \"patience_limit\": 10}}\n", - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6179}, \"time_spent\": \"0:08:22\", \"epochs_done\": 0, \"batches_seen\": 120, \"train_examples_seen\": 480, \"total_loss\": 62.12294387817383, \"action_loss\": 14.066139221191406}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 18:35:12.850 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4381\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4295}, \"time_spent\": \"0:11:34\", \"epochs_done\": 0, \"batches_seen\": 120, \"train_examples_seen\": 480, \"impatience\": 2, \"patience_limit\": 10}}\n", - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6243}, \"time_spent\": \"0:12:11\", \"epochs_done\": 0, \"batches_seen\": 160, \"train_examples_seen\": 640, \"total_loss\": 131.84048461914062, \"action_loss\": 30.95857810974121}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 18:38:54.945 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4381\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.416}, \"time_spent\": \"0:15:16\", \"epochs_done\": 0, \"batches_seen\": 160, \"train_examples_seen\": 640, \"impatience\": 3, \"patience_limit\": 10}}\n", - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.5816}, \"time_spent\": \"0:15:59\", \"epochs_done\": 0, \"batches_seen\": 200, \"train_examples_seen\": 800, \"total_loss\": 88.86528778076172, \"action_loss\": 20.939273834228516}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 18:42:46.99 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 207: Improved 
best per_item_dialog_accuracy of 0.4455\n", - "2021-07-11 18:42:46.100 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 209: Saving model\n", - "2021-07-11 18:42:46.102 INFO in 'deeppavlov.core.models.torch_model'['torch_model'] at line 191: Saving model to /content/DeepPavlov/my_bot/model.pth.tar.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4455}, \"time_spent\": \"0:19:07\", \"epochs_done\": 0, \"batches_seen\": 200, \"train_examples_seen\": 800, \"impatience\": 0, \"patience_limit\": 10}}\n", - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.5866}, \"time_spent\": \"0:19:52\", \"epochs_done\": 0, \"batches_seen\": 240, \"train_examples_seen\": 960, \"total_loss\": 107.40069580078125, \"action_loss\": 25.030973434448242}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 18:46:35.891 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 207: Improved best per_item_dialog_accuracy of 0.4529\n", - "2021-07-11 18:46:35.893 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 209: Saving model\n", - "2021-07-11 18:46:35.894 INFO in 'deeppavlov.core.models.torch_model'['torch_model'] at line 191: Saving model to /content/DeepPavlov/my_bot/model.pth.tar.\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4529}, \"time_spent\": \"0:22:57\", \"epochs_done\": 0, \"batches_seen\": 240, \"train_examples_seen\": 960, \"impatience\": 0, \"patience_limit\": 10}}\n", - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.5948}, \"time_spent\": \"0:23:43\", \"epochs_done\": 1, \"batches_seen\": 280, \"train_examples_seen\": 1119, \"total_loss\": 115.84950256347656, \"action_loss\": 24.607830047607422}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 18:50:30.769 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4243}, \"time_spent\": \"0:26:52\", \"epochs_done\": 1, \"batches_seen\": 280, \"train_examples_seen\": 1119, \"impatience\": 1, \"patience_limit\": 10}}\n", - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.5468}, \"time_spent\": \"0:27:32\", \"epochs_done\": 1, \"batches_seen\": 320, \"train_examples_seen\": 1279, \"total_loss\": 48.77066421508789, \"action_loss\": 11.36803150177002}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 18:54:13.775 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4301}, \"time_spent\": \"0:30:35\", \"epochs_done\": 1, \"batches_seen\": 320, \"train_examples_seen\": 1279, \"impatience\": 2, \"patience_limit\": 10}}\n", - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.5867}, \"time_spent\": \"0:31:14\", \"epochs_done\": 1, 
\"batches_seen\": 360, \"train_examples_seen\": 1439, \"total_loss\": 104.88668823242188, \"action_loss\": 24.79292869567871}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 18:58:03.392 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4521}, \"time_spent\": \"0:34:25\", \"epochs_done\": 1, \"batches_seen\": 360, \"train_examples_seen\": 1439, \"impatience\": 3, \"patience_limit\": 10}}\n", - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6066}, \"time_spent\": \"0:35:04\", \"epochs_done\": 1, \"batches_seen\": 400, \"train_examples_seen\": 1599, \"total_loss\": 75.50640106201172, \"action_loss\": 18.100648880004883}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 19:01:50.759 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4287}, \"time_spent\": \"0:38:12\", \"epochs_done\": 1, \"batches_seen\": 400, \"train_examples_seen\": 1599, \"impatience\": 4, \"patience_limit\": 10}}\n", - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.5407}, \"time_spent\": \"0:38:54\", \"epochs_done\": 1, \"batches_seen\": 440, \"train_examples_seen\": 1759, \"total_loss\": 204.6051483154297, \"action_loss\": 46.61280822753906}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 19:05:42.577 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4409}, \"time_spent\": \"0:42:04\", \"epochs_done\": 1, \"batches_seen\": 440, \"train_examples_seen\": 1759, \"impatience\": 5, \"patience_limit\": 10}}\n", - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6438}, \"time_spent\": \"0:42:42\", \"epochs_done\": 1, \"batches_seen\": 480, \"train_examples_seen\": 1919, \"total_loss\": 36.40619659423828, \"action_loss\": 8.460254669189453}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 19:09:22.300 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4288}, \"time_spent\": \"0:45:44\", \"epochs_done\": 1, \"batches_seen\": 480, \"train_examples_seen\": 1919, \"impatience\": 6, \"patience_limit\": 10}}\n", - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6038}, \"time_spent\": \"0:46:22\", \"epochs_done\": 2, \"batches_seen\": 520, \"train_examples_seen\": 2078, \"total_loss\": 65.55957794189453, \"action_loss\": 15.434016227722168}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 19:13:06.246 
INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.436}, \"time_spent\": \"0:49:28\", \"epochs_done\": 2, \"batches_seen\": 520, \"train_examples_seen\": 2078, \"impatience\": 7, \"patience_limit\": 10}}\n", - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6257}, \"time_spent\": \"0:50:07\", \"epochs_done\": 2, \"batches_seen\": 560, \"train_examples_seen\": 2238, \"total_loss\": 67.32139587402344, \"action_loss\": 16.170827865600586}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 19:16:51.602 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 212: Did not improve on the per_item_dialog_accuracy of 0.4529\n" - ], - "name": "stderr" - }, - { - "output_type": "stream", - "text": [ - "{\"valid\": {\"eval_examples_count\": 575, \"metrics\": {\"per_item_dialog_accuracy\": 0.4288}, \"time_spent\": \"0:53:13\", \"epochs_done\": 2, \"batches_seen\": 560, \"train_examples_seen\": 2238, \"impatience\": 8, \"patience_limit\": 10}}\n", - "{\"train\": {\"eval_examples_count\": 40, \"metrics\": {\"per_item_dialog_accuracy\": 0.6022}, \"time_spent\": \"0:53:52\", \"epochs_done\": 2, \"batches_seen\": 600, \"train_examples_seen\": 2398, \"total_loss\": 148.84075927734375, \"action_loss\": 35.534236907958984}}\n" - ], - "name": "stdout" - }, - { - "output_type": "stream", - "text": [ - "2021-07-11 19:18:09.31 INFO in 'deeppavlov.core.trainers.nn_trainer'['nn_trainer'] at line 339: Stopped training\n", - "2021-07-11 19:18:09.41 WARNING in 'deeppavlov.core.models.serializable'['serializable'] at line 49: No load path is set for Sqlite3Database in 'infer' mode. 
Using save path instead\n", - "2021-07-11 19:18:09.43 INFO in 'deeppavlov.core.data.sqlite_database'['sqlite_database'] at line 66: Loading database from /content/DeepPavlov/my_bot/db.sqlite.\n" - ], - "name": "stderr" - }, - { - "output_type": "error", - "ename": "KeyboardInterrupt", - "evalue": "ignored", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mtrain_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgobot_config\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 17\u001b[0m \u001b[0my\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mx\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/__init__.py\u001b[0m in \u001b[0;36mtrain_model\u001b[0;34m(config, download, recursive)\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0;31m# TODO: make better\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mtrain_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mPath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdownload\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbool\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrecursive\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbool\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mChainer\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 29\u001b[0;31m \u001b[0mtrain_evaluate_model_from_config\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdownload\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdownload\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrecursive\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mrecursive\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 30\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mbuild_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mload_trained\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/commands/train.py\u001b[0m in \u001b[0;36mtrain_evaluate_model_from_config\u001b[0;34m(config, iterator, to_train, evaluation_targets, to_validate, download, start_epoch_num, recursive)\u001b[0m\n\u001b[1;32m 135\u001b[0m ' \"to_validate\" is deprecated and will be ignored')\n\u001b[1;32m 136\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 137\u001b[0;31m 
\u001b[0mres\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtrainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miterator\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mevaluation_targets\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mprint_reports\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 138\u001b[0m \u001b[0mtrainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_chainer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdestroy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 139\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/trainers/fit_trainer.py\u001b[0m in \u001b[0;36mevaluate\u001b[0;34m(self, iterator, evaluation_targets, print_reports)\u001b[0m\n\u001b[1;32m 254\u001b[0m \u001b[0ma\u001b[0m \u001b[0mdictionary\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mdata\u001b[0m \u001b[0mtypes\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mkeys\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mevaluation\u001b[0m \u001b[0mreports\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mvalues\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 255\u001b[0m \"\"\"\n\u001b[0;32m--> 256\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_load\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 257\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mevaluation_targets\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[0mevaluation_targets\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluation_targets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/trainers/fit_trainer.py\u001b[0m in \u001b[0;36m_load\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 156\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_loaded\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 157\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_chainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdestroy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 158\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_chainer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbuild_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0;34m'chainer'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchainer_config\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mload_trained\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_saved\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 159\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_loaded\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 160\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/commands/infer.py\u001b[0m in \u001b[0;36mbuild_model\u001b[0;34m(config, mode, 
load_trained, download, serialized)\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0mcomponent_serialized\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 62\u001b[0;31m \u001b[0mcomponent\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfrom_params\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcomponent_config\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmode\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mserialized\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcomponent_serialized\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 63\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 64\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;34m'id'\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mcomponent_config\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/common/params.py\u001b[0m in \u001b[0;36mfrom_params\u001b[0;34m(params, mode, serialized, **kwargs)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'mode'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 105\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 106\u001b[0;31m \u001b[0mcomponent\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mobj\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig_params\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 107\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 108\u001b[0m \u001b[0m_refs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mconfig_params\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'id'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcomponent\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/models/go_bot/trippy.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, nlg_manager, save_path, slot_names, class_types, pretrained_bert, bert_config, optimizer_parameters, clip_norm, max_seq_length, dropout_rate, heads_dropout, class_loss_ratio, token_loss_for_nonpointable, refer_loss_for_nonpointable, class_aux_feats_inform, class_aux_feats_ds, database, debug, **kwargs)\u001b[0m\n\u001b[1;32m 122\u001b[0m super().__init__(save_path=save_path, \n\u001b[1;32m 123\u001b[0m \u001b[0moptimizer_parameters\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptimizer_parameters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 124\u001b[0;31m **kwargs)\n\u001b[0m\u001b[1;32m 125\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 126\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0moverrides\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/models/torch_model.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, device, optimizer, optimizer_parameters, lr_scheduler, lr_scheduler_parameters, learning_rate_drop_patience, learning_rate_drop_div, load_before_drop, min_learning_rate, *args, **kwargs)\u001b[0m\n\u001b[1;32m 92\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopt\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdeepcopy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 93\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 94\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 95\u001b[0m \u001b[0;31m# we need to switch to eval mode here because by default it's in `train` mode.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 96\u001b[0m \u001b[0;31m# But in case of `interact/build_model` usage, we need to have model in eval mode.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/models/go_bot/trippy.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(self, fname)\u001b[0m\n\u001b[1;32m 136\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpretrained_bert\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 137\u001b[0m self.model = BertForDST.from_pretrained(\n\u001b[0;32m--> 138\u001b[0;31m self.pretrained_bert, config=self.config)\n\u001b[0m\u001b[1;32m 139\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtokenizer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertTokenizerFast\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_pretrained\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpretrained_bert\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 140\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_utils.py\u001b[0m in \u001b[0;36mfrom_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, *model_args, **kwargs)\u001b[0m\n\u001b[1;32m 620\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 621\u001b[0m \u001b[0;31m# Instantiate model.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 622\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcls\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mmodel_args\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mmodel_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 623\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 624\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mstate_dict\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mfrom_tf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/models/go_bot/trippy_bert_for_dst.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, config)\u001b[0m\n\u001b[1;32m 72\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"action_softmax\"\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSoftmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 73\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 74\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minit_weights\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 75\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 76\u001b[0m def forward(self,\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_utils.py\u001b[0m in \u001b[0;36minit_weights\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 392\u001b[0m \u001b[0;34m\"\"\" Initialize and prunes weights if needed. \"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 393\u001b[0m \u001b[0;31m# Initialize weights\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 394\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_init_weights\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 395\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 396\u001b[0m \u001b[0;31m# Prune heads if needed\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, fn)\u001b[0m\n\u001b[1;32m 614\u001b[0m \"\"\"\n\u001b[1;32m 615\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchildren\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 616\u001b[0;31m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 617\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, fn)\u001b[0m\n\u001b[1;32m 614\u001b[0m \"\"\"\n\u001b[1;32m 615\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchildren\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 616\u001b[0;31m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 617\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - 
"\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, fn)\u001b[0m\n\u001b[1;32m 614\u001b[0m \"\"\"\n\u001b[1;32m 615\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchildren\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 616\u001b[0;31m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 617\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, fn)\u001b[0m\n\u001b[1;32m 614\u001b[0m \"\"\"\n\u001b[1;32m 615\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchildren\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 616\u001b[0;31m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 617\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, fn)\u001b[0m\n\u001b[1;32m 614\u001b[0m \"\"\"\n\u001b[1;32m 615\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchildren\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 616\u001b[0;31m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 617\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, fn)\u001b[0m\n\u001b[1;32m 614\u001b[0m \"\"\"\n\u001b[1;32m 615\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchildren\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 616\u001b[0;31m 
\u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 617\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36mapply\u001b[0;34m(self, fn)\u001b[0m\n\u001b[1;32m 615\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchildren\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 616\u001b[0m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 617\u001b[0;31m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 618\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 619\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_bert.py\u001b[0m in \u001b[0;36m_init_weights\u001b[0;34m(self, module)\u001b[0m\n\u001b[1;32m 523\u001b[0m \u001b[0;31m# Slightly different from the TF version which uses truncated_normal for initialization\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 524\u001b[0m \u001b[0;31m# cf https://github.com/pytorch/pytorch/pull/5617\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 525\u001b[0;31m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnormal_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmean\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0.0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstd\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minitializer_range\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 526\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodule\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mBertLayerNorm\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 527\u001b[0m \u001b[0mmodule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbias\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzero_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mKeyboardInterrupt\u001b[0m: " - ] - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "BCHybLSa_Gzx" - }, - "source": [ - "Optionally, you can download the pre-trained model from kaggle. You will need a kaggle account and to upload your kaggle.json file. Then you may have to run the below cell two times." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "Pq-U6mvu-js-", - "outputId": "8da1fcc0-4669-4ae7-a9f3-053dcdfa2aff" - }, - "source": [ - "### Optional - Download Pretrained TripPy from kaggle ###\n", - "\n", - "# Make your json accessible to kaggle\n", - "#!cp /content/kaggle.json /root/.kaggle/\n", - "\n", - "# Download the dataset\n", - "#!kaggle datasets download -d muennighoff/trippy-restaurant\n", - "#!unzip trippy-restaurant.zip\n", - "\n", - "# Move into correct directory\n", - "#!mv db.sqlite /content/DeepPavlov/my_bot/\n", - "#!mv model.pth.tar /content/DeepPavlov/my_bot/" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BCHybLSa_Gzx" + }, + "source": [ + "Optionally, you can download the pre-trained model from kaggle. You will need a kaggle account and to upload your kaggle.json file. Then you may have to run the below cell two times." + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Pq-U6mvu-js-", + "outputId": "8da1fcc0-4669-4ae7-a9f3-053dcdfa2aff" + }, + "source": [ + "### Optional - Download Pretrained TripPy from kaggle ###\n", + "\n", + "# Make your json accessible to kaggle\n", + "#!cp /content/kaggle.json /root/.kaggle/\n", + "\n", + "# Download the dataset\n", + "#!kaggle datasets download -d muennighoff/trippy-restaurant\n", + "#!unzip trippy-restaurant.zip\n", + "\n", + "# Move into correct directory\n", + "#!mv db.sqlite /content/DeepPavlov/my_bot/\n", + "#!mv model.pth.tar /content/DeepPavlov/my_bot/" ], "execution_count": null, "outputs": [ @@ -1514,12 +916,7 @@ { "cell_type": "code", "metadata": { - "id": "INAcWeKfHR63", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 609 - }, - "outputId": "840456a5-1c29-430f-fe19-bb74eff161de" + "id": "INAcWeKfHR63" }, "source": [ "from deeppavlov import evaluate_model\n", @@ -1527,58 +924,7 @@ "evaluate_model(gobot_config);" ], "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "2021-07-11 17:59:26.197 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /content/DeepPavlov/my_data/dstc2-trn.jsonlist]\n", - "2021-07-11 17:59:26.541 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /content/DeepPavlov/my_data/dstc2-val.jsonlist]\n", - "2021-07-11 17:59:26.770 INFO in 'deeppavlov.dataset_readers.dstc2_reader'['dstc2_reader'] at line 112: [loading dialogs from /content/DeepPavlov/my_data/dstc2-tst.jsonlist]\n", - "[nltk_data] Downloading package punkt to /root/nltk_data...\n", - "[nltk_data] Package punkt is already up-to-date!\n", - "[nltk_data] Downloading package stopwords to /root/nltk_data...\n", - "[nltk_data] Package stopwords is already up-to-date!\n", - "[nltk_data] Downloading package perluniprops to /root/nltk_data...\n", - "[nltk_data] Package perluniprops is already up-to-date!\n", - "[nltk_data] Downloading package nonbreaking_prefixes to\n", - "[nltk_data] /root/nltk_data...\n", - "[nltk_data] Package nonbreaking_prefixes is already up-to-date!\n", - "2021-07-11 17:59:29.471 WARNING in 'deeppavlov.core.models.serializable'['serializable'] at line 49: No load path is set for Sqlite3Database in 'infer' mode. 
Using save path instead\n", - "2021-07-11 17:59:29.473 INFO in 'deeppavlov.core.data.sqlite_database'['sqlite_database'] at line 66: Loading database from /content/DeepPavlov/my_bot/db.sqlite.\n" - ], - "name": "stderr" - }, - { - "output_type": "error", - "ename": "KeyboardInterrupt", - "evalue": "ignored", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mdeeppavlov\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mevaluate_model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mevaluate_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgobot_config\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m;\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/__init__.py\u001b[0m in \u001b[0;36mevaluate_model\u001b[0;34m(config, download, recursive)\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 33\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mevaluate_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mPath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdownload\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbool\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrecursive\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbool\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 34\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mtrain_evaluate_model_from_config\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mto_train\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdownload\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdownload\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrecursive\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mrecursive\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 35\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/commands/train.py\u001b[0m in \u001b[0;36mtrain_evaluate_model_from_config\u001b[0;34m(config, iterator, to_train, evaluation_targets, to_validate, download, start_epoch_num, recursive)\u001b[0m\n\u001b[1;32m 135\u001b[0m ' \"to_validate\" is deprecated and will be ignored')\n\u001b[1;32m 136\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 137\u001b[0;31m \u001b[0mres\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtrainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miterator\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mevaluation_targets\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mprint_reports\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 138\u001b[0m 
\u001b[0mtrainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_chainer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdestroy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 139\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/trainers/fit_trainer.py\u001b[0m in \u001b[0;36mevaluate\u001b[0;34m(self, iterator, evaluation_targets, print_reports)\u001b[0m\n\u001b[1;32m 254\u001b[0m \u001b[0ma\u001b[0m \u001b[0mdictionary\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mdata\u001b[0m \u001b[0mtypes\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mkeys\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mevaluation\u001b[0m \u001b[0mreports\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mvalues\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 255\u001b[0m \"\"\"\n\u001b[0;32m--> 256\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_load\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 257\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mevaluation_targets\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[0mevaluation_targets\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mevaluation_targets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/trainers/fit_trainer.py\u001b[0m in \u001b[0;36m_load\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 156\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_loaded\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 157\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_chainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdestroy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 158\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_chainer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mbuild_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0;34m'chainer'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchainer_config\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mload_trained\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_saved\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 159\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_loaded\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 160\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/commands/infer.py\u001b[0m in \u001b[0;36mbuild_model\u001b[0;34m(config, mode, load_trained, download, serialized)\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0mcomponent_serialized\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 62\u001b[0;31m \u001b[0mcomponent\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mfrom_params\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcomponent_config\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmode\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mserialized\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcomponent_serialized\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 63\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 64\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;34m'id'\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mcomponent_config\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/common/params.py\u001b[0m in \u001b[0;36mfrom_params\u001b[0;34m(params, mode, serialized, **kwargs)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'mode'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 105\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 106\u001b[0;31m \u001b[0mcomponent\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mobj\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig_params\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 107\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 108\u001b[0m \u001b[0m_refs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mconfig_params\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'id'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcomponent\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/models/go_bot/trippy.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, nlg_manager, save_path, slot_names, class_types, pretrained_bert, bert_config, optimizer_parameters, clip_norm, max_seq_length, dropout_rate, heads_dropout, class_loss_ratio, token_loss_for_nonpointable, refer_loss_for_nonpointable, class_aux_feats_inform, class_aux_feats_ds, database, debug, **kwargs)\u001b[0m\n\u001b[1;32m 122\u001b[0m super().__init__(save_path=save_path, \n\u001b[1;32m 123\u001b[0m \u001b[0moptimizer_parameters\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptimizer_parameters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 124\u001b[0;31m **kwargs)\n\u001b[0m\u001b[1;32m 125\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 126\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0moverrides\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/core/models/torch_model.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, device, optimizer, optimizer_parameters, lr_scheduler, lr_scheduler_parameters, learning_rate_drop_patience, learning_rate_drop_div, load_before_drop, min_learning_rate, *args, **kwargs)\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopt\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdeepcopy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 93\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 94\u001b[0;31m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 95\u001b[0m \u001b[0;31m# we need to switch to eval mode here because by default it's in `train` mode.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 96\u001b[0m \u001b[0;31m# But in case of `interact/build_model` usage, we need to have model in eval mode.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/models/go_bot/trippy.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(self, fname)\u001b[0m\n\u001b[1;32m 136\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpretrained_bert\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 137\u001b[0m self.model = BertForDST.from_pretrained(\n\u001b[0;32m--> 138\u001b[0;31m self.pretrained_bert, config=self.config)\n\u001b[0m\u001b[1;32m 139\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtokenizer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertTokenizerFast\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_pretrained\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpretrained_bert\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 140\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_utils.py\u001b[0m in \u001b[0;36mfrom_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, *model_args, **kwargs)\u001b[0m\n\u001b[1;32m 620\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 621\u001b[0m \u001b[0;31m# Instantiate model.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 622\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcls\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mmodel_args\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mmodel_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 623\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 624\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mstate_dict\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mfrom_tf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/DeepPavlov/deeppavlov/models/go_bot/trippy_bert_for_dst.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, config)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrefer_index\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbert\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertModel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdropout\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDropout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdst_dropout_rate\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdropout_heads\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDropout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdst_heads_dropout_rate\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_bert.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, config)\u001b[0m\n\u001b[1;32m 614\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 615\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0membeddings\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertEmbeddings\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 616\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mencoder\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertEncoder\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 617\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpooler\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertPooler\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 618\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_bert.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, config)\u001b[0m\n\u001b[1;32m 388\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_attentions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_attentions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 389\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_hidden_states\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_hidden_states\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 390\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlayer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mModuleList\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mBertLayer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnum_hidden_layers\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 391\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 392\u001b[0m def forward(\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_bert.py\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 388\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_attentions\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_attentions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 389\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_hidden_states\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput_hidden_states\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 390\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlayer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mModuleList\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mBertLayer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnum_hidden_layers\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 391\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 392\u001b[0m def forward(\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_bert.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, config)\u001b[0m\n\u001b[1;32m 355\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_decoder\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 356\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcrossattention\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertAttention\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 357\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mintermediate\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertIntermediate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 358\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBertOutput\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 359\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/transformers/modeling_bert.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, config)\u001b[0m\n\u001b[1;32m 322\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 323\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 324\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdense\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mLinear\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhidden_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mintermediate_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 
325\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhidden_act\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 326\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mintermediate_act_fn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mACT2FN\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhidden_act\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/linear.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, in_features, out_features, bias, device, dtype)\u001b[0m\n\u001b[1;32m 84\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 85\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mregister_parameter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'bias'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 86\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreset_parameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 87\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mreset_parameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/modules/linear.py\u001b[0m in \u001b[0;36mreset_parameters\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 87\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mreset_parameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 89\u001b[0;31m \u001b[0minit\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkaiming_uniform_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msqrt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 90\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbias\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 91\u001b[0m \u001b[0mfan_in\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0minit\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_calculate_fan_in_and_fan_out\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/torch/nn/init.py\u001b[0m in \u001b[0;36mkaiming_uniform_\u001b[0;34m(tensor, a, mode, nonlinearity)\u001b[0m\n\u001b[1;32m 
393\u001b[0m \u001b[0mbound\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msqrt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m3.0\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mstd\u001b[0m \u001b[0;31m# Calculate uniform bounds from standard deviation\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 394\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mno_grad\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 395\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mtensor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muniform_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mbound\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbound\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 396\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 397\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mKeyboardInterrupt\u001b[0m: " - ] - } - ] + "outputs": [] }, { "cell_type": "markdown", From 3c9df55d491c0e49895e5f1569b4b15e68a612d1 Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Fri, 16 Jul 2021 16:21:25 +0200 Subject: [PATCH 124/151] Fix image name --- examples/trippy_extended_tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/trippy_extended_tutorial.ipynb b/examples/trippy_extended_tutorial.ipynb index 3b60d781f7..430e656f4f 100644 --- a/examples/trippy_extended_tutorial.ipynb +++ b/examples/trippy_extended_tutorial.ipynb @@ -720,7 +720,7 @@ "Below is a sketch for how the full TripPy model has been implemented in DeepPavlov:\n", "\n", " \n", - "![trippy_architecture.png](img/trippy_architecture.jpg)\n", + "![trippy_architecture.png](img/trippy_architecture.png)\n", " \n", "\n", "The above image also includes the input & input processing steps, while the previous sketch starts with the BERT Model (BERTForDST). 
\n",

From d6dbbbb75f5dd774b808dbd672632fd780ea4a7a Mon Sep 17 00:00:00 2001
From: Muennighoff <62820084+Muennighoff@users.noreply.github.com>
Date: Sat, 17 Jul 2021 15:26:44 +0200
Subject: [PATCH 125/151] Enable Data Parallelism

---
 deeppavlov/models/go_bot/trippy.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/deeppavlov/models/go_bot/trippy.py b/deeppavlov/models/go_bot/trippy.py
index ea0ba78ca1..818693df53 100644
--- a/deeppavlov/models/go_bot/trippy.py
+++ b/deeppavlov/models/go_bot/trippy.py
@@ -140,6 +140,10 @@ def load(self, fname=None):
         else:
             raise ConfigError("No pre-trained BERT model is given.")
 
+        # Data Parallelism in case of Multi-GPU setup
+        if torch.cuda.device_count() > 1:
+            print("Let's use", torch.cuda.device_count(), "GPUs!")
+            self.model = torch.nn.DataParallel(self.model)
         self.model.to(self.device)
 
         self.optimizer = getattr(torch.optim, self.optimizer_name)(

From 3c087a80e06b42737c669e40e4a83849ad549f11 Mon Sep 17 00:00:00 2001
From: Muennighoff <62820084+Muennighoff@users.noreply.github.com>
Date: Sat, 17 Jul 2021 18:45:24 +0200
Subject: [PATCH 126/151] Adapt for Multi-GPU Data Parallelism

---
 deeppavlov/models/go_bot/trippy.py | 38 ++++++++++++++++--------------
 1 file changed, 20 insertions(+), 18 deletions(-)

diff --git a/deeppavlov/models/go_bot/trippy.py b/deeppavlov/models/go_bot/trippy.py
index 818693df53..f8d5f321bd 100644
--- a/deeppavlov/models/go_bot/trippy.py
+++ b/deeppavlov/models/go_bot/trippy.py
@@ -17,8 +17,6 @@
 from typing import Dict, Any, List, Optional, Union, Tuple
 from pathlib import Path
 
-from numpy.lib.twodim_base import diag
-
 import torch
 from overrides import overrides
 from transformers.modeling_bert import BertConfig
@@ -28,7 +26,6 @@
 from deeppavlov.core.models.component import Component
 from deeppavlov.core.models.torch_model import TorchModel
 from deeppavlov.core.common.errors import ConfigError
-from deeppavlov.core.commands.utils import expand_path
 from deeppavlov.models.go_bot.nlg.nlg_manager import NLGManagerInterface
 from deeppavlov.models.go_bot.policy.dto.policy_prediction import PolicyPrediction
 from deeppavlov.models.go_bot.trippy_bert_for_dst import BertForDST
@@ -252,7 +249,7 @@ def __call__(self,
             outputs = self.model(**last_turn)
 
             # Update dialogue state logits
-            for slot in self.model.slot_list:
+            for slot in self.slot_names:
                 updates = outputs[2][slot].max(1)[1].cpu()
                 for i, u in enumerate(updates):
                     if u != 0:
@@ -323,9 +320,9 @@ def update_ds(self,
         i = 0
 
         if self.ds is None:
-            self.ds = {slot: 'none' for slot in self.model.slot_list}
+            self.ds = {slot: 'none' for slot in self.slot_names}
 
-        for slot in self.model.slot_list:
+        for slot in self.slot_names:
             class_logits = per_slot_class_logits[slot][i].cpu()
             start_logits = per_slot_start_logits[slot][i].cpu()
             end_logits = per_slot_end_logits[slot][i].cpu()
@@ -338,36 +335,36 @@ def update_ds(self,
 
             # DP / DSTC2 uses dontcare instead of none so we also replace none's wth dontcare
             # Just remove the 2nd part of the or statement to revert to TripPy standard
-            if (class_prediction == self.model.class_types.index('dontcare')) or (class_prediction == self.model.class_types.index('none')):
+            if (class_prediction == self.class_types.index('dontcare')) or (class_prediction == self.class_types.index('none')):
                 self.ds[slot] = 'dontcare'
-            elif class_prediction == self.model.class_types.index('copy_value'):
+            elif class_prediction == self.class_types.index('copy_value'):
                 input_tokens = self.tokenizer.convert_ids_to_tokens(
                     input_ids_unmasked[i])
                 self.ds[slot] = ' '.join(
input_tokens[start_prediction:end_prediction + 1]) self.ds[slot] = re.sub("(^| )##", "", self.ds[slot]) - elif 'true' in self.model.class_types and class_prediction == self.model.class_types.index('true'): + elif 'true' in self.class_types and class_prediction == self.class_types.index('true'): self.ds[slot] = 'true' - elif 'false' in self.model.class_types and class_prediction == self.model.class_types.index('false'): + elif 'false' in self.class_types and class_prediction == self.class_types.index('false'): self.ds[slot] = 'false' - elif class_prediction == self.model.class_types.index('inform'): + elif class_prediction == self.class_types.index('inform'): self.ds[slot] = inform[i][slot] # Referral case. All other slot values need to be seen first in order # to be able to do this correctly. - for slot in self.model.slot_list: + for slot in self.slot_names: class_logits = per_slot_class_logits[slot][i].cpu() refer_logits = per_slot_refer_logits[slot][i].cpu() class_prediction = int(class_logits.argmax()) refer_prediction = int(refer_logits.argmax()) - if 'refer' in self.model.class_types and class_prediction == self.model.class_types.index('refer'): + if 'refer' in self.class_types and class_prediction == self.class_types.index('refer'): # Only slots that have been mentioned before can be referred to. # One can think of a situation where one slot is referred to in the same utterance. # This phenomenon is however currently not properly covered in the training data # label generation process. - self.ds[slot] = self.ds[self.model.slot_list[refer_prediction - 1]] + self.ds[slot] = self.ds[self.slot_names[refer_prediction - 1]] def make_api_call(self) -> None: db_results = [] @@ -399,11 +396,11 @@ def _update_db_result(self): if self.current_db_result is not None: self.db_result = self.current_db_result - def update_ground_truth_db_result_from_context(self, context: Dict[str, Any]): + def update_ground_truth_db_result_from_context(self, context: Dict[str, Any]) -> None: self.current_db_result = context.get('db_result', None) self._update_db_result() - def fill_current_state_with_db_results(self) -> dict: + def fill_current_state_with_db_results(self) -> None: if self.db_result: for k, v in self.db_result.items(): self.ds[k] = str(v) @@ -450,12 +447,17 @@ def train_on_batch(self, # Backpropagation loss = outputs[0] + action_loss = outputs[7] + + if torch.cuda.device_count() > 1: + loss = loss.mean() + action_loss = action_loss.mean() + loss.backward() # Clip gradients torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip_norm) self.optimizer.step() - #self.scheduler.step() - return {"total_loss": loss.cpu().item(), "action_loss": outputs[7].cpu().item()} + return {"total_loss": loss.cpu().item(), "action_loss": action_loss.cpu().item()} def reset(self, user_id: Union[None, str, int] = None) -> None: """ From cf7c017c467568d66c7236c09a553ae019444402 Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Mon, 19 Jul 2021 06:25:03 +0200 Subject: [PATCH 127/151] Clarify data parallelism setup --- examples/trippy_extended_tutorial.ipynb | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/examples/trippy_extended_tutorial.ipynb b/examples/trippy_extended_tutorial.ipynb index 430e656f4f..81a4fe2e00 100644 --- a/examples/trippy_extended_tutorial.ipynb +++ b/examples/trippy_extended_tutorial.ipynb @@ -832,13 +832,12 @@ }, "source": [ "from deeppavlov import train_model\n", - "import time\n", "\n", 
"gobot_config['train']['batch_size'] = 4 # set batch size - Ideally use 8 & set lr to 1e-4 if your GPU allows\n", "gobot_config['train']['max_batches'] = 600 # maximum number of training batches\n", - "gobot_config['train']['val_every_n_batches'] = 40 # evaluate on full 'valid' split every 30 epochs\n", - "gobot_config['train']['log_every_n_batches'] = 40 # evaluate on full 'train' split every 5 batches\n", - "gobot_config['train']['validation_patience'] = 10 # evaluate on full 'valid' split every 30 epochs\n", + "gobot_config['train']['val_every_n_batches'] = 40 # evaluate on full 'valid' split every x epochs\n", + "gobot_config['train']['log_every_n_batches'] = 40 # evaluate on full 'train' split every x batches\n", + "gobot_config['train']['validation_patience'] = 10 # evaluate on full 'valid' split every x epochs\n", "gobot_config['train']['log_on_k_batches'] = 10 # How many batches to use for logging\n", "\n", "gobot_config['chainer']['pipe'][-1]['debug'] = False\n", @@ -932,9 +931,10 @@ "id": "1wZOqmYBTgYB" }, "source": [ - "With settings of `max_batches=800`, valid accuracy `= 0.5` and test accuracy is `~ 0.5`.\n", + "With settings of `max_batches=800`, valid accuracy `= 0.44` and test accuracy is `~ 0.45`.\n", "\n", - "Why is TripPy only as good as go-bot?\n", + "\n", + "If you have the compute, try training the model with a higher batch size, such as 8, or 16. The code automatically detects multiple GPUs and will run Data Parallelism. You will, however, need to upgrade the transformers huggingface version to 4.X and fix two transfomrer import statements in the TripPy code.\n", "\n" ] }, From e64ffd2f39975e41f7b0264a3cdfb60d32278bfe Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Wed, 21 Jul 2021 07:38:11 +0200 Subject: [PATCH 128/151] Make Multi-GPU working --- deeppavlov/models/go_bot/trippy.py | 15 +++++---------- deeppavlov/models/go_bot/trippy_preprocessing.py | 3 +++ 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/deeppavlov/models/go_bot/trippy.py b/deeppavlov/models/go_bot/trippy.py index f8d5f321bd..ca3aa73f0c 100644 --- a/deeppavlov/models/go_bot/trippy.py +++ b/deeppavlov/models/go_bot/trippy.py @@ -81,12 +81,7 @@ def __init__(self, self.nlg_manager = nlg_manager self.save_path = save_path self.max_seq_length = max_seq_length - if not slot_names: - self.slot_names = ["dummy"] - self.has_slots = False - else: - self.slot_names = slot_names - self.has_slots = True + self.slot_names = slot_names self.class_types = class_types self.debug = debug @@ -100,7 +95,7 @@ def __init__(self, self.config.dst_refer_loss_for_nonpointable = refer_loss_for_nonpointable self.config.dst_class_aux_feats_inform = class_aux_feats_inform self.config.dst_class_aux_feats_ds = class_aux_feats_ds - self.config.dst_slot_list = slot_names # This will be empty if there are no slots + self.config.dst_slot_list = self.slot_names self.config.dst_class_types = class_types self.config.dst_class_labels = len(class_types) @@ -236,7 +231,7 @@ def __call__(self, last_turn = batch_to_device(last_turn, self.device) # If there are no slots, remove not needed data - if self.has_slots is False: + if not(self.slot_names): last_turn["start_pos"] = None last_turn["end_pos"] = None last_turn["inform_slot_id"] = None @@ -433,7 +428,7 @@ def train_on_batch(self, # Move to correct device batch = batch_to_device(batch, self.device) - if self.has_slots is False: + if not(self.slot_names): batch["start_pos"] = None batch["end_pos"] = None batch["inform_slot_id"] 
= None @@ -441,7 +436,6 @@ def train_on_batch(self, batch["class_label_id"] = None batch["diag_state"] = None - # Feed through model outputs = self.model(**batch) @@ -449,6 +443,7 @@ def train_on_batch(self, loss = outputs[0] action_loss = outputs[7] + # Average device results in case of multi-gpu setup if torch.cuda.device_count() > 1: loss = loss.mean() action_loss = action_loss.mean() diff --git a/deeppavlov/models/go_bot/trippy_preprocessing.py b/deeppavlov/models/go_bot/trippy_preprocessing.py index fb4258e2d2..c685a927ab 100644 --- a/deeppavlov/models/go_bot/trippy_preprocessing.py +++ b/deeppavlov/models/go_bot/trippy_preprocessing.py @@ -906,6 +906,9 @@ def prepare_trippy_data(batch_dialogues_utterances_contexts_info: List[List[dict features: ??? """ + if not slot_list: + slot_list = ["dummy"] + examples = create_examples(batch_dialogues_utterances_contexts_info, batch_dialogues_utterances_responses_info, slot_list=slot_list, From 2c8f3849b51112f4e2c2c5c8500284247b837610 Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Thu, 29 Jul 2021 18:58:39 +0200 Subject: [PATCH 129/151] Add TripPy+RASA tutorial --- examples/trippy_rasa_tutorial.ipynb | 716 ++++++++++++++++++++++++++++ 1 file changed, 716 insertions(+) create mode 100644 examples/trippy_rasa_tutorial.ipynb diff --git a/examples/trippy_rasa_tutorial.ipynb b/examples/trippy_rasa_tutorial.ipynb new file mode 100644 index 0000000000..3f6bccf4b9 --- /dev/null +++ b/examples/trippy_rasa_tutorial.ipynb @@ -0,0 +1,716 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "Copy of TRIPPY_RASA_TINY", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "9quTzDCNMcl7" + }, + "source": [ + "### You can also run the notebook in [COLAB](https://colab.research.google.com/github/deepmipt/DeepPavlov/blob/master/examples/trippy_rasa_tutorial.ipynb)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mKHBh1lcCwzx" + }, + "source": [ + "## Rasa 2.X with TripPy\n", + "\n", + "This is a tiny demo to show you how you can build a goal oriented bot in DeepPavlov leveraging the RASA format and the TripPy model in ~<10 lines of code!\n", + "\n", + "So without further ado, let's go!" 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "id": "NOHjwHHFSf23", + "outputId": "6eebfd60-77f6-4a31-e64f-fc7fbf8d6e53" + }, + "source": [ + "!git clone -b rulebased_gobot_trippy https://github.com/Muennighoff/DeepPavlov\n", + "%cd DeepPavlov\n", + "!pip install -r requirements.txt" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Cloning into 'DeepPavlov'...\n", + "remote: Enumerating objects: 58548, done.\u001b[K\n", + "remote: Counting objects: 100% (1491/1491), done.\u001b[K\n", + "remote: Compressing objects: 100% (492/492), done.\u001b[K\n", + "remote: Total 58548 (delta 1133), reused 1284 (delta 986), pack-reused 57057\u001b[K\n", + "Receiving objects: 100% (58548/58548), 37.81 MiB | 32.11 MiB/s, done.\n", + "Resolving deltas: 100% (44978/44978), done.\n", + "/content/DeepPavlov\n", + "Collecting aio-pika==6.4.1\n", + " Downloading aio_pika-6.4.1-py3-none-any.whl (40 kB)\n", + "\u001b[K |████████████████████████████████| 40 kB 27 kB/s \n", + "\u001b[?25hCollecting Cython==0.29.14\n", + " Downloading Cython-0.29.14-cp37-cp37m-manylinux1_x86_64.whl (2.1 MB)\n", + "\u001b[K |████████████████████████████████| 2.1 MB 14.2 MB/s \n", + "\u001b[?25hCollecting fastapi==0.47.1\n", + " Downloading fastapi-0.47.1-py3-none-any.whl (43 kB)\n", + "\u001b[K |████████████████████████████████| 43 kB 2.2 MB/s \n", + "\u001b[?25hRequirement already satisfied: filelock==3.0.12 in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 4)) (3.0.12)\n", + "Collecting h5py==2.10.0\n", + " Downloading h5py-2.10.0-cp37-cp37m-manylinux1_x86_64.whl (2.9 MB)\n", + "\u001b[K |████████████████████████████████| 2.9 MB 57.3 MB/s \n", + "\u001b[?25hCollecting nltk==3.4.5\n", + " Downloading nltk-3.4.5.zip (1.5 MB)\n", + "\u001b[K |████████████████████████████████| 1.5 MB 54.9 MB/s \n", + "\u001b[?25hCollecting numpy==1.18.0\n", + " Downloading numpy-1.18.0-cp37-cp37m-manylinux1_x86_64.whl (20.1 MB)\n", + "\u001b[K |████████████████████████████████| 20.1 MB 1.4 MB/s \n", + "\u001b[?25hCollecting overrides==2.7.0\n", + " Downloading overrides-2.7.0.tar.gz (4.5 kB)\n", + "Collecting pandas==0.25.3\n", + " Downloading pandas-0.25.3-cp37-cp37m-manylinux1_x86_64.whl (10.4 MB)\n", + "\u001b[K |████████████████████████████████| 10.4 MB 48.2 MB/s \n", + "\u001b[?25hCollecting prometheus-client==0.7.1\n", + " Downloading prometheus_client-0.7.1.tar.gz (38 kB)\n", + "Collecting pytz==2019.1\n", + " Downloading pytz-2019.1-py2.py3-none-any.whl (510 kB)\n", + "\u001b[K |████████████████████████████████| 510 kB 56.6 MB/s \n", + "\u001b[?25hCollecting pydantic==1.3\n", + " Downloading pydantic-1.3-cp37-cp37m-manylinux2010_x86_64.whl (7.3 MB)\n", + "\u001b[K |████████████████████████████████| 7.3 MB 30.8 MB/s \n", + "\u001b[?25hCollecting pymorphy2==0.8\n", + " Downloading pymorphy2-0.8-py2.py3-none-any.whl (46 kB)\n", + "\u001b[K |████████████████████████████████| 46 kB 3.5 MB/s \n", + "\u001b[?25hCollecting pymorphy2-dicts-ru\n", + " Downloading pymorphy2_dicts_ru-2.4.417127.4579844-py2.py3-none-any.whl (8.2 MB)\n", + "\u001b[K |████████████████████████████████| 8.2 MB 24.1 MB/s \n", + "\u001b[?25hCollecting pyopenssl==19.1.0\n", + " Downloading pyOpenSSL-19.1.0-py2.py3-none-any.whl (53 kB)\n", + "\u001b[K |████████████████████████████████| 53 kB 2.6 MB/s \n", + "\u001b[?25hCollecting pytelegrambotapi==3.6.7\n", + " Downloading pyTelegramBotAPI-3.6.7.tar.gz (65 kB)\n", + "\u001b[K 
|████████████████████████████████| 65 kB 4.8 MB/s \n", + "\u001b[?25hCollecting requests==2.22.0\n", + " Downloading requests-2.22.0-py2.py3-none-any.whl (57 kB)\n", + "\u001b[K |████████████████████████████████| 57 kB 7.0 MB/s \n", + "\u001b[?25hCollecting ruamel.yaml==0.15.100\n", + " Downloading ruamel.yaml-0.15.100-cp37-cp37m-manylinux1_x86_64.whl (654 kB)\n", + "\u001b[K |████████████████████████████████| 654 kB 47.3 MB/s \n", + "\u001b[?25hCollecting rusenttokenize==0.0.5\n", + " Downloading rusenttokenize-0.0.5-py3-none-any.whl (10 kB)\n", + "Collecting scikit-learn==0.21.2\n", + " Downloading scikit_learn-0.21.2-cp37-cp37m-manylinux1_x86_64.whl (6.7 MB)\n", + "\u001b[K |████████████████████████████████| 6.7 MB 23.6 MB/s \n", + "\u001b[?25hRequirement already satisfied: scipy==1.4.1 in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 21)) (1.4.1)\n", + "Requirement already satisfied: tqdm==4.41.1 in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 22)) (4.41.1)\n", + "Requirement already satisfied: click==7.1.2 in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 23)) (7.1.2)\n", + "Collecting uvicorn==0.11.7\n", + " Downloading uvicorn-0.11.7-py3-none-any.whl (43 kB)\n", + "\u001b[K |████████████████████████████████| 43 kB 2.4 MB/s \n", + "\u001b[?25hCollecting sacremoses==0.0.35\n", + " Downloading sacremoses-0.0.35.tar.gz (859 kB)\n", + "\u001b[K |████████████████████████████████| 859 kB 54.7 MB/s \n", + "\u001b[?25hCollecting uvloop==0.14.0\n", + " Downloading uvloop-0.14.0-cp37-cp37m-manylinux2010_x86_64.whl (3.8 MB)\n", + "\u001b[K |████████████████████████████████| 3.8 MB 29.7 MB/s \n", + "\u001b[?25hCollecting yarl\n", + " Downloading yarl-1.6.3-cp37-cp37m-manylinux2014_x86_64.whl (294 kB)\n", + "\u001b[K |████████████████████████████████| 294 kB 64.4 MB/s \n", + "\u001b[?25hCollecting aiormq<4,>=3.2.0\n", + " Downloading aiormq-3.3.1-py3-none-any.whl (28 kB)\n", + "Collecting starlette<=0.12.9,>=0.12.9\n", + " Downloading starlette-0.12.9.tar.gz (46 kB)\n", + "\u001b[K |████████████████████████████████| 46 kB 4.0 MB/s \n", + "\u001b[?25hRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from h5py==2.10.0->-r requirements.txt (line 5)) (1.15.0)\n", + "Requirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.7/dist-packages (from pandas==0.25.3->-r requirements.txt (line 9)) (2.8.1)\n", + "Requirement already satisfied: docopt>=0.6 in /usr/local/lib/python3.7/dist-packages (from pymorphy2==0.8->-r requirements.txt (line 13)) (0.6.2)\n", + "Collecting dawg-python>=0.7\n", + " Downloading DAWG_Python-0.7.2-py2.py3-none-any.whl (11 kB)\n", + "Collecting pymorphy2-dicts<3.0,>=2.4\n", + " Downloading pymorphy2_dicts-2.4.393442.3710985-py2.py3-none-any.whl (7.1 MB)\n", + "\u001b[K |████████████████████████████████| 7.1 MB 18.7 MB/s \n", + "\u001b[?25hCollecting cryptography>=2.8\n", + " Downloading cryptography-3.4.7-cp36-abi3-manylinux2014_x86_64.whl (3.2 MB)\n", + "\u001b[K |████████████████████████████████| 3.2 MB 37.0 MB/s \n", + "\u001b[?25hCollecting idna<2.9,>=2.5\n", + " Downloading idna-2.8-py2.py3-none-any.whl (58 kB)\n", + "\u001b[K |████████████████████████████████| 58 kB 7.3 MB/s \n", + "\u001b[?25hRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests==2.22.0->-r requirements.txt (line 17)) (3.0.4)\n", + "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in 
/usr/local/lib/python3.7/dist-packages (from requests==2.22.0->-r requirements.txt (line 17)) (1.24.3)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests==2.22.0->-r requirements.txt (line 17)) (2021.5.30)\n", + "Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn==0.21.2->-r requirements.txt (line 20)) (1.0.1)\n", + "Collecting websockets==8.*\n", + " Downloading websockets-8.1-cp37-cp37m-manylinux2010_x86_64.whl (79 kB)\n", + "\u001b[K |████████████████████████████████| 79 kB 9.7 MB/s \n", + "\u001b[?25hCollecting h11<0.10,>=0.8\n", + " Downloading h11-0.9.0-py2.py3-none-any.whl (53 kB)\n", + "\u001b[K |████████████████████████████████| 53 kB 2.9 MB/s \n", + "\u001b[?25hCollecting httptools==0.1.*\n", + " Downloading httptools-0.1.2-cp37-cp37m-manylinux1_x86_64.whl (219 kB)\n", + "\u001b[K |████████████████████████████████| 219 kB 66.8 MB/s \n", + "\u001b[?25hCollecting pamqp==2.3.0\n", + " Downloading pamqp-2.3.0-py2.py3-none-any.whl (28 kB)\n", + "Requirement already satisfied: cffi>=1.12 in /usr/local/lib/python3.7/dist-packages (from cryptography>=2.8->pyopenssl==19.1.0->-r requirements.txt (line 15)) (1.14.6)\n", + "Requirement already satisfied: pycparser in /usr/local/lib/python3.7/dist-packages (from cffi>=1.12->cryptography>=2.8->pyopenssl==19.1.0->-r requirements.txt (line 15)) (2.20)\n", + "Requirement already satisfied: typing-extensions>=3.7.4 in /usr/local/lib/python3.7/dist-packages (from yarl->aio-pika==6.4.1->-r requirements.txt (line 1)) (3.7.4.3)\n", + "Collecting multidict>=4.0\n", + " Downloading multidict-5.1.0-cp37-cp37m-manylinux2014_x86_64.whl (142 kB)\n", + "\u001b[K |████████████████████████████████| 142 kB 64.8 MB/s \n", + "\u001b[?25hBuilding wheels for collected packages: nltk, overrides, prometheus-client, pytelegrambotapi, sacremoses, starlette\n", + " Building wheel for nltk (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for nltk: filename=nltk-3.4.5-py3-none-any.whl size=1449921 sha256=1f3a87e91137464582ed8de4d8659dcbeada0591e251349b7638090f818e4169\n", + " Stored in directory: /root/.cache/pip/wheels/48/8b/7f/473521e0c731c6566d631b281f323842bbda9bd819eb9a3ead\n", + " Building wheel for overrides (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for overrides: filename=overrides-2.7.0-py3-none-any.whl size=5604 sha256=b94112d39c79de8d8b031881d2db4eaf5572ab76012d85e15b9d4da0631445c7\n", + " Stored in directory: /root/.cache/pip/wheels/c9/87/45/bfdacf6c3b8233b6e8d519edcbd1cf297ad5ff5f0bf84bb9c1\n", + " Building wheel for prometheus-client (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for prometheus-client: filename=prometheus_client-0.7.1-py3-none-any.whl size=41403 sha256=86fe4ce2f36bb2690f6121893db85d1aba8f0dcc4bf88f3eefb697db79b1c1b2\n", + " Stored in directory: /root/.cache/pip/wheels/30/0c/26/59ba285bf65dc79d195e9b25e2ddde4c61070422729b0cd914\n", + " Building wheel for pytelegrambotapi (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for pytelegrambotapi: filename=pyTelegramBotAPI-3.6.7-py3-none-any.whl size=47176 sha256=ada941f82344ead1cc54938b884a95e863564dc65b7f9f1aab8adc364bbfd33c\n", + " Stored in directory: /root/.cache/pip/wheels/7f/7c/54/8eddf2369ef1b9190e2ee6dc2b40df54b6c65529a38790fdd4\n", + " Building wheel for sacremoses (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n", + " Created wheel for sacremoses: filename=sacremoses-0.0.35-py3-none-any.whl size=883990 sha256=038df4dae589f83ecc999590b16df3548e3dddcee5c434dc7089fbf91ee64fd3\n", + " Stored in directory: /root/.cache/pip/wheels/d1/ff/0e/e00ff1e22100702ac8b24e709551ae0fb29db9ffc843510a64\n", + " Building wheel for starlette (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for starlette: filename=starlette-0.12.9-py3-none-any.whl size=57252 sha256=dde82eea35e6d65fbfed9525f3f4d60b26b6ee69a0a823aa3839fd12bd855d77\n", + " Stored in directory: /root/.cache/pip/wheels/e8/78/be/f57ed5aed7cd222abdb24e3186b5c9f1074184fcc0a295102b\n", + "Successfully built nltk overrides prometheus-client pytelegrambotapi sacremoses starlette\n", + "Installing collected packages: multidict, idna, yarl, pamqp, numpy, websockets, uvloop, starlette, requests, pytz, pymorphy2-dicts, pydantic, httptools, h11, dawg-python, cryptography, aiormq, uvicorn, scikit-learn, sacremoses, rusenttokenize, ruamel.yaml, pytelegrambotapi, pyopenssl, pymorphy2-dicts-ru, pymorphy2, prometheus-client, pandas, overrides, nltk, h5py, fastapi, Cython, aio-pika\n", + " Attempting uninstall: idna\n", + " Found existing installation: idna 2.10\n", + " Uninstalling idna-2.10:\n", + " Successfully uninstalled idna-2.10\n", + " Attempting uninstall: numpy\n", + " Found existing installation: numpy 1.19.5\n", + " Uninstalling numpy-1.19.5:\n", + " Successfully uninstalled numpy-1.19.5\n", + " Attempting uninstall: requests\n", + " Found existing installation: requests 2.23.0\n", + " Uninstalling requests-2.23.0:\n", + " Successfully uninstalled requests-2.23.0\n", + " Attempting uninstall: pytz\n", + " Found existing installation: pytz 2018.9\n", + " Uninstalling pytz-2018.9:\n", + " Successfully uninstalled pytz-2018.9\n", + " Attempting uninstall: scikit-learn\n", + " Found existing installation: scikit-learn 0.22.2.post1\n", + " Uninstalling scikit-learn-0.22.2.post1:\n", + " Successfully uninstalled scikit-learn-0.22.2.post1\n", + " Attempting uninstall: prometheus-client\n", + " Found existing installation: prometheus-client 0.11.0\n", + " Uninstalling prometheus-client-0.11.0:\n", + " Successfully uninstalled prometheus-client-0.11.0\n", + " Attempting uninstall: pandas\n", + " Found existing installation: pandas 1.1.5\n", + " Uninstalling pandas-1.1.5:\n", + " Successfully uninstalled pandas-1.1.5\n", + " Attempting uninstall: nltk\n", + " Found existing installation: nltk 3.2.5\n", + " Uninstalling nltk-3.2.5:\n", + " Successfully uninstalled nltk-3.2.5\n", + " Attempting uninstall: h5py\n", + " Found existing installation: h5py 3.1.0\n", + " Uninstalling h5py-3.1.0:\n", + " Successfully uninstalled h5py-3.1.0\n", + " Attempting uninstall: Cython\n", + " Found existing installation: Cython 0.29.23\n", + " Uninstalling Cython-0.29.23:\n", + " Successfully uninstalled Cython-0.29.23\n", + "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\n", + "xarray 0.18.2 requires pandas>=1.0, but you have pandas 0.25.3 which is incompatible.\n", + "tensorflow 2.5.0 requires h5py~=3.1.0, but you have h5py 2.10.0 which is incompatible.\n", + "tensorflow 2.5.0 requires numpy~=1.19.2, but you have numpy 1.18.0 which is incompatible.\n", + "kapre 0.3.5 requires numpy>=1.18.5, but you have numpy 1.18.0 which is incompatible.\n", + "google-colab 1.0.0 requires pandas~=1.1.0; python_version >= \"3.0\", but you have pandas 0.25.3 which is incompatible.\n", + "google-colab 1.0.0 requires requests~=2.23.0, but you have requests 2.22.0 which is incompatible.\n", + "fbprophet 0.7.1 requires pandas>=1.0.4, but you have pandas 0.25.3 which is incompatible.\n", + "datascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible.\n", + "albumentations 0.1.12 requires imgaug<0.2.7,>=0.2.5, but you have imgaug 0.2.9 which is incompatible.\u001b[0m\n", + "Successfully installed Cython-0.29.14 aio-pika-6.4.1 aiormq-3.3.1 cryptography-3.4.7 dawg-python-0.7.2 fastapi-0.47.1 h11-0.9.0 h5py-2.10.0 httptools-0.1.2 idna-2.8 multidict-5.1.0 nltk-3.4.5 numpy-1.18.0 overrides-2.7.0 pamqp-2.3.0 pandas-0.25.3 prometheus-client-0.7.1 pydantic-1.3 pymorphy2-0.8 pymorphy2-dicts-2.4.393442.3710985 pymorphy2-dicts-ru-2.4.417127.4579844 pyopenssl-19.1.0 pytelegrambotapi-3.6.7 pytz-2019.1 requests-2.22.0 ruamel.yaml-0.15.100 rusenttokenize-0.0.5 sacremoses-0.0.35 scikit-learn-0.21.2 starlette-0.12.9 uvicorn-0.11.7 uvloop-0.14.0 websockets-8.1 yarl-1.6.3\n" + ], + "name": "stdout" + }, + { + "output_type": "display_data", + "data": { + "application/vnd.colab-display-data+json": { + "pip_warning": { + "packages": [ + "numpy", + "pandas", + "pytz" + ] + } + } + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "hm3VCqMNJAOD" + }, + "source": [ + "STORIES_PATH=\"stories.yml\"\n", + "NLU_PATH=\"nlu.yml\"\n", + "DOMAIN_PATH=\"domain.yml\"" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SNC-jU9pED8p" + }, + "source": [ + "### `Stories.yml`" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "edx8nWiSk5RV", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "cb38ee9c-87d8-44df-ae58-288569a6db24" + }, + "source": [ + "%%writefile {STORIES_PATH}\n", + "\n", + "stories:\n", + "- story: bla bla\n", + " steps:\n", + " - intent: user_hey\n", + " - action: system_hey\n", + " - intent: user_howru\n", + " - action: system_fine\n", + " - intent: user_bye\n", + " - action: system_bye\n", + "- story: heybye\n", + " steps:\n", + " - intent: user_hey\n", + " - action: system_hey\n", + " - intent: user_bye\n", + " - action: system_bye\n", + "- story: howru\n", + " steps:\n", + " - intent: user_howru\n", + " - action: system_fine\n" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Writing stories.yml\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "KXQdDMrql5VE" + }, + "source": [ + "!cp {STORIES_PATH} stories-trn.yml\n", + "!cp {STORIES_PATH} stories-tst.yml \n", + "!cp {STORIES_PATH} stories-val.yml" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KXVajIbEFKbC" + }, + "source": [ + "### `NLU.yml`" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "tpXxp5Tlk-cS", + "colab": { + "base_uri": 
"https://localhost:8080/" + }, + "outputId": "11eba142-37fa-4044-d515-b1946814f4d8" + }, + "source": [ + "%%writefile {NLU_PATH}\n", + "version: \"2.0\"\n", + "dp_version: \"2.0\"\n", + "\n", + "nlu:\n", + " # regex intent\n", + " - regex: user_hey\n", + " examples: |\n", + " - hey\n", + " - (hello|hi)\n", + "\n", + " - intent: user_howru\n", + " examples: |\n", + " - how are you\n", + " - how u doin?\n", + " \n", + " # DeepPavlov IntentCatcher examples\n", + " - intent: user_bye\n", + " examples: | \n", + " - farewell\n", + " regex_examples: |\n", + " - (((good)|(bye-)){0,1}bye)|(see you (later){0,1})\n" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Writing nlu.yml\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HhinLEavFNH9" + }, + "source": [ + "###`Domain.yml`" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "1k_L_-PVk-td", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "f5d9bc44-58e4-428a-ceda-87fd5bcaa182" + }, + "source": [ + "%%writefile {DOMAIN_PATH}\n", + "actions:\n", + "- system_fine\n", + "- system_hey\n", + "- system_bye\n", + "\n", + "intents:\n", + "- user_hey\n", + "- user_bye\n", + "- user_howru\n", + "\n", + "responses:\n", + " system_fine:\n", + " - text: im fine\n", + " system_hey:\n", + " - text: hey you!\n", + " system_bye:\n", + " - text: bye!" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Writing domain.yml\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "t8NleRtjSap8", + "outputId": "cf7e0e31-58d9-4b57-b2f1-bbcbc96eda9d" + }, + "source": [ + "%%writefile deeppavlov/configs/go_bot/trippy_md_yaml_minimal.json\n", + "{\n", + " \"dataset_reader\": {\n", + " \"class_name\": \"md_yaml_dialogs_reader\",\n", + " \"data_path\": \"{DATA_PATH}\",\n", + " \"fmt\": \"yml\"\n", + " },\n", + " \"dataset_iterator\": {\n", + " \"class_name\": \"md_yaml_dialogs_iterator\"\n", + " },\n", + " \"chainer\": {\n", + " \"in\": [\"x\"],\n", + " \"in_y\": [\"y\"],\n", + " \"out\": [\"y_predicted\"],\n", + " \"pipe\": [\n", + " {\n", + " \"class_name\": \"trippy\",\n", + " \"in\": [\"x\"],\n", + " \"in_y\": [\"y\"],\n", + " \"out\": [\"y_predicted\"],\n", + " \"load_path\": \"{MODEL_PATH}/model\",\n", + " \"save_path\": \"{MODEL_PATH}/model\",\n", + " \"pretrained_bert\": \"bert-base-uncased\",\n", + " \"max_seq_length\": 180,\n", + " \"debug\": false,\n", + " \"database\": null,\n", + " \"nlg_manager\": {\n", + " \"class_name\": \"gobot_json_nlg_manager\",\n", + " \"data_path\": \"{DATA_PATH}\",\n", + " \"dataset_reader_class\": \"md_yaml_dialogs_reader\",\n", + " \"actions2slots_path\": \"{DATA_PATH}/dstc2-actions2slots.json\",\n", + " \"api_call_action\": null\n", + " },\n", + " \"api_call_action\": null,\n", + " \"optimizer_parameters\": {\"lr\": 1e-5, \"eps\": 1e-6}\n", + " }\n", + " ]\n", + " },\n", + " \"train\": {\n", + " \"batch_size\": 32,\n", + " \"max_batches\": 600,\n", + " \n", + " \"metrics\": [\"per_item_dialog_accuracy\"],\n", + " \"validation_patience\": 10,\n", + " \"val_every_n_batches\": 15,\n", + " \"val_every_n_epochs\": -1,\n", + " \n", + " \"log_every_n_batches\": 15,\n", + " \"log_on_k_batches\": 10,\n", + " \n", + " \"validate_first\": true,\n", + " \n", + " \"show_examples\": false,\n", + " \"evaluation_targets\": [\n", + " \"valid\",\n", + " \"test\"\n", + " ]\n", + " },\n", + " 
\"metadata\": {\n", + " \"variables\": {\n", + " \"ROOT_PATH\": \"~/.deeppavlov\",\n", + " \"CONFIGS_PATH\": \"{DEEPPAVLOV_PATH}/configs\",\n", + " \"DOWNLOADS_PATH\": \"{ROOT_PATH}/downloads\",\n", + " \"DATA_PATH\": \".\",\n", + " \"MODELS_PATH\": \"{ROOT_PATH}/models\",\n", + " \"MODEL_PATH\": \".\"\n", + " }\n", + " }\n", + " }\n", + " " + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Overwriting deeppavlov/configs/go_bot/trippy_md_yaml_minimal.json\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "U6_XCLkoVHZk", + "outputId": "0df75327-cfde-4835-adf4-7915b28f928c" + }, + "source": [ + "!python -m deeppavlov install deeppavlov/configs/go_bot/trippy_md_yaml_minimal.json" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Collecting transformers==2.9.1\n", + " Downloading transformers-2.9.1-py3-none-any.whl (641 kB)\n", + "\u001b[?25l\r\u001b[K |▌ | 10 kB 30.1 MB/s eta 0:00:01\r\u001b[K |█ | 20 kB 33.1 MB/s eta 0:00:01\r\u001b[K |█▌ | 30 kB 19.7 MB/s eta 0:00:01\r\u001b[K |██ | 40 kB 16.2 MB/s eta 0:00:01\r\u001b[K |██▌ | 51 kB 8.6 MB/s eta 0:00:01\r\u001b[K |███ | 61 kB 8.5 MB/s eta 0:00:01\r\u001b[K |███▋ | 71 kB 8.7 MB/s eta 0:00:01\r\u001b[K |████ | 81 kB 9.6 MB/s eta 0:00:01\r\u001b[K |████▋ | 92 kB 10.0 MB/s eta 0:00:01\r\u001b[K |█████ | 102 kB 8.2 MB/s eta 0:00:01\r\u001b[K |█████▋ | 112 kB 8.2 MB/s eta 0:00:01\r\u001b[K |██████▏ | 122 kB 8.2 MB/s eta 0:00:01\r\u001b[K |██████▋ | 133 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████▏ | 143 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████▋ | 153 kB 8.2 MB/s eta 0:00:01\r\u001b[K |████████▏ | 163 kB 8.2 MB/s eta 0:00:01\r\u001b[K |████████▊ | 174 kB 8.2 MB/s eta 0:00:01\r\u001b[K |█████████▏ | 184 kB 8.2 MB/s eta 0:00:01\r\u001b[K |█████████▊ | 194 kB 8.2 MB/s eta 0:00:01\r\u001b[K |██████████▏ | 204 kB 8.2 MB/s eta 0:00:01\r\u001b[K |██████████▊ | 215 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████████▎ | 225 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████████▊ | 235 kB 8.2 MB/s eta 0:00:01\r\u001b[K |████████████▎ | 245 kB 8.2 MB/s eta 0:00:01\r\u001b[K |████████████▊ | 256 kB 8.2 MB/s eta 0:00:01\r\u001b[K |█████████████▎ | 266 kB 8.2 MB/s eta 0:00:01\r\u001b[K |█████████████▉ | 276 kB 8.2 MB/s eta 0:00:01\r\u001b[K |██████████████▎ | 286 kB 8.2 MB/s eta 0:00:01\r\u001b[K |██████████████▉ | 296 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████████████▎ | 307 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████████████▉ | 317 kB 8.2 MB/s eta 0:00:01\r\u001b[K |████████████████▍ | 327 kB 8.2 MB/s eta 0:00:01\r\u001b[K |████████████████▉ | 337 kB 8.2 MB/s eta 0:00:01\r\u001b[K |█████████████████▍ | 348 kB 8.2 MB/s eta 0:00:01\r\u001b[K |█████████████████▉ | 358 kB 8.2 MB/s eta 0:00:01\r\u001b[K |██████████████████▍ | 368 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████████████████ | 378 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████████████████▍ | 389 kB 8.2 MB/s eta 0:00:01\r\u001b[K |████████████████████ | 399 kB 8.2 MB/s eta 0:00:01\r\u001b[K |████████████████████▍ | 409 kB 8.2 MB/s eta 0:00:01\r\u001b[K |█████████████████████ | 419 kB 8.2 MB/s eta 0:00:01\r\u001b[K |█████████████████████▍ | 430 kB 8.2 MB/s eta 0:00:01\r\u001b[K |██████████████████████ | 440 kB 8.2 MB/s eta 0:00:01\r\u001b[K |██████████████████████▌ | 450 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████ | 460 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████▌ | 471 kB 8.2 MB/s eta 0:00:01\r\u001b[K 
|████████████████████████ | 481 kB 8.2 MB/s eta 0:00:01\r\u001b[K |████████████████████████▌ | 491 kB 8.2 MB/s eta 0:00:01\r\u001b[K |█████████████████████████ | 501 kB 8.2 MB/s eta 0:00:01\r\u001b[K |█████████████████████████▌ | 512 kB 8.2 MB/s eta 0:00:01\r\u001b[K |██████████████████████████ | 522 kB 8.2 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▌ | 532 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████████ | 542 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████████▋ | 552 kB 8.2 MB/s eta 0:00:01\r\u001b[K |████████████████████████████ | 563 kB 8.2 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▋ | 573 kB 8.2 MB/s eta 0:00:01\r\u001b[K |█████████████████████████████ | 583 kB 8.2 MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▋ | 593 kB 8.2 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▏ | 604 kB 8.2 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▋ | 614 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▏| 624 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▋| 634 kB 8.2 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 641 kB 8.2 MB/s \n", + "\u001b[?25hRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (2.22.0)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (3.0.12)\n", + "Collecting tokenizers==0.7.0\n", + " Downloading tokenizers-0.7.0-cp37-cp37m-manylinux1_x86_64.whl (5.6 MB)\n", + "\u001b[K |████████████████████████████████| 5.6 MB 10.7 MB/s \n", + "\u001b[?25hRequirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (4.41.1)\n", + "Requirement already satisfied: sacremoses in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (0.0.35)\n", + "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (2019.12.20)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from transformers==2.9.1) (1.18.0)\n", + "Collecting sentencepiece\n", + " Downloading sentencepiece-0.1.96-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)\n", + "\u001b[K |████████████████████████████████| 1.2 MB 56.2 MB/s \n", + "\u001b[?25hRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==2.9.1) (3.0.4)\n", + "Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==2.9.1) (2.8)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==2.9.1) (2021.5.30)\n", + "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers==2.9.1) (1.24.3)\n", + "Requirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers==2.9.1) (1.0.1)\n", + "Requirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers==2.9.1) (7.1.2)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers==2.9.1) (1.15.0)\n", + "Installing collected packages: tokenizers, sentencepiece, transformers\n", + "Successfully installed sentencepiece-0.1.96 tokenizers-0.7.0 transformers-2.9.1\n", + "Requirement already 
satisfied: torch==1.9.0 in /usr/local/lib/python3.7/dist-packages (1.9.0+cu102)\n", + "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch==1.9.0) (3.7.4.3)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "25d1YaLzSami" + }, + "source": [ + "from deeppavlov import configs\n", + "from deeppavlov.core.common.file import read_json\n", + "\n", + "gobot_config = read_json(configs.go_bot.trippy_md_yaml_minimal)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "2_-oecu8Sajn" + }, + "source": [ + "from deeppavlov import train_model\n", + "\n", + "train_model(gobot_config, download=True)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "ARGukrcQSahZ" + }, + "source": [ + "from deeppavlov import build_model\n", + "bot = build_model(gobot_config)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "7h3R_oB3Saek", + "outputId": "9e389b6d-e71f-4d7c-bf91-b65b137ca61a" + }, + "source": [ + "bot.reset()\n", + "bot([\"hello\"])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['hey you!']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 15 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "eFe3ZWbGSabt", + "outputId": "115cd9f7-96cc-4236-d415-949c7e82c20c" + }, + "source": [ + "bot([\"how r u?\"])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['im fine']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 16 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "c7Jsh8HtSaY6", + "outputId": "c3ede16c-d621-4ef3-fb28-9864035d7f1a" + }, + "source": [ + "bot([\"ok goodbye\"])" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[['bye!']]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 17 + } + ] + } + ] +} \ No newline at end of file From 142f6f7e3b9b135c5d4f096528bcddf59605a5b2 Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Sun, 1 Aug 2021 14:07:24 +0200 Subject: [PATCH 130/151] Enable custom APIs --- .../md_yaml_dialogs_iterator.py | 22 +++++++++++++++---- deeppavlov/models/go_bot/trippy.py | 13 ++++++++++- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py index ffaddbcc4d..48e0ae4b7e 100644 --- a/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py +++ b/deeppavlov/dataset_iterators/md_yaml_dialogs_iterator.py @@ -166,12 +166,17 @@ def __init__(self, turn: Turn, nlu: Intents, def _clarify_slots_values(self, slots_dstc2formatted): slots_key = [] + #print("self.intents.slot_name2text2value", self.intents.slot_name2text2value) for slot_name, slot_value in slots_dstc2formatted: - slot_actual_value = self.intents.slot_name2text2value.get(slot_name, - {}).get( - slot_value, slot_value) + #print("PRSN, SAV", (slot_name, slot_value)) + #slot_actual_value = self.intents.slot_name2text2value.get(slot_name, + # {}).get( + # 
slot_value, slot_value) + slot_actual_value = slot_value + print("SN, SAV", (slot_name, slot_actual_value)) slots_key.append((slot_name, slot_actual_value)) slots_key = tuple(sorted(slots_key)) + print("SK", slots_key) return slots_key def parse_user_intent(self): @@ -208,6 +213,15 @@ def choose_slots_for_whom_exists_text(self, slots_actual_values, possible_keys = sorted(possible_keys, key=lambda action_s: action_s.count('+')) for possible_action_key in possible_keys: + print("POS KEYS", possible_keys) + print("INFO:", self.intents.intent2slots2text) + print("SAV", slots_actual_values) + print("this.intents.intent2slots2text keys",self.intents.intent2slots2text.keys()) + + if possible_action_key not in self.intents.intent2slots2text: + print("NOTFOUND:", possible_action_key) + continue + if self.intents.intent2slots2text[possible_action_key].get( slots_actual_values): slots_used_values = slots_actual_values @@ -234,7 +248,7 @@ def choose_slots_for_whom_exists_text(self, slots_actual_values, e[0] not in slots_lazy_key] return slots_to_exclude, slots_used_values, possible_action_key - raise KeyError("no possible NLU candidates found") + #raise KeyError("no possible NLU candidates found") def user_action2text(self, user_action: str, slots_li=None): """ diff --git a/deeppavlov/models/go_bot/trippy.py b/deeppavlov/models/go_bot/trippy.py index ca3aa73f0c..a35d42cf5e 100644 --- a/deeppavlov/models/go_bot/trippy.py +++ b/deeppavlov/models/go_bot/trippy.py @@ -14,7 +14,7 @@ import re from logging import getLogger -from typing import Dict, Any, List, Optional, Union, Tuple +from typing import Dict, Any, List, Optional, Union, Tuple, Callable from pathlib import Path import torch @@ -75,6 +75,8 @@ def __init__(self, class_aux_feats_inform: bool = True, class_aux_feats_ds: bool = True, database: Component = None, + make_api_call: Callable = None, + fill_current_state_with_db_results: Callable = None, debug: bool = False, **kwargs) -> None: @@ -111,6 +113,15 @@ def __init__(self, self.database = database self.clip_norm = clip_norm + + # If the user as provided a make_api_call function + # and a fill_current_state_with_db_results function use them + if make_api_call: + # Override the functions for TripPy + TripPy.make_api_call = make_api_call + TripPy.fill_current_state_with_db_results = fill_current_state_with_db_results + + super().__init__(save_path=save_path, optimizer_parameters=optimizer_parameters, **kwargs) From 43e29f2cab2daadfaf3b8e239840601e73f68ec3 Mon Sep 17 00:00:00 2001 From: Muennighoff <62820084+Muennighoff@users.noreply.github.com> Date: Wed, 4 Aug 2021 20:36:54 +0200 Subject: [PATCH 131/151] Add advanced GMaps Example --- examples/gobot_extended_tutorial.ipynb | 3944 +++++++++++++++--------- examples/img/trippy_telegram.jpg | Bin 0 -> 123247 bytes 2 files changed, 2559 insertions(+), 1385 deletions(-) create mode 100644 examples/img/trippy_telegram.jpg diff --git a/examples/gobot_extended_tutorial.ipynb b/examples/gobot_extended_tutorial.ipynb index d3173c5ea0..1f66ddcd37 100644 --- a/examples/gobot_extended_tutorial.ipynb +++ b/examples/gobot_extended_tutorial.ipynb @@ -1,1387 +1,2561 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "K7nBJnADTgUw" - }, - "source": [ - "### You can also run the notebook in [COLAB](https://colab.research.google.com/github/deepmipt/DeepPavlov/blob/master/examples/gobot_extended_tutorial.ipynb)." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "iPbAiv8KTgU4" - }, - "source": [ - "# Goal-oriented bot in DeepPavlov" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "us6IsTUETgU5" - }, - "source": [ - "This tutorial describes how to build a goal/task-oriented dialogue system with DeepPavlov framework. It covers the following steps:\n", - "\n", - "0. [Data preparation](#0.-Data-Preparation)\n", - "1. [Build Database of items](#1.-Build-Database-of-items)\n", - "2. [Build Slot Filler](#2.-Build-Slot-Filler)\n", - "3. [Build and Train a Bot](#3.-Build-and-Train-a-Bot)\n", - "4. [Interact with bot](#4.-Interact-with-Bot)\n", - "\n", - "An example of the final model served as a telegram bot:\n", - "\n", - "![gobot_example.png](https://github.com/deepmipt/DeepPavlov/blob/master/examples/img/gobot_example.png?raw=1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 806 - }, - "colab_type": "code", - "id": "Vtu-7ns2TgUz", - "outputId": "8cdc252f-1a35-4ed3-bf0a-f54046d8c6a8" - }, - "outputs": [], - "source": [ - "!pip install deeppavlov\n", - "!python -m deeppavlov install gobot_simple_dstc2" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "4R066YWhTgU6" - }, - "source": [ - "## 0. Data Preparation" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "gppbVe-HTgU7" - }, - "source": [ - "In this tutorial we build a chatbot for restaurant booking. To train our chatbot we use [Dialogue State Tracking Challenge 2 (DSTC-2)](http://camdial.org/~mh521/dstc/) dataset. DSTC-2 provides dialogues of a human talking to a booking system labelled with slots and dialogue actions. These labels will be used for training a dialogue policy network.\n", - "\n", - "First of all let's take a quick look at the data for the task. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 137 - }, - "colab_type": "code", - "id": "K9lF3QFJTgU8", - "outputId": "6ab259e2-3f88-4b25-9371-21d3f38fcef3" - }, - "outputs": [], - "source": [ - "from deeppavlov.dataset_readers.dstc2_reader import SimpleDSTC2DatasetReader\n", - "\n", - "data = SimpleDSTC2DatasetReader().read('my_data')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 50 - }, - "colab_type": "code", - "id": "uu56jAGJTgVD", - "outputId": "1536bb2c-6c1f-45a6-c0a7-a92106ed7dfe" - }, - "outputs": [], - "source": [ - "!ls my_data" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "HmNmE80MTgVG" - }, - "source": [ - "The training/validation/test data are stored in json files (`simple-dstc2-trn.json`, `simple-dstc2-val.json` and `simple-dstc2-tst.json`):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "colab_type": "code", - "id": "LIm9DQyzTgVH", - "outputId": "0a82c3f1-8afb-42d5-e3e3-0e9dd9178a20" - }, - "outputs": [], - "source": [ - "!head -n 101 my_data/simple-dstc2-trn.json" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "zO4CWg0XYNSw" - }, - "source": [ - "To iterate over batches of preprocessed DSTC-2 we need to import `DatasetIterator`." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "piBBcw9ZTgVK", - "scrolled": true - }, - "outputs": [], - "source": [ - "from deeppavlov.dataset_iterators.dialog_iterator import DialogDatasetIterator\n", - "\n", - "iterator = DialogDatasetIterator(data)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "jVU5JGnTTgVM" - }, - "source": [ - "You can now iterate over batches of preprocessed DSTC-2 dialogs:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "colab_type": "code", - "id": "1RSwEH3CTgVN", - "outputId": "b2a0ecdb-89d1-4784-eeb9-749f7b754ff6" - }, - "outputs": [], - "source": [ - "from pprint import pprint\n", - "\n", - "for dialog in iterator.gen_batches(batch_size=1, data_type='train'):\n", - " turns_x, turns_y = dialog\n", - " \n", - " print(\"User utterances:\\n----------------\\n\")\n", - " pprint(turns_x[0], indent=4)\n", - " print(\"\\nSystem responses:\\n-----------------\\n\")\n", - " pprint(turns_y[0], indent=4)\n", - " \n", - " break" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "AKTZWtm8ZtPi" - }, - "source": [ - "In real-life annotation of data is expensive. To make our tutorial closer to production use-cases we take only 50 dialogues for training." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "UlappYTbTgVT" - }, - "outputs": [], - "source": [ - "!cp my_data/simple-dstc2-trn.json my_data/simple-dstc2-trn.full.json" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 33 - }, - "colab_type": "code", - "id": "tTU9yM-CTgVX", - "outputId": "1568aaed-7f8e-4f77-a637-cda5a9556740" - }, - "outputs": [], - "source": [ - "import json\n", - "\n", - "NUM_TRAIN = 50\n", - "\n", - "with open('my_data/simple-dstc2-trn.full.json', 'rt') as fin:\n", - " data = json.load(fin)\n", - "with open('my_data/simple-dstc2-trn.json', 'wt') as fout:\n", - " json.dump(data[:NUM_TRAIN], fout, indent=2)\n", - "print(f\"Train set is reduced to {NUM_TRAIN} dialogues (out of {len(data)}).\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "l5mjRphbTgVb" - }, - "source": [ - "## 1. Build Database of items" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "n597CLhqjqcd" - }, - "source": [ - "### Building database of restaurants" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "nJFkgfjTTgVf" - }, - "source": [ - "To assist with restaurant booking the chatbot should have access to a `database` of restaurants. 
The `database` contains task-specific information such as type of food, price range, location, etc.\n", - "\n", - " >> database([{'pricerange': 'cheap', 'area': 'south'}])\n", - " \n", - " Out[1]: \n", - " [[{'name': 'the lucky star',\n", - " 'food': 'chinese',\n", - " 'pricerange': 'cheap',\n", - " 'area': 'south',\n", - " 'addr': 'cambridge leisure park clifton way cherry hinton',\n", - " 'phone': '01223 244277',\n", - " 'postcode': 'c.b 1, 7 d.y'},\n", - " {'name': 'nandos',\n", - " 'food': 'portuguese',\n", - " 'pricerange': 'cheap',\n", - " 'area': 'south',\n", - " 'addr': 'cambridge leisure park clifton way',\n", - " 'phone': '01223 327908',\n", - " 'postcode': 'c.b 1, 7 d.y'}]]\n", - " " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "rNpewHp-TgVd" - }, - "source": [ - " \n", - "![gobot_database.png](https://github.com/deepmipt/DeepPavlov/blob/master/examples/img/gobot_database.png?raw=1)\n", - " " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "-TU-NLnNa9tk" - }, - "source": [ - "The chatbot should be trained to make api calls. For this, training dataset contains a `\"db_result\"` dictionary key. It annotates turns where system performs an api call to the database of items. Rusulting value is stored in `\"db_result\"`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "colab_type": "code", - "id": "EVNRZmeiTgVh", - "outputId": "edba5e2b-235f-423f-8bfa-8d02506c4c7e" - }, - "outputs": [], - "source": [ - "!head -n 78 my_data/simple-dstc2-trn.json | tail +51" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "GT4YBHMnl0Xd" - }, - "source": [ - "Set `primary_keys` to a list of slot names that have unique values for different items (common SQL term). For the case of DSTC-2, the primary slot is a restaurant name." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "colab_type": "code", - "id": "JjKbIAyaTgVk", - "outputId": "07620401-80f5-490a-cff2-5d5f013a365b" - }, - "outputs": [], - "source": [ - "from deeppavlov.core.data.sqlite_database import Sqlite3Database\n", - "\n", - "database = Sqlite3Database(primary_keys=[\"name\"],\n", - " save_path=\"my_bot/db.sqlite\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "a2e1u-z0TgVo" - }, - "source": [ - "\n", - "Let's find all `\"db_result\"` api call results and add them to our database of restaurants:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "colab_type": "code", - "id": "RlKg5UtqTgVp", - "outputId": "a387df1f-4418-498b-a125-9e351a8e0cf9" - }, - "outputs": [], - "source": [ - "db_results = []\n", - "\n", - "for dialog in iterator.gen_batches(batch_size=1, data_type='all'):\n", - " turns_x, turns_y = dialog\n", - " db_results.extend(x['db_result'] for x in turns_x[0] if x.get('db_result'))\n", - "\n", - "print(f\"Adding {len(db_results)} items.\")\n", - "if db_results:\n", - " database.fit(db_results)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "XeJMI9qaTgVt" - }, - "source": [ - "### Interacting with database" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "2JLUF2b_TgVu" - }, - "source": [ - "We can now play with the database and make requests to it:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "colab_type": "code", - "id": "VRCU_MJnTgVv", - "outputId": "017803c4-36ab-49bc-ae40-7df87356f5c2" - }, - "outputs": [], - "source": [ - "database([{'pricerange': 'cheap', 'area': 'south'}])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "colab_type": "code", - "id": "U2wOAIlpTgV1", - "outputId": "e83e53b9-3431-4d1c-9bed-0e841d2b6fc4" - }, - "outputs": [], - "source": [ - "!ls my_bot" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "mBoO34NzTgV4" - }, - "source": [ - "## 2. 
Build Slot Filler" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "TGlJRwTCYkiQ" - }, - "source": [ - "`Slot Filler` is a component that finds slot values in user input:\n", - "\n", - " >> slot_filler(['I would like some chineese food'])\n", - " \n", - " Out[1]: [{'food': 'chinese'}]\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "5RqXeLdTTgV4" - }, - "source": [ - " \n", - "![gobot_slotfiller.png](https://github.com/deepmipt/DeepPavlov/blob/master/examples/img/gobot_slotfiller.png?raw=1)\n", - " " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "TcJGPFq4TgV5" - }, - "source": [ - "To implement a `Slot Filler` you need to provide\n", - " \n", - " - **slot types**,\n", - " - all possible **slot values**,\n", - " - also, it is good to have examples of mentions for every value of each slot.\n", - " \n", - "In this tutorial, a schema for `slot types` and `slot values` should be defined in `slot_vals.json` with the following format:\n", - "\n", - " {\n", - " 'food': {\n", - " 'chinese': ['chinese', 'chineese', 'chines'],\n", - " 'french': ['french', 'freench'],\n", - " 'dontcare': ['any food', 'any type of food']\n", - " }\n", - " }\n", - " \n", - "\n", - "Let's use a simple non-trainable slot filler that relies on Levenshtein distance." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "colab_type": "code", - "id": "zVi5XynnTgV6", - "outputId": "e9d68c8c-3bbb-4f80-98a5-92cbfe0eb5ac" - }, - "outputs": [], - "source": [ - "from deeppavlov.download import download_decompress\n", - "\n", - "download_decompress(url='http://files.deeppavlov.ai/deeppavlov_data/dstc_slot_vals.tar.gz',\n", - " download_path='my_bot/slotfill')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { "colab": { - "base_uri": "https://localhost:8080/" - }, - "colab_type": "code", - "id": "NR1S3PXCTgV9", - "outputId": "013e9dba-427c-4255-aad5-0627477157e8" - }, - "outputs": [], - "source": [ - "!ls my_bot/slotfill" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "-OZ9TqDKZ6Fv" - }, - "source": [ - "Print some `slot types` and `slot values`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "colab_type": "code", - "id": "KqgfYr4RTgWE", - "outputId": "a6830aa3-0bcc-4011-a4ab-5b5e48e6a20f" - }, - "outputs": [], - "source": [ - "!head -n 10 my_bot/slotfill/dstc_slot_vals.json" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "eIufDAvATgWN" - }, - "source": [ - "Check performance of our slot filler on DSTC-2 dataset." 
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "colab": {},
-    "colab_type": "code",
-    "id": "XUSj5R3uTgWP"
-   },
-   "outputs": [],
-   "source": [
-    "from deeppavlov import configs\n",
-    "from deeppavlov.core.common.file import read_json\n",
-    "\n",
-    "slotfill_config = read_json(configs.ner.slotfill_simple_dstc2_raw)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "colab_type": "text",
-    "id": "pFda6_LBTgWT"
-   },
-   "source": [
-    "We take [original DSTC2 slot-filling config](https://github.com/deepmipt/DeepPavlov/blob/master/deeppavlov/configs/ner/slotfill_dstc2_raw.json) from DeepPavlov and change variables determining data paths:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "colab": {},
-    "colab_type": "code",
-    "id": "yr8MbFLwTgWV"
-   },
-   "outputs": [],
-   "source": [
-    "slotfill_config['metadata']['variables']['DATA_PATH'] = 'my_data'\n",
-    "slotfill_config['metadata']['variables']['SLOT_VALS_PATH'] = 'my_bot/slotfill/dstc_slot_vals.json'"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "colab_type": "text",
-    "id": "ZxMTySrpaZVP"
-   },
-   "source": [
-    "Run evaluation."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "colab": {
-     "base_uri": "https://localhost:8080/"
-    },
-    "colab_type": "code",
-    "id": "CdrDW4bVTgWZ",
-    "outputId": "ac56ae74-b368-437e-c70f-01b418ba883f"
-   },
-   "outputs": [],
-   "source": [
-    "from deeppavlov import evaluate_model\n",
-    "\n",
-    "slotfill = evaluate_model(slotfill_config);"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "colab_type": "text",
-    "id": "azulujiLTgWb"
-   },
-   "source": [
-    "We've got slot accuracy of **93% on valid** set and **95% on test** set."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "colab_type": "text",
-    "id": "FkZvQ-yNig1u"
-   },
-   "source": [
-    "Building `Slot Filler` model from DeepPavlov config."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "colab": {},
-    "colab_type": "code",
-    "id": "uWeXTtVhTgWc"
-   },
-   "outputs": [],
-   "source": [
-    "from deeppavlov import build_model\n",
-    "\n",
-    "slotfill = build_model(slotfill_config)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "colab_type": "text",
-    "id": "ihi4lpXUi-_V"
-   },
-   "source": [
-    "Testing the model."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "colab": {
-     "base_uri": "https://localhost:8080/"
-    },
-    "colab_type": "code",
-    "id": "bMRSU_bnTgWf",
-    "outputId": "d224e4be-1537-428d-ff67-55076224946d"
-   },
-   "outputs": [],
-   "source": [
-    "slotfill(['i want cheap chinee food'])"
-   ]
-  },
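The slot dictionaries produced by the slot filler have the same shape the database expects as a query, so the two components built so far compose directly. A small sketch of that hand-off follows; it assumes the `slotfill` and `database` objects from the cells above, and the example utterance and the expected slot dictionary are illustrative.

# Sketch: feed slot filler output straight into the restaurant database.
# Assumes `slotfill` and `database` from the cells above.
slots = slotfill(['i want cheap chinee food'])   # e.g. [{'pricerange': 'cheap', 'food': 'chinese'}]
candidates = database(slots)                     # one list of matching records per utterance
print(f"{len(candidates[0])} matching restaurants found.")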
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "colab_type": "text",
-    "id": "U2PUxB5fTgWl"
-   },
-   "source": [
-    "Saving slotfill config file to disk (we will require it's path later)."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "colab": {},
-    "colab_type": "code",
-    "id": "5MyFaEM7TgWl"
-   },
-   "outputs": [],
-   "source": [
-    "import json\n",
-    "\n",
-    "json.dump(slotfill_config, open('my_bot/slotfill_config.json', 'wt'))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "colab": {
-     "base_uri": "https://localhost:8080/"
-    },
-    "colab_type": "code",
-    "id": "_ZlRvicuTgWo",
-    "outputId": "4f1c3d46-d3b1-4923-823e-e2df1027fc6f"
-   },
-   "outputs": [],
-   "source": [
-    "!ls my_bot"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "colab_type": "text",
-    "id": "E_InRKO6TgWt"
-   },
-   "source": [
-    "## 3. Build and Train a Bot"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "colab_type": "text",
-    "id": "ySe2m9-5m6iW"
-   },
-   "source": [
-    "### Dialogue policy and response templates"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "colab_type": "text",
-    "id": "qjwbkeDl3TBg"
-   },
-   "source": [
-    "A policy module of the bot decides what action should be taken in the current dialogue state. The policy in our bot is implemented as a recurrent neural network (recurrency over user utterances) followed by a dense layer with softmax function on top. The network classifies user input into one of predefined system actions. Examples of possible actions are to say hello, to request user's location or to make api call to a database. "
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "colab_type": "text",
-    "id": "wLE1iebG3WJc"
-   },
-   "source": [
-    "![gobot_policy.png](https://github.com/deepmipt/DeepPavlov/blob/master/examples/img/gobot_policy.png?raw=1)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "colab_type": "text",
-    "id": "ghF-W56m3iW-"
-   },
-   "source": [
-    "All actions available for the system should be listed in a `simple-dstc2-templates.txt` file. Also, every action should be associated with a template string of the corresponding system response."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "colab_type": "text",
-    "id": "TjDnGyiN3nIr"
-   },
-   "source": [
-    "![gobot_templates.png](https://github.com/deepmipt/DeepPavlov/blob/master/examples/img/gobot_templates.png?raw=1)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "colab_type": "text",
-    "id": "-xqGKtXBTgWu"
-   },
-   "source": [
-    "Templates for responses should be in the format `TAB